Skip to content

Commit 9501c3f

Browse files
V4.7-alpha (labring#985)
Co-authored-by: heheer <[email protected]>
1 parent 5bca15f commit 9501c3f

File tree

170 files changed

+5785
-2341
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

170 files changed

+5785
-2341
lines changed

docSite/content/docs/development/configuration.md

+21-6
Original file line numberDiff line numberDiff line change
@@ -20,14 +20,13 @@ llm模型全部合并
2020
```json
2121
{
2222
"systemEnv": {
23-
"openapiPrefix": "fastgpt",
2423
"vectorMaxProcess": 15,
2524
"qaMaxProcess": 15,
26-
"pgHNSWEfSearch": 100
25+
"pgHNSWEfSearch": 100 // 向量搜索参数。越大,搜索越精确,但是速度越慢。设置为100,有99%+精度。
2726
},
2827
"llmModels": [
2928
{
30-
"model": "gpt-3.5-turbo-1106", // 模型名
29+
"model": "gpt-3.5-turbo", // 模型名
3130
"name": "gpt-3.5-turbo", // 别名
3231
"maxContext": 16000, // 最大上下文
3332
"maxResponse": 4000, // 最大回复
@@ -37,12 +36,16 @@ llm模型全部合并
3736
"censor": false,
3837
"vision": false, // 是否支持图片输入
3938
"datasetProcess": false, // 是否设置为知识库处理模型(QA),务必保证至少有一个为true,否则知识库会报错
40-
"toolChoice": true, // 是否支持工具选择
41-
"functionCall": false, // 是否支持函数调用
39+
"usedInClassify": true, // 是否用于问题分类(务必保证至少有一个为true)
40+
"usedInExtractFields": true, // 是否用于内容提取(务必保证至少有一个为true)
41+
"useInToolCall": true, // 是否用于工具调用(务必保证至少有一个为true)
42+
"usedInQueryExtension": true, // 是否用于问题优化(务必保证至少有一个为true)
43+
"toolChoice": true, // 是否支持工具选择(务必保证至少有一个为true)
44+
"functionCall": false, // 是否支持函数调用(特殊功能,会优先使用 toolChoice,如果为false,则使用 functionCall,如果仍为 false,则使用提示词模式)
4245
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
4346
"customExtractPrompt": "", // 自定义内容提取提示词
4447
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
45-
"defaultConfig":{} // 对话默认配置(比如 GLM4 的 top_p
48+
"defaultConfig":{} // LLM默认配置,可以针对不同模型设置特殊值(比如 GLM4 的 top_p
4649
},
4750
{
4851
"model": "gpt-3.5-turbo-16k",
@@ -55,6 +58,10 @@ llm模型全部合并
5558
"censor": false,
5659
"vision": false,
5760
"datasetProcess": true,
61+
"usedInClassify": true,
62+
"usedInExtractFields": true,
63+
"useInToolCall": true,
64+
"usedInQueryExtension": true,
5865
"toolChoice": true,
5966
"functionCall": false,
6067
"customCQPrompt": "",
@@ -73,6 +80,10 @@ llm模型全部合并
7380
"censor": false,
7481
"vision": false,
7582
"datasetProcess": false,
83+
"usedInClassify": true,
84+
"usedInExtractFields": true,
85+
"useInToolCall": true,
86+
"usedInQueryExtension": true,
7687
"toolChoice": true,
7788
"functionCall": false,
7889
"customCQPrompt": "",
@@ -91,6 +102,10 @@ llm模型全部合并
91102
"censor": false,
92103
"vision": true,
93104
"datasetProcess": false,
105+
"usedInClassify": false,
106+
"usedInExtractFields": false,
107+
"useInToolCall": false,
108+
"usedInQueryExtension": false,
94109
"toolChoice": true,
95110
"functionCall": false,
96111
"customCQPrompt": "",

docSite/content/docs/development/one-api.md

+4
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,10 @@ CHAT_API_KEY=sk-xxxxxx
120120
"censor": false,
121121
"vision": false, // 是否支持图片输入
122122
"datasetProcess": false, // 是否设置为知识库处理模型
123+
"usedInClassify": true, // 是否用于问题分类
124+
"usedInExtractFields": true, // 是否用于字段提取
125+
"useInToolCall": true, // 是否用于工具调用
126+
"usedInQueryExtension": true, // 是否用于问题优化
123127
"toolChoice": true, // 是否支持工具选择
124128
"functionCall": false, // 是否支持函数调用
125129
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
---
2+
title: 'V4.7(进行中)'
3+
description: 'FastGPT V4.7更新说明'
4+
icon: 'upgrade'
5+
draft: false
6+
toc: true
7+
weight: 826
8+
---
9+
10+
## 修改配置文件
11+
12+
增加一些 Boolean 值,用于决定不同功能块可以使用哪些模型:[点击查看最新的配置文件](/docs/development/configuration/)
13+
14+
15+
## V4.7 更新说明
16+
17+
1. 新增 - 工具调用模块,可以让LLM模型根据用户意图,动态的选择其他模型或插件执行。
18+
2. 新增 - 分类和内容提取支持 functionCall 模式。部分模型支持 functionCall 但不支持 ToolCall,也可以使用了。需要把 LLM 模型配置文件里的 `functionCall` 设置为 `true`,`toolChoice` 设置为 `false`。如果 `toolChoice` 为 true,会走 tool 模式。
19+
3. 优化 - 高级编排性能

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
"prepare": "husky install",
77
"format-code": "prettier --config \"./.prettierrc.js\" --write \"./**/src/**/*.{ts,tsx,scss}\"",
88
"format-doc": "zhlint --dir ./docSite *.md --fix",
9-
"gen:theme-typings": "chakra-cli tokens projects/app/src/web/styles/theme.ts --out node_modules/.pnpm/node_modules/@chakra-ui/styled-system/dist/theming.types.d.ts",
9+
"gen:theme-typings": "chakra-cli tokens packages/web/styles/theme.ts --out node_modules/.pnpm/node_modules/@chakra-ui/styled-system/dist/theming.types.d.ts",
1010
"postinstall": "sh ./scripts/postinstall.sh",
1111
"initIcon": "node ./scripts/icon/init.js",
1212
"previewIcon": "node ./scripts/icon/index.js"

packages/global/common/string/tiktoken/index.ts

+82-19
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,15 @@
11
/* Only the token of gpt-3.5-turbo is used */
22
import type { ChatItemType } from '../../../core/chat/type';
33
import { Tiktoken } from 'js-tiktoken/lite';
4-
import { adaptChat2GptMessages } from '../../../core/chat/adapt';
5-
import { ChatCompletionRequestMessageRoleEnum } from '../../../core/ai/constant';
4+
import { chats2GPTMessages } from '../../../core/chat/adapt';
65
import encodingJson from './cl100k_base.json';
7-
import { ChatMessageItemType } from '../../../core/ai/type';
6+
import {
7+
ChatCompletionMessageParam,
8+
ChatCompletionContentPart,
9+
ChatCompletionCreateParams,
10+
ChatCompletionTool
11+
} from '../../../core/ai/type';
12+
import { ChatCompletionRequestMessageRoleEnum } from '../../../core/ai/constants';
813

914
/* init tikToken obj */
1015
export function getTikTokenEnc() {
@@ -29,18 +34,25 @@ export function getTikTokenEnc() {
2934

3035
/* count one prompt tokens */
3136
export function countPromptTokens(
32-
prompt = '',
33-
role: '' | `${ChatCompletionRequestMessageRoleEnum}` = '',
34-
tools?: any
37+
prompt: string | ChatCompletionContentPart[] | null | undefined = '',
38+
role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
3539
) {
3640
const enc = getTikTokenEnc();
37-
const toolText = tools
38-
? JSON.stringify(tools)
39-
.replace('"', '')
40-
.replace('\n', '')
41-
.replace(/( ){2,}/g, ' ')
42-
: '';
43-
const text = `${role}\n${prompt}\n${toolText}`.trim();
41+
const promptText = (() => {
42+
if (!prompt) return '';
43+
if (typeof prompt === 'string') return prompt;
44+
let promptText = '';
45+
prompt.forEach((item) => {
46+
if (item.type === 'text') {
47+
promptText += item.text;
48+
} else if (item.type === 'image_url') {
49+
promptText += item.image_url.url;
50+
}
51+
});
52+
return promptText;
53+
})();
54+
55+
const text = `${role}\n${promptText}`.trim();
4456

4557
try {
4658
const encodeText = enc.encode(text);
@@ -50,15 +62,66 @@ export function countPromptTokens(
5062
return text.length;
5163
}
5264
}
65+
export const countToolsTokens = (
66+
tools?: ChatCompletionTool[] | ChatCompletionCreateParams.Function[]
67+
) => {
68+
if (!tools || tools.length === 0) return 0;
69+
70+
const enc = getTikTokenEnc();
71+
72+
const toolText = tools
73+
? JSON.stringify(tools)
74+
.replace('"', '')
75+
.replace('\n', '')
76+
.replace(/( ){2,}/g, ' ')
77+
: '';
78+
79+
return enc.encode(toolText).length;
80+
};
5381

5482
/* count messages tokens */
55-
export const countMessagesTokens = (messages: ChatItemType[], tools?: any) => {
56-
const adaptMessages = adaptChat2GptMessages({ messages, reserveId: true });
83+
export const countMessagesTokens = (messages: ChatItemType[]) => {
84+
const adaptMessages = chats2GPTMessages({ messages, reserveId: true });
5785

58-
return countGptMessagesTokens(adaptMessages, tools);
86+
return countGptMessagesTokens(adaptMessages);
5987
};
60-
export const countGptMessagesTokens = (messages: ChatMessageItemType[], tools?: any) =>
61-
messages.reduce((sum, item) => sum + countPromptTokens(item.content, item.role, tools), 0);
88+
export const countGptMessagesTokens = (
89+
messages: ChatCompletionMessageParam[],
90+
tools?: ChatCompletionTool[],
91+
functionCall?: ChatCompletionCreateParams.Function[]
92+
) =>
93+
messages.reduce((sum, item) => {
94+
// Evaluates the text of toolcall and functioncall
95+
const functionCallPrompt = (() => {
96+
let prompt = '';
97+
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
98+
const toolCalls = item.tool_calls;
99+
prompt +=
100+
toolCalls
101+
?.map((item) => `${item?.function?.name} ${item?.function?.arguments}`.trim())
102+
?.join('') || '';
103+
104+
const functionCall = item.function_call;
105+
prompt += `${functionCall?.name} ${functionCall?.arguments}`.trim();
106+
}
107+
return prompt;
108+
})();
109+
110+
const contentPrompt = (() => {
111+
if (!item.content) return '';
112+
if (typeof item.content === 'string') return item.content;
113+
return item.content
114+
.map((item) => {
115+
if (item.type === 'text') return item.text;
116+
return '';
117+
})
118+
.join('');
119+
})();
120+
121+
return sum + countPromptTokens(`${contentPrompt}${functionCallPrompt}`, item.role);
122+
}, 0) +
123+
countToolsTokens(tools) +
124+
countToolsTokens(functionCall);
62125

63126
/* slice messages from top to bottom by maxTokens */
64127
export function sliceMessagesTB({
@@ -68,7 +131,7 @@ export function sliceMessagesTB({
68131
messages: ChatItemType[];
69132
maxTokens: number;
70133
}) {
71-
const adaptMessages = adaptChat2GptMessages({ messages, reserveId: true });
134+
const adaptMessages = chats2GPTMessages({ messages, reserveId: true });
72135
let reduceTokens = maxTokens;
73136
let result: ChatItemType[] = [];
74137

packages/global/core/ai/constant.ts

-7
This file was deleted.

packages/global/core/ai/constants.ts

+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
export enum ChatCompletionRequestMessageRoleEnum {
2+
'System' = 'system',
3+
'User' = 'user',
4+
'Assistant' = 'assistant',
5+
'Function' = 'function',
6+
'Tool' = 'tool'
7+
}
8+
9+
export enum ChatMessageTypeEnum {
10+
text = 'text',
11+
image_url = 'image_url'
12+
}
13+
14+
export enum LLMModelTypeEnum {
15+
all = 'all',
16+
classify = 'classify',
17+
extractFields = 'extractFields',
18+
toolCall = 'toolCall',
19+
queryExtension = 'queryExtension'
20+
}
21+
export const llmModelTypeFilterMap = {
22+
[LLMModelTypeEnum.all]: 'model',
23+
[LLMModelTypeEnum.classify]: 'usedInClassify',
24+
[LLMModelTypeEnum.extractFields]: 'usedInExtractFields',
25+
[LLMModelTypeEnum.toolCall]: 'usedInToolCall',
26+
[LLMModelTypeEnum.queryExtension]: 'usedInQueryExtension'
27+
};

packages/global/core/ai/model.d.ts

+7-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,13 @@ export type LLMModelItemType = {
1010

1111
censor?: boolean;
1212
vision?: boolean;
13-
datasetProcess?: boolean;
13+
14+
// diff function model
15+
datasetProcess?: boolean; // dataset
16+
usedInClassify?: boolean; // classify
17+
usedInExtractFields?: boolean; // extract fields
18+
usedInToolCall?: boolean; // tool call
19+
usedInQueryExtension?: boolean; // query extension
1420

1521
functionCall: boolean;
1622
toolChoice: boolean;

packages/global/core/ai/type.d.ts

+26-10
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,40 @@
1+
import openai from 'openai';
12
import type {
2-
ChatCompletion,
3-
ChatCompletionCreateParams,
3+
ChatCompletionMessageToolCall,
44
ChatCompletionChunk,
55
ChatCompletionMessageParam,
6-
ChatCompletionContentPart
6+
ChatCompletionToolMessageParam,
7+
ChatCompletionAssistantMessageParam
78
} from 'openai/resources';
9+
import { ChatMessageTypeEnum } from './constants';
810

9-
export type ChatCompletionContentPart = ChatCompletionContentPart;
10-
export type ChatCompletionCreateParams = ChatCompletionCreateParams;
11-
export type ChatMessageItemType = Omit<ChatCompletionMessageParam, 'name'> & {
12-
name?: any;
11+
export * from 'openai/resources';
12+
13+
export type ChatCompletionMessageParam = ChatCompletionMessageParam & {
1314
dataId?: string;
14-
content: any;
15-
} & any;
15+
};
16+
export type ChatCompletionToolMessageParam = ChatCompletionToolMessageParam & { name: string };
17+
export type ChatCompletionAssistantToolParam = {
18+
role: 'assistant';
19+
tool_calls: ChatCompletionMessageToolCall[];
20+
};
1621

17-
export type ChatCompletion = ChatCompletion;
22+
export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
23+
toolName?: string;
24+
toolAvatar?: string;
25+
};
26+
export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessageParam.FunctionCall & {
27+
id?: string;
28+
toolName?: string;
29+
toolAvatar?: string;
30+
};
1831
export type StreamChatType = Stream<ChatCompletionChunk>;
1932

2033
export type PromptTemplateItem = {
2134
title: string;
2235
desc: string;
2336
value: string;
2437
};
38+
39+
export default openai;
40+
export * from 'openai';

0 commit comments

Comments
 (0)