From b6642189723ede87bb6e13a31f2a429214612f67 Mon Sep 17 00:00:00 2001
From: dingyi
Date: Fri, 22 Sep 2023 22:07:59 +0800
Subject: [PATCH] style: format code

---
 .eslintrc.yml | 2 +-
 .prettierrc | 4 +-
 packages/bard-adapter/src/index.ts | 10 +-
 packages/bard-adapter/src/requester.ts | 69 +++--
 packages/chatglm-adapter/src/client.ts | 9 +-
 packages/chatglm-adapter/src/index.ts | 17 +-
 packages/chatglm-adapter/src/requester.ts | 36 ++-
 packages/chatglm-adapter/src/types.ts | 12 +-
 packages/chatglm-adapter/src/utils.ts | 12 +-
 packages/claude2-adapter/src/client.ts | 6 +-
 packages/claude2-adapter/src/index.ts | 16 +-
 packages/claude2-adapter/src/requester.ts | 31 ++-
 packages/claude2-adapter/src/utils.ts | 3 +-
 packages/core/src/chains/chain.ts | 72 +++--
 packages/core/src/chains/rooms.ts | 123 ++++++---
 packages/core/src/command.ts | 6 +-
 packages/core/src/commands/chat.ts | 5 +-
 packages/core/src/commands/providers.ts | 20 +-
 packages/core/src/commands/room.ts | 80 +++---
 packages/core/src/config.ts | 48 +++-
 packages/core/src/index.ts | 4 +-
 packages/core/src/llm-core/chain/base.ts | 6 +-
 .../src/llm-core/chain/browsing_chat_chain.ts | 75 ++++--
 .../core/src/llm-core/chain/chat_chain.ts | 65 +++--
 .../chain/function_calling_browsing_chain.ts | 69 +++--
 .../core/src/llm-core/chain/out_parsers.ts | 5 +-
 .../src/llm-core/chain/plugin_chat_chain.ts | 19 +-
 packages/core/src/llm-core/chain/prompt.ts | 247 ++++++++++++------
 .../core/src/llm-core/chain/wrapper_chain.ts | 16 +-
 packages/core/src/llm-core/chat/app.ts | 117 ++++++---
 packages/core/src/llm-core/chat/default.ts | 62 +++--
 .../memory/message/database_memory.ts | 28 +-
 packages/core/src/llm-core/model/base.ts | 6 +-
 packages/core/src/llm-core/model/in_memory.ts | 3 +-
 packages/core/src/llm-core/platform/api.ts | 10 +-
 packages/core/src/llm-core/platform/config.ts | 8 +-
 packages/core/src/llm-core/platform/model.ts | 57 +++-
 .../core/src/llm-core/platform/service.ts | 97 +++++--
 packages/core/src/llm-core/platform/types.ts | 8 +-
 .../llm-core/prompt/preset_prompt_parse.ts | 16 +-
 .../core/src/llm-core/utils/count_tokens.ts | 9 +-
 packages/core/src/llm-core/utils/tiktoken.ts | 9 +-
 packages/core/src/middleware.ts | 6 +-
 packages/core/src/middlewares/add_preset.ts | 12 +-
 .../core/src/middlewares/add_room_to_group.ts | 18 +-
 packages/core/src/middlewares/allow_reply.ts | 12 +-
 packages/core/src/middlewares/black_list.ts | 4 +-
 packages/core/src/middlewares/censor.ts | 5 +-
 .../src/middlewares/chat_time_limit_check.ts | 5 +-
 packages/core/src/middlewares/check_room.ts | 7 +-
 packages/core/src/middlewares/clear_room.ts | 27 +-
 .../core/src/middlewares/cooldown_time.ts | 4 +-
 packages/core/src/middlewares/create_room.ts | 41 ++-
 .../core/src/middlewares/delete_preset.ts | 11 +-
 packages/core/src/middlewares/delete_room.ts | 20 +-
 packages/core/src/middlewares/invite_room.ts | 33 ++-
 packages/core/src/middlewares/join_room.ts | 18 +-
 packages/core/src/middlewares/kick_member.ts | 32 ++-
 packages/core/src/middlewares/leave_room.ts | 9 +-
 .../src/middlewares/list_all_embeddings.ts | 3 +-
 .../core/src/middlewares/list_all_model.ts | 3 +-
 .../core/src/middlewares/list_all_preset.ts | 3 +-
 .../src/middlewares/list_all_vectorstore.ts | 3 +-
 packages/core/src/middlewares/list_room.ts | 10 +-
 packages/core/src/middlewares/mute_user.ts | 18 +-
 .../core/src/middlewares/read_chat_message.ts | 67 +++--
 .../core/src/middlewares/request_model.ts | 49 +++-
 .../core/src/middlewares/resolve_model.ts | 9 +-
 packages/core/src/middlewares/resolve_room.ts | 29 +-
 packages/core/src/middlewares/room_info.ts | 6 +-
 .../core/src/middlewares/room_permission.ts | 36 ++-
 .../src/middlewares/set_default_embeddings.ts | 11 +-
 .../middlewares/set_default_vectorstore.ts | 8 +-
 packages/core/src/middlewares/set_room.ts | 39 ++-
 packages/core/src/middlewares/switch_room.ts | 3 +-
 .../src/middlewares/thinking_message_send.ts | 26 +-
 .../core/src/middlewares/transfer_room.ts | 20 +-
 packages/core/src/middlewares/wipe.ts | 4 +-
 packages/core/src/preset.ts | 19 +-
 packages/core/src/render.ts | 13 +-
 packages/core/src/renders/image.ts | 26 +-
 packages/core/src/renders/mixed-image.ts | 50 ++--
 packages/core/src/renders/mixed-voice.ts | 15 +-
 packages/core/src/renders/raw.ts | 5 +-
 packages/core/src/renders/text.ts | 5 +-
 packages/core/src/renders/voice.ts | 15 +-
 packages/core/src/services/chat.ts | 93 +++++--
 packages/core/src/utils/error.ts | 4 +-
 packages/core/src/utils/pagination.ts | 8 +-
 packages/core/src/utils/queue.ts | 4 +-
 packages/core/src/utils/request.ts | 16 +-
 packages/core/src/utils/sse.ts | 28 +-
 .../src/embeddings/huggingface.ts | 26 +-
 packages/embeddings-service/src/index.ts | 4 +-
 packages/gptfree-adapter/src/client.ts | 5 +-
 packages/gptfree-adapter/src/index.ts | 6 +-
 packages/gptfree-adapter/src/requester.ts | 36 ++-
 packages/gptfree-adapter/src/types.ts | 12 +-
 packages/gptfree-adapter/src/utils.ts | 9 +-
 packages/lmsys-adapter/src/client.ts | 5 +-
 packages/lmsys-adapter/src/index.ts | 14 +-
 packages/lmsys-adapter/src/requester.ts | 80 ++++--
 packages/lmsys-adapter/src/types.ts | 8 +-
 packages/lmsys-adapter/src/utils.ts | 3 +-
 packages/newbing-adapter/src/constants.ts | 31 ++-
 packages/newbing-adapter/src/index.ts | 24 +-
 packages/newbing-adapter/src/requester.ts | 111 ++++++--
 packages/newbing-adapter/src/types.ts | 3 +-
 packages/openai-adapter/src/client.ts | 23 +-
 packages/openai-adapter/src/index.ts | 13 +-
 packages/openai-adapter/src/requester.ts | 67 +++--
 packages/openai-adapter/src/types.ts | 12 +-
 packages/openai-adapter/src/utils.ts | 12 +-
 packages/plugin-common/src/index.ts | 12 +-
 packages/plugin-common/src/plugin.ts | 12 +-
 packages/plugin-common/src/plugins/fs.ts | 10 +-
 packages/plugin-common/src/plugins/request.ts | 11 +-
 packages/poe-adapter/src/index.ts | 20 +-
 packages/poe-adapter/src/requester.ts | 89 +++++--
 packages/poe-adapter/src/utils.ts | 26 +-
 packages/rwkv-adapter/src/client.ts | 9 +-
 packages/rwkv-adapter/src/index.ts | 16 +-
 packages/rwkv-adapter/src/requester.ts | 33 ++-
 packages/rwkv-adapter/src/types.ts | 12 +-
 packages/rwkv-adapter/src/utils.ts | 12 +-
 packages/search-service/src/index.ts | 36 ++-
 packages/search-service/src/tools/baidu.ts | 24 +-
 packages/search-service/src/tools/bing-api.ts | 3 +-
 packages/search-service/src/tools/bing-web.ts | 4 +-
 .../src/tools/duckduckgo-lite.ts | 20 +-
 packages/search-service/src/tools/serper.ts | 4 +-
 packages/search-service/src/webbrowser.ts | 89 ++++---
 packages/spark-adapter/src/client.ts | 5 +-
 packages/spark-adapter/src/index.ts | 20 +-
 packages/spark-adapter/src/requester.ts | 41 ++-
 packages/spark-adapter/src/utils.ts | 8 +-
 packages/test-adapter/src/index.ts | 11 +-
 packages/vector-store-service/src/index.ts | 16 +-
 .../vector-store-service/src/vectorstore.ts | 12 +-
 .../src/vectorstore/faiss.ts | 17 +-
 .../src/vectorstore/lancedb.ts | 11 +-
 packages/zhipu-adapter/src/client.ts | 12 +-
 packages/zhipu-adapter/src/index.ts | 9 +-
 packages/zhipu-adapter/src/requester.ts | 19 +-
 packages/zhipu-adapter/src/types.ts | 6 +-
 packages/zhipu-adapter/src/utils.ts | 8 +-
146 files changed, 2644 insertions(+), 926 deletions(-) diff --git a/.eslintrc.yml b/.eslintrc.yml index f21890fe..efd117c6 100644 --- a/.eslintrc.yml +++ b/.eslintrc.yml @@ -27,7 +27,7 @@ plugins: rules: prettier/prettier: - error - - printWidth: 100 + - printWidth: 80 - tabWidth: 4 array-callback-return: off diff --git a/.prettierrc b/.prettierrc index 23e92547..15f598c2 100644 --- a/.prettierrc +++ b/.prettierrc @@ -1,6 +1,6 @@ { "trailingComma": "none", - "printWidth": 100, + "printWidth": 80, "tabWidth": 4, "semi": false, "singleQuote": true, @@ -8,4 +8,4 @@ "htmlWhitespaceSensitivity": "ignore", "arrowParens": "always", "overrides": [] -} +} \ No newline at end of file diff --git a/packages/bard-adapter/src/index.ts b/packages/bard-adapter/src/index.ts index d59f7b90..acafe460 100644 --- a/packages/bard-adapter/src/index.ts +++ b/packages/bard-adapter/src/index.ts @@ -24,7 +24,9 @@ export function apply(ctx: Context, config: Config) { }) }) - await plugin.registerClient((_, clientConfig) => new BardClient(ctx, config, clientConfig)) + await plugin.registerClient( + (_, clientConfig) => new BardClient(ctx, config, clientConfig) + ) await plugin.initClients() }) @@ -40,9 +42,9 @@ export const Config: Schema = Schema.intersect([ ChatHubPlugin.Config, Schema.object({ - cookies: Schema.array(Schema.string().role('secret').required()).description( - '在 bard.google.com 登录后获取的 Cookie' - ) + cookies: Schema.array( + Schema.string().role('secret').required() + ).description('在 bard.google.com 登录后获取的 Cookie') }).description('请求设置') ]) diff --git a/packages/bard-adapter/src/requester.ts b/packages/bard-adapter/src/requester.ts index e30fe424..d079ce0d 100644 --- a/packages/bard-adapter/src/requester.ts +++ b/packages/bard-adapter/src/requester.ts @@ -3,10 +3,18 @@ import { ModelRequestParams } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/api' import { ClientConfig } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/config' -import { AIMessageChunk, BaseMessage, ChatGeneration, ChatGenerationChunk } from 'langchain/schema' +import { + AIMessageChunk, + BaseMessage, + ChatGeneration, + ChatGenerationChunk +} from 'langchain/schema' import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger' import { chathubFetch } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { Random } from 'koishi' import { BardRequestInfo, BardResponse, BardWebRequestInfo } from './types' import { SESSION_HEADERS } from './utils' @@ -26,7 +34,9 @@ export class BardRequester extends ModelRequester { super() } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { // the bard not support event stream, so just call completion const result = await this.completion(params) @@ -97,7 +107,8 @@ export class BardRequester extends ModelRequester { const bardResponse = await this._parseResponse(await response.text()) - this._bardRequestInfo.requestId = this._bardRequestInfo.requestId + 100000 + this._bardRequestInfo.requestId = + this._bardRequestInfo.requestId + 100000 this._bardRequestInfo.conversation = { c: bardResponse.conversationId, @@ -163,30 +174,39 @@ export class BardRequester extends ModelRequester { } // data:image/ - const imageName = 'bard-ai.' 
+ image.match(/data:image\/(\w+);base64,(.+)/)?.[1] + const imageName = + 'bard-ai.' + image.match(/data:image\/(\w+);base64,(.+)/)?.[1] logger.debug(`Uploading image ${imageName}`) - const imageData = Buffer.from(image.replace(/^data:image\/\w+;base64,/, ''), 'base64') + const imageData = Buffer.from( + image.replace(/^data:image\/\w+;base64,/, ''), + 'base64' + ) const size = imageData.byteLength.toString() const formBody = [ - `${encodeURIComponent('File name')}=${encodeURIComponent(imageName)}` + `${encodeURIComponent('File name')}=${encodeURIComponent( + imageName + )}` ].join('') try { - let response = await chathubFetch('https://content-push.googleapis.com/upload/', { - method: 'POST', - headers: { - 'X-Goog-Upload-Command': 'start', - 'X-Goog-Upload-Protocol': 'resumable', - 'X-Goog-Upload-Header-Content-Length': size, - 'X-Tenant-Id': 'bard-storage', - 'Push-Id': 'feeds/mcudyrk2a4khkz' - }, - body: formBody, - credentials: 'include' - }) + let response = await chathubFetch( + 'https://content-push.googleapis.com/upload/', + { + method: 'POST', + headers: { + 'X-Goog-Upload-Command': 'start', + 'X-Goog-Upload-Protocol': 'resumable', + 'X-Goog-Upload-Header-Content-Length': size, + 'X-Tenant-Id': 'bard-storage', + 'Push-Id': 'feeds/mcudyrk2a4khkz' + }, + body: formBody, + credentials: 'include' + } + ) const uploadUrl = response.headers.get('X-Goog-Upload-URL') @@ -223,7 +243,9 @@ export class BardRequester extends ModelRequester { this._bardWebRequestInfo = await this._getInitParams() - logger.info(`bard init params: ${JSON.stringify(this._bardWebRequestInfo)}`) + logger.info( + `bard init params: ${JSON.stringify(this._bardWebRequestInfo)}` + ) if (this._bardRequestInfo.conversation == null) { this._bardRequestInfo.conversation = { @@ -340,7 +362,9 @@ export class BardRequester extends ModelRequester { } private _buildHeader(isUploadImage: boolean = false) { - const base: typeof SESSION_HEADERS & { cookie?: string } = { ...SESSION_HEADERS } + const base: typeof SESSION_HEADERS & { cookie?: string } = { + ...SESSION_HEADERS + } base.cookie = this._config.apiKey @@ -354,7 +378,8 @@ export class BardRequester extends ModelRequester { headers: { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36', - 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', + 'Content-Type': + 'application/x-www-form-urlencoded;charset=UTF-8', cookie: this._config.apiKey }, credentials: 'same-origin' diff --git a/packages/chatglm-adapter/src/client.ts b/packages/chatglm-adapter/src/client.ts index f6b0c440..f691cffe 100644 --- a/packages/chatglm-adapter/src/client.ts +++ b/packages/chatglm-adapter/src/client.ts @@ -11,7 +11,10 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/types' import { Context } from 'koishi' import { Config } from '.' 
-import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { OpenLLMRequester } from './requester' export class OpenLLMClient extends PlatformModelAndEmbeddingsClient { @@ -69,7 +72,9 @@ export class OpenLLMClient extends PlatformModelAndEmbeddingsClient = Schema.intersect([ Schema.object({ apiKeys: Schema.array( Schema.tuple([ - Schema.string().role('secret').default('').description('OpenAI 的 API Key'), + Schema.string() + .role('secret') + .default('') + .description('OpenAI 的 API Key'), Schema.string() .description('请求 API for Open LLMs 自搭建后端的地址') .default('http://127.0.0.1:8000') @@ -52,7 +55,9 @@ export const Config: Schema = Schema.intersect([ ) .description('API for Open LLMs 服务的 API Key 和请求地址列表') .default([['', 'http://127.0.0.1:8000']]), - embeddings: Schema.string().description('Embeddings 模型的名称').default('moka-ai/m3e-base') + embeddings: Schema.string() + .description('Embeddings 模型的名称') + .default('moka-ai/m3e-base') }).description('请求设置'), Schema.object({ @@ -71,13 +76,17 @@ export const Config: Schema = Schema.intersect([ .step(0.1) .default(0.8), presencePenalty: Schema.number() - .description('重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)') + .description( + '重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)' + ) .min(-2) .max(2) .step(0.1) .default(0.2), frequencyPenalty: Schema.number() - .description('频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)') + .description( + '频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)' + ) .min(-2) .max(2) .step(0.1) diff --git a/packages/chatglm-adapter/src/requester.ts b/packages/chatglm-adapter/src/requester.ts index 373b9289..5915cb2d 100644 --- a/packages/chatglm-adapter/src/requester.ts +++ b/packages/chatglm-adapter/src/requester.ts @@ -13,7 +13,10 @@ import { ChatCompletionResponseMessageRoleEnum, CreateEmbeddingResponse } from './types' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { sseIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/sse' import { convertDeltaToMessageChunk, @@ -21,12 +24,17 @@ import { langchainMessageToOpenAIMessage } from './utils' -export class OpenLLMRequester extends ModelRequester implements EmbeddingsRequester { +export class OpenLLMRequester + extends ModelRequester + implements EmbeddingsRequester +{ constructor(private _config: ClientConfig) { super() } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { try { const response = await this._post( 'chat/completions', @@ -70,7 +78,10 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques if ((data as any).error) { throw new ChatHubError( ChatHubErrorCode.API_REQUEST_FAILED, - new Error('error when calling completion, Result: ' + chunk) + new Error( + 'error when calling completion, Result: ' + + chunk + ) ) } @@ -80,7 +91,10 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques } const { delta } = choice - const messageChunk = convertDeltaToMessageChunk(delta, defaultRole) + const messageChunk = convertDeltaToMessageChunk( + delta, + defaultRole + ) messageChunk.content = content + messageChunk.content @@ -107,7 +121,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques } } 
- async embeddings(params: EmbeddingsRequestParams): Promise { + async embeddings( + params: EmbeddingsRequestParams + ): Promise { // eslint-disable-next-line @typescript-eslint/no-explicit-any let data: CreateEmbeddingResponse | any @@ -122,7 +138,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques data = JSON.parse(data) as CreateEmbeddingResponse if (data.data && data.data.length > 0) { - return (data as CreateEmbeddingResponse).data.map((it) => it.embedding) + return (data as CreateEmbeddingResponse).data.map( + (it) => it.embedding + ) } throw new Error() @@ -149,7 +167,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques // eslint-disable-next-line @typescript-eslint/no-explicit-any return ([]>data.data).map((model) => model.id) } catch (e) { - const error = new Error('error when listing models, Result: ' + JSON.stringify(data)) + const error = new Error( + 'error when listing models, Result: ' + JSON.stringify(data) + ) error.stack = e.stack error.cause = e.cause diff --git a/packages/chatglm-adapter/src/types.ts b/packages/chatglm-adapter/src/types.ts index 28bc3514..a838a785 100644 --- a/packages/chatglm-adapter/src/types.ts +++ b/packages/chatglm-adapter/src/types.ts @@ -9,7 +9,11 @@ export interface ChatCompletionResponse { object: string created: number model: string - usage: { prompt_tokens: number; completion_tokens: number; total_tokens: number } + usage: { + prompt_tokens: number + completion_tokens: number + total_tokens: number + } } export interface ChatCompletionResponseMessage { @@ -113,4 +117,8 @@ export interface CreateEmbeddingResponseUsage { total_tokens: number } -export type ChatCompletionResponseMessageRoleEnum = 'system' | 'assistant' | 'user' | 'function' +export type ChatCompletionResponseMessageRoleEnum = + | 'system' + | 'assistant' + | 'user' + | 'function' diff --git a/packages/chatglm-adapter/src/utils.ts b/packages/chatglm-adapter/src/utils.ts index bab57954..0d9844c5 100644 --- a/packages/chatglm-adapter/src/utils.ts +++ b/packages/chatglm-adapter/src/utils.ts @@ -29,7 +29,9 @@ export function langchainMessageToOpenAIMessage( }) } -export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum { +export function messageTypeToOpenAIRole( + type: MessageType +): ChatCompletionResponseMessageRoleEnum { switch (type) { case 'system': return 'system' @@ -44,11 +46,15 @@ export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionRespon } } -export function formatToolsToOpenAIFunctions(tools: StructuredTool[]): ChatCompletionFunctions[] { +export function formatToolsToOpenAIFunctions( + tools: StructuredTool[] +): ChatCompletionFunctions[] { return tools.map(formatToolToOpenAIFunction) } -export function formatToolToOpenAIFunction(tool: StructuredTool): ChatCompletionFunctions { +export function formatToolToOpenAIFunction( + tool: StructuredTool +): ChatCompletionFunctions { return { name: tool.name, description: tool.description, diff --git a/packages/claude2-adapter/src/client.ts b/packages/claude2-adapter/src/client.ts index 65823870..8db8ca48 100644 --- a/packages/claude2-adapter/src/client.ts +++ b/packages/claude2-adapter/src/client.ts @@ -58,7 +58,11 @@ export class Claude2Client extends PlatformModelClient { protected _createModel(model: string): ChatHubChatModel { return new ChatHubChatModel({ - requester: new Claude2Requester(this.ctx, this._clientConfig, this._organizationId), + requester: new Claude2Requester( + this.ctx, + 
this._clientConfig, + this._organizationId + ), model, modelMaxContextSize: 10000, timeout: this._config.timeout, diff --git a/packages/claude2-adapter/src/index.ts b/packages/claude2-adapter/src/index.ts index 606e37dc..213fd51b 100644 --- a/packages/claude2-adapter/src/index.ts +++ b/packages/claude2-adapter/src/index.ts @@ -6,7 +6,11 @@ import { Claude2ClientConfig } from './types' export function apply(ctx: Context, config: Config) { config.chatConcurrentMaxSize = 1 - const plugin = new ChatHubPlugin(ctx, config, 'claude2') + const plugin = new ChatHubPlugin( + ctx, + config, + 'claude2' + ) ctx.on('ready', async () => { await plugin.registerToService() @@ -45,13 +49,15 @@ export const Config: Schema = Schema.intersect([ ChatHubPlugin.Config, Schema.object({ - cookies: Schema.array(Schema.string().role('secret').required()).description( - 'Claude 账号的 Cookie' - ) + cookies: Schema.array( + Schema.string().role('secret').required() + ).description('Claude 账号的 Cookie') }).description('请求设置'), Schema.object({ - formatMessages: Schema.boolean().description('是否使用历史聊天消息').default(false) + formatMessages: Schema.boolean() + .description('是否使用历史聊天消息') + .default(false) }).description('对话设置') ]) diff --git a/packages/claude2-adapter/src/requester.ts b/packages/claude2-adapter/src/requester.ts index 07917090..c1f604af 100644 --- a/packages/claude2-adapter/src/requester.ts +++ b/packages/claude2-adapter/src/requester.ts @@ -4,8 +4,14 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/api' import { AIMessageChunk, ChatGenerationChunk } from 'langchain/schema' import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger' -import { chathubFetch, randomUA } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + chathubFetch, + randomUA +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { sseIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/sse' import { Context, sleep } from 'koishi' import { v4 as uuid } from 'uuid' @@ -45,7 +51,9 @@ export class Claude2Requester extends ModelRequester { // this._headers['User-Agent'] = this._ua } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { if (this._organizationId == null || this._conversationId == null) { await this.init(params.id) } @@ -216,7 +224,9 @@ export class Claude2Requester extends ModelRequester { } if (this._conversationId == null) { - const conversationId = await this.ctx.chathub.cache.get(`claude2-${id}`) + const conversationId = await this.ctx.chathub.cache.get( + `claude2-${id}` + ) this._conversationId = conversationId } @@ -228,7 +238,10 @@ export class Claude2Requester extends ModelRequester { await this.ctx.chathub.cache.set(`claude2-${id}`, this._conversationId) } - private async _deleteConversation(conversationId: string, id?: string): Promise { + private async _deleteConversation( + conversationId: string, + id?: string + ): Promise { const headers = { ...this._headers } @@ -252,7 +265,9 @@ export class Claude2Requester extends ModelRequester { }) try { - await this.ctx.chathub.cache.delete(`claude2-${id ?? conversationId}`) + await this.ctx.chathub.cache.delete( + `claude2-${id ?? 
conversationId}` + ) logger.debug(`Claude2 deleteConversation: ${response.status}`) } catch (e) { @@ -265,7 +280,9 @@ export class Claude2Requester extends ModelRequester { await this.init() } - const url = this._concatUrl(`api/organizations/${this._organizationId}/chat_conversations`) + const url = this._concatUrl( + `api/organizations/${this._organizationId}/chat_conversations` + ) const result = await chathubFetch(url, { headers: this._headers, diff --git a/packages/claude2-adapter/src/utils.ts b/packages/claude2-adapter/src/utils.ts index 70469be1..636ad10b 100644 --- a/packages/claude2-adapter/src/utils.ts +++ b/packages/claude2-adapter/src/utils.ts @@ -39,6 +39,7 @@ export async function formatMessages(messages: BaseMessage[]) { } function formatMessage(message: BaseMessage) { - const roleType = message._getType() === 'human' ? 'user' : message._getType() + const roleType = + message._getType() === 'human' ? 'user' : message._getType() return `${roleType}: ${message.content}` } diff --git a/packages/core/src/chains/chain.ts b/packages/core/src/chains/chain.ts index 9a38f399..5baf48f3 100644 --- a/packages/core/src/chains/chain.ts +++ b/packages/core/src/chains/chain.ts @@ -23,7 +23,9 @@ export class ChatChain { const defaultChatChainSender = new DefaultChatChainSender(config) - this._senders.push((session, messages) => defaultChatChainSender.send(session, messages)) + this._senders.push((session, messages) => + defaultChatChainSender.send(session, messages) + ) } async receiveMessage(session: Session) { @@ -41,7 +43,9 @@ export class ChatChain { clearTimeout(context.options.thinkingTimeoutObject.timeout!) if (context.options.thinkingTimeoutObject.autoRecallTimeout) { - clearTimeout(context.options.thinkingTimeoutObject.autoRecallTimeout!) + clearTimeout( + context.options.thinkingTimeoutObject.autoRecallTimeout! + ) } if (context.options.thinkingTimeoutObject.recallFunc) { @@ -116,7 +120,10 @@ export class ChatChain { this._senders.push(sender) } - private async _runMiddleware(session: Session, context: ChainMiddlewareContext) { + private async _runMiddleware( + session: Session, + context: ChainMiddlewareContext + ) { // 手动 polyfill,呃呃呃呃呃 if (session.isDirect == null) { session.isDirect = session.subtype === 'private' @@ -145,7 +152,9 @@ export class ChatChain { if (error instanceof ChatHubError) { await this.sendMessage(session, error.message) } else { - logger.error(`chat-chain: ${middleware.name} error ${error}`) + logger.error( + `chat-chain: ${middleware.name} error ${error}` + ) logger.error(error) @@ -169,13 +178,18 @@ export class ChatChain { middleware.name !== 'allow_reply' && executedTime > 10 ) { - logger.debug(`chat-chain: ${middleware.name} executed in ${executedTime}ms`) + logger.debug( + `chat-chain: ${middleware.name} executed in ${executedTime}ms` + ) isOutputLog = true } if (result === ChainMiddlewareRunStatus.STOP) { // 中间件说这里不要继续执行了 - if (context.message != null && context.message !== originMessage) { + if ( + context.message != null && + context.message !== originMessage + ) { // 消息被修改了 await this.sendMessage(session, context.message) } @@ -202,10 +216,14 @@ export class ChatChain { return true } - private async sendMessage(session: Session, message: h[] | h[][] | h | string) { + private async sendMessage( + session: Session, + message: h[] | h[][] | h | string + ) { // check if message is a two-dimensional array - const messages: (h[] | h | string)[] = message instanceof Array ? 
message : [message] + const messages: (h[] | h | string)[] = + message instanceof Array ? message : [message] for (const sender of this._senders) { await sender(session, messages) @@ -272,7 +290,10 @@ class ChatChainDependencyGraph { } // Set a dependency between two tasks - before(taskA: ChainMiddleware | string, taskB: ChainMiddleware | string): void { + before( + taskA: ChainMiddleware | string, + taskB: ChainMiddleware | string + ): void { if (taskA instanceof ChainMiddleware) { taskA = taskA.name } @@ -290,7 +311,10 @@ class ChatChainDependencyGraph { } // Set a reverse dependency between two tasks - after(taskA: ChainMiddleware | string, taskB: ChainMiddleware | string): void { + after( + taskA: ChainMiddleware | string, + taskB: ChainMiddleware | string + ): void { if (taskA instanceof ChainMiddleware) { taskA = taskA.name } @@ -357,7 +381,9 @@ class ChatChainDependencyGraph { // Dequeue all the tasks in the queue and add them to the level while (queue.length > 0) { const task = queue.shift() - result.push(this._tasks.find((t) => t.name === task)!.middleware!) + result.push( + this._tasks.find((t) => t.name === task)!.middleware! + ) // For each dependency of the dequeued task for (const dep of this._dependencies.get(task) ?? []) { // Decrement its indegree by one @@ -399,7 +425,8 @@ export class ChainMiddleware { // 如果当前添加的依赖是生命周期,那么我们需要找到这个生命周期的下一个生命周期 if (lifecycleName.includes(name)) { - const lastLifecycleName = lifecycleName[lifecycleName.indexOf(name) - 1] + const lastLifecycleName = + lifecycleName[lifecycleName.indexOf(name) - 1] if (lastLifecycleName) { this.graph.after(this.name, lastLifecycleName) @@ -411,8 +438,8 @@ export class ChainMiddleware { // 如果不是的话,我们就需要寻找依赖锚定的生命周期 this.graph.once('build_node', () => { - const befores = [...this.graph.getDependencies(name)].filter((name) => - name.startsWith('lifecycle-') + const befores = [...this.graph.getDependencies(name)].filter( + (name) => name.startsWith('lifecycle-') ) const afters = this.graph .getDependents(name) @@ -443,7 +470,8 @@ export class ChainMiddleware { // 如果当前添加的依赖是生命周期,那么我们需要找到这个生命周期的下一个生命周期 if (lifecycleName.includes(name)) { - const nextLifecycleName = lifecycleName[lifecycleName.indexOf(name) + 1] + const nextLifecycleName = + lifecycleName[lifecycleName.indexOf(name) + 1] if (nextLifecycleName) { this.graph.before(this.name, nextLifecycleName) @@ -454,8 +482,8 @@ export class ChainMiddleware { // 如果不是的话,我们就需要寻找依赖锚定的生命周期 this.graph.once('build_node', () => { - const befores = [...this.graph.getDependencies(name)].filter((name) => - name.startsWith('lifecycle-') + const befores = [...this.graph.getDependencies(name)].filter( + (name) => name.startsWith('lifecycle-') ) const afters = this.graph .getDependents(name) @@ -526,7 +554,10 @@ class DefaultChatChainSender { for (const element of messageFragment) { // 语音,消息 不能引用 - if (element.type === 'audio' || element.type === 'message') { + if ( + element.type === 'audio' || + element.type === 'message' + ) { messageFragment.shift() break } @@ -570,7 +601,10 @@ export type ChainMiddlewareFunction = ( context: ChainMiddlewareContext ) => Promise -export type ChatChainSender = (session: Session, message: (h[] | h | string)[]) => Promise +export type ChatChainSender = ( + session: Session, + message: (h[] | h | string)[] +) => Promise export enum ChainMiddlewareRunStatus { SKIPPED = 0, diff --git a/packages/core/src/chains/rooms.ts b/packages/core/src/chains/rooms.ts index 3bd1f8ee..ee7a7e6b 100644 --- a/packages/core/src/chains/rooms.ts +++ 
b/packages/core/src/chains/rooms.ts @@ -6,11 +6,17 @@ import { Config } from '../config' import { ChatHubError, ChatHubErrorCode } from '../utils/error' import { ModelType } from '../llm-core/platform/types' -export async function queryJoinedConversationRoom(ctx: Context, session: Session, name?: string) { +export async function queryJoinedConversationRoom( + ctx: Context, + session: Session, + name?: string +) { if (name != null) { const joinedRooms = await getAllJoinedConversationRoom(ctx, session) - return joinedRooms.find((it) => it.roomName === name || it.roomId === parseInt(name)) + return joinedRooms.find( + (it) => it.roomName === name || it.roomId === parseInt(name) + ) } const userRoomInfoList = await ctx.database.get('chathub_user', { @@ -30,7 +36,10 @@ export async function queryJoinedConversationRoom(ctx: Context, session: Session return await resolveConversationRoom(ctx, userRoomInfo.defaultRoomId) } -export async function queryPublicConversationRoom(ctx: Context, session: Session) { +export async function queryPublicConversationRoom( + ctx: Context, + session: Session +) { // 如果是私聊,直接返回 null if (session.isDirect) { @@ -39,10 +48,13 @@ export async function queryPublicConversationRoom(ctx: Context, session: Session // 如果是群聊,那么就查询群聊的公共房间 - const groupRoomInfoList = await ctx.database.get('chathub_room_group_member', { - groupId: session.guildId, - roomVisibility: 'public' - }) + const groupRoomInfoList = await ctx.database.get( + 'chathub_room_group_member', + { + groupId: session.guildId, + roomVisibility: 'public' + } + ) let roomId: number @@ -51,7 +63,8 @@ export async function queryPublicConversationRoom(ctx: Context, session: Session } else if (groupRoomInfoList.length === 1) { roomId = groupRoomInfoList[0].roomId } else { - const groupRoomInfo = groupRoomInfoList[randomInt(groupRoomInfoList.length)] + const groupRoomInfo = + groupRoomInfoList[randomInt(groupRoomInfoList.length)] roomId = groupRoomInfo.roomId } @@ -77,7 +90,8 @@ export async function getTemplateConversationRoom( if (config.defaultModel == null) { const models = ctx.chathub.platform.getAllModels(ModelType.llm) - const model = models.find((model) => model.includes('3.5-turbo')) ?? models[0] + const model = + models.find((model) => model.includes('3.5-turbo')) ?? models[0] config.defaultModel = model } @@ -106,7 +120,11 @@ export async function getTemplateConversationRoom( } export async function getConversationRoomCount(ctx: Context) { - const counts: number = await ctx.database.eval('chathub_room', (row) => $.max(row.roomId), {}) + const counts: number = await ctx.database.eval( + 'chathub_room', + (row) => $.max(row.roomId), + {} + ) return counts } @@ -169,7 +187,11 @@ export async function transferConversationRoom( ]) } -export async function switchConversationRoom(ctx: Context, session: Session, id: string | number) { +export async function switchConversationRoom( + ctx: Context, + session: Session, + id: string | number +) { let joinedRoom = await getAllJoinedConversationRoom(ctx, session) const parsedId = typeof id === 'number' ? 
id : parseInt(id) @@ -246,7 +268,9 @@ export async function getAllJoinedConversationRoom( } for (const room of roomList) { - const memberOfTheRoom = memberList.some((it) => it.roomId === room.roomId) + const memberOfTheRoom = memberList.some( + (it) => it.roomId === room.roomId + ) if ( (!session.isDirect && memberOfTheRoom) || @@ -278,7 +302,11 @@ export async function leaveConversationRoom( }) } -export async function queryConversationRoom(ctx: Context, session: Session, name: string) { +export async function queryConversationRoom( + ctx: Context, + session: Session, + name: string +) { const roomId = parseInt(name) const roomList = Number.isNaN(roomId) @@ -295,20 +323,29 @@ export async function queryConversationRoom(ctx: Context, session: Session, name // 在限定搜索到群里一次。 if (session.isDirect === false && !Number.isNaN(roomId)) { - const groupRoomList = await ctx.database.get('chathub_room_group_member', { - groupId: session.guildId, - roomId: { - $in: roomList.map((it) => it.roomId) + const groupRoomList = await ctx.database.get( + 'chathub_room_group_member', + { + groupId: session.guildId, + roomId: { + $in: roomList.map((it) => it.roomId) + } } - }) + ) if (groupRoomList.length === 1) { - return roomList.find((it) => it.roomId === groupRoomList[0].roomId) + return roomList.find( + (it) => it.roomId === groupRoomList[0].roomId + ) } else if (groupRoomList.length > 1) { - throw new ChatHubError(ChatHubErrorCode.THE_NAME_FIND_IN_MULTIPLE_ROOMS) + throw new ChatHubError( + ChatHubErrorCode.THE_NAME_FIND_IN_MULTIPLE_ROOMS + ) } } else { - throw new ChatHubError(ChatHubErrorCode.THE_NAME_FIND_IN_MULTIPLE_ROOMS) + throw new ChatHubError( + ChatHubErrorCode.THE_NAME_FIND_IN_MULTIPLE_ROOMS + ) } } else if (roomList.length === 0) { return null @@ -363,7 +400,10 @@ export async function joinConversationRoom( ) { // 接下来检查房间的权限和当前所处的环境 - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? await resolveConversationRoom(ctx, roomId) + : roomId await ctx.database.upsert('chathub_user', [ { @@ -376,10 +416,13 @@ export async function joinConversationRoom( if (isDirect === false) { // 如果是群聊,那么就需要检查群聊的权限 - const groupMemberList = await ctx.database.get('chathub_room_group_member', { - groupId: session.guildId, - roomId: room.roomId - }) + const groupMemberList = await ctx.database.get( + 'chathub_room_group_member', + { + groupId: session.guildId, + roomId: room.roomId + } + ) if (groupMemberList.length === 0) { await ctx.database.create('chathub_room_group_member', { @@ -410,7 +453,10 @@ export async function getConversationRoomUser( roomId: number | ConversationRoom, userId: string = session.userId ) { - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? await resolveConversationRoom(ctx, roomId) + : roomId const memberList = await ctx.database.get('chathub_room_member', { roomId: room.roomId, @@ -427,7 +473,10 @@ export async function setUserPermission( permission: 'member' | 'admin', userId: string = session.userId ) { - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? 
await resolveConversationRoom(ctx, roomId) + : roomId const memberList = await ctx.database.get('chathub_room_member', { roomId: room.roomId, @@ -453,7 +502,10 @@ export async function addConversationRoomToGroup( roomId: number | ConversationRoom, groupId: string = session.guildId ) { - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? await resolveConversationRoom(ctx, roomId) + : roomId const memberList = await ctx.database.get('chathub_room_group_member', { roomId: room.roomId, @@ -475,7 +527,10 @@ export async function muteUserFromConversationRoom( roomId: number | ConversationRoom, userId: string ) { - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? await resolveConversationRoom(ctx, roomId) + : roomId const memberList = await ctx.database.get('chathub_room_member', { roomId: room.roomId, @@ -501,7 +556,10 @@ export async function kickUserFromConversationRoom( roomId: number | ConversationRoom, userId: string ) { - const room = typeof roomId === 'number' ? await resolveConversationRoom(ctx, roomId) : roomId + const room = + typeof roomId === 'number' + ? await resolveConversationRoom(ctx, roomId) + : roomId const memberList = await ctx.database.get('chathub_room_member', { roomId: room.roomId, @@ -543,7 +601,8 @@ export async function createConversationRoom( await ctx.database.create('chathub_room_member', { userId: session.userId, roomId: room.roomId, - roomPermission: session.userId === room.roomMasterId ? 'owner' : 'member' + roomPermission: + session.userId === room.roomMasterId ? 'owner' : 'member' }) await joinConversationRoom(ctx, session, room) diff --git a/packages/core/src/command.ts b/packages/core/src/command.ts index 3a7b4583..66bfbdc7 100644 --- a/packages/core/src/command.ts +++ b/packages/core/src/command.ts @@ -13,7 +13,11 @@ export async function command(ctx: Context, config: Config) { } const command: { - apply: (ctx: Context, config: Config, chain: ChatChain) => PromiseLike | void + apply: ( + ctx: Context, + config: Config, + chain: ChatChain + ) => PromiseLike | void } = await require(`./commands/${file}`) if (command.apply) { diff --git a/packages/core/src/commands/chat.ts b/packages/core/src/commands/chat.ts index 8e21099d..f72d6c27 100644 --- a/packages/core/src/commands/chat.ts +++ b/packages/core/src/commands/chat.ts @@ -27,7 +27,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { }) }) - ctx.command('chathub.chat.voice ', '和模型进行对话并输出为语音') + ctx.command( + 'chathub.chat.voice ', + '和模型进行对话并输出为语音' + ) .option('room', '-r 指定房间') .option('speaker', '-s 语音服务的目标人物的ID', { authority: 1 diff --git a/packages/core/src/commands/providers.ts b/packages/core/src/commands/providers.ts index fddbf0ce..b07276e0 100644 --- a/packages/core/src/commands/providers.ts +++ b/packages/core/src/commands/providers.ts @@ -31,17 +31,25 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { }) }) - ctx.command('chathub.embeddings.set ', '设置默认使用的嵌入模型', { - authority: 3 - }).action(async ({ session }, embeddings) => { + ctx.command( + 'chathub.embeddings.set ', + '设置默认使用的嵌入模型', + { + authority: 3 + } + ).action(async ({ session }, embeddings) => { await chain.receiveCommand(session, 'set_embeddings', { setEmbeddings: embeddings }) }) - ctx.command('chathub.vectorstore.set ', '设置默认使用的向量数据库', { - authority: 3 - }).action(async ({ session }, vectorStore) => { + 
ctx.command( + 'chathub.vectorstore.set ', + '设置默认使用的向量数据库', + { + authority: 3 + } + ).action(async ({ session }, vectorStore) => { await chain.receiveCommand(session, 'set_vector_store', { setVectorStore: vectorStore }) diff --git a/packages/core/src/commands/room.ts b/packages/core/src/commands/room.ts index 27b1f31c..60145cdf 100644 --- a/packages/core/src/commands/room.ts +++ b/packages/core/src/commands/room.ts @@ -37,10 +37,22 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { } ) - ctx.command('chathub.room.kick <...arg:user>', '踢出某个人员在你当前的房间').action( + ctx.command( + 'chathub.room.kick <...arg:user>', + '踢出某个人员在你当前的房间' + ).action(async ({ session }, ...user) => { + const users = user.map((u) => u.split(':')[1]) + await chain.receiveCommand(session, 'kick_member', { + resolve_user: { + id: users + } + }) + }) + + ctx.command('chathub.room.invite <...arg:user>', '邀请进入房间').action( async ({ session }, ...user) => { const users = user.map((u) => u.split(':')[1]) - await chain.receiveCommand(session, 'kick_member', { + await chain.receiveCommand(session, 'invite_room', { resolve_user: { id: users } @@ -48,26 +60,20 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { } ) - ctx.command('chathub.room.invite <...arg:user>', '邀请进入房间').action( - async ({ session }, ...user) => { - const users = user.map((u) => u.split(':')[1]) - await chain.receiveCommand(session, 'invite_room', { - resolve_user: { - id: users + ctx.command('chathub.room.join ', '加入某个房间').action( + async ({ session }, name) => { + await chain.receiveCommand(session, 'join_room', { + room_resolve: { + name } }) } ) - ctx.command('chathub.room.join ', '加入某个房间').action(async ({ session }, name) => { - await chain.receiveCommand(session, 'join_room', { - room_resolve: { - name - } - }) - }) - - ctx.command('chathub.room.add_to_group ', '允许房间在某个群里也可以使用') + ctx.command( + 'chathub.room.add_to_group ', + '允许房间在某个群里也可以使用' + ) .option('group', '-g 群号') .action(async ({ session, options }, name) => { await chain.receiveCommand(session, 'add_room_to_group', { @@ -151,26 +157,28 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { } ) - ctx.command('chathub.room.switch ', '切换到你已经加入了的房间').action( - async ({ session }, name) => { - await chain.receiveCommand(session, 'switch_room', { - room_resolve: { - name, - id: name - } - }) - } - ) + ctx.command( + 'chathub.room.switch ', + '切换到你已经加入了的房间' + ).action(async ({ session }, name) => { + await chain.receiveCommand(session, 'switch_room', { + room_resolve: { + name, + id: name + } + }) + }) - ctx.command('chathub.room.permission ', '修改房间里某人的权限').action( - async ({ session }, user) => { - await chain.receiveCommand(session, 'room_permission', { - resolve_user: { - id: user.split(':')[1] - } - }) - } - ) + ctx.command( + 'chathub.room.permission ', + '修改房间里某人的权限' + ).action(async ({ session }, user) => { + await chain.receiveCommand(session, 'room_permission', { + resolve_user: { + id: user.split(':')[1] + } + }) + }) ctx.command('chathub.room.mute <...user:user>', '禁言某个用户,不让其发言') .option('room', '-r 指定房间') diff --git a/packages/core/src/config.ts b/packages/core/src/config.ts index 4d74909a..00122a37 100644 --- a/packages/core/src/config.ts +++ b/packages/core/src/config.ts @@ -39,14 +39,24 @@ export interface Config { export const Config: Schema = Schema.intersect([ Schema.object({ botName: Schema.string().description('bot 姓名').default('香草'), - isNickname: Schema.boolean().description('允许 bot 配置中的昵称引发回复').default(true) + 
isNickname: Schema.boolean() + .description('允许 bot 配置中的昵称引发回复') + .default(true) }).description('bot 配置'), Schema.object({ - allowPrivate: Schema.boolean().description('允许私聊触发').default(true), - allowAtReply: Schema.boolean().description('允许 at 回复').default(true), - isReplyWithAt: Schema.boolean().description('回复时引用原消息').default(false), - isForwardMsg: Schema.boolean().description('让消息以转发消息的形式发送').default(false), + allowPrivate: Schema.boolean() + .description('允许私聊触发') + .default(true), + allowAtReply: Schema.boolean() + .description('允许 at 回复') + .default(true), + isReplyWithAt: Schema.boolean() + .description('回复时引用原消息') + .default(false), + isForwardMsg: Schema.boolean() + .description('让消息以转发消息的形式发送') + .default(false), privateChatWithoutCommand: Schema.boolean() .description('私聊可不调用命令直接和 bot 聊天') .default(false), @@ -59,7 +69,9 @@ export const Config: Schema = Schema.intersect([ outputMode: Schema.union([ Schema.const('raw').description('原始(直接输出,不做任何处理)'), - Schema.const('text').description('文本(把回复当成 markdown 渲染)'), + Schema.const('text').description( + '文本(把回复当成 markdown 渲染)' + ), Schema.const('image').description('图片(需要 Puppeteer服务)'), Schema.const('voice').description('语音(需要 vits 服务)'), Schema.const('mixed-image').description('混合(图片和文本)'), @@ -74,7 +86,9 @@ export const Config: Schema = Schema.intersect([ ) .default(false), - censor: Schema.boolean().description('文本审核服务(需要安装censor服务').default(false), + censor: Schema.boolean() + .description('文本审核服务(需要安装censor服务') + .default(false), sendThinkingMessage: Schema.boolean() .description('发送等待消息,在请求时会发送这条消息') @@ -86,7 +100,9 @@ export const Config: Schema = Schema.intersect([ thinkingMessage: Schema.string() .description('等待消息内容') - .default('我还在思考中,前面还有 {count} 条消息等着我回复呢,稍等一下哦~'), + .default( + '我还在思考中,前面还有 {count} 条消息等着我回复呢,稍等一下哦~' + ), randomReplyFrequency: Schema.percent() .description('随机回复频率') @@ -109,7 +125,9 @@ export const Config: Schema = Schema.intersect([ .default(false), blockText: Schema.string() .description('被拉黑用户的固定回复内容') - .default('哎呀(キ`゚Д゚´)!!,你怎么被拉入黑名单了呢?要不你去问问我的主人吧。'), + .default( + '哎呀(キ`゚Д゚´)!!,你怎么被拉入黑名单了呢?要不你去问问我的主人吧。' + ), messageCount: Schema.number() .role('slider') @@ -146,9 +164,15 @@ export const Config: Schema = Schema.intersect([ }).description('模型选项'), Schema.object({ - defaultChatMode: Schema.dynamic('chat-mode').default('chat').description('聊天模式'), - defaultModel: Schema.dynamic('model').description('聊天模型').default('无'), - defaultPreset: Schema.dynamic('preset').description('聊天预设').default('chatgpt') + defaultChatMode: Schema.dynamic('chat-mode') + .default('chat') + .description('聊天模式'), + defaultModel: Schema.dynamic('model') + .description('聊天模型') + .default('无'), + defaultPreset: Schema.dynamic('preset') + .description('聊天预设') + .default('chatgpt') }).description('模板房间选项'), Schema.object({ diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 7beefd3a..158f123f 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -35,7 +35,9 @@ export function apply(ctx: Context, config: Config) { // set proxy before init service if (config.isProxy) { - request.setGlobalProxyAddress(config.proxyAddress ?? ctx.http.config.proxyAgent) + request.setGlobalProxyAddress( + config.proxyAddress ?? 
ctx.http.config.proxyAgent + ) logger.debug(`proxy: ${config.proxyAddress}`) } diff --git a/packages/core/src/llm-core/chain/base.ts b/packages/core/src/llm-core/chain/base.ts index 00a05c81..354723c3 100644 --- a/packages/core/src/llm-core/chain/base.ts +++ b/packages/core/src/llm-core/chain/base.ts @@ -1,4 +1,8 @@ -import { CallbackManager, CallbackManagerForChainRun, Callbacks } from 'langchain/callbacks' +import { + CallbackManager, + CallbackManagerForChainRun, + Callbacks +} from 'langchain/callbacks' import { BaseChain, ChainInputs, SerializedLLMChain } from 'langchain/chains' import { BaseMessage, ChainValues, HumanMessage } from 'langchain/schema' import { BaseLLMOutputParser } from 'langchain/schema/output_parser' diff --git a/packages/core/src/llm-core/chain/browsing_chat_chain.ts b/packages/core/src/llm-core/chain/browsing_chat_chain.ts index 92ec21ad..6a9a45eb 100644 --- a/packages/core/src/llm-core/chain/browsing_chat_chain.ts +++ b/packages/core/src/llm-core/chain/browsing_chat_chain.ts @@ -1,11 +1,21 @@ -import { AIMessage, BaseMessage, ChainValues, SystemMessage } from 'langchain/schema' +import { + AIMessage, + BaseMessage, + ChainValues, + SystemMessage +} from 'langchain/schema' import { BufferMemory, ConversationSummaryMemory, VectorStoreRetrieverMemory } from 'langchain/memory' -import { ChatHubLLMCallArg, ChatHubLLMChain, ChatHubLLMChainWrapper, SystemPrompts } from './base' +import { + ChatHubLLMCallArg, + ChatHubLLMChain, + ChatHubLLMChainWrapper, + SystemPrompts +} from './base' import { HumanMessagePromptTemplate, MessagesPlaceholder, @@ -14,7 +24,10 @@ import { import { MemoryVectorStore } from 'langchain/vectorstores/memory' import { ChatHubBrowsingPrompt } from './prompt' import { Embeddings } from 'langchain/embeddings/base' -import { ChatHubBrowsingAction, ChatHubBrowsingActionOutputParser } from './out_parsers' +import { + ChatHubBrowsingAction, + ChatHubBrowsingActionOutputParser +} from './out_parsers' import { Tool } from 'langchain/tools' import { ChatHubSaveableVectorStore } from '../model/base' import { createLogger } from '../../utils/logger' @@ -71,7 +84,9 @@ export class ChatHubBrowsingChain // use memory this.searchMemory = new VectorStoreRetrieverMemory({ - vectorStoreRetriever: new MemoryVectorStore(embeddings).asRetriever(6), + vectorStoreRetriever: new MemoryVectorStore(embeddings).asRetriever( + 6 + ), memoryKey: 'long_history', inputKey: 'input', outputKey: 'result', @@ -94,23 +109,32 @@ export class ChatHubBrowsingChain static fromLLMAndTools( llm: ChatHubChatModel, tools: Tool[], - { botName, embeddings, historyMemory, systemPrompts, longMemory }: ChatHubBrowsingChainInput + { + botName, + embeddings, + historyMemory, + systemPrompts, + longMemory + }: ChatHubBrowsingChainInput ): ChatHubBrowsingChain { - const humanMessagePromptTemplate = HumanMessagePromptTemplate.fromTemplate('{input}') + const humanMessagePromptTemplate = + HumanMessagePromptTemplate.fromTemplate('{input}') let conversationSummaryPrompt: SystemMessagePromptTemplate let messagesPlaceholder: MessagesPlaceholder if (historyMemory instanceof ConversationSummaryMemory) { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `This is some conversation between me and you. Please generate an response based on the system prompt and content below. 
Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `This is some conversation between me and you. Please generate an response based on the system prompt and content below. Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` + ) } else { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` + ) messagesPlaceholder = new MessagesPlaceholder('chat_history') } @@ -125,7 +149,8 @@ export class ChatHubBrowsingChain messagesPlaceholder, tokenCounter: (text) => llm.getNumTokens(text), humanMessagePromptTemplate, - sendTokenLimit: llm.invocationParams().maxTokens ?? llm.getModelMaxContextSize() + sendTokenLimit: + llm.invocationParams().maxTokens ?? llm.getModelMaxContextSize() }) const chain = new ChatHubLLMChain({ llm, prompt }) @@ -143,7 +168,9 @@ export class ChatHubBrowsingChain private _selectTool(action: ChatHubBrowsingAction): Tool { if (action.tool === 'search') { - return this.tools.find((tool) => tool.name.toLowerCase().includes('search'))! + return this.tools.find((tool) => + tool.name.toLowerCase().includes('search') + )! } else if (action.tool === 'browse') { return this.tools.find((tool) => tool.name === 'web-browser')! 
} @@ -159,9 +186,9 @@ export class ChatHubBrowsingChain input: message } - const chatHistory = (await this.historyMemory.loadMemoryVariables(requests))[ - this.historyMemory.memoryKey - ] as BaseMessage[] + const chatHistory = ( + await this.historyMemory.loadMemoryVariables(requests) + )[this.historyMemory.memoryKey] as BaseMessage[] const loopChatHistory = [...chatHistory] @@ -276,9 +303,15 @@ export class ChatHubBrowsingChain loopCount += 1 } - await this.historyMemory.saveContext({ input: message.content }, { output: finalResponse }) + await this.historyMemory.saveContext( + { input: message.content }, + { output: finalResponse } + ) - await this.longMemory.saveContext({ user: message.content }, { your: finalResponse }) + await this.longMemory.saveContext( + { user: message.content }, + { your: finalResponse } + ) const vectorStore = this.longMemory.vectorStoreRetriever.vectorStore diff --git a/packages/core/src/llm-core/chain/chat_chain.ts b/packages/core/src/llm-core/chain/chat_chain.ts index d2ed04bf..7a48a23c 100644 --- a/packages/core/src/llm-core/chain/chat_chain.ts +++ b/packages/core/src/llm-core/chain/chat_chain.ts @@ -5,7 +5,12 @@ import { VectorStoreRetrieverMemory } from 'langchain/memory' -import { ChatHubLLMCallArg, ChatHubLLMChain, ChatHubLLMChainWrapper, SystemPrompts } from './base' +import { + ChatHubLLMCallArg, + ChatHubLLMChain, + ChatHubLLMChainWrapper, + SystemPrompts +} from './base' import { HumanMessagePromptTemplate, MessagesPlaceholder, @@ -28,7 +33,10 @@ export interface ChatHubChatChainInput { historyMemory: ConversationSummaryMemory | BufferMemory } -export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubChatChainInput { +export class ChatHubChatChain + extends ChatHubLLMChainWrapper + implements ChatHubChatChainInput +{ botName: string longMemory: VectorStoreRetrieverMemory @@ -55,7 +63,9 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC this.longMemory = longMemory ?? new VectorStoreRetrieverMemory({ - vectorStoreRetriever: new MemoryVectorStore(new FakeEmbeddings()).asRetriever(6), + vectorStoreRetriever: new MemoryVectorStore( + new FakeEmbeddings() + ).asRetriever(6), memoryKey: 'long_history', inputKey: 'user', outputKey: 'your', @@ -76,23 +86,26 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC humanMessagePrompt }: ChatHubChatChainInput ): ChatHubLLMChainWrapper { - const humanMessagePromptTemplate = HumanMessagePromptTemplate.fromTemplate( - humanMessagePrompt ?? '{input}' - ) + const humanMessagePromptTemplate = + HumanMessagePromptTemplate.fromTemplate( + humanMessagePrompt ?? '{input}' + ) let conversationSummaryPrompt: SystemMessagePromptTemplate let messagesPlaceholder: MessagesPlaceholder if (historyMemory instanceof ConversationSummaryMemory) { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `This is some conversation between me and you. Please generate an response based on the system prompt and content below. Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `This is some conversation between me and you. 
Please generate a response based on the system prompt and content below. Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on this information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` + ) } else { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on this information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` + ) messagesPlaceholder = new MessagesPlaceholder('chat_history') } @@ -106,7 +119,8 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC messagesPlaceholder, tokenCounter: (text) => llm.getNumTokens(text), humanMessagePromptTemplate, - sendTokenLimit: llm.invocationParams().maxTokens ?? llm.getModelMaxContextSize() + sendTokenLimit: + llm.invocationParams().maxTokens ?? llm.getModelMaxContextSize() }) const chain = new ChatHubLLMChain({ llm, prompt }) @@ -129,7 +143,8 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC const requests: ChainValues = { input: message } - const chatHistory = await this.historyMemory.loadMemoryVariables(requests) + const chatHistory = + await this.historyMemory.loadMemoryVariables(requests) const longHistory = await this.longMemory.loadMemoryVariables({ user: message.content @@ -159,9 +174,15 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC const responseString = response.text - await this.longMemory.saveContext({ user: message.content }, { your: responseString }) + await this.longMemory.saveContext( + { user: message.content }, + { your: responseString } + ) - await this.historyMemory.saveContext({ input: message.content }, { output: responseString }) + await this.historyMemory.saveContext( + { input: message.content }, + { output: responseString } + ) const vectorStore = this.longMemory.vectorStoreRetriever.vectorStore @@ -173,8 +194,12 @@ export class ChatHubChatChain extends ChatHubLLMChainWrapper implements ChatHubC const aiMessage = new AIMessage(responseString) response.message = aiMessage - if (response.extra != null && 'additionalReplyMessages' in response.extra) { - response.additionalReplyMessages = response.extra.additionalReplyMessages + if ( + response.extra != null && + 'additionalReplyMessages' in response.extra + ) { + response.additionalReplyMessages = + response.extra.additionalReplyMessages } return response diff --git a/packages/core/src/llm-core/chain/function_calling_browsing_chain.ts b/packages/core/src/llm-core/chain/function_calling_browsing_chain.ts index 12e539bc..50f5f165 100644 --- a/packages/core/src/llm-core/chain/function_calling_browsing_chain.ts +++ b/packages/core/src/llm-core/chain/function_calling_browsing_chain.ts @@ -12,7 +12,12 @@ import { ConversationSummaryMemory,
VectorStoreRetrieverMemory } from 'langchain/memory' -import { ChatHubLLMCallArg, ChatHubLLMChain, ChatHubLLMChainWrapper, SystemPrompts } from './base' +import { + ChatHubLLMCallArg, + ChatHubLLMChain, + ChatHubLLMChainWrapper, + SystemPrompts +} from './base' import { HumanMessagePromptTemplate, MessagesPlaceholder, @@ -75,7 +80,9 @@ export class ChatHubFunctionCallBrowsingChain // use memory this.searchMemory = new VectorStoreRetrieverMemory({ - vectorStoreRetriever: new MemoryVectorStore(embeddings).asRetriever(6), + vectorStoreRetriever: new MemoryVectorStore(embeddings).asRetriever( + 6 + ), memoryKey: 'long_history', inputKey: 'input', outputKey: 'result', @@ -99,21 +106,24 @@ export class ChatHubFunctionCallBrowsingChain longMemory }: ChatHubFunctionCallBrowsingChainInput ): ChatHubFunctionCallBrowsingChain { - const humanMessagePromptTemplate = HumanMessagePromptTemplate.fromTemplate('{input}') + const humanMessagePromptTemplate = + HumanMessagePromptTemplate.fromTemplate('{input}') let conversationSummaryPrompt: SystemMessagePromptTemplate let messagesPlaceholder: MessagesPlaceholder if (historyMemory instanceof ConversationSummaryMemory) { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `This is some conversation between me and you. Please generate an response based on the system prompt and content below. Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `This is some conversation between me and you. Please generate a response based on the system prompt and content below. Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on this information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity) Current conversation: {chat_history}` + ) } else { - conversationSummaryPrompt = SystemMessagePromptTemplate.fromTemplate( - // eslint-disable-next-line max-len - `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on these information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` - ) + conversationSummaryPrompt = + SystemMessagePromptTemplate.fromTemplate( + // eslint-disable-next-line max-len + `Relevant pieces of previous conversation: {long_history} (You do not need to use these pieces of information if not relevant, and based on this information, generate similar but non-repetitive responses. Pay attention, you need to think more and diverge your creativity.)` + ) messagesPlaceholder = new MessagesPlaceholder('chat_history') } @@ -127,7 +137,8 @@ export class ChatHubFunctionCallBrowsingChain messagesPlaceholder, tokenCounter: (text) => llm.getNumTokens(text), humanMessagePromptTemplate, - sendTokenLimit: llm.invocationParams().maxTokens ?? llm.getModelMaxContextSize() + sendTokenLimit: + llm.invocationParams().maxTokens ??
llm.getModelMaxContextSize() }) const chain = new ChatHubLLMChain({ llm, prompt }) @@ -157,9 +168,9 @@ export class ChatHubFunctionCallBrowsingChain input: message } - const chatHistory = (await this.historyMemory.loadMemoryVariables(requests))[ - this.historyMemory.memoryKey - ] as BaseMessage[] + const chatHistory = ( + await this.historyMemory.loadMemoryVariables(requests) + )[this.historyMemory.memoryKey] as BaseMessage[] const loopChatHistory = [...chatHistory] @@ -198,7 +209,9 @@ export class ChatHubFunctionCallBrowsingChain const responseMessage = rawGeneration.message logger.debug( - `[ChatHubFunctionCallBrowsingChain] response: ${JSON.stringify(responseMessage)}` + `[ChatHubFunctionCallBrowsingChain] response: ${JSON.stringify( + responseMessage + )}` ) if (loopCount === 0) { @@ -209,7 +222,8 @@ export class ChatHubFunctionCallBrowsingChain loopChatHistory.push(responseMessage) if (responseMessage.additional_kwargs?.function_call) { - const functionCall = responseMessage.additional_kwargs.function_call + const functionCall = + responseMessage.additional_kwargs.function_call const tool = this._selectTool(functionCall.name) @@ -221,16 +235,21 @@ export class ChatHubFunctionCallBrowsingChain try { toolResponse = { name: tool.name, - content: await tool.call(JSON.parse(functionCall.arguments)) + content: await tool.call( + JSON.parse(functionCall.arguments) + ) } } catch (e) { toolResponse = { name: tool.name, - content: 'Call tool `' + functionCall.name + '` failed: ' + e + content: + 'Call tool `' + functionCall.name + '` failed: ' + e } } - loopChatHistory.push(new FunctionMessage(toolResponse.content, toolResponse.name)) + loopChatHistory.push( + new FunctionMessage(toolResponse.content, toolResponse.name) + ) } else { finalResponse = responseMessage.content break @@ -243,9 +262,15 @@ export class ChatHubFunctionCallBrowsingChain loopCount++ } - await this.historyMemory.saveContext({ input: message.content }, { output: finalResponse }) + await this.historyMemory.saveContext( + { input: message.content }, + { output: finalResponse } + ) - await this.longMemory.saveContext({ user: message.content }, { your: finalResponse }) + await this.longMemory.saveContext( + { user: message.content }, + { your: finalResponse } + ) const vectorStore = this.longMemory.vectorStoreRetriever.vectorStore diff --git a/packages/core/src/llm-core/chain/out_parsers.ts b/packages/core/src/llm-core/chain/out_parsers.ts index 305bd441..f587ddf5 100644 --- a/packages/core/src/llm-core/chain/out_parsers.ts +++ b/packages/core/src/llm-core/chain/out_parsers.ts @@ -5,7 +5,10 @@ export class ChatHubBrowsingActionOutputParser extends BaseOutputParser { + async parse( + text: string, + callbacks?: Callbacks + ): Promise { let parsed: ChatHubBrowsingAction try { diff --git a/packages/core/src/llm-core/chain/plugin_chat_chain.ts b/packages/core/src/llm-core/chain/plugin_chat_chain.ts index 1eedffdc..c29451eb 100644 --- a/packages/core/src/llm-core/chain/plugin_chat_chain.ts +++ b/packages/core/src/llm-core/chain/plugin_chat_chain.ts @@ -1,8 +1,15 @@ import { AIMessage, ChainValues } from 'langchain/schema' import { BufferMemory, ConversationSummaryMemory } from 'langchain/memory' -import { ChatHubLLMCallArg, ChatHubLLMChainWrapper, SystemPrompts } from './base' +import { + ChatHubLLMCallArg, + ChatHubLLMChainWrapper, + SystemPrompts +} from './base' import { Tool } from 'langchain/tools' -import { AgentExecutor, initializeAgentExecutorWithOptions } from 'langchain/agents' +import { + AgentExecutor, + 
initializeAgentExecutorWithOptions +} from 'langchain/agents' import { createLogger } from '../../utils/logger' import { ChatHubChatModel } from '../platform/model' @@ -13,7 +20,10 @@ export interface ChatHubPluginChainInput { historyMemory: ConversationSummaryMemory | BufferMemory } -export class ChatHubPluginChain extends ChatHubLLMChainWrapper implements ChatHubPluginChainInput { +export class ChatHubPluginChain + extends ChatHubLLMChainWrapper + implements ChatHubPluginChainInput +{ executor: AgentExecutor historyMemory: ConversationSummaryMemory | BufferMemory @@ -90,7 +100,8 @@ export class ChatHubPluginChain extends ChatHubLLMChainWrapper implements ChatHu input: message } - const memoryVariables = await this.historyMemory.loadMemoryVariables(requests) + const memoryVariables = + await this.historyMemory.loadMemoryVariables(requests) requests['chat_history'] = memoryVariables[this.historyMemory.memoryKey] requests['id'] = conversationId diff --git a/packages/core/src/llm-core/chain/prompt.ts b/packages/core/src/llm-core/chain/prompt.ts index 14fba445..96e1c6ed 100644 --- a/packages/core/src/llm-core/chain/prompt.ts +++ b/packages/core/src/llm-core/chain/prompt.ts @@ -31,7 +31,10 @@ export interface ChatHubChatPromptInput { sendTokenLimit?: number } -export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHubChatPromptInput { +export class ChatHubChatPrompt + extends BaseChatPromptTemplate + implements ChatHubChatPromptInput +{ systemPrompts?: SystemPrompts tokenCounter: (text: string) => Promise @@ -53,7 +56,8 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub this.messagesPlaceholder = fields.messagesPlaceholder this.conversationSummaryPrompt = fields.conversationSummaryPrompt this.humanMessagePromptTemplate = - fields.humanMessagePromptTemplate ?? HumanMessagePromptTemplate.fromTemplate('{input}') + fields.humanMessagePromptTemplate ?? + HumanMessagePromptTemplate.fromTemplate('{input}') this.sendTokenLimit = fields.sendTokenLimit ?? 4096 } @@ -64,7 +68,9 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub private async _countMessageTokens(message: BaseMessage) { let result = (await this.tokenCounter(message.content)) + - (await this.tokenCounter(messageTypeToOpenAIRole(message._getType()))) + (await this.tokenCounter( + messageTypeToOpenAIRole(message._getType()) + )) if (message.name) { result += await this.tokenCounter(message.name) @@ -99,11 +105,15 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub let formatConversationSummary: SystemMessage | null if (!this.messagesPlaceholder) { - const chatHistoryTokens = await this.tokenCounter(chatHistory as string) + const chatHistoryTokens = await this.tokenCounter( + chatHistory as string + ) if (usedTokens + chatHistoryTokens > this.sendTokenLimit) { logger.warn( - `Used tokens: ${usedTokens + chatHistoryTokens} exceed limit: ${ + `Used tokens: ${ + usedTokens + chatHistoryTokens + } exceed limit: ${ this.sendTokenLimit }. Is too long history. 
Splitting the history.` ) @@ -115,10 +125,15 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub if (longHistory.length > 0) { const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -126,10 +141,13 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' '), - chat_history: chatHistory - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' '), + chat_history: chatHistory + }) } } else { const formatChatHistory: BaseMessage[] = [] @@ -153,10 +171,15 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -164,14 +187,18 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' ') - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' ') + }) } - const formatMessagesPlaceholder = await this.messagesPlaceholder.formatMessages({ - chat_history: formatChatHistory - }) + const formatMessagesPlaceholder = + await this.messagesPlaceholder.formatMessages({ + chat_history: formatChatHistory + }) result.push(...formatMessagesPlaceholder) } @@ -182,15 +209,19 @@ export class ChatHubChatPrompt extends BaseChatPromptTemplate implements ChatHub result.push(input) - logger.debug(`Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}`) + logger.debug( + `Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}` + ) logger.debug(`messages: ${JSON.stringify(result)}`) return result } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - partial(values: PartialValues): Promise> { + partial( + values: PartialValues + // eslint-disable-next-line @typescript-eslint/no-explicit-any + ): Promise> { throw new Error('Method not implemented.') } @@ -242,7 +273,8 @@ export class ChatHubBrowsingPrompt this.messagesPlaceholder = fields.messagesPlaceholder this.conversationSummaryPrompt = fields.conversationSummaryPrompt this.humanMessagePromptTemplate = - fields.humanMessagePromptTemplate ?? HumanMessagePromptTemplate.fromTemplate('{input}') + fields.humanMessagePromptTemplate ?? + HumanMessagePromptTemplate.fromTemplate('{input}') this.sendTokenLimit = fields.sendTokenLimit ?? 
4096 } @@ -253,7 +285,9 @@ export class ChatHubBrowsingPrompt private async _countMessageTokens(message: BaseMessage) { let result = (await this.tokenCounter(message.content)) + - (await this.tokenCounter(messageTypeToOpenAIRole(message._getType()))) + (await this.tokenCounter( + messageTypeToOpenAIRole(message._getType()) + )) if (message.name) { result += await this.tokenCounter(message.name) @@ -264,29 +298,29 @@ export class ChatHubBrowsingPrompt private _constructFullSystemPrompt() { return ( - `Constraints: + `Constraints: 1. Always, you must call tools to chat with user by yourself. 2. Always get information from the Internet before answering user questions, and use your own existing knowledge when you can’t get it. - 3. You can only call one tool at a time. + 3. You can only call one tool at a time. 4. Before calling the chat tool, you must call the search or browse tool to get the latest information about events related to the user's chat. 5. Always call the chat tool when you have finished all your objectives. - + Tools: 1. search: A search engine. useful for when you need to answer questions about current events, will return an array of links, titles, and descriptions, args: {"keyword": "Search keywords, you should cut what you are searching for into several keywords and separate them with spaces. For example, 'What is the weather in Beijing today?' would be 'Beijing weather today'"} 2. browse: Useful for when you need to find something on or summarize a webpage., possibly including a webpage summary, HTML text, etc.,args: {"url":"Target link","task":"what you want to find on the page or empty string for a summary"} 3. chat: Generate content to user. When you need to generate content of finished all your objectives, please call this tool.,args: {"response": "Generated content"} - + Resources: 1. Internet access for searches and information gathering. - + Performance Evaluation: - 1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities. - 2. Constructively self-criticize your big-picture behavior constantly. - 3. Reflect on past decisions and strategies to refine your approach. + 1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities. + 2. Constructively self-criticize your big-picture behavior constantly. + 3. Reflect on past decisions and strategies to refine your approach. 4. Every tool has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps. 5. If you are not sure what to do, you can call the chat tool to ask the user for help. - - Preset: + + Preset: ` + this.systemPrompt.content + ` @@ -295,9 +329,9 @@ export class ChatHubBrowsingPrompt You should only respond in JSON format as described below. Response Format: - + {"name":"tool name","args":{"arg name":"value"}} - + Ensure the response can be parsed by javascript JSON.parse.` ) } @@ -328,7 +362,9 @@ export class ChatHubBrowsingPrompt let usedTokens = await this._countMessageTokens(result[0]) const inputTokens = - input.content && input.content.length > 0 ? await this.tokenCounter(input.content) : 0 + input.content && input.content.length > 0 + ? 
await this.tokenCounter(input.content) + : 0 usedTokens += inputTokens @@ -345,11 +381,15 @@ export class ChatHubBrowsingPrompt if (!this.messagesPlaceholder) { chatHistory = (chatHistory as BaseMessage[])[0].content - const chatHistoryTokens = await this.tokenCounter(chatHistory as string) + const chatHistoryTokens = await this.tokenCounter( + chatHistory as string + ) if (usedTokens + chatHistoryTokens > this.sendTokenLimit) { logger.warn( - `Used tokens: ${usedTokens + chatHistoryTokens} exceed limit: ${ + `Used tokens: ${ + usedTokens + chatHistoryTokens + } exceed limit: ${ this.sendTokenLimit }. Is too long history. Splitting the history.` ) @@ -361,10 +401,15 @@ export class ChatHubBrowsingPrompt if (longHistory.length > 0) { const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -372,15 +417,20 @@ export class ChatHubBrowsingPrompt formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' '), - chat_history: chatHistory - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' '), + chat_history: chatHistory + }) } } else { const formatChatHistory: BaseMessage[] = [] - for (const message of (chatHistory).slice(-100).reverse()) { + for (const message of (chatHistory) + .slice(-100) + .reverse()) { const messageTokens = await this._countMessageTokens(message) // reserve 100 tokens for the long history @@ -396,10 +446,15 @@ export class ChatHubBrowsingPrompt const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -407,14 +462,18 @@ export class ChatHubBrowsingPrompt formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' ') - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' ') + }) } - const formatMessagesPlaceholder = await this.messagesPlaceholder.formatMessages({ - chat_history: formatChatHistory - }) + const formatMessagesPlaceholder = + await this.messagesPlaceholder.formatMessages({ + chat_history: formatChatHistory + }) result.push(...formatMessagesPlaceholder) } @@ -430,15 +489,19 @@ export class ChatHubBrowsingPrompt result.push(input) } - logger.debug(`Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}`) + logger.debug( + `Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}` + ) logger.debug(`messages: ${JSON.stringify(result)}`) return result } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - partial(values: PartialValues): Promise> { + partial( + values: 
PartialValues + // eslint-disable-next-line @typescript-eslint/no-explicit-any + ): Promise> { throw new Error('Method not implemented.') } @@ -472,7 +535,8 @@ export class ChatHubOpenAIFunctionCallPrompt this.messagesPlaceholder = fields.messagesPlaceholder this.conversationSummaryPrompt = fields.conversationSummaryPrompt this.humanMessagePromptTemplate = - fields.humanMessagePromptTemplate ?? HumanMessagePromptTemplate.fromTemplate('{input}') + fields.humanMessagePromptTemplate ?? + HumanMessagePromptTemplate.fromTemplate('{input}') this.sendTokenLimit = fields.sendTokenLimit ?? 4096 } @@ -521,11 +585,15 @@ export class ChatHubOpenAIFunctionCallPrompt if (!this.messagesPlaceholder) { chatHistory = (chatHistory as BaseMessage[])[0].content - const chatHistoryTokens = await this.tokenCounter(chatHistory as string) + const chatHistoryTokens = await this.tokenCounter( + chatHistory as string + ) if (usedTokens + chatHistoryTokens > this.sendTokenLimit) { logger.warn( - `Used tokens: ${usedTokens + chatHistoryTokens} exceed limit: ${ + `Used tokens: ${ + usedTokens + chatHistoryTokens + } exceed limit: ${ this.sendTokenLimit }. Is too long history. Splitting the history.` ) @@ -537,10 +605,15 @@ export class ChatHubOpenAIFunctionCallPrompt if (longHistory.length > 0) { const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -548,15 +621,20 @@ export class ChatHubOpenAIFunctionCallPrompt formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' '), - chat_history: chatHistory - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' '), + chat_history: chatHistory + }) } } else { const formatChatHistory: BaseMessage[] = [] - for (const message of (chatHistory).slice(-100).reverse()) { + for (const message of (chatHistory) + .slice(-100) + .reverse()) { const messageTokens = await this._countMessageTokens(message) // reserve 100 tokens for the long history @@ -572,10 +650,15 @@ export class ChatHubOpenAIFunctionCallPrompt const formatDocuments: Document[] = [] for (const document of longHistory) { - const documentTokens = await this.tokenCounter(document.pageContent) + const documentTokens = await this.tokenCounter( + document.pageContent + ) // reserve 80 tokens for the format - if (usedTokens + documentTokens > this.sendTokenLimit - 80) { + if ( + usedTokens + documentTokens > + this.sendTokenLimit - 80 + ) { break } @@ -583,14 +666,18 @@ export class ChatHubOpenAIFunctionCallPrompt formatDocuments.push(document) } - formatConversationSummary = await this.conversationSummaryPrompt.format({ - long_history: formatDocuments.map((document) => document.pageContent).join(' ') - }) + formatConversationSummary = + await this.conversationSummaryPrompt.format({ + long_history: formatDocuments + .map((document) => document.pageContent) + .join(' ') + }) } - const formatMessagesPlaceholder = await this.messagesPlaceholder.formatMessages({ - chat_history: formatChatHistory - }) + const formatMessagesPlaceholder = + await 
this.messagesPlaceholder.formatMessages({ + chat_history: formatChatHistory + }) result.push(...formatMessagesPlaceholder) } @@ -604,15 +691,19 @@ export class ChatHubOpenAIFunctionCallPrompt result.push(input) } - logger.debug(`Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}`) + logger.debug( + `Used tokens: ${usedTokens} exceed limit: ${this.sendTokenLimit}` + ) logger.debug(`messages: ${JSON.stringify(result)}`) return result } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - partial(values: PartialValues): Promise> { + partial( + values: PartialValues + // eslint-disable-next-line @typescript-eslint/no-explicit-any + ): Promise> { throw new Error('Method not implemented.') } diff --git a/packages/core/src/llm-core/chain/wrapper_chain.ts b/packages/core/src/llm-core/chain/wrapper_chain.ts index b42b0d40..0f6b076b 100644 --- a/packages/core/src/llm-core/chain/wrapper_chain.ts +++ b/packages/core/src/llm-core/chain/wrapper_chain.ts @@ -49,7 +49,8 @@ export class ChatHubWrapperChain const requests: ChainValues = { [this.inputKey]: message } - const chatHistory = await this.historyMemory.loadMemoryVariables(requests) + const chatHistory = + await this.historyMemory.loadMemoryVariables(requests) requests['chat_history'] = chatHistory[this.historyMemory.memoryKey] @@ -71,13 +72,20 @@ export class ChatHubWrapperChain const responseString = response[this.chain.outputKeys[0]] - await this.historyMemory.saveContext({ input: message.content }, { output: responseString }) + await this.historyMemory.saveContext( + { input: message.content }, + { output: responseString } + ) const aiMessage = new AIMessage(responseString) response.message = aiMessage - if (response.extra != null && 'additionalReplyMessages' in response.extra) { - response.additionalReplyMessages = response.extra.additionalReplyMessages + if ( + response.extra != null && + 'additionalReplyMessages' in response.extra + ) { + response.additionalReplyMessages = + response.extra.additionalReplyMessages } return response diff --git a/packages/core/src/llm-core/chat/app.ts b/packages/core/src/llm-core/chat/app.ts index 4bfaaa9b..c7ff1f70 100644 --- a/packages/core/src/llm-core/chat/app.ts +++ b/packages/core/src/llm-core/chat/app.ts @@ -1,5 +1,9 @@ import { BaseChatMessageHistory, ChainValues } from 'langchain/schema' -import { ChatHubLLMCallArg, ChatHubLLMChainWrapper, SystemPrompts } from '../chain/base' +import { + ChatHubLLMCallArg, + ChatHubLLMChainWrapper, + SystemPrompts +} from '../chain/base' import { VectorStore, VectorStoreRetriever } from 'langchain/vectorstores/base' import { BufferMemory, @@ -7,7 +11,10 @@ import { VectorStoreRetrieverMemory } from 'langchain/memory' import { Embeddings } from 'langchain/embeddings/base' -import { emptyEmbeddings, inMemoryVectorStoreRetrieverProvider } from '../model/in_memory' +import { + emptyEmbeddings, + inMemoryVectorStoreRetrieverProvider +} from '../model/in_memory' import { createLogger } from '../../utils/logger' import { Context } from 'koishi' import { ConversationRoom } from '../../types' @@ -69,7 +76,9 @@ export class ChatInterface { } } - async createChatHubLLMChainWrapper(): Promise<[ChatHubLLMChainWrapper, ClientConfigWrapper]> { + async createChatHubLLMChainWrapper(): Promise< + [ChatHubLLMChainWrapper, ClientConfigWrapper] + > { const service = this.ctx.chathub.platform const [llmPlatform, llmModelName] = parseRawModelName(this._input.model) const currentLLMConfig = await service.randomConfig(llmPlatform) @@ -90,20 +99,33 @@ export class 
ChatInterface { if (error instanceof ChatHubError) { throw error } - throw new ChatHubError(ChatHubErrorCode.EMBEDDINGS_INIT_ERROR, error) + throw new ChatHubError( + ChatHubErrorCode.EMBEDDINGS_INIT_ERROR, + error + ) } try { - vectorStoreRetrieverMemory = await this._initVectorStoreMemory(service, embeddings) + vectorStoreRetrieverMemory = await this._initVectorStoreMemory( + service, + embeddings + ) } catch (error) { if (error instanceof ChatHubError) { throw error } - throw new ChatHubError(ChatHubErrorCode.VECTOR_STORE_INIT_ERROR, error) + throw new ChatHubError( + ChatHubErrorCode.VECTOR_STORE_INIT_ERROR, + error + ) } try { - ;[llm, modelInfo] = await this._initModel(service, currentLLMConfig.value, llmModelName) + ;[llm, modelInfo] = await this._initModel( + service, + currentLLMConfig.value, + llmModelName + ) } catch (error) { if (error instanceof ChatHubError) { throw error @@ -119,7 +141,10 @@ export class ChatInterface { if (error instanceof ChatHubError) { throw error } - throw new ChatHubError(ChatHubErrorCode.CHAT_HISTORY_INIT_ERROR, error) + throw new ChatHubError( + ChatHubErrorCode.CHAT_HISTORY_INIT_ERROR, + error + ) } try { @@ -160,7 +185,9 @@ export class ChatInterface { this._chains = {} - await ctx.database.remove('chathub_conversation', { id: room.conversationId }) + await ctx.database.remove('chathub_conversation', { + id: room.conversationId + }) await ctx.database.remove('chathub_room', { roomId: room.roomId @@ -189,8 +216,13 @@ export class ChatInterface { } } - private async _initEmbeddings(service: PlatformService): Promise { - if (this._input.longMemory !== true && this._input.chatMode === 'chat') { + private async _initEmbeddings( + service: PlatformService + ): Promise { + if ( + this._input.longMemory !== true && + this._input.chatMode === 'chat' + ) { return emptyEmbeddings } @@ -203,12 +235,16 @@ export class ChatInterface { const [platform, modelName] = parseRawModelName(this._input.embeddings) - logger.info(`ChatHubLLMChainWrapper init embeddings for ${platform}, ${modelName}`) + logger.info( + `ChatHubLLMChainWrapper init embeddings for ${platform}, ${modelName}` + ) const client = await service.randomClient(platform) if (client == null || client instanceof PlatformModelClient) { - logger.warn(`Platform ${platform} is not supported, falling back to fake embeddings`) + logger.warn( + `Platform ${platform} is not supported, falling back to fake embeddings` + ) return emptyEmbeddings } @@ -240,32 +276,43 @@ export class ChatInterface { if ( this._input.longMemory !== true || - (this._input.chatMode !== 'chat' && this._input.chatMode !== 'browsing') + (this._input.chatMode !== 'chat' && + this._input.chatMode !== 'browsing') ) { vectorStoreRetriever = - await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever({ - embeddings - }) + await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever( + { + embeddings + } + ) } else if (this._input.vectorStoreName == null) { logger.warn( 'Vector store is empty, falling back to fake vector store. Try check your config.' 
) vectorStoreRetriever = - await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever({ - embeddings - }) + await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever( + { + embeddings + } + ) } else { - const store = await service.createVectorStore(this._input.vectorStoreName, { - embeddings, - key: this._input.conversationId - }) + const store = await service.createVectorStore( + this._input.vectorStoreName, + { + embeddings, + key: this._input.conversationId + } + ) - vectorStoreRetriever = ScoreThresholdRetriever.fromVectorStore(store, { - minSimilarityScore: 0.85, // Finds results with at least this similarity score - maxK: 100, // The maximum K value to use. Use it based to your chunk size to make sure you don't run out of tokens - kIncrement: 2 // How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc. - }) + vectorStoreRetriever = ScoreThresholdRetriever.fromVectorStore( + store, + { + minSimilarityScore: 0.85, // Finds results with at least this similarity score + maxK: 100, // The maximum K value to use. Use it based on your chunk size to make sure you don't run out of tokens + kIncrement: 2 // How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc. + } + ) } this._vectorStoreRetrieverMemory = new VectorStoreRetrieverMemory({ @@ -285,7 +332,9 @@ export class ChatInterface { ): Promise<[ChatHubChatModel, ModelInfo]> { const platform = await service.getClient(config) - const llmInfo = (await platform.getModels()).find((model) => model.name === llmModelName) + const llmInfo = (await platform.getModels()).find( + (model) => model.name === llmModelName + ) const llmModel = platform.createModel(llmModelName) @@ -304,9 +353,11 @@ export class ChatInterface { const embeddings = emptyEmbeddings const vectorStoreRetriever = - await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever({ - embeddings - }) + await inMemoryVectorStoreRetrieverProvider.createVectorStoreRetriever( + { + embeddings + } + ) this._vectorStoreRetrieverMemory = new VectorStoreRetrieverMemory({ returnDocs: true, diff --git a/packages/core/src/llm-core/chat/default.ts b/packages/core/src/llm-core/chat/default.ts index a26a6cdd..3418bbd2 100644 --- a/packages/core/src/llm-core/chat/default.ts +++ b/packages/core/src/llm-core/chat/default.ts @@ -59,7 +59,8 @@ export async function defaultFactory(ctx: Context, service: PlatformService) { async (params) => { const tools = await selectAndCreateTools( service, - (name) => name.includes('search') || name.includes('web-browser'), + (name) => + name.includes('search') || name.includes('web-browser'), { model: params.model, embeddings: params.embeddings @@ -76,29 +77,42 @@ export async function defaultFactory(ctx: Context, service: PlatformService) { } if ( - (model._llmType() === 'openai' && model.modelName.includes('0613')) || + (model._llmType() === 'openai' && + model.modelName.includes('0613')) || model.modelName.includes('qwen') ) { - return ChatHubFunctionCallBrowsingChain.fromLLMAndTools(model, tools, options) + return ChatHubFunctionCallBrowsingChain.fromLLMAndTools( + model, + tools, + options + ) } else { - return ChatHubBrowsingChain.fromLLMAndTools(model, tools, options) + return ChatHubBrowsingChain.fromLLMAndTools( + model, + tools, + options + ) } } ) - service.registerChatChain('plugin', '插件模式(基于 LangChain 的 Agent)', async (params) => { - return ChatHubPluginChain.fromLLMAndTools( - params.model, - await
selectAndCreateTools(service, (_) => true, { - model: params.model, - embeddings: params.embeddings - }), - { - systemPrompts: params.systemPrompt, - historyMemory: params.historyMemory - } - ) - }) + service.registerChatChain( + 'plugin', + '插件模式(基于 LangChain 的 Agent)', + async (params) => { + return ChatHubPluginChain.fromLLMAndTools( + params.model, + await selectAndCreateTools(service, (_) => true, { + model: params.model, + embeddings: params.embeddings + }), + { + systemPrompts: params.systemPrompt, + historyMemory: params.historyMemory + } + ) + } + ) } function updateModels(ctx: Context, service: PlatformService) { @@ -110,7 +124,10 @@ function updateChatChains(ctx: Context, service: PlatformService) { } function updateEmbeddings(ctx: Context, service: PlatformService) { - ctx.schema.set('embeddings', Schema.union(getModelNames(service, ModelType.embeddings))) + ctx.schema.set( + 'embeddings', + Schema.union(getModelNames(service, ModelType.embeddings)) + ) } function updateVectorStores(ctx: Context, service: PlatformService) { @@ -135,10 +152,15 @@ function selectAndCreateTools( function getChatChainNames(service: PlatformService) { return service .getChatChains() - .map((info) => Schema.const(info.name).description(info.description ?? info.name)) + .map((info) => + Schema.const(info.name).description(info.description ?? info.name) + ) } -function getModelNames(service: PlatformService, type: ModelType = ModelType.llm) { +function getModelNames( + service: PlatformService, + type: ModelType = ModelType.llm +) { const models = service.getAllModels(type).concat('无') return models.map((model) => Schema.const(model).description(model)) diff --git a/packages/core/src/llm-core/memory/message/database_memory.ts b/packages/core/src/llm-core/memory/message/database_memory.ts index 24d499ae..053d36a6 100644 --- a/packages/core/src/llm-core/memory/message/database_memory.ts +++ b/packages/core/src/llm-core/memory/message/database_memory.ts @@ -63,7 +63,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { } async clear(): Promise { - await this._ctx.database.remove('chathub_message', { conversation: this.conversationId }) + await this._ctx.database.remove('chathub_message', { + conversation: this.conversationId + }) await this._ctx.database.upsert('chathub_conversation', [ { @@ -78,7 +80,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { } async delete(): Promise { - await this._ctx.database.remove('chathub_conversation', { id: this.conversationId }) + await this._ctx.database.remove('chathub_conversation', { + id: this.conversationId + }) } async updateAdditionalKwargs(key: string, value: string): Promise { @@ -99,7 +103,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { await this._saveConversation() } - async overrideAdditionalKwargs(kwargs: { [key: string]: string }): Promise { + async overrideAdditionalKwargs(kwargs: { + [key: string]: string + }): Promise { await this.loadConversation() this._additional_kwargs = Object.assign(this._additional_kwargs, kwargs) await this._saveConversation() @@ -119,7 +125,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { } while (currentMessageId != null) { - const currentMessage = queried.find((item) => item.id === currentMessageId) + const currentMessage = queried.find( + (item) => item.id === currentMessageId + ) if (!currentMessage) { throw new Error('currentMessage is null') @@ -151,7 +159,9 @@ export class 
KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { private async _loadConversation() { const conversation = ( - await this._ctx.database.get('chathub_conversation', { id: this.conversationId }) + await this._ctx.database.get('chathub_conversation', { + id: this.conversationId + }) )?.[0] if (conversation) { @@ -161,7 +171,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { ? JSON.parse(conversation.additional_kwargs) : {} } else { - await this._ctx.database.create('chathub_conversation', { id: this.conversationId }) + await this._ctx.database.create('chathub_conversation', { + id: this.conversationId + }) } if (!this._serializedChatHistory) { @@ -176,7 +188,9 @@ export class KoishiDataBaseChatMessageHistory extends BaseChatMessageHistory { } private async _saveMessage(message: BaseMessage) { - const lastedMessage = this._serializedChatHistory.find((item) => item.id === this._latestId) + const lastedMessage = this._serializedChatHistory.find( + (item) => item.id === this._latestId + ) const serializedMessage: ChatHubMessage = { id: uuidv4(), diff --git a/packages/core/src/llm-core/model/base.ts b/packages/core/src/llm-core/model/base.ts index ab5b6239..50e90f1f 100644 --- a/packages/core/src/llm-core/model/base.ts +++ b/packages/core/src/llm-core/model/base.ts @@ -25,7 +25,11 @@ export class ChatHubSaveableVectorStore return this._store.addDocuments(documents) } - similaritySearchVectorWithScore(query: number[], k: number, filter?: this['FilterType']) { + similaritySearchVectorWithScore( + query: number[], + k: number, + filter?: this['FilterType'] + ) { return this._store.similaritySearchVectorWithScore(query, k, filter) } diff --git a/packages/core/src/llm-core/model/in_memory.ts b/packages/core/src/llm-core/model/in_memory.ts index e10f42a7..1fea41fe 100644 --- a/packages/core/src/llm-core/model/in_memory.ts +++ b/packages/core/src/llm-core/model/in_memory.ts @@ -39,4 +39,5 @@ export class EmptyEmbeddings extends ChatHubBaseEmbeddings { export const emptyEmbeddings = new EmptyEmbeddings() -export const inMemoryVectorStoreRetrieverProvider = new InMemoryVectorStoreRetrieverProvider() +export const inMemoryVectorStoreRetrieverProvider = + new InMemoryVectorStoreRetrieverProvider() diff --git a/packages/core/src/llm-core/platform/api.ts b/packages/core/src/llm-core/platform/api.ts index b03e8555..689b199f 100644 --- a/packages/core/src/llm-core/platform/api.ts +++ b/packages/core/src/llm-core/platform/api.ts @@ -1,4 +1,8 @@ -import { BaseMessage, ChatGeneration, ChatGenerationChunk } from 'langchain/schema' +import { + BaseMessage, + ChatGeneration, + ChatGenerationChunk +} from 'langchain/schema' import { StructuredTool } from 'langchain/tools' export interface BaseRequestParams { @@ -80,7 +84,9 @@ export abstract class ModelRequester implements BaseRequester { return result } - abstract completionStream(params: ModelRequestParams): AsyncGenerator + abstract completionStream( + params: ModelRequestParams + ): AsyncGenerator abstract init(): Promise diff --git a/packages/core/src/llm-core/platform/config.ts b/packages/core/src/llm-core/platform/config.ts index 94bed165..5e29d87e 100644 --- a/packages/core/src/llm-core/platform/config.ts +++ b/packages/core/src/llm-core/platform/config.ts @@ -70,7 +70,8 @@ export class ClientConfigPool { return config } - this._currentLoadConfigIndex = (this._currentLoadConfigIndex + 1) % this._configs.length + this._currentLoadConfigIndex = + (this._currentLoadConfigIndex + 1) % this._configs.length 
loadConfigCount++ @@ -118,7 +119,10 @@ export class ClientConfigPool { for (const config of this._configs) { const md5 = config.md5() - const isAvailable = await this.ctx.cache.get('chathub/client_config', md5) + const isAvailable = await this.ctx.cache.get( + 'chathub/client_config', + md5 + ) config.isAvailable = isAvailable } diff --git a/packages/core/src/llm-core/platform/model.ts b/packages/core/src/llm-core/platform/model.ts index 44642be2..e81b5d52 100644 --- a/packages/core/src/llm-core/platform/model.ts +++ b/packages/core/src/llm-core/platform/model.ts @@ -1,5 +1,8 @@ import { Tiktoken } from 'js-tiktoken' -import { BaseChatModel, BaseChatModelCallOptions } from 'langchain/chat_models/base' +import { + BaseChatModel, + BaseChatModelCallOptions +} from 'langchain/chat_models/base' import { EmbeddingsRequester, EmbeddingsRequestParams, @@ -7,7 +10,12 @@ import { ModelRequestParams } from './api' import { CallbackManagerForLLMRun } from 'langchain/callbacks' -import { BaseMessage, ChatGeneration, ChatGenerationChunk, ChatResult } from 'langchain/schema' +import { + BaseMessage, + ChatGeneration, + ChatGenerationChunk, + ChatResult +} from 'langchain/schema' import { encodingForModel } from '../utils/tiktoken' import { getModelContextSize, @@ -107,7 +115,9 @@ export class ChatHubChatModel extends BaseChatModel { /** * Get the parameters used to invoke the model */ - invocationParams(options?: this['ParsedCallOptions']): ChatHubModelCallOptions { + invocationParams( + options?: this['ParsedCallOptions'] + ): ChatHubModelCallOptions { let maxTokens = options?.maxTokens ?? this._options.maxTokens if (maxTokens > this._maxModelContextSize || maxTokens < 0) { @@ -120,8 +130,10 @@ export class ChatHubChatModel extends BaseChatModel { model: options?.model ?? this._options.model, temperature: options?.temperature ?? this._options.temperature, topP: options?.topP ?? this._options.topP, - frequencyPenalty: options?.frequencyPenalty ?? this._options.frequencyPenalty, - presencePenalty: options?.presencePenalty ?? this._options.presencePenalty, + frequencyPenalty: + options?.frequencyPenalty ?? this._options.frequencyPenalty, + presencePenalty: + options?.presencePenalty ?? this._options.presencePenalty, n: options?.n ?? this._options.n, logitBias: options?.logitBias ?? this._options.logitBias, maxTokens: maxTokens === -1 ? 
undefined : maxTokens, @@ -165,7 +177,11 @@ export class ChatHubChatModel extends BaseChatModel { // fallback to max params.maxTokens = getModelContextSize(params.model) - const response = await this._generateWithRetry(messages, params, runManager) + const response = await this._generateWithRetry( + messages, + params, + runManager + ) return { generations: [response] @@ -181,7 +197,11 @@ export class ChatHubChatModel extends BaseChatModel { let response: ChatGeneration if (options.stream) { - const stream = this._streamResponseChunks(messages, options, runManager) + const stream = this._streamResponseChunks( + messages, + options, + runManager + ) for await (const chunk of stream) { response = chunk } @@ -198,7 +218,10 @@ export class ChatHubChatModel extends BaseChatModel { return this.caller.call(generateWithRetry) } - private async _withTimeout(func: () => Promise, timeout: number): Promise { + private async _withTimeout( + func: () => Promise, + timeout: number + ): Promise { // eslint-disable-next-line no-async-promise-executor return new Promise(async (resolve, reject) => { const timeoutId = setTimeout(() => { @@ -308,7 +331,9 @@ export class ChatHubChatModel extends BaseChatModel { private async _countMessageTokens(message: BaseMessage) { let result = (await this.getNumTokens(message.content)) + - (await this.getNumTokens(messageTypeToOpenAIRole(message._getType()))) + (await this.getNumTokens( + messageTypeToOpenAIRole(message._getType()) + )) if (message.name) { result += await this.getNumTokens(message.name) @@ -336,7 +361,9 @@ export class ChatHubChatModel extends BaseChatModel { if (!this.__encoding) { try { this.__encoding = await encodingForModel( - 'modelName' in this ? getModelNameForTiktoken(this.modelName as string) : 'gpt2' + 'modelName' in this + ? getModelNameForTiktoken(this.modelName as string) + : 'gpt2' ) } catch (error) { logger.warn( @@ -420,7 +447,9 @@ export class ChatHubEmbeddings extends ChatHubBaseEmbeddings { async embedDocuments(texts: string[]): Promise { const subPrompts = chunkArray( - this.stripNewLines ? texts.map((t) => t.replaceAll('\n', ' ')) : texts, + this.stripNewLines + ? texts.map((t) => t.replaceAll('\n', ' ')) + : texts, this.batchSize ) @@ -459,7 +488,11 @@ export class ChatHubEmbeddings extends ChatHubBaseEmbeddings { new Promise(async (resolve, reject) => { const timeout = setTimeout( () => { - reject(Error(`timeout when calling ${this.modelName} embeddings`)) + reject( + Error( + `timeout when calling ${this.modelName} embeddings` + ) + ) }, this.timeout ?? 
1000 * 30 ) diff --git a/packages/core/src/llm-core/platform/service.ts b/packages/core/src/llm-core/platform/service.ts index fcd4ade9..19ac37e3 100644 --- a/packages/core/src/llm-core/platform/service.ts +++ b/packages/core/src/llm-core/platform/service.ts @@ -38,7 +38,10 @@ export class PlatformService { registerClient( name: PlatformClientNames, - createClientFunction: (ctx: Context, config: ClientConfig) => BasePlatformClient + createClientFunction: ( + ctx: Context, + config: ClientConfig + ) => BasePlatformClient ) { if (PlatformService._createClientFunctions[name]) { throw new Error(`Client ${name} already exists`) @@ -84,15 +87,37 @@ export class PlatformService { } if (client instanceof PlatformModelClient) { - await this.ctx.parallel('chathub/model-removed', this, platform, client) + await this.ctx.parallel( + 'chathub/model-removed', + this, + platform, + client + ) } else if (client instanceof PlatformEmbeddingsClient) { - await this.ctx.parallel('chathub/embeddings-removed', this, platform, client) + await this.ctx.parallel( + 'chathub/embeddings-removed', + this, + platform, + client + ) } else if (client instanceof PlatformModelAndEmbeddingsClient) { - await this.ctx.parallel('chathub/embeddings-removed', this, platform, client) - await this.ctx.parallel('chathub/model-removed', this, platform, client) + await this.ctx.parallel( + 'chathub/embeddings-removed', + this, + platform, + client + ) + await this.ctx.parallel( + 'chathub/model-removed', + this, + platform, + client + ) } - delete PlatformService._platformClients[this._getClientConfigAsKey(config.value)] + delete PlatformService._platformClients[ + this._getClientConfigAsKey(config.value) + ] } delete PlatformService._configPools[platform] @@ -128,7 +153,11 @@ export class PlatformService { description, createFunction: createChatChainFunction } - await this.ctx.parallel('chathub/chat-chain-added', this, PlatformService._chatChains[name]) + await this.ctx.parallel( + 'chathub/chat-chain-added', + this, + PlatformService._chatChains[name] + ) return async () => await this.unregisterChatChain(name) } @@ -218,13 +247,15 @@ export class PlatformService { async getClient(config: ClientConfig) { return ( - PlatformService._platformClients[this._getClientConfigAsKey(config)] ?? - (await this.createClient(config.platform, config)) + PlatformService._platformClients[ + this._getClientConfigAsKey(config) + ] ?? 
(await this.createClient(config.platform, config)) ) } async createClient(platform: string, config: ClientConfig) { - const createClientFunction = PlatformService._createClientFunctions[platform] + const createClientFunction = + PlatformService._createClientFunctions[platform] if (!createClientFunction) { throw new Error(`Create client function ${platform} not found`) @@ -254,18 +285,40 @@ export class PlatformService { // filter existing models PlatformService._models[platform] = availableModels.concat( - models.filter((m) => !availableModels.some((am) => am.name === m.name)) + models.filter( + (m) => !availableModels.some((am) => am.name === m.name) + ) ) await sleep(50) if (client instanceof PlatformModelClient) { - await this.ctx.parallel('chathub/model-added', this, platform, client) + await this.ctx.parallel( + 'chathub/model-added', + this, + platform, + client + ) } else if (client instanceof PlatformEmbeddingsClient) { - await this.ctx.parallel('chathub/embeddings-added', this, platform, client) + await this.ctx.parallel( + 'chathub/embeddings-added', + this, + platform, + client + ) } else if (client instanceof PlatformModelAndEmbeddingsClient) { - await this.ctx.parallel('chathub/embeddings-added', this, platform, client) - await this.ctx.parallel('chathub/model-added', this, platform, client) + await this.ctx.parallel( + 'chathub/embeddings-added', + this, + platform, + client + ) + await this.ctx.parallel( + 'chathub/model-added', + this, + platform, + client + ) } return client @@ -290,7 +343,9 @@ export class PlatformService { } clients.push(client) - PlatformService._platformClients[this._getClientConfigAsKey(config.value)] = client + PlatformService._platformClients[ + this._getClientConfigAsKey(config.value) + ] = client } return clients @@ -347,7 +402,10 @@ declare module 'koishi' { platform: PlatformClientNames, client: BasePlatformClient | BasePlatformClient[] ) => Promise - 'chathub/vector-store-added': (service: PlatformService, name: string) => Promise + 'chathub/vector-store-added': ( + service: PlatformService, + name: string + ) => Promise 'chathub/chat-chain-removed': ( service: PlatformService, chain: ChatHubChainInfo @@ -357,7 +415,10 @@ declare module 'koishi' { platform: PlatformClientNames, client: BasePlatformClient ) => Promise - 'chathub/vector-store-removed': (service: PlatformService, name: string) => Promise + 'chathub/vector-store-removed': ( + service: PlatformService, + name: string + ) => Promise 'chathub/embeddings-removed': ( service: PlatformService, platform: PlatformClientNames, diff --git a/packages/core/src/llm-core/platform/types.ts b/packages/core/src/llm-core/platform/types.ts index ce4259e1..961ef3f5 100644 --- a/packages/core/src/llm-core/platform/types.ts +++ b/packages/core/src/llm-core/platform/types.ts @@ -11,7 +11,9 @@ import { Tool } from 'langchain/tools' export interface ChatHubChainInfo { name: string description?: string - createFunction: (params: CreateChatHubLLMChainParams) => Promise + createFunction: ( + params: CreateChatHubLLMChainParams + ) => Promise } export interface CreateToolParams { @@ -37,7 +39,9 @@ export interface CreateChatHubLLMChainParams { export type CreateToolFunction = (params: CreateToolParams) => Promise -export type CreateVectorStoreFunction = (params: CreateVectorStoreParams) => Promise +export type CreateVectorStoreFunction = ( + params: CreateVectorStoreParams +) => Promise export interface PlatformClientName { default: never diff --git a/packages/core/src/llm-core/prompt/preset_prompt_parse.ts 
b/packages/core/src/llm-core/prompt/preset_prompt_parse.ts index fd12c6fb..219f7d0e 100644 --- a/packages/core/src/llm-core/prompt/preset_prompt_parse.ts +++ b/packages/core/src/llm-core/prompt/preset_prompt_parse.ts @@ -1,4 +1,9 @@ -import { AIMessage, BaseMessage, HumanMessage, SystemMessage } from 'langchain/schema' +import { + AIMessage, + BaseMessage, + HumanMessage, + SystemMessage +} from 'langchain/schema' import { load } from 'js-yaml' export interface PresetTemplate { @@ -67,7 +72,9 @@ function loadTxtPreset(rawText: string): PresetTemplate { // logger.debug(`role: ${role}, content: ${content}`) if (role === 'keyword') { - triggerKeyword.push(...content.split(',').map((keyword) => keyword.trim())) + triggerKeyword.push( + ...content.split(',').map((keyword) => keyword.trim()) + ) } else if (role === 'format_user_prompt') { formatUserPromptString = content.trim() } else if (role === 'assistant' || role === 'ai' || role === 'model') { @@ -102,7 +109,10 @@ export function formatPresetTemplate( inputVariables: Record ): BaseMessage[] { presetTemplate.messages.forEach((message) => { - message.content = formatPresetTemplateString(message.content, inputVariables) + message.content = formatPresetTemplateString( + message.content, + inputVariables + ) }) return presetTemplate.messages diff --git a/packages/core/src/llm-core/utils/count_tokens.ts b/packages/core/src/llm-core/utils/count_tokens.ts index b6dc17ab..d033adff 100644 --- a/packages/core/src/llm-core/utils/count_tokens.ts +++ b/packages/core/src/llm-core/utils/count_tokens.ts @@ -117,14 +117,19 @@ interface CalculateMaxTokenProps { const logger = createLogger() -export const calculateMaxTokens = async ({ prompt, modelName }: CalculateMaxTokenProps) => { +export const calculateMaxTokens = async ({ + prompt, + modelName +}: CalculateMaxTokenProps) => { // fallback to approximate calculation if tiktoken is not available let numTokens = Math.ceil(prompt.length / 4) try { numTokens = (await encodingForModel(modelName)).encode(prompt).length } catch (error) { - logger.warn('Failed to calculate number of tokens, falling back to approximate count') + logger.warn( + 'Failed to calculate number of tokens, falling back to approximate count' + ) } const maxTokens = getModelContextSize(modelName) diff --git a/packages/core/src/llm-core/utils/tiktoken.ts b/packages/core/src/llm-core/utils/tiktoken.ts index 7956d996..79c5b984 100644 --- a/packages/core/src/llm-core/utils/tiktoken.ts +++ b/packages/core/src/llm-core/utils/tiktoken.ts @@ -17,9 +17,12 @@ export async function getEncoding( } ) { if (!(encoding in cache)) { - cache[encoding] = await chathubFetch(`https://tiktoken.pages.dev/js/${encoding}.json`, { - signal: options?.signal - }) + cache[encoding] = await chathubFetch( + `https://tiktoken.pages.dev/js/${encoding}.json`, + { + signal: options?.signal + } + ) .then((res) => res.json() as unknown as TiktokenBPE) .catch((e) => { delete cache[encoding] diff --git a/packages/core/src/middleware.ts b/packages/core/src/middleware.ts index c8fb5944..8f4a7853 100644 --- a/packages/core/src/middleware.ts +++ b/packages/core/src/middleware.ts @@ -13,7 +13,11 @@ export async function middleware(ctx: Context, config: Config) { } const middleware: { - apply: (ctx: Context, config: Config, chain: ChatChain) => PromiseLike | void + apply: ( + ctx: Context, + config: Config, + chain: ChatChain + ) => PromiseLike | void } = await require(`./middlewares/${file}`) if (middleware.apply) { diff --git a/packages/core/src/middlewares/add_preset.ts 
b/packages/core/src/middlewares/add_preset.ts index 1c73bf81..ae4585b4 100644 --- a/packages/core/src/middlewares/add_preset.ts +++ b/packages/core/src/middlewares/add_preset.ts @@ -9,7 +9,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('add_preset', async (session, context) => { const { command } = context - if (command !== 'add_preset') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'add_preset') + return ChainMiddlewareRunStatus.SKIPPED const presetName = context.options.addPreset @@ -18,7 +19,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { try { await preset.getPreset(presetName) - await context.send('该预设关键词已经和其他预设关键词冲突,请更换其他关键词重试哦') + await context.send( + '该预设关键词已经和其他预设关键词冲突,请更换其他关键词重试哦' + ) return ChainMiddlewareRunStatus.STOP } catch (e) {} @@ -44,7 +47,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const yamlText = dump(presetObject) - await fs.writeFile(preset.resolvePresetDir() + `/${presetName}.yml`, yamlText) + await fs.writeFile( + preset.resolvePresetDir() + `/${presetName}.yml`, + yamlText + ) context.message = `预设添加成功,预设名称为: ${presetName}。 请调用预设列表命令查看。` diff --git a/packages/core/src/middlewares/add_room_to_group.ts b/packages/core/src/middlewares/add_room_to_group.ts index bd41b954..31732276 100644 --- a/packages/core/src/middlewares/add_room_to_group.ts +++ b/packages/core/src/middlewares/add_room_to_group.ts @@ -12,19 +12,26 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('add_room_to_group', async (session, context) => { const { command } = context - if (command !== 'add_room_to_group') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'add_room_to_group') + return ChainMiddlewareRunStatus.SKIPPED // eslint-disable-next-line @typescript-eslint/naming-convention let { room: targetRoom, room_resolve } = context.options if (targetRoom == null && room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(room_resolve?.name) targetRoom = rooms.find( - (room) => room.roomName === room_resolve?.name || room.roomId === roomId + (room) => + room.roomName === room_resolve?.name || + room.roomId === roomId ) } @@ -33,7 +40,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - if (targetRoom.roomMasterId !== session.userId && !(await checkAdmin(session))) { + if ( + targetRoom.roomMasterId !== session.userId && + !(await checkAdmin(session)) + ) { context.message = '你不是房间的房主,无法执行此操作。' return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/allow_reply.ts b/packages/core/src/middlewares/allow_reply.ts index 130202bf..5f4eaac7 100644 --- a/packages/core/src/middlewares/allow_reply.ts +++ b/packages/core/src/middlewares/allow_reply.ts @@ -19,7 +19,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { session.parsed.appel && config.allowAtReply ? true : // bot名字 - session.content.startsWith(config.botName) && config.isNickname + session.content.startsWith(config.botName) && + config.isNickname ? 
true : // 随机回复 Math.random() < config.randomReplyFrequency @@ -28,9 +29,14 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { context.command != null if (result) { - const notReply = await ctx.serial('chathub/before-check-sender', session) + const notReply = await ctx.serial( + 'chathub/before-check-sender', + session + ) - return notReply ? ChainMiddlewareRunStatus.STOP : ChainMiddlewareRunStatus.CONTINUE + return notReply + ? ChainMiddlewareRunStatus.STOP + : ChainMiddlewareRunStatus.CONTINUE } else { return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/black_list.ts b/packages/core/src/middlewares/black_list.ts index 997f2029..e7d7eb27 100644 --- a/packages/core/src/middlewares/black_list.ts +++ b/packages/core/src/middlewares/black_list.ts @@ -10,7 +10,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('black_list', async (session, context) => { const resolved = await session.resolve(config.blackList) if (resolved === true) { - logger.debug(`[黑名单] ${session.username}(${session.userId}): ${session.content}`) + logger.debug( + `[黑名单] ${session.username}(${session.userId}): ${session.content}` + ) context.message = config.blockText return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/censor.ts b/packages/core/src/middlewares/censor.ts index f494a032..c8d3a305 100644 --- a/packages/core/src/middlewares/censor.ts +++ b/packages/core/src/middlewares/censor.ts @@ -12,7 +12,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const message = context.options.responseMessage - message.content = await ctx.censor.transform(message.content, session) + message.content = await ctx.censor.transform( + message.content, + session + ) }) .after('request_model') // .before("lifecycle-request_model") diff --git a/packages/core/src/middlewares/chat_time_limit_check.ts b/packages/core/src/middlewares/chat_time_limit_check.ts index 15753247..a35aebde 100644 --- a/packages/core/src/middlewares/chat_time_limit_check.ts +++ b/packages/core/src/middlewares/chat_time_limit_check.ts @@ -43,7 +43,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { // 用满了 if (chatLimitOnDataBase.count >= chatLimitComputed) { const time = Math.ceil( - (1000 * 60 * 60 - (Date.now() - chatLimitOnDataBase.time)) / 1000 / 60 + (1000 * 60 * 60 - + (Date.now() - chatLimitOnDataBase.time)) / + 1000 / + 60 ) context.message = `你的聊天次数已经用完了喵,还需要等待 ${time} 分钟才能继续聊天喵 >_<` diff --git a/packages/core/src/middlewares/check_room.ts b/packages/core/src/middlewares/check_room.ts index a5e5cda5..3d0e363f 100644 --- a/packages/core/src/middlewares/check_room.ts +++ b/packages/core/src/middlewares/check_room.ts @@ -19,14 +19,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (room == null && rooms.length > 0) { room = rooms[Math.floor(Math.random() * rooms.length)] await switchConversationRoom(ctx, session, room.roomId) - await context.send(`检测到你没有指定房间,已为你随机切换到房间 ${room.roomName}`) + await context.send( + `检测到你没有指定房间,已为你随机切换到房间 ${room.roomName}` + ) } else if (room == null && rooms.length === 0) { context.message = '你还没有加入任何房间,请先加入房间。' return ChainMiddlewareRunStatus.STOP } else if ( !rooms.some( (searchRoom) => - searchRoom.roomName === room.roomName || searchRoom.roomId === room.roomId + searchRoom.roomName === room.roomName || + searchRoom.roomId === room.roomId ) ) { context.message = `你没有加入此房间,请先加入房间 ${room.roomName}。` diff --git 
a/packages/core/src/middlewares/clear_room.ts b/packages/core/src/middlewares/clear_room.ts index 7d698ae2..8471b768 100644 --- a/packages/core/src/middlewares/clear_room.ts +++ b/packages/core/src/middlewares/clear_room.ts @@ -1,21 +1,30 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, getAllJoinedConversationRoom, getConversationRoomUser } from '../chains/rooms' +import { + checkAdmin, + getAllJoinedConversationRoom, + getConversationRoomUser +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('clear_room', async (session, context) => { const { command } = context - if (command !== 'clear_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'clear_room') + return ChainMiddlewareRunStatus.SKIPPED let targetRoom = context.options.room if (targetRoom == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -31,9 +40,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - const userInfo = await getConversationRoomUser(ctx, session, targetRoom, session.userId) + const userInfo = await getConversationRoomUser( + ctx, + session, + targetRoom, + session.userId + ) - if (userInfo.roomPermission === 'member' && !(await checkAdmin(session))) { + if ( + userInfo.roomPermission === 'member' && + !(await checkAdmin(session)) + ) { context.message = `你不是房间 ${targetRoom.roomName} 的管理员,无法清除聊天记录。` return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/cooldown_time.ts b/packages/core/src/middlewares/cooldown_time.ts index 9d3558e6..8d8fc937 100644 --- a/packages/core/src/middlewares/cooldown_time.ts +++ b/packages/core/src/middlewares/cooldown_time.ts @@ -10,7 +10,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const currentChatTime = Date.now() if (currentChatTime - lastChatTime < config.msgCooldown * 1000) { const waitTime = - (config.msgCooldown * 1000 - (currentChatTime - lastChatTime)) / 1000 + (config.msgCooldown * 1000 - + (currentChatTime - lastChatTime)) / + 1000 // logger.debug(`[冷却中:${waitTime}s] ${session.username}(${session.userId}): ${session.content}`) diff --git a/packages/core/src/middlewares/create_room.ts b/packages/core/src/middlewares/create_room.ts index 8673c316..e2e08116 100644 --- a/packages/core/src/middlewares/create_room.ts +++ b/packages/core/src/middlewares/create_room.ts @@ -7,7 +7,10 @@ import { ChatChain } from '../chains/chain' import { createLogger } from '../utils/logger' -import { createConversationRoom, getConversationRoomCount } from '../chains/rooms' +import { + createConversationRoom, + getConversationRoomCount +} from '../chains/rooms' import { ConversationRoom } from '../types' import { randomUUID } from 'crypto' import { ModelType } from '../llm-core/platform/types' @@ -25,20 +28,24 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { options: { room_resolve } } = context - if (command !== 'create_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'create_room') + return ChainMiddlewareRunStatus.SKIPPED if (!room_resolve) return ChainMiddlewareRunStatus.SKIPPED - let { model, preset, name, chatMode, 
password, visibility } = room_resolve + let { model, preset, name, chatMode, password, visibility } = + room_resolve logger.debug( `[create_room] model: ${model}, length: ${ - Object.values(room_resolve).filter((value) => value != null).length + Object.values(room_resolve).filter((value) => value != null) + .length }, visibility: ${visibility}` ) if ( - Object.values(room_resolve).filter((value) => value != null).length > 0 && + Object.values(room_resolve).filter((value) => value != null) + .length > 0 && model != null && visibility !== 'template' ) { @@ -58,7 +65,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { room_resolve.name = room_resolve.name ?? '未命名房间' room_resolve.chatMode = room_resolve.chatMode ?? 'chat' room_resolve.password = room_resolve.password ?? null - room_resolve.visibility = room_resolve.visibility ?? 'private' + room_resolve.visibility = + room_resolve.visibility ?? 'private' room_resolve.model = room_resolve.model ?? null await createRoom(ctx, context, session, context.options) @@ -75,7 +83,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { // 1. 输入房间名 if (name == null) { - await context.send('请输入你需要使用的房间名,如:' + '我的房间') + await context.send( + '请输入你需要使用的房间名,如:' + '我的房间' + ) const result = await session.prompt(1000 * 30) @@ -107,7 +117,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { while (true) { if (model == null) { - await context.send('请输入你需要使用的模型,如:' + 'openai/gpt-3.5-turbo') + await context.send( + '请输入你需要使用的模型,如:' + 'openai/gpt-3.5-turbo' + ) const result = await session.prompt(1000 * 30) @@ -235,7 +247,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { break } - await context.send(`无法识别可见性:${visibility},请重新输入。`) + await context.send( + `无法识别可见性:${visibility},请重新输入。` + ) } // 5. 聊天模式 @@ -273,7 +287,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { chatMode = room_resolve.chatMode // 6. 密码 - if (session.isDirect && visibility === 'private' && password == null) { + if ( + session.isDirect && + visibility === 'private' && + password == null + ) { await context.send( '请输入你需要使用的密码,如:123456。如果不输入密码请回复 N(则不设置密码)。否则回复你需要使用的密码。' ) @@ -304,7 +322,8 @@ async function createRoom( session: Session, options: ChainMiddlewareContextOptions ) { - const { model, preset, name, chatMode, password, visibility } = options.room_resolve + const { model, preset, name, chatMode, password, visibility } = + options.room_resolve const createRoom: ConversationRoom = { conversationId: randomUUID(), diff --git a/packages/core/src/middlewares/delete_preset.ts b/packages/core/src/middlewares/delete_preset.ts index 7bd40c38..2dba9406 100644 --- a/packages/core/src/middlewares/delete_preset.ts +++ b/packages/core/src/middlewares/delete_preset.ts @@ -12,7 +12,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('delete_preset', async (session, context) => { const { command } = context - if (command !== 'delete_preset') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'delete_preset') + return ChainMiddlewareRunStatus.SKIPPED const presetName = context.options.deletePreset const preset = ctx.chathub.preset @@ -31,7 +32,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } } catch (e) { - await context.send('找不到该预设!请检查你是否输入了正确的预设?') + await context.send( + '找不到该预设!请检查你是否输入了正确的预设?' 
+ ) return ChainMiddlewareRunStatus.STOP } @@ -56,7 +59,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const defaultPreset = await preset.getDefaultPreset() - logger.debug(`${context.options.senderInfo} ${defaultPreset.triggerKeyword[0]}`) + logger.debug( + `${context.options.senderInfo} ${defaultPreset.triggerKeyword[0]}` + ) const roomList = await ctx.database.get('chathub_room', { preset: presetName diff --git a/packages/core/src/middlewares/delete_room.ts b/packages/core/src/middlewares/delete_room.ts index 673d87f8..ba2caad9 100644 --- a/packages/core/src/middlewares/delete_room.ts +++ b/packages/core/src/middlewares/delete_room.ts @@ -1,21 +1,30 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, deleteConversationRoom, getAllJoinedConversationRoom } from '../chains/rooms' +import { + checkAdmin, + deleteConversationRoom, + getAllJoinedConversationRoom +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('delete_room', async (session, context) => { const { command } = context - if (command !== 'delete_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'delete_room') + return ChainMiddlewareRunStatus.SKIPPED let targetRoom = context.options.room if (targetRoom == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -31,7 +40,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - if (targetRoom.roomMasterId !== session.userId && !(await checkAdmin(session))) { + if ( + targetRoom.roomMasterId !== session.userId && + !(await checkAdmin(session)) + ) { context.message = '你不是房间的房主,无法删除房间。' return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/invite_room.ts b/packages/core/src/middlewares/invite_room.ts index 2dca4a12..9d250c24 100644 --- a/packages/core/src/middlewares/invite_room.ts +++ b/packages/core/src/middlewares/invite_room.ts @@ -1,14 +1,19 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, getConversationRoomUser, joinConversationRoom } from '../chains/rooms' +import { + checkAdmin, + getConversationRoomUser, + joinConversationRoom +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('invite_room', async (session, context) => { const { command } = context - if (command !== 'invite_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'invite_room') + return ChainMiddlewareRunStatus.SKIPPED const targetRoom = context.options.room @@ -18,9 +23,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - const userInfo = await getConversationRoomUser(ctx, session, targetRoom, session.userId) + const userInfo = await getConversationRoomUser( + ctx, + session, + targetRoom, + session.userId + ) - if (userInfo.roomPermission === 'member' && !(await checkAdmin(session))) { + if ( + userInfo.roomPermission === 'member' && + !(await checkAdmin(session)) + ) { context.message = `你不是房间 ${targetRoom.roomName} 的管理员,无法邀请用户加入。` 
return ChainMiddlewareRunStatus.STOP } @@ -28,10 +41,18 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const targetUser = context.options.resolve_user.id as string[] for (const user of targetUser) { - await joinConversationRoom(ctx, session, targetRoom, session.isDirect, user) + await joinConversationRoom( + ctx, + session, + targetRoom, + session.isDirect, + user + ) } - context.message = `已邀请用户 ${targetUser.join(',')} 加入房间 ${targetRoom.roomName}` + context.message = `已邀请用户 ${targetUser.join(',')} 加入房间 ${ + targetRoom.roomName + }` return ChainMiddlewareRunStatus.STOP }) diff --git a/packages/core/src/middlewares/join_room.ts b/packages/core/src/middlewares/join_room.ts index 90ed20f7..d405d9a1 100644 --- a/packages/core/src/middlewares/join_room.ts +++ b/packages/core/src/middlewares/join_room.ts @@ -1,7 +1,11 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, joinConversationRoom, queryConversationRoom } from '../chains/rooms' +import { + checkAdmin, + joinConversationRoom, + queryConversationRoom +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain @@ -46,7 +50,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (await checkAdmin(session)) { // 空的是因为 - } else if (targetRoom.visibility === 'private' && targetRoom.password == null) { + } else if ( + targetRoom.visibility === 'private' && + targetRoom.password == null + ) { context.message = '该房间为私密房间。房主未设置密码加入,只能由房主邀请进入,无法加入。' return ChainMiddlewareRunStatus.STOP @@ -55,12 +62,15 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { targetRoom.password != null && !session.isDirect ) { - context.message = '该房间为私密房间。由于需要输入密码,你无法在群聊中加入。' + context.message = + '该房间为私密房间。由于需要输入密码,你无法在群聊中加入。' return ChainMiddlewareRunStatus.STOP } if (targetRoom.password) { - await context.send(`请输入密码来加入房间 ${targetRoom.roomName}。`) + await context.send( + `请输入密码来加入房间 ${targetRoom.roomName}。` + ) const result = await session.prompt(1000 * 30) if (result == null) { diff --git a/packages/core/src/middlewares/kick_member.ts b/packages/core/src/middlewares/kick_member.ts index be91545f..917f9b0d 100644 --- a/packages/core/src/middlewares/kick_member.ts +++ b/packages/core/src/middlewares/kick_member.ts @@ -1,14 +1,19 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, getConversationRoomUser, kickUserFromConversationRoom } from '../chains/rooms' +import { + checkAdmin, + getConversationRoomUser, + kickUserFromConversationRoom +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('kick_member', async (session, context) => { const { command } = context - if (command !== 'kick_member') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'kick_member') + return ChainMiddlewareRunStatus.SKIPPED const targetRoom = context.options.room @@ -18,9 +23,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - const userInfo = await getConversationRoomUser(ctx, session, targetRoom, session.userId) + const userInfo = await getConversationRoomUser( + ctx, + session, + targetRoom, + session.userId + ) - if (userInfo.roomPermission === 'member' && !(await checkAdmin(session))) { + if ( + 
userInfo.roomPermission === 'member' && + !(await checkAdmin(session)) + ) { context.message = `你不是房间 ${targetRoom.roomName} 的管理员,无法踢出用户。` return ChainMiddlewareRunStatus.STOP } @@ -28,10 +41,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const targetUser = context.options.resolve_user.id as string[] for (const user of targetUser) { - await kickUserFromConversationRoom(ctx, session, targetRoom, user) + await kickUserFromConversationRoom( + ctx, + session, + targetRoom, + user + ) } - context.message = `已将以下用户踢出房间 ${targetRoom.roomName}:${targetUser.join(',')}` + context.message = `已将以下用户踢出房间 ${ + targetRoom.roomName + }:${targetUser.join(',')}` return ChainMiddlewareRunStatus.STOP }) diff --git a/packages/core/src/middlewares/leave_room.ts b/packages/core/src/middlewares/leave_room.ts index 63cc9f49..d1127f4c 100644 --- a/packages/core/src/middlewares/leave_room.ts +++ b/packages/core/src/middlewares/leave_room.ts @@ -12,14 +12,19 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('leave_room', async (session, context) => { const { command } = context - if (command !== 'leave_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'leave_room') + return ChainMiddlewareRunStatus.SKIPPED let targetRoom = context.options.room if (targetRoom == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) diff --git a/packages/core/src/middlewares/list_all_embeddings.ts b/packages/core/src/middlewares/list_all_embeddings.ts index 3f48bac1..a9424102 100644 --- a/packages/core/src/middlewares/list_all_embeddings.ts +++ b/packages/core/src/middlewares/list_all_embeddings.ts @@ -22,7 +22,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { options: { page, limit } } = context - if (command !== 'list_embeddings') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'list_embeddings') + return ChainMiddlewareRunStatus.SKIPPED const models = service.getAllModels(ModelType.embeddings) diff --git a/packages/core/src/middlewares/list_all_model.ts b/packages/core/src/middlewares/list_all_model.ts index 98472776..4770f77f 100644 --- a/packages/core/src/middlewares/list_all_model.ts +++ b/packages/core/src/middlewares/list_all_model.ts @@ -23,7 +23,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { options: { page, limit } } = context - if (command !== 'list_model') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'list_model') + return ChainMiddlewareRunStatus.SKIPPED const models = services.getAllModels(ModelType.llm) diff --git a/packages/core/src/middlewares/list_all_preset.ts b/packages/core/src/middlewares/list_all_preset.ts index 13dffb0a..49a952e3 100644 --- a/packages/core/src/middlewares/list_all_preset.ts +++ b/packages/core/src/middlewares/list_all_preset.ts @@ -20,7 +20,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { } = context const preset = ctx.chathub.preset - if (command !== 'list_preset') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'list_preset') + return ChainMiddlewareRunStatus.SKIPPED const presets = await preset.getAllPreset() diff --git a/packages/core/src/middlewares/list_all_vectorstore.ts b/packages/core/src/middlewares/list_all_vectorstore.ts index 0bf4d7f3..1d1fdda7 100644 --- 
a/packages/core/src/middlewares/list_all_vectorstore.ts +++ b/packages/core/src/middlewares/list_all_vectorstore.ts @@ -21,7 +21,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { options: { page, limit } } = context - if (command !== 'list_vector_store') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'list_vector_store') + return ChainMiddlewareRunStatus.SKIPPED const vectorStoreProviders = service.getVectorStoreRetrievers() diff --git a/packages/core/src/middlewares/list_room.ts b/packages/core/src/middlewares/list_room.ts index c38095cd..0358684b 100644 --- a/packages/core/src/middlewares/list_room.ts +++ b/packages/core/src/middlewares/list_room.ts @@ -25,11 +25,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const rooms = await getAllJoinedConversationRoom(ctx, session) - const key = session.isDirect ? session.userId : session.guildId + '-' + session.userId + const key = session.isDirect + ? session.userId + : session.guildId + '-' + session.userId await pagination.push(rooms, key) - context.message = await pagination.getFormattedPage(page, limit, key) + context.message = await pagination.getFormattedPage( + page, + limit, + key + ) return ChainMiddlewareRunStatus.STOP }) diff --git a/packages/core/src/middlewares/mute_user.ts b/packages/core/src/middlewares/mute_user.ts index de326ec1..26a0767d 100644 --- a/packages/core/src/middlewares/mute_user.ts +++ b/packages/core/src/middlewares/mute_user.ts @@ -21,7 +21,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (room == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -37,9 +41,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - const userInfo = await getConversationRoomUser(ctx, session, room, session.userId) + const userInfo = await getConversationRoomUser( + ctx, + session, + room, + session.userId + ) - if (userInfo.roomPermission === 'member' && !(await checkAdmin(session))) { + if ( + userInfo.roomPermission === 'member' && + !(await checkAdmin(session)) + ) { context.message = `你不是房间 ${room.roomName} 的管理员,无法禁言用户。` return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/read_chat_message.ts b/packages/core/src/middlewares/read_chat_message.ts index 8c6c447e..d02dbcb7 100644 --- a/packages/core/src/middlewares/read_chat_message.ts +++ b/packages/core/src/middlewares/read_chat_message.ts @@ -6,7 +6,8 @@ import { chathubFetch } from '../utils/request' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('read_chat_message', async (session, context) => { - let message = context.command != null ? context.message : session.elements + let message = + context.command != null ? 
context.message : session.elements message = message as h[] | string @@ -14,7 +15,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { message = [h.text(message)] } - const transformedMessage = ctx.chathub.messageTransformer.transform(session, message) + const transformedMessage = ctx.chathub.messageTransformer.transform( + session, + message + ) if (transformedMessage.content.length < 1) { return ChainMiddlewareRunStatus.STOP @@ -26,43 +30,52 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { }) .after('lifecycle-prepare') - ctx.chathub.messageTransformer.intercept('text', async (session, element, message) => { - message.content += element.attrs['content'] - }) + ctx.chathub.messageTransformer.intercept( + 'text', + async (session, element, message) => { + message.content += element.attrs['content'] + } + ) - ctx.chathub.messageTransformer.intercept('at', async (session, element, message) => { - const name = element.attrs['name'] - const id = element.attrs['id'] + ctx.chathub.messageTransformer.intercept( + 'at', + async (session, element, message) => { + const name = element.attrs['name'] + const id = element.attrs['id'] - if (name && id !== session.bot.selfId) { - message.content += `@${name}` + if (name && id !== session.bot.selfId) { + message.content += `@${name}` + } } - }) + ) - ctx.chathub.messageTransformer.intercept('image', async (session, element, message) => { - const images: string[] = message.additional_kwargs.images ?? [] + ctx.chathub.messageTransformer.intercept( + 'image', + async (session, element, message) => { + const images: string[] = message.additional_kwargs.images ?? [] - const url = element.attrs['url'] as string + const url = element.attrs['url'] as string - // logger.debug(`image url: ${url}`) + // logger.debug(`image url: ${url}`) - if (url.startsWith('data:image')) { - images.push(url) - } else { - const response = await chathubFetch(url) + if (url.startsWith('data:image')) { + images.push(url) + } else { + const response = await chathubFetch(url) - // support any text - const ext = url.match(/\.([^.]*)$/)?.[1] + // support any text + const ext = url.match(/\.([^.]*)$/)?.[1] - const buffer = await response.arrayBuffer() + const buffer = await response.arrayBuffer() - const base64 = Buffer.from(buffer).toString('base64') + const base64 = Buffer.from(buffer).toString('base64') - images.push(`data:image/${ext ?? 'png'};base64,${base64}`) - } + images.push(`data:image/${ext ?? 
'png'};base64,${base64}`) + } - message.additional_kwargs.images = images - }) + message.additional_kwargs.images = images + } + ) } declare module '../chains/chain' { diff --git a/packages/core/src/middlewares/request_model.ts b/packages/core/src/middlewares/request_model.ts index 622078bd..1b25b1f8 100644 --- a/packages/core/src/middlewares/request_model.ts +++ b/packages/core/src/middlewares/request_model.ts @@ -1,6 +1,10 @@ import { Context, Session, sleep } from 'koishi' import { Config } from '../config' -import { ChainMiddlewareContext, ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' +import { + ChainMiddlewareContext, + ChainMiddlewareRunStatus, + ChatChain +} from '../chains/chain' import { createLogger } from '../utils/logger' import { Message } from '../types' import { formatPresetTemplateString } from '../llm-core/prompt' @@ -14,7 +18,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('request_model', async (session, context) => { const { room, inputMessage } = context.options - const presetTemplate = await ctx.chathub.preset.getPreset(room.preset) + const presetTemplate = await ctx.chathub.preset.getPreset( + room.preset + ) if (presetTemplate.formatUserPromptString != null) { context.message = formatPresetTemplateString( @@ -40,9 +46,15 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { flow.subscribe(async (text) => { bufferText.text = text - await handleMessage(session, config, context, bufferText, async (text) => { - await sendMessage(context, text) - }) + await handleMessage( + session, + config, + context, + bufferText, + async (text) => { + await sendMessage(context, text) + } + ) }) setTimeout(async () => { @@ -53,7 +65,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { inputMessage.conversationId = room.conversationId inputMessage.name = - session.author?.nickname ?? session.author?.userId ?? session.username + session.author?.nickname ?? + session.author?.userId ?? 
+ session.username try { responseMessage = await ctx.chathub.chat( @@ -83,7 +97,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { ) } catch (e) { if (e.message.includes('output values have 1 keys')) { - throw new ChatHubError(ChatHubErrorCode.MODEL_RESPONSE_IS_EMPTY) + throw new ChatHubError( + ChatHubErrorCode.MODEL_RESPONSE_IS_EMPTY + ) } else { throw e } @@ -111,7 +127,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { }) .after('lifecycle-request_model') - const sendMessage = async (context: ChainMiddlewareContext, text: string) => { + const sendMessage = async ( + context: ChainMiddlewareContext, + text: string + ) => { if (text == null || text.trim() === '') { return } @@ -158,7 +177,11 @@ async function handleMessage( await sleep(100) } else if (lastText !== text && diffText !== '') { try { - await session.bot.editMessage(session.channelId, currentMessageId, text) + await session.bot.editMessage( + session.channelId, + currentMessageId, + text + ) } catch (e) { logger.error(e) } @@ -177,7 +200,10 @@ async function handleMessage( const sendTogglePunctuations = ['.', '!', '!', '?', '?'] - if (finish && (diffText.trim().length > 0 || bufferText.trim().length > 0)) { + if ( + finish && + (diffText.trim().length > 0 || bufferText.trim().length > 0) + ) { bufferText = bufferText + diffText await sendMessage(bufferText) @@ -194,7 +220,8 @@ async function handleMessage( if (punctuations.includes(char)) { if (bufferText.trim().length > 0) { await sendMessage( - bufferText.trimStart() + (sendTogglePunctuations.includes(char) ? char : '') + bufferText.trimStart() + + (sendTogglePunctuations.includes(char) ? char : '') ) } bufferText = '' diff --git a/packages/core/src/middlewares/resolve_model.ts b/packages/core/src/middlewares/resolve_model.ts index 1794b626..ee7d79b6 100644 --- a/packages/core/src/middlewares/resolve_model.ts +++ b/packages/core/src/middlewares/resolve_model.ts @@ -28,12 +28,17 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { ) } - if (models.length === 0 || models.find((x) => x.name === modelName) == null) { + if ( + models.length === 0 || + models.find((x) => x.name === modelName) == null + ) { // 这比较难,强行 fallback 到推荐模型 const recommendModel = platform + '/' + models[0].name - logger.debug(`[resolve_model] recommendModel: ${recommendModel}`) + logger.debug( + `[resolve_model] recommendModel: ${recommendModel}` + ) await context.send( '检查到您可能更新了某些配置,已无法使用之前设置的旧的模型,已为您自动切换到其他可用模型。' diff --git a/packages/core/src/middlewares/resolve_room.ts b/packages/core/src/middlewares/resolve_room.ts index b76162f3..8d291ca7 100644 --- a/packages/core/src/middlewares/resolve_room.ts +++ b/packages/core/src/middlewares/resolve_room.ts @@ -28,10 +28,16 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (joinRoom == null) { // 随机加入到一个你已经加入的房间??? - const joinedRooms = await getAllJoinedConversationRoom(ctx, session) + const joinedRooms = await getAllJoinedConversationRoom( + ctx, + session + ) if (joinedRooms.length > 0) { - joinRoom = joinedRooms[Math.floor(Math.random() * joinedRooms.length)] + joinRoom = + joinedRooms[ + Math.floor(Math.random() * joinedRooms.length) + ] await switchConversationRoom(ctx, session, joinRoom.roomId) logger.success( @@ -40,7 +46,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { } } - if (joinRoom == null && !session.isDirect && (context.command?.length ?? 
0) < 1) { + if ( + joinRoom == null && + !session.isDirect && + (context.command?.length ?? 0) < 1 + ) { joinRoom = await queryPublicConversationRoom(ctx, session) if (joinRoom != null) { logger.success( @@ -52,7 +62,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (joinRoom == null && (context.command?.length ?? 0) < 1) { // 尝试基于模板房间创建房间 - const templateRoom = await getTemplateConversationRoom(ctx, config) + const templateRoom = await getTemplateConversationRoom( + ctx, + config + ) if (templateRoom == null) { // 没有就算了。后面需要房间的中间件直接报错就完事。 @@ -73,12 +86,16 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { cloneRoom.roomName = session.isDirect ? `${session.username ?? session.userId} 的私有房间` : `${ - session.guildName ?? session.username ?? session.guildId.toString() + session.guildName ?? + session.username ?? + session.guildId.toString() } 的公共房间` await createConversationRoom(ctx, session, cloneRoom) - logger.success(`已为用户 ${session.userId} 自动创建房间 ${cloneRoom.roomName}。`) + logger.success( + `已为用户 ${session.userId} 自动创建房间 ${cloneRoom.roomName}。` + ) joinRoom = cloneRoom } diff --git a/packages/core/src/middlewares/room_info.ts b/packages/core/src/middlewares/room_info.ts index f7fbb617..4bde0634 100644 --- a/packages/core/src/middlewares/room_info.ts +++ b/packages/core/src/middlewares/room_info.ts @@ -15,7 +15,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (room == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) diff --git a/packages/core/src/middlewares/room_permission.ts b/packages/core/src/middlewares/room_permission.ts index 194e5292..ba47d75c 100644 --- a/packages/core/src/middlewares/room_permission.ts +++ b/packages/core/src/middlewares/room_permission.ts @@ -1,21 +1,30 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, getAllJoinedConversationRoom, setUserPermission } from '../chains/rooms' +import { + checkAdmin, + getAllJoinedConversationRoom, + setUserPermission +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('room_permission', async (session, context) => { const { command } = context - if (command !== 'room_permission') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'room_permission') + return ChainMiddlewareRunStatus.SKIPPED let targetRoom = context.options.room if (targetRoom == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -31,7 +40,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - if (targetRoom.roomMasterId !== session.userId && !(await checkAdmin(session))) { + if ( + targetRoom.roomMasterId !== session.userId && + !(await checkAdmin(session)) + ) { context.message = '你不是房间的房主,无法为用户设置权限。' return ChainMiddlewareRunStatus.STOP } @@ -48,15 +60,25 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { context.message = '操作超时未确认,已自动取消。' return 
ChainMiddlewareRunStatus.STOP } else if ( - ['admin', 'member', 'a', 'm'].every((text) => result.toLowerCase() !== text) + ['admin', 'member', 'a', 'm'].every( + (text) => result.toLowerCase() !== text + ) ) { context.message = '你输入的权限值不正确,已自动取消。' return ChainMiddlewareRunStatus.STOP } - const currentPermission = result.startsWith('a') ? 'admin' : 'member' + const currentPermission = result.startsWith('a') + ? 'admin' + : 'member' - await setUserPermission(ctx, session, targetRoom, currentPermission, user) + await setUserPermission( + ctx, + session, + targetRoom, + currentPermission, + user + ) context.message = `已为用户 ${user} 设置房间 ${targetRoom.roomName} 的权限为 ${currentPermission}` diff --git a/packages/core/src/middlewares/set_default_embeddings.ts b/packages/core/src/middlewares/set_default_embeddings.ts index 80620a98..383db657 100644 --- a/packages/core/src/middlewares/set_default_embeddings.ts +++ b/packages/core/src/middlewares/set_default_embeddings.ts @@ -11,7 +11,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('set_default_embeddings', async (session, context) => { const { command, options } = context - if (command !== 'set_embeddings') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'set_embeddings') + return ChainMiddlewareRunStatus.SKIPPED const { setEmbeddings } = options @@ -41,7 +42,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { buffer.push('请输入更精确的嵌入模型名称以避免歧义') - buffer.push('例如:chathub.embeddings.set ' + targetEmbeddings[0]) + buffer.push( + '例如:chathub.embeddings.set ' + targetEmbeddings[0] + ) context.message = buffer.join('\n') } else if (targetEmbeddings.length === 0) { @@ -50,7 +53,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const fullName = platform + '/' + targetEmbeddings[0] - await context.send(`已将默认嵌入模型设置为 ${fullName} (将自动重启插件应用更改)`) + await context.send( + `已将默认嵌入模型设置为 ${fullName} (将自动重启插件应用更改)` + ) config.defaultEmbeddings = fullName ctx.scope.update(config, true) diff --git a/packages/core/src/middlewares/set_default_vectorstore.ts b/packages/core/src/middlewares/set_default_vectorstore.ts index 63c1ee8b..e90427d3 100644 --- a/packages/core/src/middlewares/set_default_vectorstore.ts +++ b/packages/core/src/middlewares/set_default_vectorstore.ts @@ -9,7 +9,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('set_default_vectorstore', async (session, context) => { const { command, options } = context - if (command !== 'set_vector_store') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'set_vector_store') + return ChainMiddlewareRunStatus.SKIPPED const { setVectorStore } = options @@ -36,7 +37,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { buffer.push('请输入更精确的向量数据库名称以避免歧义') - buffer.push('例如:chathub.vectorstore.set ' + targetVectorStoreProviders[0]) + buffer.push( + '例如:chathub.vectorstore.set ' + + targetVectorStoreProviders[0] + ) context.message = buffer.join('\n') } else if (targetVectorStoreProviders.length === 0) { diff --git a/packages/core/src/middlewares/set_room.ts b/packages/core/src/middlewares/set_room.ts index 1c20c1e1..4fc77bd4 100644 --- a/packages/core/src/middlewares/set_room.ts +++ b/packages/core/src/middlewares/set_room.ts @@ -20,7 +20,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { if (room == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + 
const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -36,7 +40,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - if (room.roomMasterId !== session.userId && !(await checkAdmin(session))) { + if ( + room.roomMasterId !== session.userId && + !(await checkAdmin(session)) + ) { context.message = '你不是房间的房主,无法设置房间的属性' return ChainMiddlewareRunStatus.STOP } @@ -44,7 +51,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const oldPreset = room.preset if ( - Object.values(room_resolve).filter((value) => value != null).length > 0 && + Object.values(room_resolve).filter((value) => value != null) + .length > 0 && room_resolve.visibility !== 'template' ) { await context.send( @@ -70,8 +78,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { room.roomName = room_resolve.name ?? room.roomName room.chatMode = room_resolve.chatMode ?? room.chatMode room.password = room_resolve.password ?? room.password - // eslint-disable-next-line @typescript-eslint/no-explicit-any - room.visibility = (room_resolve.visibility as any) ?? room.visibility + + room.visibility = + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (room_resolve.visibility as any) ?? room.visibility room.model = room_resolve.model ?? room.model await ctx.database.upsert('chathub_room', [room]) @@ -93,7 +103,14 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { // 交互式创建 - let { model, preset, roomName: name, chatMode, password, visibility } = room + let { + model, + preset, + roomName: name, + chatMode, + password, + visibility + } = room // 1. 输入房间名 @@ -197,7 +214,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { break } - await context.send(`无法识别可见性:${visibility},请重新输入。`) + await context.send( + `无法识别可见性:${visibility},请重新输入。` + ) } // 5. 聊天模式 @@ -218,7 +237,11 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { chatMode = room.chatMode // 6. 
密码 - if (session.isDirect && visibility === 'private' && password == null) { + if ( + session.isDirect && + visibility === 'private' && + password == null + ) { await context.send( '请输入你需要使用的密码,如:123456。如果不输入密码请回复 N(则不设置密码)。否则回复你需要使用的密码。' ) diff --git a/packages/core/src/middlewares/switch_room.ts b/packages/core/src/middlewares/switch_room.ts index e877f74b..83dc0892 100644 --- a/packages/core/src/middlewares/switch_room.ts +++ b/packages/core/src/middlewares/switch_room.ts @@ -8,7 +8,8 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .middleware('switch_room', async (session, context) => { const { command } = context - if (command !== 'switch_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'switch_room') + return ChainMiddlewareRunStatus.SKIPPED const targetConversationRoom = await switchConversationRoom( ctx, diff --git a/packages/core/src/middlewares/thinking_message_send.ts b/packages/core/src/middlewares/thinking_message_send.ts index 4fe7c53a..bd972a7b 100644 --- a/packages/core/src/middlewares/thinking_message_send.ts +++ b/packages/core/src/middlewares/thinking_message_send.ts @@ -1,6 +1,10 @@ import { Context, h, sleep } from 'koishi' import { Config } from '../config' -import { ChainMiddlewareContextOptions, ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' +import { + ChainMiddlewareContextOptions, + ChainMiddlewareRunStatus, + ChatChain +} from '../chains/chain' import { createLogger } from '../utils/logger' const logger = createLogger() @@ -16,7 +20,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { context.options.thinkingTimeoutObject = thinkingTimeoutObject thinkingTimeoutObject.timeout = setTimeout(async () => { - const queueCount = await getQueueCount(thinkingTimeoutObject, context.options) + const queueCount = await getQueueCount( + thinkingTimeoutObject, + context.options + ) if (thinkingTimeoutObject.timeout == null) { return @@ -24,13 +31,19 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const messageIds = await session.send( h.text( - config.thinkingMessage.replace('{count}', (queueCount ?? '未知').toString()) + config.thinkingMessage.replace( + '{count}', + (queueCount ?? 
'未知').toString() + ) ) ) thinkingTimeoutObject.recallFunc = async () => { try { - await session.bot.deleteMessage(session.channelId, messageIds[0]) + await session.bot.deleteMessage( + session.channelId, + messageIds[0] + ) } catch (e) { logger.error(e) } @@ -52,7 +65,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { .before('lifecycle-prepare') } -async function getQueueCount(obj: ThinkingTimeoutObject, options: ChainMiddlewareContextOptions) { +async function getQueueCount( + obj: ThinkingTimeoutObject, + options: ChainMiddlewareContextOptions +) { while (obj.timeout != null && options.queueCount == null) { await sleep(10) } diff --git a/packages/core/src/middlewares/transfer_room.ts b/packages/core/src/middlewares/transfer_room.ts index 20b9abb4..ea496841 100644 --- a/packages/core/src/middlewares/transfer_room.ts +++ b/packages/core/src/middlewares/transfer_room.ts @@ -1,21 +1,30 @@ import { Context } from 'koishi' import { Config } from '../config' import { ChainMiddlewareRunStatus, ChatChain } from '../chains/chain' -import { checkAdmin, getAllJoinedConversationRoom, transferConversationRoom } from '../chains/rooms' +import { + checkAdmin, + getAllJoinedConversationRoom, + transferConversationRoom +} from '../chains/rooms' export function apply(ctx: Context, config: Config, chain: ChatChain) { chain .middleware('transfer_room', async (session, context) => { const { command } = context - if (command !== 'transfer_room') return ChainMiddlewareRunStatus.SKIPPED + if (command !== 'transfer_room') + return ChainMiddlewareRunStatus.SKIPPED let room = context.options.room if (room == null && context.options.room_resolve != null) { // 尝试完整搜索一次 - const rooms = await getAllJoinedConversationRoom(ctx, session, true) + const rooms = await getAllJoinedConversationRoom( + ctx, + session, + true + ) const roomId = parseInt(context.options.room_resolve?.name) @@ -31,7 +40,10 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { return ChainMiddlewareRunStatus.STOP } - if (room.roomMasterId !== session.userId && !(await checkAdmin(session))) { + if ( + room.roomMasterId !== session.userId && + !(await checkAdmin(session)) + ) { context.message = '你不是房间的房主,无法转移房间给他人' return ChainMiddlewareRunStatus.STOP } diff --git a/packages/core/src/middlewares/wipe.ts b/packages/core/src/middlewares/wipe.ts index 1edb93d3..8f07603c 100644 --- a/packages/core/src/middlewares/wipe.ts +++ b/packages/core/src/middlewares/wipe.ts @@ -22,7 +22,9 @@ export function apply(ctx: Context, config: Config, chain: ChatChain) { const expression = generateExpression() - buffer.push(`\n请输入下列算式的结果以确认删除:${expression.expression}。`) + buffer.push( + `\n请输入下列算式的结果以确认删除:${expression.expression}。` + ) await context.send(buffer.join('\n')) diff --git a/packages/core/src/preset.ts b/packages/core/src/preset.ts index 4b5172b8..b7907003 100644 --- a/packages/core/src/preset.ts +++ b/packages/core/src/preset.ts @@ -33,7 +33,10 @@ export class Preset { if (extension !== '.txt' && extension !== '.yml') { continue } - const rawText = await fs.readFile(path.join(presetDir, file), 'utf-8') + const rawText = await fs.readFile( + path.join(presetDir, file), + 'utf-8' + ) const preset = loadPreset(rawText) preset.path = path.join(presetDir, file) this._presets.push(preset) @@ -41,7 +44,11 @@ export class Preset { this.ctx.schema.set( 'preset', - Schema.union(this._presets.map((preset) => Schema.const(preset.triggerKeyword[0]))) + Schema.union( + this._presets.map((preset) => + 
Schema.const(preset.triggerKeyword[0]) + ) + ) ) } @@ -87,7 +94,9 @@ export class Preset { } } */ - const preset = this._presets.find((preset) => preset.triggerKeyword.includes('chatgpt')) + const preset = this._presets.find((preset) => + preset.triggerKeyword.includes('chatgpt') + ) if (preset) { // await this.cache.set('default-preset', 'chatgpt') @@ -144,7 +153,9 @@ export class Preset { const fileStat = await fs.stat(filePath) if (fileStat.isFile()) { await fs.mkdir(currentPresetDir, { recursive: true }) - logger.debug(`copy preset file ${filePath} to ${currentPresetDir}`) + logger.debug( + `copy preset file ${filePath} to ${currentPresetDir}` + ) await fs.copyFile(filePath, path.join(currentPresetDir, file)) } } diff --git a/packages/core/src/render.ts b/packages/core/src/render.ts index db4d2979..a4e0acf6 100644 --- a/packages/core/src/render.ts +++ b/packages/core/src/render.ts @@ -8,7 +8,10 @@ export abstract class Renderer { protected readonly config: Config ) {} - abstract render(message: Message, options: RenderOptions): Promise<RenderMessage> + abstract render( + message: Message, + options: RenderOptions + ): Promise<RenderMessage> } export class DefaultRenderer { @@ -34,13 +37,17 @@ export class DefaultRenderer { const currentRenderer = await this._getRenderer(options.type) const rawRenderer = - options.type === 'raw' ? currentRenderer : await this._getRenderer('raw') + options.type === 'raw' + ? currentRenderer + : await this._getRenderer('raw') result.push(await currentRenderer.render(message, options)) if (message.additionalReplyMessages) { for (const additionalMessage of message.additionalReplyMessages) { - result.push(await rawRenderer.render(additionalMessage, options)) + result.push( + await rawRenderer.render(additionalMessage, options) + ) } } diff --git a/packages/core/src/renders/image.ts b/packages/core/src/renders/image.ts index 2a178f76..1cbbcbd5 100644 --- a/packages/core/src/renders/image.ts +++ b/packages/core/src/renders/image.ts @@ -54,7 +54,10 @@ export default class ImageRenderer extends Renderer { return this.__page } - async render(message: Message, options: RenderOptions): Promise<RenderMessage> { + async render( + message: Message, + options: RenderOptions + ): Promise<RenderMessage> { const markdownText = message.content const page = await this._page() @@ -97,15 +100,18 @@ export default class ImageRenderer extends Renderer { } private async _textToQrcode(markdownText: string): Promise<string> { - const response = await chathubFetch('https://pastebin.mozilla.org/api/', { - method: 'POST', - body: new URLSearchParams({ - expires: '86400', - format: 'url', - lexer: '_markdown', - content: markdownText - }) - }) + const response = await chathubFetch( + 'https://pastebin.mozilla.org/api/', + { + method: 'POST', + body: new URLSearchParams({ + expires: '86400', + format: 'url', + lexer: '_markdown', + content: markdownText + }) + } + ) const url = await response.text() diff --git a/packages/core/src/renders/mixed-image.ts b/packages/core/src/renders/mixed-image.ts index ec05cd1e..56ee7edb 100644 --- a/packages/core/src/renders/mixed-image.ts +++ b/packages/core/src/renders/mixed-image.ts @@ -55,7 +55,10 @@ export default class MixedImageRenderer extends Renderer { return this.__page } - async render(message: Message, options: RenderOptions): Promise<RenderMessage> { + async render( + message: Message, + options: RenderOptions + ): Promise<RenderMessage> { const elements: h[] = [] const content = message.content @@ -71,7 +74,8 @@ export default class MixedImageRenderer extends Renderer { const mergedMatchedTexts: MatchedText[] = [] for (let i = 0; i <
matchedTexts.length; i++) { - const lastMatchedText = mergedMatchedTexts[mergedMatchedTexts.length - 1] + const lastMatchedText = + mergedMatchedTexts[mergedMatchedTexts.length - 1] const currentMatchedText = matchedTexts[i] @@ -86,13 +90,17 @@ } } - logger.debug(`mergedMatchedTexts: ${JSON.stringify(mergedMatchedTexts)}`) + logger.debug( + `mergedMatchedTexts: ${JSON.stringify(mergedMatchedTexts)}` + ) // step 4: render markdown to image for (const matchedText of mergedMatchedTexts) { if (matchedText.type === 'markdown') { - const image = await this._renderMarkdownToImage(matchedText.text) + const image = await this._renderMarkdownToImage( + matchedText.text + ) const element = h.image(image, 'image/png') @@ -122,7 +130,11 @@ const currentMatchedTexts: MatchedText[] = [] for (const token of tokens) { - if (token.type === 'text' || token.type === 'del' || token.type === 'br') { + if ( + token.type === 'text' || + token.type === 'del' || + token.type === 'br' + ) { currentMatchedTexts.push({ type: 'text', text: token.raw @@ -141,7 +153,8 @@ const matchedTexts = this._matchText(token.tokens) currentMatchedTexts.push(...matchedTexts) } else if (token.type === 'space') { - const currentMatchedText = currentMatchedTexts[currentMatchedTexts.length - 1] + const currentMatchedText = + currentMatchedTexts[currentMatchedTexts.length - 1] currentMatchedText.text = currentMatchedText.text + token.raw } else { currentMatchedTexts.length = 0 @@ -157,7 +170,9 @@ return currentMatchedTexts } - private async _renderMarkdownToImage(markdownText: string): Promise<Buffer> { + private async _renderMarkdownToImage( + markdownText: string + ): Promise<Buffer> { const page = await this._page() const templateHtmlPath = __dirname + '/../../resources/template.html' @@ -196,15 +211,18 @@ } private async _textToQrcode(markdownText: string): Promise<string> { - const response = await chathubFetch('https://pastebin.mozilla.org/api/', { - method: 'POST', - body: new URLSearchParams({ - expires: '86400', - format: 'url', - lexer: '_markdown', - content: markdownText - }) - }) + const response = await chathubFetch( + 'https://pastebin.mozilla.org/api/', + { + method: 'POST', + body: new URLSearchParams({ + expires: '86400', + format: 'url', + lexer: '_markdown', + content: markdownText + }) + } + ) const url = await response.text() diff --git a/packages/core/src/renders/mixed-voice.ts b/packages/core/src/renders/mixed-voice.ts index 028ddb87..7f3c1613 100644 --- a/packages/core/src/renders/mixed-voice.ts +++ b/packages/core/src/renders/mixed-voice.ts @@ -18,7 +18,10 @@ export default class MixedVoiceRenderer extends Renderer { super(ctx, config) } - async render(message: Message, options: RenderOptions): Promise<RenderMessage> { + async render( + message: Message, + options: RenderOptions + ): Promise<RenderMessage> { const elements: h[] = [] const renderText = (await this.renderText(message, options)).element @@ -42,7 +45,10 @@ } } - async renderText(message: Message, options: RenderOptions): Promise<RenderMessage> { + async renderText( + message: Message, + options: RenderOptions + ): Promise<RenderMessage> { let transformed = transformAndEscape(message.content) if (options.split) { @@ -56,7 +62,10 @@ export default class MixedVoiceRenderer extends
Renderer { } } - async renderVoice(message: Message, options: RenderOptions): Promise { + async renderVoice( + message: Message, + options: RenderOptions + ): Promise { const splitMessages = this._splitMessage(message.content) .flatMap((text) => text.trim().split('\n\n')) .filter((text) => text.length > 0) diff --git a/packages/core/src/renders/raw.ts b/packages/core/src/renders/raw.ts index f687b9a7..b625d2be 100644 --- a/packages/core/src/renders/raw.ts +++ b/packages/core/src/renders/raw.ts @@ -3,7 +3,10 @@ import { Renderer } from '../render' import { h } from 'koishi' export default class RawRenderer extends Renderer { - async render(message: Message, options: RenderOptions): Promise { + async render( + message: Message, + options: RenderOptions + ): Promise { return { element: h.text(message.content) } diff --git a/packages/core/src/renders/text.ts b/packages/core/src/renders/text.ts index ecaf7ce9..ade0ed4e 100644 --- a/packages/core/src/renders/text.ts +++ b/packages/core/src/renders/text.ts @@ -5,7 +5,10 @@ import { h } from 'koishi' import he from 'he' export default class TextRenderer extends Renderer { - async render(message: Message, options: RenderOptions): Promise { + async render( + message: Message, + options: RenderOptions + ): Promise { let transformed = transformAndEscape(message.content) if (options.split) { diff --git a/packages/core/src/renders/voice.ts b/packages/core/src/renders/voice.ts index 807d72b7..316c5b0f 100644 --- a/packages/core/src/renders/voice.ts +++ b/packages/core/src/renders/voice.ts @@ -8,7 +8,10 @@ import type {} from '@initencounter/vits' const logger = createLogger() export default class VoiceRenderer extends Renderer { - async render(message: Message, options: RenderOptions): Promise { + async render( + message: Message, + options: RenderOptions + ): Promise { const splitMessages = this._splitMessage(message.content) .flatMap((text) => text.trim().split('\n\n')) .filter((text) => text.length > 0) @@ -19,13 +22,19 @@ export default class VoiceRenderer extends Renderer { return { element: await Promise.all( splitMessages.map(async (text) => { - return h('message', await this._renderToVoice(text, options)) + return h( + 'message', + await this._renderToVoice(text, options) + ) }) ) } } else { return { - element: await this._renderToVoice(splitMessages.join(''), options) + element: await this._renderToVoice( + splitMessages.join(''), + options + ) } } } diff --git a/packages/core/src/services/chat.ts b/packages/core/src/services/chat.ts index 23bd8469..7a277046 100644 --- a/packages/core/src/services/chat.ts +++ b/packages/core/src/services/chat.ts @@ -17,9 +17,16 @@ import { ModelType, PlatformClientNames } from '../llm-core/platform/types' -import { ClientConfig, ClientConfigPool, ClientConfigPoolMode } from '../llm-core/platform/config' +import { + ClientConfig, + ClientConfigPool, + ClientConfigPoolMode +} from '../llm-core/platform/config' import { BasePlatformClient } from '../llm-core/platform/client' -import { ChatHubBaseEmbeddings, ChatHubChatModel } from '../llm-core/platform/model' +import { + ChatHubBaseEmbeddings, + ChatHubChatModel +} from '../llm-core/platform/model' import { ChatHubLLMChainWrapper } from '../llm-core/chain/base' import { ChatEvents } from './types' import { parseRawModelName } from '../llm-core/utils/count_tokens' @@ -69,9 +76,12 @@ export class ChatHubService extends Service { async awaitUninstallPlugin(plugin: ChatHubPlugin | string) { await this._lock.runLocked(async () => { - const pluginName = typeof plugin === 
'string' ? plugin : plugin.platformName + const pluginName = + typeof plugin === 'string' ? plugin : plugin.platformName while (true) { - const targetPlugin = this._plugins.find((p) => p.platformName === pluginName) + const targetPlugin = this._plugins.find( + (p) => p.platformName === pluginName + ) if (!targetPlugin) { break @@ -113,14 +123,20 @@ export class ChatHubService extends Service { return this._plugins.find(fun) } - chat(room: ConversationRoom, message: Message, event: ChatEvents, stream: boolean = false) { + chat( + room: ConversationRoom, + message: Message, + event: ChatEvents, + stream: boolean = false + ) { const { model: modelName } = room // provider const [platform] = parseRawModelName(modelName) const chatInterfaceWrapper = - this._chatInterfaceWrapper[platform] ?? this._createChatInterfaceWrapper(platform) + this._chatInterfaceWrapper[platform] ?? + this._createChatInterfaceWrapper(platform) return chatInterfaceWrapper.chat(room, message, event, stream) } @@ -131,7 +147,10 @@ export class ChatHubService extends Service { // provider const [platform] = parseRawModelName(modelName) - return this._chatInterfaceWrapper[platform] ?? this._createChatInterfaceWrapper(platform) + return ( + this._chatInterfaceWrapper[platform] ?? + this._createChatInterfaceWrapper(platform) + ) } async clearChatHistory(room: ConversationRoom) { @@ -411,7 +430,9 @@ export class ChatHubService extends Service { ) } - private _createChatInterfaceWrapper(platform: string): ChatInterfaceWrapper { + private _createChatInterfaceWrapper( + platform: string + ): ChatInterfaceWrapper { const chatBridger = new ChatInterfaceWrapper(this) logger.debug(`_createChatInterfaceWrapper: ${platform}`) this._chatInterfaceWrapper[platform] = chatBridger @@ -462,7 +483,10 @@ export class ChatHubPlugin< } async initClients() { - this._platformService.registerConfigPool(this.platformName, this._platformConfigPool) + this._platformService.registerConfigPool( + this.platformName, + this._platformConfigPool + ) try { await this._platformService.createClients(this.platformName) @@ -520,7 +544,10 @@ export class ChatHubPlugin< } } - registerConfigPool(platformName: PlatformClientNames, configPool: ClientConfigPool) { + registerConfigPool( + platformName: PlatformClientNames, + configPool: ClientConfigPool + ) { this._platformService.registerConfigPool(platformName, configPool) } @@ -536,13 +563,19 @@ export class ChatHubPlugin< ) => BasePlatformClient, platformName: string = this.platformName ) { - const disposable = this._platformService.registerClient(platformName, func) + const disposable = this._platformService.registerClient( + platformName, + func + ) this._disposables.push(disposable) } async registerVectorStore(name: string, func: CreateVectorStoreFunction) { - const disposable = await this._platformService.registerVectorStore(name, func) + const disposable = await this._platformService.registerVectorStore( + name, + func + ) this._disposables.push(disposable) } @@ -554,9 +587,15 @@ export class ChatHubPlugin< async registerChatChainProvider( name: string, description: string, - func: (params: CreateChatHubLLMChainParams) => Promise + func: ( + params: CreateChatHubLLMChainParams + ) => Promise ) { - const disposable = await this._platformService.registerChatChain(name, description, func) + const disposable = await this._platformService.registerChatChain( + name, + description, + func + ) this._disposables.push(disposable) } } @@ -592,7 +631,8 @@ class ChatInterfaceWrapper { const requestId = uuidv4() const 
maxQueueLength = config.value.concurrentMaxSize - const currentQueueLength = await this._modelQueue.getQueueLength(platform) + const currentQueueLength = + await this._modelQueue.getQueueLength(platform) await this._conversationQueue.add(conversationId, requestId) await this._modelQueue.add(platform, requestId) @@ -603,7 +643,8 @@ class ChatInterfaceWrapper { try { const { chatInterface } = - this._conversations[conversationId] ?? (await this._createChatInterface(room)) + this._conversations[conversationId] ?? + (await this._createChatInterface(room)) const humanMessage = new HumanMessage({ content: message.content, @@ -620,11 +661,11 @@ class ChatInterfaceWrapper { return { content: (chainValues.message as AIMessage).content, - additionalReplyMessages: (chainValues.additionalReplyMessages as string[])?.map( - (content) => ({ - content - }) - ) + additionalReplyMessages: ( + chainValues.additionalReplyMessages as string[] + )?.map((content) => ({ + content + })) } } finally { await this._modelQueue.remove(platform, requestId) @@ -636,7 +677,8 @@ class ChatInterfaceWrapper { const { conversationId } = room const { chatInterface } = - this._conversations[conversationId] ?? (await this._createChatInterface(room)) + this._conversations[conversationId] ?? + (await this._createChatInterface(room)) return chatInterface } @@ -682,7 +724,9 @@ class ChatInterfaceWrapper { this._conversations = {} } - private async _createChatInterface(room: ConversationRoom): Promise { + private async _createChatInterface( + room: ConversationRoom + ): Promise { const presetTemplate = await this._service.preset.getPreset(room.preset) const config = this._service.config @@ -703,7 +747,8 @@ class ChatInterfaceWrapper { ? config.defaultEmbeddings : undefined, vectorStoreName: - config.defaultVectorStore && config.defaultVectorStore.length > 0 + config.defaultVectorStore && + config.defaultVectorStore.length > 0 ? config.defaultVectorStore : undefined, maxMessagesCount: config.messageCount diff --git a/packages/core/src/utils/error.ts b/packages/core/src/utils/error.ts index 6ae56480..8f30873f 100644 --- a/packages/core/src/utils/error.ts +++ b/packages/core/src/utils/error.ts @@ -12,7 +12,9 @@ export class ChatHubError extends Error { ) { super(ERROR_FORMAT_TEMPLATE.replace('%s', errorCode.toString())) this.name = 'ChatHubError' - logger.error('='.repeat(20) + 'ChatHubError:' + errorCode + '='.repeat(20)) + logger.error( + '='.repeat(20) + 'ChatHubError:' + errorCode + '='.repeat(20) + ) if (originError) { logger.error(originError) if (originError.cause) { diff --git a/packages/core/src/utils/pagination.ts b/packages/core/src/utils/pagination.ts index 2aab0266..2c528f5a 100644 --- a/packages/core/src/utils/pagination.ts +++ b/packages/core/src/utils/pagination.ts @@ -2,7 +2,8 @@ export class Pagination { private _cacheMap: Record = {} constructor(private input: PaginationInput) { - input.formatString.pages = input.formatString.pages ?? '\n当前为第 {page} / {total} 页' + input.formatString.pages = + input.formatString.pages ?? '\n当前为第 {page} / {total} 页' input.page = input.page ?? 1 input.limit = input.limit ?? 
5 } @@ -18,7 +19,10 @@ export class Pagination { ) { const items = this._cacheMap[key] - return items.slice((page - 1) * limit, Math.min(items.length, page * limit)) + return items.slice( + (page - 1) * limit, + Math.min(items.length, page * limit) + ) } async getFormattedPage( diff --git a/packages/core/src/utils/queue.ts b/packages/core/src/utils/queue.ts index 22438558..9f3426b0 100644 --- a/packages/core/src/utils/queue.ts +++ b/packages/core/src/utils/queue.ts @@ -53,6 +53,8 @@ export class RequestIdQueue { } public async getQueueLength(key: string) { - return await this._lock.runLocked(async () => this._queue[key]?.length ?? 0) + return await this._lock.runLocked( + async () => this._queue[key]?.length ?? 0 + ) } } diff --git a/packages/core/src/utils/request.ts b/packages/core/src/utils/request.ts index 7d0c7dca..0d600fc0 100644 --- a/packages/core/src/utils/request.ts +++ b/packages/core/src/utils/request.ts @@ -24,7 +24,9 @@ function createProxyAgentForFetch( try { proxyAddressURL = new URL(proxyAddress) } catch (e) { - logger.error('无法解析你的代理地址,请检查你的代理地址是否正确!(例如是否添加了http://)') + logger.error( + '无法解析你的代理地址,请检查你的代理地址是否正确!(例如是否添加了http://)' + ) logger.error(e) throw e } @@ -52,7 +54,9 @@ function createProxyAgentForFetch( return init } -function createProxyAgent(proxyAddress: string): HttpsProxyAgent | SocksProxyAgent { +function createProxyAgent( + proxyAddress: string +): HttpsProxyAgent | SocksProxyAgent { if (proxyAddress.startsWith('socks://')) { return new SocksProxyAgent(proxyAddress) } else if (proxyAddress.match(/^https?:\/\//)) { @@ -82,7 +86,10 @@ export function setGlobalProxyAddress(address: string) { * package undici, and with proxy support * @returns */ -export function chathubFetch(info: fetchType.RequestInfo, init?: fetchType.RequestInit) { +export function chathubFetch( + info: fetchType.RequestInfo, + init?: fetchType.RequestInit +) { if (globalProxyAddress != null && !init?.dispatcher) { init = createProxyAgentForFetch(init || {}, globalProxyAddress) } @@ -110,6 +117,7 @@ export function ws(url: string, options?: ClientOptions | ClientRequestArgs) { export function randomUA() { return RandomUserAgent.getRandom( - (ua) => ua.browserName === 'Chrome' && parseFloat(ua.browserVersion) >= 90 + (ua) => + ua.browserName === 'Chrome' && parseFloat(ua.browserVersion) >= 90 ) } diff --git a/packages/core/src/utils/sse.ts b/packages/core/src/utils/sse.ts index 3f58d381..98767725 100644 --- a/packages/core/src/utils/sse.ts +++ b/packages/core/src/utils/sse.ts @@ -4,14 +4,22 @@ import { ChatHubError, ChatHubErrorCode } from './error' // eslint-disable-next-line generator-star-spacing export async function* sseIterable( response: fetchType.Response, - checkedFunction?: (data: string, event?: string, kvMap?: Record) => boolean + checkedFunction?: ( + data: string, + event?: string, + kvMap?: Record + ) => boolean ) { if (!response.ok) { const error = await response.json().catch(() => ({})) throw new ChatHubError( ChatHubErrorCode.NETWORK_ERROR, - new Error(`${response.status} ${response.statusText} ${JSON.stringify(error)}`) + new Error( + `${response.status} ${response.statusText} ${JSON.stringify( + error + )}` + ) ) } @@ -36,7 +44,9 @@ export async function* sseIterable( continue } - const splitted = decodeValue.split('\n\n').flatMap((item) => item.split('\n')) + const splitted = decodeValue + .split('\n\n') + .flatMap((item) => item.split('\n')) let currentTemp: Record = {} @@ -50,7 +60,11 @@ export async function* sseIterable( // data: {aa:xx} // event:finish - const [, 
type, data] = /(\w+):\s*(.*)$/g.exec(item) ?? ['', '', ''] + const [, type, data] = /(\w+):\s*(.*)$/g.exec(item) ?? [ + '', + '', + '' + ] currentTemp[type] = data @@ -59,7 +73,11 @@ export async function* sseIterable( } if (checkedFunction) { - const result = checkedFunction(data, currentTemp?.['event'], currentTemp) + const result = checkedFunction( + data, + currentTemp?.['event'], + currentTemp + ) if (result) { yield data diff --git a/packages/embeddings-service/src/embeddings/huggingface.ts b/packages/embeddings-service/src/embeddings/huggingface.ts index 9c785fc2..0b007abc 100644 --- a/packages/embeddings-service/src/embeddings/huggingface.ts +++ b/packages/embeddings-service/src/embeddings/huggingface.ts @@ -1,6 +1,9 @@ import { Context } from 'koishi' import { chathubFetch } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { ChatHubPlugin } from '@dingyi222666/koishi-plugin-chathub/lib/services/chat' import { Config } from '..' import { @@ -98,8 +101,11 @@ class HuggingfaceEmbeddingsRequester implements EmbeddingsRequester { this._inferenceClient = new HfInference(this._apiKey) } - async embeddings(params: EmbeddingsRequestParams): Promise { - const input = typeof params.input === 'string' ? [params.input] : params.input + async embeddings( + params: EmbeddingsRequestParams + ): Promise { + const input = + typeof params.input === 'string' ? [params.input] : params.input const result = await this._inferenceClient.featureExtraction({ model: params.model, @@ -117,8 +123,12 @@ class HuggingfaceEmbeddingsRequester implements EmbeddingsRequester { class HfInference { constructor(private readonly _apiKey?: string) {} - async featureExtraction(params: { model: string; inputs: string[] }): Promise { - const url = 'https://api-inference.huggingface.co/models/' + params.model + async featureExtraction(params: { + model: string + inputs: string[] + }): Promise { + const url = + 'https://api-inference.huggingface.co/models/' + params.model const headers = { Authorization: `Bearer ${this._apiKey}` @@ -131,7 +141,11 @@ class HfInference { }) if (!response.ok) { - if (response.headers.get('Content-Type')?.startsWith('application/json')) { + if ( + response.headers + .get('Content-Type') + ?.startsWith('application/json') + ) { // eslint-disable-next-line @typescript-eslint/no-explicit-any const output: any = await response.json() if (output.error) { diff --git a/packages/embeddings-service/src/index.ts b/packages/embeddings-service/src/index.ts index 96b445dd..5df5d1bd 100644 --- a/packages/embeddings-service/src/index.ts +++ b/packages/embeddings-service/src/index.ts @@ -35,7 +35,9 @@ export const Config: Schema = Schema.intersect([ .required(), huggingfaceModels: Schema.array(String) .description('调用 Huggingface 的 Embeddings 模型') - .default(['sentence-transformers/distilbert-base-nli-mean-tokens']) + .default([ + 'sentence-transformers/distilbert-base-nli-mean-tokens' + ]) }).description('Huggingface 设置'), Schema.object({}) ]) diff --git a/packages/gptfree-adapter/src/client.ts b/packages/gptfree-adapter/src/client.ts index c9377d9f..336ae7a3 100644 --- a/packages/gptfree-adapter/src/client.ts +++ b/packages/gptfree-adapter/src/client.ts @@ -7,7 +7,10 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/types' import { Context } from 'koishi' 
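// Editorial note: the HfInference class reformatted above is a thin wrapper
// over the HuggingFace Inference API. A minimal sketch of the same call with
// plain fetch, assuming the endpoint and payload shape shown in the hunks
// above (the function name and apiKey value here are illustrative only):
async function featureExtractionSketch(
    model: string,
    inputs: string[],
    apiKey: string
): Promise<number[][]> {
    const response = await fetch(
        'https://api-inference.huggingface.co/models/' + model,
        {
            method: 'POST',
            headers: { Authorization: `Bearer ${apiKey}` },
            body: JSON.stringify({ inputs })
        }
    )

    // the API answers with one embedding vector per input string
    return (await response.json()) as number[][]
}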
import { Config } from '.' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { GPTFreeRequester } from './requester' import { getModelContextSize, diff --git a/packages/gptfree-adapter/src/index.ts b/packages/gptfree-adapter/src/index.ts index 9e61eac6..49e7bd13 100644 --- a/packages/gptfree-adapter/src/index.ts +++ b/packages/gptfree-adapter/src/index.ts @@ -37,9 +37,9 @@ export interface Config extends ChatHubPlugin.Config { export const Config: Schema = Schema.intersect([ ChatHubPlugin.Config, Schema.object({ - apiEndPoints: Schema.array(Schema.string().default('http://127.0.0.1:3000')).description( - '请求 GPTFree 自搭建后端的API 地址' - ) + apiEndPoints: Schema.array( + Schema.string().default('http://127.0.0.1:3000') + ).description('请求 GPTFree 自搭建后端的API 地址') }).description('请求设置') ]) diff --git a/packages/gptfree-adapter/src/requester.ts b/packages/gptfree-adapter/src/requester.ts index 1fac457b..a52dc1dc 100644 --- a/packages/gptfree-adapter/src/requester.ts +++ b/packages/gptfree-adapter/src/requester.ts @@ -6,10 +6,19 @@ import { ClientConfig } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/p import { chathubFetch } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' import * as fetchType from 'undici/types/fetch' import { ChatGenerationChunk } from 'langchain/schema' -import { ChatCompletionResponse, ChatCompletionResponseMessageRoleEnum } from './types' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatCompletionResponse, + ChatCompletionResponseMessageRoleEnum +} from './types' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { sseIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/sse' -import { convertDeltaToMessageChunk, langchainMessageToOpenAIMessage } from './utils' +import { + convertDeltaToMessageChunk, + langchainMessageToOpenAIMessage +} from './utils' import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger' import { parseRawModelName } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/utils/count_tokens' @@ -20,7 +29,9 @@ export class GPTFreeRequester extends ModelRequester { super() } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { const [site, modelName] = parseRawModelName(params.model) logger.debug(`gptfree site: ${site}, model: ${modelName}`) try { @@ -63,11 +74,17 @@ export class GPTFreeRequester extends ModelRequester { if ((delta as any).error) { throw new ChatHubError( ChatHubErrorCode.API_REQUEST_FAILED, - new Error('error when calling openai completion, Result: ' + chunk) + new Error( + 'error when calling openai completion, Result: ' + + chunk + ) ) } - const messageChunk = convertDeltaToMessageChunk(delta, defaultRole) + const messageChunk = convertDeltaToMessageChunk( + delta, + defaultRole + ) messageChunk.content = content + messageChunk.content @@ -108,11 +125,14 @@ export class GPTFreeRequester extends ModelRequester { return data.flatMap( // eslint-disable-next-line @typescript-eslint/no-explicit-any (site: any) => - site.models.map((model: string) => site.site + '/' + model) as string[] + site.models.map( + (model: string) => site.site + '/' + model + ) as string[] ) } catch (e) { const error = new 
Error( - 'error when listing gptfree models, Result: ' + JSON.stringify(data) + 'error when listing gptfree models, Result: ' + + JSON.stringify(data) ) error.stack = e.stack diff --git a/packages/gptfree-adapter/src/types.ts b/packages/gptfree-adapter/src/types.ts index 92360f33..2a961f58 100644 --- a/packages/gptfree-adapter/src/types.ts +++ b/packages/gptfree-adapter/src/types.ts @@ -9,7 +9,11 @@ export interface ChatCompletionResponse { object: string created: number model: string - usage: { prompt_tokens: number; completion_tokens: number; total_tokens: number } + usage: { + prompt_tokens: number + completion_tokens: number + total_tokens: number + } } export interface ChatCompletionResponseMessage { @@ -31,4 +35,8 @@ export interface ChatCompletionRequestMessageFunctionCall { arguments?: string } -export type ChatCompletionResponseMessageRoleEnum = 'system' | 'assistant' | 'user' | 'function' +export type ChatCompletionResponseMessageRoleEnum = + | 'system' + | 'assistant' + | 'user' + | 'function' diff --git a/packages/gptfree-adapter/src/utils.ts b/packages/gptfree-adapter/src/utils.ts index a3bf54a1..9834a7c4 100644 --- a/packages/gptfree-adapter/src/utils.ts +++ b/packages/gptfree-adapter/src/utils.ts @@ -7,7 +7,10 @@ import { MessageType, SystemMessageChunk } from 'langchain/schema' -import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum } from './types' +import { + ChatCompletionResponseMessage, + ChatCompletionResponseMessageRoleEnum +} from './types' export function langchainMessageToOpenAIMessage( messages: BaseMessage[] @@ -23,7 +26,9 @@ export function langchainMessageToOpenAIMessage( }) } -export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum { +export function messageTypeToOpenAIRole( + type: MessageType +): ChatCompletionResponseMessageRoleEnum { switch (type) { case 'system': return 'system' diff --git a/packages/lmsys-adapter/src/client.ts b/packages/lmsys-adapter/src/client.ts index 27286c1c..fae36e5a 100644 --- a/packages/lmsys-adapter/src/client.ts +++ b/packages/lmsys-adapter/src/client.ts @@ -6,7 +6,10 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/types' import { Context } from 'koishi' import { Config } from '.' 
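// Editorial note: the GPTFree hunks above address models as "<site>/<model>"
// and split them with parseRawModelName (from llm-core/utils/count_tokens).
// A standalone sketch of that naming convention; splitRawModelName is an
// illustrative stand-in, not the real helper:
function splitRawModelName(raw: string): [string, string] {
    const index = raw.indexOf('/')
    if (index < 0) {
        throw new Error(`invalid raw model name: ${raw}`)
    }
    // everything before the first "/" names the platform, the rest the model
    return [raw.slice(0, index), raw.slice(index + 1)]
}

// e.g. splitRawModelName('openai/gpt-3.5-turbo') => ['openai', 'gpt-3.5-turbo']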
-import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
+import {
+    ChatHubError,
+    ChatHubErrorCode
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
 import { LMSYSRequester } from './requester'
 import { LmsysClientConfig } from './types'
diff --git a/packages/lmsys-adapter/src/index.ts b/packages/lmsys-adapter/src/index.ts
index fe01a637..a0a80745 100644
--- a/packages/lmsys-adapter/src/index.ts
+++ b/packages/lmsys-adapter/src/index.ts
@@ -6,7 +6,11 @@ import { LmsysClientConfig } from './types'
 export function apply(ctx: Context, config: Config) {
     config.chatConcurrentMaxSize = 1
 
-    const plugin = new ChatHubPlugin<LmsysClientConfig, Config>(ctx, config, 'lmsys')
+    const plugin = new ChatHubPlugin<LmsysClientConfig, Config>(
+        ctx,
+        config,
+        'lmsys'
+    )
 
     ctx.on('ready', async () => {
         await plugin.registerToService()
@@ -25,7 +29,9 @@
             ]
         })
 
-        await plugin.registerClient((_, clientConfig) => new LMSYSClient(ctx, config, clientConfig))
+        await plugin.registerClient(
+            (_, clientConfig) => new LMSYSClient(ctx, config, clientConfig)
+        )
 
         await plugin.initClients()
     })
@@ -39,7 +45,9 @@
     ChatHubPlugin.Config,
     Schema.object({
-        formatMessages: Schema.boolean().description('是否使用历史聊天消息').default(false)
+        formatMessages: Schema.boolean()
+            .description('是否使用历史聊天消息')
+            .default(false)
     }).description('对话设置')
 ])
diff --git a/packages/lmsys-adapter/src/requester.ts b/packages/lmsys-adapter/src/requester.ts
index 5b2f3319..ef0d66a7 100644
--- a/packages/lmsys-adapter/src/requester.ts
+++ b/packages/lmsys-adapter/src/requester.ts
@@ -5,9 +5,15 @@
 import { WebSocket } from 'ws'
 import { AIMessageChunk, ChatGenerationChunk } from 'langchain/schema'
 import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger'
-import { randomUA, ws } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
+import {
+    randomUA,
+    ws
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
 import { formatMessages, generateSessionHash, html2md, serial } from './utils'
-import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
+import {
+    ChatHubError,
+    ChatHubErrorCode
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
 import {
     FnIndex,
     LmsysClientConfig,
@@ -27,7 +33,9 @@ export class LMSYSRequester extends ModelRequester {
         super()
     }
 
-    async *completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk> {
+    async *completionStream(
+        params: ModelRequestParams
+    ): AsyncGenerator<ChatGenerationChunk> {
         if (this._conversationHash == null) {
             await this.init()
         }
@@ -89,7 +97,9 @@
         const receiveWebSocket = this._createWebSocket()
 
         sendWebsocket.on('close', (code, data) => {
-
logger.debug(`send websocket close with code: ${code}, data: ${data.toString()}`) + logger.debug( + `send websocket close with code: ${code}, data: ${data.toString()}` + ) if (data.toString() === '114514') { logger.debug(`close receive websocket`) receiveWebSocket.close() @@ -186,7 +201,10 @@ export class LMSYSRequester extends ModelRequester { this._conversationHash = conversationHash } catch (e) { await this.dispose() - throw new ChatHubError(ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, e) + throw new ChatHubError( + ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, + e + ) } } @@ -210,10 +228,15 @@ export class LMSYSRequester extends ModelRequester { websocket.on('message', async (data) => { const event = JSON.parse(data.toString()) - await this._handleEventMessage(event, handleEventParams, websocket, { - resolve, - reject - }) + await this._handleEventMessage( + event, + handleEventParams, + websocket, + { + resolve, + reject + } + ) }) this._handleCloseEvent(websocket, tempParams, { resolve, reject }) @@ -228,7 +251,10 @@ export class LMSYSRequester extends ModelRequester { { resolve, reject }: PromiseConstructorParameters ) { websocket.on('open', () => { - logger.debug('WebSocket Connected: ' + (fnIndex === FnIndex.Send ? 'send' : 'receive')) + logger.debug( + 'WebSocket Connected: ' + + (fnIndex === FnIndex.Send ? 'send' : 'receive') + ) if (fnIndex === FnIndex.Send || fnIndex === FnIndex.InitSend) { resolve('') @@ -257,7 +283,9 @@ export class LMSYSRequester extends ModelRequester { private async _handleEventMessage( // eslint-disable-next-line @typescript-eslint/no-explicit-any event: any, - params: ResponseTempParams & { writer?: WritableStreamDefaultWriter }, + params: ResponseTempParams & { + writer?: WritableStreamDefaultWriter + }, websocket: WebSocket, { resolve, reject }: PromiseConstructorParameters ) { @@ -265,11 +293,19 @@ export class LMSYSRequester extends ModelRequester { logger.debug(`event: ${JSON.stringify(event)}`) } - const { conversationHash, fnIndex, data: sendData, stopTokenFound, writer } = params + const { + conversationHash, + fnIndex, + data: sendData, + stopTokenFound, + writer + } = params if (event.msg === 'send_hash') { // logger.debug(`send_hash: ${conversationHash}, fnIndex: ${fnIndex}`) - websocket.send(serial({ fn_index: fnIndex, session_hash: conversationHash })) + websocket.send( + serial({ fn_index: fnIndex, session_hash: conversationHash }) + ) } else if (event.msg === 'send_data') { websocket.send( serial({ @@ -288,7 +324,11 @@ export class LMSYSRequester extends ModelRequester { if (!event.success || !event.output.data) { await writer?.write('[DONE]') - reject(new Error(event?.output?.error ?? 'process_generating error')) + reject( + new Error( + event?.output?.error ?? 'process_generating error' + ) + ) return } @@ -321,7 +361,9 @@ export class LMSYSRequester extends ModelRequester { } }) - await writer?.write(text.replace('▌', '').replace(/^(.+?)(:|:)\s?/, '')) + await writer?.write( + text.replace('▌', '').replace(/^(.+?)(:|:)\s?/, '') + ) if (!params.stopTokenFound) { params.result = text @@ -332,7 +374,9 @@ export class LMSYSRequester extends ModelRequester { } else if (event.msg === 'process_completed') { try { if (event.success !== true) { - throw new Error(event.output?.error ?? event ?? 'unknown error') + throw new Error( + event.output?.error ?? event ?? 
'unknown error' + ) } if (!event.output) { diff --git a/packages/lmsys-adapter/src/types.ts b/packages/lmsys-adapter/src/types.ts index fad63625..32d1537d 100644 --- a/packages/lmsys-adapter/src/types.ts +++ b/packages/lmsys-adapter/src/types.ts @@ -23,8 +23,12 @@ export interface LmsysClientConfig extends ClientConfig { formatMessages: boolean } -type PromiseConstructor = Parameters[0]> +type PromiseConstructor = Parameters< + ConstructorParameters[0] +> export type PromiseConstructorParameters = { - [K in 'resolve' | 'reject']: K extends 'resolve' ? PromiseConstructor[0] : PromiseConstructor[1] + [K in 'resolve' | 'reject']: K extends 'resolve' + ? PromiseConstructor[0] + : PromiseConstructor[1] } diff --git a/packages/lmsys-adapter/src/utils.ts b/packages/lmsys-adapter/src/utils.ts index d350dc13..a79376c9 100644 --- a/packages/lmsys-adapter/src/utils.ts +++ b/packages/lmsys-adapter/src/utils.ts @@ -26,7 +26,8 @@ export async function formatMessages(messages: BaseMessage[]) { } function formatMessage(message: BaseMessage) { - const roleType = message._getType() === 'human' ? 'user' : message._getType() + const roleType = + message._getType() === 'human' ? 'user' : message._getType() return `${roleType}: ${message.content}` } diff --git a/packages/newbing-adapter/src/constants.ts b/packages/newbing-adapter/src/constants.ts index 31706e9d..c48a9ca7 100644 --- a/packages/newbing-adapter/src/constants.ts +++ b/packages/newbing-adapter/src/constants.ts @@ -14,18 +14,25 @@ import { randomInt } from 'crypto' * @param {number} size */ export const genRanHex = (size) => - [...Array(size)].map(() => Math.floor(Math.random() * 16).toString(16)).join('') + [...Array(size)] + .map(() => Math.floor(Math.random() * 16).toString(16)) + .join('') -const random = (min: number, max: number) => Math.floor(Math.random() * (max - min + 1)) + min +const random = (min: number, max: number) => + Math.floor(Math.random() * (max - min + 1)) + min -export const randomIP = `13.${random(104, 107)}.${random(0, 255)}.${random(0, 255)}` +export const randomIP = `13.${random(104, 107)}.${random(0, 255)}.${random( + 0, + 255 +)}` export const HEADERS_INIT_CONVER = { authority: 'edgeservices.bing.com', accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 'accept-language': 'en-US,en;q=0.9', 'cache-control': 'max-age=0', - 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', + 'sec-ch-ua': + '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', 'sec-ch-ua-arch': '"x86"', 'sec-ch-ua-bitness': '"64"', 'sec-ch-ua-full-version': '"110.0.1587.69"', @@ -53,7 +60,8 @@ export const HEADERS = { accept: 'application/json', 'accept-language': 'en-US,en;q=0.9', 'content-type': 'application/json', - 'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', + 'sec-ch-ua': + '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', 'sec-ch-ua-arch': '"x86"', 'sec-ch-ua-bitness': '"64"', 'sec-ch-ua-full-version': '"109.0.1518.78"', @@ -67,7 +75,8 @@ export const HEADERS = { 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin', 'x-ms-client-request-id': uuidv4(), - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', + 'x-ms-useragent': + 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', Referer: 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx', 'Referrer-Policy': 
'origin-when-cross-origin', 'x-forwarded-for': randomIP @@ -132,7 +141,8 @@ function formatMessages(messages: BaseMessage[]) { result.push('\nThese are some conversations records between you and I: \n') for (const message of formatMessages) { - const roleType = message._getType() === 'human' ? 'user' : message._getType() + const roleType = + message._getType() === 'human' ? 'user' : message._getType() result.push(`${roleType}: ${message.content}`) } @@ -242,7 +252,9 @@ export function buildChatRequest( previousMessages.forEach((message) => { if ( - requestPreviousMessages.filter((message) => message.author === 'user').length < + requestPreviousMessages.filter( + (message) => message.author === 'user' + ).length < (conversation.maxNumUserMessagesInConversation ?? 5) - 1 ) { requestPreviousMessages.push({ @@ -304,7 +316,8 @@ export function unpackResponse(data: string | ArrayBuffer | Blob) { } export function randomString(length: number) { - const chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' + const chars = + 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' let result = '' diff --git a/packages/newbing-adapter/src/index.ts b/packages/newbing-adapter/src/index.ts index c8b088f1..10c069fd 100644 --- a/packages/newbing-adapter/src/index.ts +++ b/packages/newbing-adapter/src/index.ts @@ -6,7 +6,11 @@ import { BingClientConfig } from './types' export function apply(ctx: Context, config: Config) { config.chatConcurrentMaxSize = 0 - const plugin = new ChatHubPlugin(ctx, config, 'bing') + const plugin = new ChatHubPlugin( + ctx, + config, + 'bing' + ) ctx.on('ready', async () => { await plugin.registerToService() @@ -25,7 +29,9 @@ export function apply(ctx: Context, config: Config) { }) }) - await plugin.registerClient((_, clientConfig) => new BingClient(ctx, config, clientConfig)) + await plugin.registerClient( + (_, clientConfig) => new BingClient(ctx, config, clientConfig) + ) await plugin.initClients() }) @@ -44,20 +50,24 @@ export const Config: Schema = Schema.intersect([ ChatHubPlugin.Config, Schema.object({ - cookies: Schema.array(Schema.string().role('secret').required()).description( - 'Bing 账号的 Cookie' - ), + cookies: Schema.array( + Schema.string().role('secret').required() + ).description('Bing 账号的 Cookie'), webSocketApiEndPoint: Schema.string() .description('New Bing 的WebSocket Api EndPoint') .default('wss://sydney.bing.com/sydney/ChatHub'), createConversationApiEndPoint: Schema.string() .description('New Bing 的新建会话 Api EndPoint') - .default('https://edgeservices.bing.com/edgesvc/turing/conversation/create') + .default( + 'https://edgeservices.bing.com/edgesvc/turing/conversation/create' + ) }).description('请求设置'), Schema.object({ sydney: Schema.boolean() - .description('是否开启 Sydeny 模式(破解对话20次回复数限制,账号可能会有风险)') + .description( + '是否开启 Sydeny 模式(破解对话20次回复数限制,账号可能会有风险)' + ) .default(false) }).description('对话设置') ]) diff --git a/packages/newbing-adapter/src/requester.ts b/packages/newbing-adapter/src/requester.ts index 57930a00..d731e0dc 100644 --- a/packages/newbing-adapter/src/requester.ts +++ b/packages/newbing-adapter/src/requester.ts @@ -4,8 +4,15 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/api' import { AIMessageChunk, ChatGenerationChunk } from 'langchain/schema' import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger' -import { chathubFetch, randomUA, ws } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' -import { ChatHubError, ChatHubErrorCode } from 
'@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + chathubFetch, + randomUA, + ws +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { readableStreamToAsyncIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/stream' import { Context } from 'koishi' import { @@ -54,7 +61,10 @@ export class BingRequester extends ModelRequester { ) { super() - let cookie = _chatConfig.apiKey.length < 1 ? `_U=${randomString(169)}` : _chatConfig.apiKey + let cookie = + _chatConfig.apiKey.length < 1 + ? `_U=${randomString(169)}` + : _chatConfig.apiKey if (!cookie.includes('_U=')) { cookie = `_U=${cookie}` @@ -65,7 +75,8 @@ export class BingRequester extends ModelRequester { } if (_pluginConfig.createConversationApiEndPoint.length > 0) { - this._createConversationUrl = _pluginConfig.createConversationApiEndPoint + this._createConversationUrl = + _pluginConfig.createConversationApiEndPoint } this._cookie = cookie @@ -74,7 +85,9 @@ export class BingRequester extends ModelRequester { // this._headers['User-Agent'] = this._ua } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { if (this._isThrottled === true) { this._chatConfig.sydney = false } @@ -173,13 +186,19 @@ export class BingRequester extends ModelRequester { if (event?.item?.throttling?.maxNumUserMessagesInConversation) { conversationInfo.maxNumUserMessagesInConversation = - event?.item?.throttling?.maxNumUserMessagesInConversation + event?.item?.throttling + ?.maxNumUserMessagesInConversation } if (JSON.stringify(event) === '{}') { ws.send( serial( - buildChatRequest(conversationInfo, message, sydney, previousMessages) + buildChatRequest( + conversationInfo, + message, + sydney, + previousMessages + ) ) ) @@ -219,13 +238,17 @@ export class BingRequester extends ModelRequester { maxNumUserMessagesInConversation = event?.arguments?.[0]?.throttling?.maxNumUserMessagesInConversation } */ - let updatedText = message.adaptiveCards?.[0]?.body?.[0]?.text + let updatedText = + message.adaptiveCards?.[0]?.body?.[0]?.text if (updatedText == null) { updatedText = message.text } - if (!updatedText || updatedText === replySoFar[messageCursor]) { + if ( + !updatedText || + updatedText === replySoFar[messageCursor] + ) { return } @@ -237,7 +260,9 @@ export class BingRequester extends ModelRequester { if (updatedText.trim().endsWith(stopToken)) { // apology = true // remove stop token from updated text - replySoFar[messageCursor] = updatedText.replace(stopToken, '').trim() + replySoFar[messageCursor] = updatedText + .replace(stopToken, '') + .trim() return } @@ -246,19 +271,23 @@ export class BingRequester extends ModelRequester { messageCursor += 1 replySoFar.push(updatedText) } else { - replySoFar[messageCursor] = replySoFar[messageCursor] + updatedText + replySoFar[messageCursor] = + replySoFar[messageCursor] + updatedText } // logger.debug(`message: ${JSON.stringify(message)}`) await writable.write(replySoFar.join('\n\n')) } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined + const messages = event.item.messages as + | ChatResponseMessage[] + | undefined if (!messages) { reject( new Error( - event.item.result.error || `Unknown error: ${JSON.stringify(event)}` + event.item.result.error || + `Unknown error: ${JSON.stringify(event)}` ) ) return @@ -268,7 +297,10 @@ export 
class BingRequester extends ModelRequester { for (let i = messages.length - 1; i >= 0; i--) { const message = messages[i] - if (message.author === 'bot' && message.messageType == null) { + if ( + message.author === 'bot' && + message.messageType == null + ) { eventMessage = messages[i] break } @@ -291,8 +323,10 @@ export class BingRequester extends ModelRequester { logger.debug(JSON.stringify(event.item)) if (replySoFar[0] && eventMessage) { - eventMessage.adaptiveCards[0].body[0].text = replySoFar.join('\n\n') - eventMessage.text = eventMessage.adaptiveCards[0].body[0].text + eventMessage.adaptiveCards[0].body[0].text = + replySoFar.join('\n\n') + eventMessage.text = + eventMessage.adaptiveCards[0].body[0].text resolve(eventMessage.text) return @@ -317,7 +351,11 @@ export class BingRequester extends ModelRequester { return } - if (event.item?.result?.exception?.indexOf('maximum context length') > -1) { + if ( + event.item?.result?.exception?.indexOf( + 'maximum context length' + ) > -1 + ) { reject( new Error( 'long context with 8k token limit, please start a new conversation' @@ -356,8 +394,10 @@ export class BingRequester extends ModelRequester { replySoFar[0] /* || event.item.messages[0].topicChangerText) */ || sydney ) { - eventMessage.adaptiveCards = eventMessage.adaptiveCards || [] - eventMessage.adaptiveCards[0] = eventMessage.adaptiveCards[0] || { + eventMessage.adaptiveCards = + eventMessage.adaptiveCards || [] + eventMessage.adaptiveCards[0] = eventMessage + .adaptiveCards[0] || { type: 'AdaptiveCard', body: [ { @@ -370,8 +410,8 @@ export class BingRequester extends ModelRequester { } eventMessage.adaptiveCards[0].body = eventMessage.adaptiveCards[0].body || [] - eventMessage.adaptiveCards[0].body[0] = eventMessage.adaptiveCards[0] - .body[0] || { + eventMessage.adaptiveCards[0].body[0] = eventMessage + .adaptiveCards[0].body[0] || { type: 'TextBlock', wrap: true, text: '' @@ -381,7 +421,8 @@ export class BingRequester extends ModelRequester { ? eventMessage.spokenText ?? eventMessage.text : replySoFar.join('\n\n') eventMessage.adaptiveCards[0].body[0].text = text - eventMessage.text = eventMessage.adaptiveCards[0].body[0].text + eventMessage.text = + eventMessage.adaptiveCards[0].body[0].text // delete useless suggestions from moderation filter delete eventMessage.suggestedResponses } @@ -391,7 +432,10 @@ export class BingRequester extends ModelRequester { // [{"type":7,"error":"Connection closed with an error.","allowReconnect":true}] ws.close() resolve( - new Error('error: ' + event.error || 'Connection closed with an error.') + new Error( + 'error: ' + event.error || + 'Connection closed with an error.' 
+ ) ) } }) @@ -413,7 +457,8 @@ export class BingRequester extends ModelRequester { conversationId: conversationResponse.conversationId, invocationId: 0, clientId: conversationResponse.clientId, - conversationSignature: conversationResponse.conversationSignature, + conversationSignature: + conversationResponse.conversationSignature, conversationStyle: this._style } } @@ -432,19 +477,29 @@ export class BingRequester extends ModelRequester { }) ).json()) as ConversationResponse - logger.debug(`Create conversation response: ${JSON.stringify(resp)}`) + logger.debug( + `Create conversation response: ${JSON.stringify(resp)}` + ) if (!resp.result) { throw new Error('Invalid response') } } catch (err) { - throw new ChatHubError(ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, err) + throw new ChatHubError( + ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, + err + ) } if (resp.result.value !== 'Success') { - logger.debug(`Failed to create conversation: ${JSON.stringify(resp)}`) + logger.debug( + `Failed to create conversation: ${JSON.stringify(resp)}` + ) const message = `${resp.result.value}: ${resp.result.message}` - throw new ChatHubError(ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, new Error(message)) + throw new ChatHubError( + ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, + new Error(message) + ) } return resp diff --git a/packages/newbing-adapter/src/types.ts b/packages/newbing-adapter/src/types.ts index 3410464a..8d422622 100644 --- a/packages/newbing-adapter/src/types.ts +++ b/packages/newbing-adapter/src/types.ts @@ -122,7 +122,8 @@ export function generateMarkdown(response: BingChatResponse) { // change `[^Number^]` to markdown link const regex = /\[\^(\d+)\^\]/g const markdown = response.details.text.replace(regex, (match, p1) => { - const sourceAttribution = response.details.sourceAttributions[Number(p1) - 1] + const sourceAttribution = + response.details.sourceAttributions[Number(p1) - 1] return `[${sourceAttribution.providerDisplayName}](${sourceAttribution.seeMoreUrl})` }) return markdown diff --git a/packages/openai-adapter/src/client.ts b/packages/openai-adapter/src/client.ts index eff5e77d..908577bf 100644 --- a/packages/openai-adapter/src/client.ts +++ b/packages/openai-adapter/src/client.ts @@ -11,7 +11,10 @@ import { } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/types' import { Context } from 'koishi' import { Config } from '.' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { OpenAIRequester } from './requester' export class OpenAIClient extends PlatformModelAndEmbeddingsClient { @@ -50,12 +53,20 @@ export class OpenAIClient extends PlatformModelAndEmbeddingsClient const rawModels = await this._requester.getModels() return rawModels - .filter((model) => model.includes('gpt') || model.includes('text-embedding')) + .filter( + (model) => + model.includes('gpt') || + model.includes('text-embedding') + ) .map((model) => { return { name: model, - type: model.includes('gpt') ? ModelType.llm : ModelType.embeddings, - supportChatMode: model.includes('gpt') ? (_) => true : undefined + type: model.includes('gpt') + ? ModelType.llm + : ModelType.embeddings, + supportChatMode: model.includes('gpt') + ? 
(_) => true + : undefined } }) } catch (e) { @@ -63,7 +74,9 @@ export class OpenAIClient extends PlatformModelAndEmbeddingsClient } } - protected _createModel(model: string): ChatHubChatModel | ChatHubBaseEmbeddings { + protected _createModel( + model: string + ): ChatHubChatModel | ChatHubBaseEmbeddings { const info = this._models[model] if (info == null) { diff --git a/packages/openai-adapter/src/index.ts b/packages/openai-adapter/src/index.ts index 1859a3c1..2ce6f803 100644 --- a/packages/openai-adapter/src/index.ts +++ b/packages/openai-adapter/src/index.ts @@ -43,7 +43,10 @@ export const Config: Schema = Schema.intersect([ Schema.object({ apiKeys: Schema.array( Schema.tuple([ - Schema.string().role('secret').description('OpenAI 的 API Key').required(), + Schema.string() + .role('secret') + .description('OpenAI 的 API Key') + .required(), Schema.string() .description('请求 OpenAI API 的地址') .default('https://api.openai.com/v1') @@ -69,13 +72,17 @@ export const Config: Schema = Schema.intersect([ .step(0.1) .default(0.8), presencePenalty: Schema.number() - .description('重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)') + .description( + '重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)' + ) .min(-2) .max(2) .step(0.1) .default(0.2), frequencyPenalty: Schema.number() - .description('频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)') + .description( + '频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)' + ) .min(-2) .max(2) .step(0.1) diff --git a/packages/openai-adapter/src/requester.ts b/packages/openai-adapter/src/requester.ts index b3dbba08..844c1c51 100644 --- a/packages/openai-adapter/src/requester.ts +++ b/packages/openai-adapter/src/requester.ts @@ -13,7 +13,10 @@ import { ChatCompletionResponseMessageRoleEnum, CreateEmbeddingResponse } from './types' -import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' +import { + ChatHubError, + ChatHubErrorCode +} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error' import { sseIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/sse' import { convertDeltaToMessageChunk, @@ -24,12 +27,17 @@ import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logg import { chathubFetch } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request' const logger = createLogger() -export class OpenAIRequester extends ModelRequester implements EmbeddingsRequester { +export class OpenAIRequester + extends ModelRequester + implements EmbeddingsRequester +{ constructor(private _config: ClientConfig) { super() } - async *completionStream(params: ModelRequestParams): AsyncGenerator { + async *completionStream( + params: ModelRequestParams + ): AsyncGenerator { try { const response = await this._post( 'chat/completions', @@ -58,7 +66,10 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest const iterator = sseIterable(response) let content = '' - let functionCall: ChatCompletionRequestMessageFunctionCall = { name: '', arguments: '' } + let functionCall: ChatCompletionRequestMessageFunctionCall = { + name: '', + arguments: '' + } let defaultRole: ChatCompletionResponseMessageRoleEnum = 'assistant' @@ -76,7 +87,10 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest if ((data as any).error) { throw new ChatHubError( ChatHubErrorCode.API_REQUEST_FAILED, - new Error('error when calling openai completion, Result: ' + chunk) + new Error( + 'error when calling openai completion, Result: ' + + chunk + ) ) } @@ -86,17 +100,23 @@ export class OpenAIRequester extends ModelRequester 
implements EmbeddingsRequest } const { delta } = choice - const messageChunk = convertDeltaToMessageChunk(delta, defaultRole) + const messageChunk = convertDeltaToMessageChunk( + delta, + defaultRole + ) messageChunk.content = content + messageChunk.content - const deltaFunctionCall = messageChunk.additional_kwargs.function_call + const deltaFunctionCall = + messageChunk.additional_kwargs.function_call if (deltaFunctionCall) { deltaFunctionCall.arguments = functionCall.arguments + deltaFunctionCall.arguments - deltaFunctionCall.name = functionCall.name + deltaFunctionCall.name + deltaFunctionCall.name = + functionCall.name + deltaFunctionCall.name } else if (functionCall.name.length > 0) { - messageChunk.additional_kwargs.function_call = functionCall + messageChunk.additional_kwargs.function_call = + functionCall } defaultRole = (delta.role ?? @@ -108,12 +128,18 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest }) yield generationChunk content = messageChunk.content - functionCall = deltaFunctionCall ?? { name: '', arguments: '' } + functionCall = deltaFunctionCall ?? { + name: '', + arguments: '' + } } catch (e) { if (errorCount > 20) { throw new ChatHubError( ChatHubErrorCode.API_REQUEST_FAILED, - new Error('error when calling openai completion, Result: ' + chunk) + new Error( + 'error when calling openai completion, Result: ' + + chunk + ) ) } else { continue @@ -129,7 +155,9 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest } } - async embeddings(params: EmbeddingsRequestParams): Promise { + async embeddings( + params: EmbeddingsRequestParams + ): Promise { // eslint-disable-next-line @typescript-eslint/no-explicit-any let data: CreateEmbeddingResponse @@ -144,13 +172,19 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest data = JSON.parse(rawData) as CreateEmbeddingResponse if (data.data && data.data.length > 0) { - return (data as CreateEmbeddingResponse).data.map((it) => it.embedding) + return (data as CreateEmbeddingResponse).data.map( + (it) => it.embedding + ) } - throw new Error('error when calling openai embeddings, Result: ' + JSON.stringify(data)) + throw new Error( + 'error when calling openai embeddings, Result: ' + + JSON.stringify(data) + ) } catch (e) { const error = new Error( - 'error when calling openai embeddings, Result: ' + JSON.stringify(data) + 'error when calling openai embeddings, Result: ' + + JSON.stringify(data) ) error.stack = e.stack @@ -173,7 +207,8 @@ export class OpenAIRequester extends ModelRequester implements EmbeddingsRequest return ([]>data.data).map((model) => model.id) } catch (e) { const error = new Error( - 'error when listing openai models, Result: ' + JSON.stringify(data) + 'error when listing openai models, Result: ' + + JSON.stringify(data) ) error.stack = e.stack diff --git a/packages/openai-adapter/src/types.ts b/packages/openai-adapter/src/types.ts index 4f7702c7..0b75268b 100644 --- a/packages/openai-adapter/src/types.ts +++ b/packages/openai-adapter/src/types.ts @@ -13,7 +13,11 @@ export interface ChatCompletionResponse { object: string created: number model: string - usage: { prompt_tokens: number; completion_tokens: number; total_tokens: number } + usage: { + prompt_tokens: number + completion_tokens: number + total_tokens: number + } } export interface ChatCompletionResponseMessage { @@ -117,4 +121,8 @@ export interface CreateEmbeddingResponseUsage { total_tokens: number } -export type ChatCompletionResponseMessageRoleEnum = 'system' 
| 'assistant' | 'user' | 'function' +export type ChatCompletionResponseMessageRoleEnum = + | 'system' + | 'assistant' + | 'user' + | 'function' diff --git a/packages/openai-adapter/src/utils.ts b/packages/openai-adapter/src/utils.ts index bab57954..0d9844c5 100644 --- a/packages/openai-adapter/src/utils.ts +++ b/packages/openai-adapter/src/utils.ts @@ -29,7 +29,9 @@ export function langchainMessageToOpenAIMessage( }) } -export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum { +export function messageTypeToOpenAIRole( + type: MessageType +): ChatCompletionResponseMessageRoleEnum { switch (type) { case 'system': return 'system' @@ -44,11 +46,15 @@ export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionRespon } } -export function formatToolsToOpenAIFunctions(tools: StructuredTool[]): ChatCompletionFunctions[] { +export function formatToolsToOpenAIFunctions( + tools: StructuredTool[] +): ChatCompletionFunctions[] { return tools.map(formatToolToOpenAIFunction) } -export function formatToolToOpenAIFunction(tool: StructuredTool): ChatCompletionFunctions { +export function formatToolToOpenAIFunction( + tool: StructuredTool +): ChatCompletionFunctions { return { name: tool.name, description: tool.description, diff --git a/packages/plugin-common/src/index.ts b/packages/plugin-common/src/index.ts index 4a069f2b..d37f1b12 100644 --- a/packages/plugin-common/src/index.ts +++ b/packages/plugin-common/src/index.ts @@ -26,14 +26,18 @@ export interface Config extends ChatHubPlugin.Config { export const Config: Schema = Schema.intersect([ Schema.object({ request: Schema.boolean() - .description('是否启用 request 插件(为模型提供 get/post 请求接口)') + .description( + '是否启用 request 插件(为模型提供 get/post 请求接口)' + ) .default(true), fs: Schema.boolean() .description('是否启用 fs 插件(为模型提供文件读写接口)') .default(false), bilibili: Schema.boolean() - .description('是否启用 bilibili 插件(为模型提供 bilibili 视频的阅读能力)') + .description( + '是否启用 bilibili 插件(为模型提供 bilibili 视频的阅读能力)' + ) .default(false) }).description('插件列表'), @@ -49,7 +53,9 @@ export const Config: Schema = Schema.intersect([ Schema.object({ fs: Schema.const(true).required(), fsScopePath: Schema.string() - .description('fs 插件的作用域路径 (为空则为整个电脑上的任意路径)') + .description( + 'fs 插件的作用域路径 (为空则为整个电脑上的任意路径)' + ) .default('') }), Schema.object({ diff --git a/packages/plugin-common/src/plugin.ts b/packages/plugin-common/src/plugin.ts index 2c033189..2ac87dcd 100644 --- a/packages/plugin-common/src/plugin.ts +++ b/packages/plugin-common/src/plugin.ts @@ -4,7 +4,11 @@ import { ChatHubPlugin } from '@dingyi222666/koishi-plugin-chathub/lib/services/ import { Config } from '.' 
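// Editorial note: formatToolToOpenAIFunction above maps a LangChain
// StructuredTool onto the OpenAI function-calling schema. For reference, the
// produced value has roughly this shape (a hand-written example; the concrete
// fields come from the tool's own schema, not from this patch):
const exampleChatCompletionFunction = {
    name: 'write_file',
    description: 'Write text to a file at the given path',
    parameters: {
        type: 'object',
        properties: {
            file_path: { type: 'string' },
            text: { type: 'string' }
        },
        required: ['file_path', 'text']
    }
}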
 import path from 'path'
 
-export async function plugin(ctx: Context, config: Config, plugin: ChatHubPlugin) {
+export async function plugin(
+    ctx: Context,
+    config: Config,
+    plugin: ChatHubPlugin
+) {
     const list = await fs.readdir(path.join(__dirname, '/plugins'))
 
     for (const file of list) {
@@ -13,7 +17,11 @@
         }
 
         const func: {
-            apply: (ctx: Context, config: Config, plugin: ChatHubPlugin) => PromiseLike<void> | void
+            apply: (
+                ctx: Context,
+                config: Config,
+                plugin: ChatHubPlugin
+            ) => PromiseLike<void> | void
         } = await require(`./plugins/${file}`)
 
         if (func.apply) {
diff --git a/packages/plugin-common/src/plugins/fs.ts b/packages/plugin-common/src/plugins/fs.ts
index b1c0acb1..4bb451a2 100644
--- a/packages/plugin-common/src/plugins/fs.ts
+++ b/packages/plugin-common/src/plugins/fs.ts
@@ -6,7 +6,11 @@ import { BaseFileStore } from 'langchain/schema'
 import { Tool, ToolParams } from 'langchain/tools'
 import { ChatHubPlugin } from '@dingyi222666/koishi-plugin-chathub/lib/services/chat'
 
-export async function apply(ctx: Context, config: Config, plugin: ChatHubPlugin) {
+export async function apply(
+    ctx: Context,
+    config: Config,
+    plugin: ChatHubPlugin
+) {
     if (config.fs !== true) {
         return
     }
@@ -109,7 +113,9 @@ export class WriteFileTool extends Tool {
         const regex = /"(.*)",(\s*)?"(.*)"$/
         const match = rawText.match(regex)
         if (!match) {
-            throw new Error(`Input "${rawText}" is not match the regex "${regex}"`)
+            throw new Error(
+                `Input "${rawText}" is not match the regex "${regex}"`
+            )
         }
         const filePath = match[1]
         const text = match[3]
diff --git a/packages/plugin-common/src/plugins/request.ts b/packages/plugin-common/src/plugins/request.ts
index c7c3101d..2cfc6506 100644
--- a/packages/plugin-common/src/plugins/request.ts
+++ b/packages/plugin-common/src/plugins/request.ts
@@ -1,10 +1,17 @@
 import { Context } from 'koishi'
 import { Config } from '..'
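// Editorial note: WriteFileTool above parses its raw input with the regex
// /"(.*)",(\s*)?"(.*)"$/, i.e. it expects `"<path>", "<text>"`. A quick
// illustration of what the two captured groups hold (sample values only):
const writeFileInput = '"/tmp/note.txt", "hello world"'
const writeFileMatch = writeFileInput.match(/"(.*)",(\s*)?"(.*)"$/)
// writeFileMatch[1] === '/tmp/note.txt'   (the file path)
// writeFileMatch[3] === 'hello world'     (the text to write)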
diff --git a/packages/plugin-common/src/plugins/request.ts b/packages/plugin-common/src/plugins/request.ts
index c7c3101d..2cfc6506 100644
--- a/packages/plugin-common/src/plugins/request.ts
+++ b/packages/plugin-common/src/plugins/request.ts
@@ -1,10 +1,17 @@
 import { Context } from 'koishi'
 import { Config } from '..'
 
-import { chathubFetch, randomUA } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
+import {
+    chathubFetch,
+    randomUA
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
 import { Tool } from 'langchain/tools'
 import { ChatHubPlugin } from '@dingyi222666/koishi-plugin-chathub/lib/services/chat'
 
-export async function apply(ctx: Context, config: Config, plugin: ChatHubPlugin) {
+export async function apply(
+    ctx: Context,
+    config: Config,
+    plugin: ChatHubPlugin
+) {
     if (config.request !== true) {
         return
     }
diff --git a/packages/poe-adapter/src/index.ts b/packages/poe-adapter/src/index.ts
index 053926fc..439955c4 100644
--- a/packages/poe-adapter/src/index.ts
+++ b/packages/poe-adapter/src/index.ts
@@ -5,7 +5,11 @@ import { PoeClient } from './client'
 
 export function apply(ctx: Context, config: Config) {
     config.chatConcurrentMaxSize = 1
 
-    const plugin = new ChatHubPlugin(ctx, config, 'poe')
+    const plugin = new ChatHubPlugin(
+        ctx,
+        config,
+        'poe'
+    )
 
     ctx.on('ready', async () => {
         await plugin.registerToService()
@@ -24,7 +28,9 @@ export function apply(ctx: Context, config: Config) {
             })
         })
 
-        await plugin.registerClient((_, clientConfig) => new PoeClient(ctx, config, clientConfig))
+        await plugin.registerClient(
+            (_, clientConfig) => new PoeClient(ctx, config, clientConfig)
+        )
 
         await plugin.initClients()
     })
@@ -39,13 +45,15 @@
 export const Config: Schema<Config> = Schema.intersect([
     ChatHubPlugin.Config,
     Schema.object({
-        cookies: Schema.array(Schema.string().role('secret').required()).description(
-            '已登录的 Poe 账号 Cookie 的 p-b 的值'
-        )
+        cookies: Schema.array(
+            Schema.string().role('secret').required()
+        ).description('已登录的 Poe 账号 Cookie 的 p-b 的值')
     }).description('请求设置'),
 
     Schema.object({
-        formatMessages: Schema.boolean().description('是否使用历史聊天消息').default(true)
+        formatMessages: Schema.boolean()
+            .description('是否使用历史聊天消息')
+            .default(true)
     }).description('对话设置')
 ])
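The poe-adapter config above follows the usual Koishi pattern: `Schema.intersect` merges the shared `ChatHubPlugin.Config` with adapter-specific option groups, each group carrying its own `.description(...)` heading. A minimal sketch of the same shape, with hypothetical English labels standing in for the Chinese UI strings used above:

```ts
import { Schema } from 'koishi'

interface ExampleConfig {
    cookies: string[]
    formatMessages: boolean
}

// Sketch only: one secret-valued string array plus a boolean toggle,
// grouped into two labelled sections, as in the diff above.
const ExampleConfig: Schema<ExampleConfig> = Schema.intersect([
    Schema.object({
        cookies: Schema.array(Schema.string().role('secret').required())
            .description('Cookies of logged-in accounts')
    }).description('Request settings'),

    Schema.object({
        formatMessages: Schema.boolean()
            .description('Send chat history with each request')
            .default(true)
    }).description('Conversation settings')
])
```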
diff --git a/packages/poe-adapter/src/requester.ts b/packages/poe-adapter/src/requester.ts
index 3511b6ba..168e15c9 100644
--- a/packages/poe-adapter/src/requester.ts
+++ b/packages/poe-adapter/src/requester.ts
@@ -5,12 +5,24 @@
 import { WebSocket } from 'ws'
 import { AIMessageChunk, ChatGenerationChunk } from 'langchain/schema'
 import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger'
-import { chathubFetch, randomUA, ws } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
-import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
+import {
+    chathubFetch,
+    randomUA,
+    ws
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
+import {
+    ChatHubError,
+    ChatHubErrorCode
+} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
 import { readableStreamToAsyncIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/stream'
 import { Context, sleep } from 'koishi'
-import { PoeBot, PoeClientConfig, PoeRequestHeaders, PoeSettingsResponse } from './types'
+import {
+    PoeBot,
+    PoeClientConfig,
+    PoeRequestHeaders,
+    PoeSettingsResponse
+} from './types'
 import {
     calculateClientNonce,
     extractFormKey,
@@ -42,7 +54,9 @@
         }
     }
 
-    async *completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk> {
+    async *completionStream(
+        params: ModelRequestParams
+    ): AsyncGenerator<ChatGenerationChunk> {
         await this.init()
 
         // await this._refreshConversation()
@@ -55,7 +69,11 @@
 
         const writable = stream.writable.getWriter()
 
         setTimeout(async () => {
-            const listenerPromise = this._buildListenerPromise(params, this._ws, writable)
+            const listenerPromise = this._buildListenerPromise(
+                params,
+                this._ws,
+                writable
+            )
 
             /* await */
             // not await to prevent blocking
@@ -65,7 +83,10 @@
 
             if (result instanceof Error) {
-                if (!(result instanceof ChatHubError)) {
-                    err = new ChatHubError(ChatHubErrorCode.API_REQUEST_FAILED, err)
-                }
-                err = result
+                // keep ChatHubError instances as-is; wrap anything else,
+                // passing `result` (not the still-unset `err`) as the cause
+                err = result
+                if (!(err instanceof ChatHubError)) {
+                    err = new ChatHubError(
+                        ChatHubErrorCode.API_REQUEST_FAILED,
+                        result
+                    )
+                }
             }
@@ -102,7 +123,10 @@
             if (e instanceof ChatHubError) {
                 throw e
             }
-            throw new ChatHubError(ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR, e)
+            throw new ChatHubError(
+                ChatHubErrorCode.MODEL_CONVERSION_INIT_ERROR,
+                e
+            )
         }
     }
@@ -112,7 +136,9 @@
 
         return Object.keys(this._poeBots)
     }
 
-    private async _sendMessage(params: ModelRequestParams): Promise {
+    private async _sendMessage(
+        params: ModelRequestParams
+    ): Promise {
         const bot = this._poeBots[params.model]
 
         const prompt = this._config.formatMessages
@@ -180,7 +206,11 @@
     }
 
     private async _init() {
-        if (this._poeSettings == null || this._headers['poe-formkey'] == null || this._ws == null) {
+        if (
+            this._poeSettings == null ||
+            this._headers['poe-formkey'] == null ||
+            this._ws == null
+        ) {
             await this._getCredentials()
 
             await this._initBots()
@@ -208,11 +238,14 @@
             delete cloneOfHeaders[key]
         }
 
-        const response = await chathubFetch('https://poe.com', { headers: cloneOfHeaders })
+        const response = await chathubFetch('https://poe.com', {
+            headers: cloneOfHeaders
+        })
 
         const source = await response.text()
 
         const jsonRegex = /