style: format code
dingyi222666 committed Sep 22, 2023
1 parent 56dd003 commit b664218
Showing 146 changed files with 2,644 additions and 926 deletions.
2 changes: 1 addition & 1 deletion .eslintrc.yml
@@ -27,7 +27,7 @@ plugins:
rules:
prettier/prettier:
- error
- printWidth: 100
- printWidth: 80
- tabWidth: 4

array-callback-return: off
4 changes: 2 additions & 2 deletions .prettierrc
@@ -1,11 +1,11 @@
{
"trailingComma": "none",
"printWidth": 100,
"printWidth": 80,
"tabWidth": 4,
"semi": false,
"singleQuote": true,
"endOfLine": "auto",
"htmlWhitespaceSensitivity": "ignore",
"arrowParens": "always",
"overrides": []
}
}
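
For context, the two config changes above lower Prettier's line-length limit from 100 to 80 columns (with 4-space indentation, no semicolons, single quotes), which is what forces the long expressions in the files below onto multiple lines. A minimal sketch of checking that effect with the Prettier API — not part of this commit; it assumes Prettier v3+ (where format() is async) is installed, and the sample string is just the one-liner from the bard-adapter change below:

import { format } from 'prettier'

// A line that fits under the old 100-column limit but not the new 80-column one.
const source =
    'plugin.registerClient((_, clientConfig) => new BardClient(ctx, config, clientConfig))\n'

format(source, {
    parser: 'typescript',
    printWidth: 80,
    tabWidth: 4,
    semi: false,
    singleQuote: true
}).then((formatted) => {
    // Prettier wraps the arrow-function argument onto its own line,
    // matching the pattern repeated throughout this diff.
    console.log(formatted)
})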
10 changes: 6 additions & 4 deletions packages/bard-adapter/src/index.ts
@@ -24,7 +24,9 @@ export function apply(ctx: Context, config: Config) {
})
})

await plugin.registerClient((_, clientConfig) => new BardClient(ctx, config, clientConfig))
await plugin.registerClient(
(_, clientConfig) => new BardClient(ctx, config, clientConfig)
)

await plugin.initClients()
})
@@ -40,9 +42,9 @@ export const Config: Schema<Config> = Schema.intersect([
ChatHubPlugin.Config,

Schema.object({
cookies: Schema.array(Schema.string().role('secret').required()).description(
'在 bard.google.com 登录后获取的 Cookie'
)
cookies: Schema.array(
Schema.string().role('secret').required()
).description('在 bard.google.com 登录后获取的 Cookie')
}).description('请求设置')
])

69 changes: 47 additions & 22 deletions packages/bard-adapter/src/requester.ts
@@ -3,10 +3,18 @@ import {
ModelRequestParams
} from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/api'
import { ClientConfig } from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/config'
import { AIMessageChunk, BaseMessage, ChatGeneration, ChatGenerationChunk } from 'langchain/schema'
import {
AIMessageChunk,
BaseMessage,
ChatGeneration,
ChatGenerationChunk
} from 'langchain/schema'
import { createLogger } from '@dingyi222666/koishi-plugin-chathub/lib/utils/logger'
import { chathubFetch } from '@dingyi222666/koishi-plugin-chathub/lib/utils/request'
import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import {
ChatHubError,
ChatHubErrorCode
} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import { Random } from 'koishi'
import { BardRequestInfo, BardResponse, BardWebRequestInfo } from './types'
import { SESSION_HEADERS } from './utils'
@@ -26,7 +34,9 @@ export class BardRequester extends ModelRequester {
super()
}

async *completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk> {
async *completionStream(
params: ModelRequestParams
): AsyncGenerator<ChatGenerationChunk> {
// Bard does not support event streams, so just call completion

const result = await this.completion(params)
@@ -97,7 +107,8 @@

const bardResponse = await this._parseResponse(await response.text())

this._bardRequestInfo.requestId = this._bardRequestInfo.requestId + 100000
this._bardRequestInfo.requestId =
this._bardRequestInfo.requestId + 100000

this._bardRequestInfo.conversation = {
c: bardResponse.conversationId,
@@ -163,30 +174,39 @@
}

// data:image/
const imageName = 'bard-ai.' + image.match(/data:image\/(\w+);base64,(.+)/)?.[1]
const imageName =
'bard-ai.' + image.match(/data:image\/(\w+);base64,(.+)/)?.[1]

logger.debug(`Uploading image ${imageName}`)

const imageData = Buffer.from(image.replace(/^data:image\/\w+;base64,/, ''), 'base64')
const imageData = Buffer.from(
image.replace(/^data:image\/\w+;base64,/, ''),
'base64'
)

const size = imageData.byteLength.toString()
const formBody = [
`${encodeURIComponent('File name')}=${encodeURIComponent(imageName)}`
`${encodeURIComponent('File name')}=${encodeURIComponent(
imageName
)}`
].join('')

try {
let response = await chathubFetch('https://content-push.googleapis.com/upload/', {
method: 'POST',
headers: {
'X-Goog-Upload-Command': 'start',
'X-Goog-Upload-Protocol': 'resumable',
'X-Goog-Upload-Header-Content-Length': size,
'X-Tenant-Id': 'bard-storage',
'Push-Id': 'feeds/mcudyrk2a4khkz'
},
body: formBody,
credentials: 'include'
})
let response = await chathubFetch(
'https://content-push.googleapis.com/upload/',
{
method: 'POST',
headers: {
'X-Goog-Upload-Command': 'start',
'X-Goog-Upload-Protocol': 'resumable',
'X-Goog-Upload-Header-Content-Length': size,
'X-Tenant-Id': 'bard-storage',
'Push-Id': 'feeds/mcudyrk2a4khkz'
},
body: formBody,
credentials: 'include'
}
)

const uploadUrl = response.headers.get('X-Goog-Upload-URL')

@@ -223,7 +243,9 @@

this._bardWebRequestInfo = await this._getInitParams()

logger.info(`bard init params: ${JSON.stringify(this._bardWebRequestInfo)}`)
logger.info(
`bard init params: ${JSON.stringify(this._bardWebRequestInfo)}`
)

if (this._bardRequestInfo.conversation == null) {
this._bardRequestInfo.conversation = {
@@ -340,7 +362,9 @@
}

private _buildHeader(isUploadImage: boolean = false) {
const base: typeof SESSION_HEADERS & { cookie?: string } = { ...SESSION_HEADERS }
const base: typeof SESSION_HEADERS & { cookie?: string } = {
...SESSION_HEADERS
}

base.cookie = this._config.apiKey

@@ -354,7 +378,8 @@
headers: {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Content-Type':
'application/x-www-form-urlencoded;charset=UTF-8',
cookie: this._config.apiKey
},
credentials: 'same-origin'
9 changes: 7 additions & 2 deletions packages/chatglm-adapter/src/client.ts
@@ -11,7 +11,10 @@ import {
} from '@dingyi222666/koishi-plugin-chathub/lib/llm-core/platform/types'
import { Context } from 'koishi'
import { Config } from '.'
import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import {
ChatHubError,
ChatHubErrorCode
} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import { OpenLLMRequester } from './requester'

export class OpenLLMClient extends PlatformModelAndEmbeddingsClient<ClientConfig> {
@@ -69,7 +72,9 @@ export class OpenLLMClient extends PlatformModelAndEmbeddingsClient<ClientConfig
}
}

protected _createModel(model: string): ChatHubChatModel | ChatHubBaseEmbeddings {
protected _createModel(
model: string
): ChatHubChatModel | ChatHubBaseEmbeddings {
const info = this._models[model]

if (info == null) {
17 changes: 13 additions & 4 deletions packages/chatglm-adapter/src/index.ts
@@ -44,15 +44,20 @@ export const Config: Schema<Config> = Schema.intersect([
Schema.object({
apiKeys: Schema.array(
Schema.tuple([
Schema.string().role('secret').default('').description('OpenAI 的 API Key'),
Schema.string()
.role('secret')
.default('')
.description('OpenAI 的 API Key'),
Schema.string()
.description('请求 API for Open LLMs 自搭建后端的地址')
.default('http://127.0.0.1:8000')
])
)
.description('API for Open LLMs 服务的 API Key 和请求地址列表')
.default([['', 'http://127.0.0.1:8000']]),
embeddings: Schema.string().description('Embeddings 模型的名称').default('moka-ai/m3e-base')
embeddings: Schema.string()
.description('Embeddings 模型的名称')
.default('moka-ai/m3e-base')
}).description('请求设置'),

Schema.object({
@@ -71,13 +76,17 @@
.step(0.1)
.default(0.8),
presencePenalty: Schema.number()
.description('重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)')
.description(
'重复惩罚,越高越不易重复出现过至少一次的 Token(-2~2,每步0.1)'
)
.min(-2)
.max(2)
.step(0.1)
.default(0.2),
frequencyPenalty: Schema.number()
.description('频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)')
.description(
'频率惩罚,越高越不易重复出现次数较多的 Token(-2~2,每步0.1)'
)
.min(-2)
.max(2)
.step(0.1)
36 changes: 28 additions & 8 deletions packages/chatglm-adapter/src/requester.ts
@@ -13,20 +13,28 @@ import {
ChatCompletionResponseMessageRoleEnum,
CreateEmbeddingResponse
} from './types'
import { ChatHubError, ChatHubErrorCode } from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import {
ChatHubError,
ChatHubErrorCode
} from '@dingyi222666/koishi-plugin-chathub/lib/utils/error'
import { sseIterable } from '@dingyi222666/koishi-plugin-chathub/lib/utils/sse'
import {
convertDeltaToMessageChunk,
formatToolsToOpenAIFunctions,
langchainMessageToOpenAIMessage
} from './utils'

export class OpenLLMRequester extends ModelRequester implements EmbeddingsRequester {
export class OpenLLMRequester
extends ModelRequester
implements EmbeddingsRequester
{
constructor(private _config: ClientConfig) {
super()
}

async *completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk> {
async *completionStream(
params: ModelRequestParams
): AsyncGenerator<ChatGenerationChunk> {
try {
const response = await this._post(
'chat/completions',
@@ -70,7 +78,10 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques
if ((data as any).error) {
throw new ChatHubError(
ChatHubErrorCode.API_REQUEST_FAILED,
new Error('error when calling completion, Result: ' + chunk)
new Error(
'error when calling completion, Result: ' +
chunk
)
)
}

@@ -80,7 +91,10 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques
}

const { delta } = choice
const messageChunk = convertDeltaToMessageChunk(delta, defaultRole)
const messageChunk = convertDeltaToMessageChunk(
delta,
defaultRole
)

messageChunk.content = content + messageChunk.content

@@ -107,7 +121,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques
}
}

async embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]> {
async embeddings(
params: EmbeddingsRequestParams
): Promise<number[] | number[][]> {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
let data: CreateEmbeddingResponse | any

@@ -122,7 +138,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques
data = JSON.parse(data) as CreateEmbeddingResponse

if (data.data && data.data.length > 0) {
return (data as CreateEmbeddingResponse).data.map((it) => it.embedding)
return (data as CreateEmbeddingResponse).data.map(
(it) => it.embedding
)
}

throw new Error()
@@ -149,7 +167,9 @@ export class OpenLLMRequester extends ModelRequester implements EmbeddingsReques
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return (<Record<string, any>[]>data.data).map((model) => model.id)
} catch (e) {
const error = new Error('error when listing models, Result: ' + JSON.stringify(data))
const error = new Error(
'error when listing models, Result: ' + JSON.stringify(data)
)

error.stack = e.stack
error.cause = e.cause
12 changes: 10 additions & 2 deletions packages/chatglm-adapter/src/types.ts
@@ -9,7 +9,11 @@ export interface ChatCompletionResponse {
object: string
created: number
model: string
usage: { prompt_tokens: number; completion_tokens: number; total_tokens: number }
usage: {
prompt_tokens: number
completion_tokens: number
total_tokens: number
}
}

export interface ChatCompletionResponseMessage {
@@ -113,4 +117,8 @@ export interface CreateEmbeddingResponseUsage {
total_tokens: number
}

export type ChatCompletionResponseMessageRoleEnum = 'system' | 'assistant' | 'user' | 'function'
export type ChatCompletionResponseMessageRoleEnum =
| 'system'
| 'assistant'
| 'user'
| 'function'
12 changes: 9 additions & 3 deletions packages/chatglm-adapter/src/utils.ts
@@ -29,7 +29,9 @@ export function langchainMessageToOpenAIMessage(
})
}

export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum {
export function messageTypeToOpenAIRole(
type: MessageType
): ChatCompletionResponseMessageRoleEnum {
switch (type) {
case 'system':
return 'system'
@@ -44,11 +46,15 @@ export function messageTypeToOpenAIRole(type: MessageType): ChatCompletionRespon
}
}

export function formatToolsToOpenAIFunctions(tools: StructuredTool[]): ChatCompletionFunctions[] {
export function formatToolsToOpenAIFunctions(
tools: StructuredTool[]
): ChatCompletionFunctions[] {
return tools.map(formatToolToOpenAIFunction)
}

export function formatToolToOpenAIFunction(tool: StructuredTool): ChatCompletionFunctions {
export function formatToolToOpenAIFunction(
tool: StructuredTool
): ChatCompletionFunctions {
return {
name: tool.name,
description: tool.description,
6 changes: 5 additions & 1 deletion packages/claude2-adapter/src/client.ts
@@ -58,7 +58,11 @@ export class Claude2Client extends PlatformModelClient<Claude2ClientConfig> {

protected _createModel(model: string): ChatHubChatModel {
return new ChatHubChatModel({
requester: new Claude2Requester(this.ctx, this._clientConfig, this._organizationId),
requester: new Claude2Requester(
this.ctx,
this._clientConfig,
this._organizationId
),
model,
modelMaxContextSize: 10000,
timeout: this._config.timeout,