Skip to content

Commit

Permalink
feat: add independent configuration for xai
Browse files Browse the repository at this point in the history
fix: fix non-forum groups giving no response when replying to the bot's /new command alone.
fix telegraph debug messages with no specific information.
chore: add action every time you send a message.
chore: remove some redundant code
  • Loading branch information
adolphnov committed Nov 21, 2024
1 parent 6d2d770 commit 4246d49
Show file tree
Hide file tree
Showing 23 changed files with 562 additions and 22,737 deletions.
1 change: 0 additions & 1 deletion dist/buildinfo.json

This file was deleted.

21,761 changes: 0 additions & 21,761 deletions dist/index.js

This file was deleted.

1 change: 0 additions & 1 deletion dist/timestamp

This file was deleted.

37 changes: 19 additions & 18 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "chatgpt-telegram-workers",
"type": "module",
"version": "2.0.1",
"version": "2.0.2",
"description": "The easiest and quickest way to deploy your own ChatGPT Telegram bot is to use a single file and simply copy and paste it. There is no need for any dependencies, local development environment configuration, domain names, or servers.",
"author": "tbxark <tbxark@outlook.com>",
"license": "MIT",
Expand Down Expand Up @@ -40,26 +40,27 @@
"clean": "rm -rf dist"
},
"dependencies": {
"@ai-sdk/anthropic": "^1.0.0",
"@ai-sdk/azure": "^1.0.2",
"@ai-sdk/cohere": "^1.0.0",
"@ai-sdk/google": "^1.0.0",
"@ai-sdk/google-vertex": "^1.0.0",
"@ai-sdk/mistral": "^1.0.1",
"@ai-sdk/openai": "^1.0.1",
"ai": "^4.0.1",
"@ai-sdk/anthropic": "^1.0.1",
"@ai-sdk/azure": "^1.0.3",
"@ai-sdk/cohere": "^1.0.1",
"@ai-sdk/google": "^1.0.1",
"@ai-sdk/google-vertex": "^1.0.1",
"@ai-sdk/mistral": "^1.0.2",
"@ai-sdk/openai": "^1.0.2",
"@ai-sdk/xai": "^1.0.2",
"ai": "^4.0.2",
"cloudflare-worker-adapter": "^1.3.4",
"node-cron": "^3.0.3",
"ws": "^8.18.0"
},
"devDependencies": {
"@ai-sdk/anthropic": "^1.0.0",
"@ai-sdk/azure": "^1.0.2",
"@ai-sdk/cohere": "^1.0.0",
"@ai-sdk/google": "^1.0.0",
"@ai-sdk/google-vertex": "^1.0.0",
"@ai-sdk/mistral": "^1.0.1",
"@ai-sdk/openai": "^1.0.1",
"@ai-sdk/anthropic": "^1.0.1",
"@ai-sdk/azure": "^1.0.3",
"@ai-sdk/cohere": "^1.0.1",
"@ai-sdk/google": "^1.0.1",
"@ai-sdk/google-vertex": "^1.0.1",
"@ai-sdk/mistral": "^1.0.2",
"@ai-sdk/openai": "^1.0.2",
"@antfu/eslint-config": "^3.9.2",
"@cloudflare/workers-types": "^4.20241112.0",
"@google-cloud/vertexai": "^1.9.0",
Expand All @@ -71,11 +72,11 @@
"@types/react-dom": "^18.3.1",
"@types/ws": "^8.5.13",
"@vercel/node": "^3.2.25",
"ai": "^4.0.1",
"ai": "^4.0.2",
"eslint": "^9.15.0",
"eslint-plugin-format": "^0.1.2",
"gts": "^6.0.2",
"openai": "^4.72.0",
"openai": "^4.73.0",
"react-dom": "^18.3.1",
"rollup-plugin-cleanup": "^3.2.1",
"rollup-plugin-node-externals": "^7.1.3",
Expand Down
17 changes: 9 additions & 8 deletions scripts/plugins/docker/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,14 +25,15 @@ const packageJson = `
"start": "node index.js"
},
"dependencies": {
"@ai-sdk/anthropic": "^1.0.0",
"@ai-sdk/azure": "^1.0.2",
"@ai-sdk/cohere": "^1.0.0",
"@ai-sdk/google": "^1.0.0",
"@ai-sdk/google-vertex": "^1.0.0",
"@ai-sdk/mistral": "^1.0.1",
"@ai-sdk/openai": "^1.0.1",
"ai": "^4.0.1",
"@ai-sdk/anthropic": "^1.0.1",
"@ai-sdk/azure": "^1.0.3",
"@ai-sdk/cohere": "^1.0.1",
"@ai-sdk/google": "^1.0.1",
"@ai-sdk/google-vertex": "^1.0.1",
"@ai-sdk/mistral": "^1.0.2",
"@ai-sdk/openai": "^1.0.2",
"@ai-sdk/xai": "^1.0.2",
"ai": "^4.0.2",
"cloudflare-worker-adapter": "^1.3.4",
"node-cron": "^3.0.3",
"ws": "^8.18.0"
Expand Down
9 changes: 0 additions & 9 deletions src/agent/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,6 @@ import type { WorkerContext } from '../config/context';
import type { ChatAgent, ChatStreamTextHandler, HistoryItem, HistoryModifier, ImageResult, LLMChatParams, LLMChatRequestParams, ResponseMessage } from './types';
import { ENV } from '../config/env';

/**
* @returns {(function(string): number)}
*/
function tokensCounter(): (text: string) => number {
return (text) => {
return text.length;
};
}

export async function loadHistory(key: string): Promise<HistoryItem[]> {
// 加载历史记录
let history = [];
Expand Down
2 changes: 1 addition & 1 deletion src/agent/index.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/* eslint-disable no-case-declarations */
import type { CoreMessage, CoreToolChoice, CoreUserMessage, LanguageModelV1 } from 'ai';
import type { CoreMessage, CoreUserMessage, LanguageModelV1 } from 'ai';
import type { AudioAgent, ChatAgent, ImageAgent } from './types';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createCohere } from '@ai-sdk/cohere';
Expand Down
8 changes: 0 additions & 8 deletions src/agent/model_middleware.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
/* eslint-disable unused-imports/no-unused-vars */
import type {
CoreToolChoice,
LanguageModelV1,
LanguageModelV1CallOptions,
Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware,
Expand All @@ -12,7 +11,6 @@ import type { ChatStreamTextHandler } from './types';
import { createLlmModel } from '.';
import { getLogSingleton } from '../log/logDecortor';
import { log } from '../log/logger';
import { OpenAI } from './openai';

type Writeable<T> = { -readonly [P in keyof T]: T[P] };

Expand Down Expand Up @@ -49,12 +47,9 @@ export function AIMiddleware({ config, tools, activeTools, onStream, toolChoice,
if (toolChoice.length > 0 && step < toolChoice.length && params.mode.type === 'regular') {
params.mode.toolChoice = toolChoice[step] as any;
log.info(`toolChoice changed: ${JSON.stringify(toolChoice[step])}`);
// Unable to filter through activeTools, can only compromise by using tools.
// Filter out used tools to prevent calling the same tool.
params.mode.tools = params.mode.tools?.filter(i => activeTools.includes(i.name));
}
warpMessages(params, tools, activeTools, rawSystemPrompt);
// log.info(`request params: ${JSON.stringify(params, null, 2)}`);
return params;
},

Expand Down Expand Up @@ -111,7 +106,6 @@ export function AIMiddleware({ config, tools, activeTools, onStream, toolChoice,
logs.ongoingFunctions = logs.ongoingFunctions.filter(i => i.startTime !== startTime);
sendToolCall = false;
step++;
// onStream?.send(`${messageReferencer.join('')}...\n` + `step ${step} finished`);
},
};
}
Expand Down Expand Up @@ -161,8 +155,6 @@ function trimActiveTools(activeTools: string[], toolNames: string[]) {

function recordModelLog(config: AgentUserConfig, model: LanguageModelV1, activeTools: string[], toolChoice: ToolChoice) {
const logs = getLogSingleton(config);
// const openaiTransformModelRegex = new RegExp(`^${OpenAI.transformModelPerfix}`);
// const modelId = model.provider.includes('openai') ? model.modelId.replace(openaiTransformModelRegex, '') : model.modelId;
log.info(`provider: ${model.provider}, modelId: ${model.modelId} `);
if (activeTools.length > 0 && toolChoice?.type !== 'none') {
logs.tool.model = model.modelId;
Expand Down
23 changes: 1 addition & 22 deletions src/agent/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,9 @@ import { log } from '../log/logger';
import { requestText2Image } from './chat';
import { requestChatCompletionsV2 } from './request';

class OpenAIBase {
export class OpenAIBase {
readonly name = 'openai';
type = 'chat';
apikey = (context: AgentUserConfig): string => {
if (this.type === 'tool' && context.FUNCTION_CALL_API_KEY) {
return context.FUNCTION_CALL_API_KEY;
}
const length = context.OPENAI_API_KEY.length;
return context.OPENAI_API_KEY[Math.floor(Math.random() * length)];
};
Expand All @@ -33,30 +29,13 @@ export class OpenAI extends OpenAIBase implements ChatAgent {
return Array.isArray(params?.content) ? ctx.OPENAI_VISION_MODEL : ctx.OPENAI_CHAT_MODEL;
};

// readonly transformModel = (model: string, context: AgentUserConfig): string => {
// if (context.OPENAI_NEED_TRANSFORM_MODEL.includes(model)) {
// return `${OpenAI.transformModelPerfix}${model}`;
// }
// return model;
// };

// 仅文本对话使用该地址
readonly base_url = (context: AgentUserConfig): string => {
if (this.type === 'tool' && context.FUNCTION_CALL_BASE) {
return context.FUNCTION_CALL_BASE;
}
return context.OPENAI_API_BASE;
};

readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise<{ messages: ResponseMessage[]; content: string }> => {
const userMessage = params.messages.at(-1) as CoreUserMessage;
const originalModel = this.model(context, userMessage);
// const transformedModel = this.transformModel(originalModel, context);
const provider = createOpenAI({
baseURL: context.OPENAI_API_BASE,
apiKey: this.apikey(context),
compatibility: 'strict',
// fetch: originalModel === transformedModel ? undefined : this.fetch,
});

const languageModelV1 = provider.languageModel(originalModel, undefined);
Expand Down
2 changes: 1 addition & 1 deletion src/agent/request.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import type { CoreMessage, CoreToolChoice, LanguageModelV1, StepResult } from 'ai';
import type { CoreMessage, LanguageModelV1, StepResult } from 'ai';
import type { ToolChoice } from '.';
import type { AgentUserConfig } from '../config/env';
import type { ChatStreamTextHandler, OpenAIFuncCallData, ResponseMessage } from './types';
Expand Down
2 changes: 1 addition & 1 deletion src/agent/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ export interface Agent<AgentRequest> {
modelKey: string;
enable: (context: AgentUserConfig) => boolean;
request: AgentRequest;
model: (ctx: AgentUserConfig) => string;
model: (ctx: AgentUserConfig, params?: LLMChatRequestParams) => string;
render?: (response: Response) => Promise<ImageResult>;
}

Expand Down
30 changes: 30 additions & 0 deletions src/agent/xai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import type { AgentUserConfig } from '../config/env';
import type { ChatAgent, ChatStreamTextHandler, LLMChatParams, LLMChatRequestParams, ResponseMessage } from './types';
import { createXai } from '@ai-sdk/xai';
import { warpLLMParams } from '.';
import { requestChatCompletionsV2 } from './request';

export class XAI implements ChatAgent {
    readonly name = 'xai';
    readonly modelKey = 'XAI_CHAT_MODEL';

    // The agent is enabled only when an xAI API key is configured.
    readonly enable = (context: AgentUserConfig): boolean => {
        return !!(context.XAI_API_KEY);
    };

    // Select the vision model when the message content is multi-part
    // (i.e. contains images); otherwise fall back to the text chat model.
    readonly model = (ctx: AgentUserConfig, params?: LLMChatRequestParams): string => {
        return Array.isArray(params?.content) ? ctx.XAI_VISION_MODEL : ctx.XAI_CHAT_MODEL;
    };

    /**
     * Send a chat completion request to xAI via the @ai-sdk/xai provider.
     * Streams partial text through `onStream` when provided.
     */
    readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise<{ messages: ResponseMessage[]; content: string }> => {
        const provider = createXai({
            baseURL: context.XAI_API_BASE,
            apiKey: context.XAI_API_KEY || undefined,
        });
        // Fix: inspect the latest user message so multimodal (image) input selects
        // XAI_VISION_MODEL. Previously `this.model(context)` was called without the
        // message, so the vision model branch was unreachable (cf. the OpenAI agent,
        // which passes the last user message for the same purpose).
        const userMessage = params.messages.at(-1) as LLMChatRequestParams | undefined;
        const languageModelV1 = provider.languageModel(this.model(context, userMessage), undefined);
        return requestChatCompletionsV2(await warpLLMParams({
            model: languageModelV1,
            messages: params.messages,
        }, context), onStream);
    };
}
55 changes: 16 additions & 39 deletions src/config/config.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import type { FlowStruct, LogLevelType } from './types';
import type { LogLevelType } from './types';
import prompts_default from '../extra/prompt';
// -- 只能通过环境变量覆盖的配置 --
export class EnvironmentConfig {
Expand Down Expand Up @@ -42,6 +42,8 @@ export class EnvironmentConfig {
'AZURE_COMPLETIONS_API',
'AZURE_DALLE_API',
'GOOGLEAI_STUDIO_API_BASE',
'OPENAILIKE_API_BASE',
'XAI_API_BASE',
];

// -- 群组相关 --
Expand Down Expand Up @@ -132,28 +134,18 @@ export class EnvironmentConfig {
// Log level
LOG_LEVEL: LogLevelType = 'info';

// The model is not fully compatible with the openai function call setting parameter to false, by default it is not fully compatible.
// When the model name does not contain "gpt" and this parameter is set to false: remove data with empty content (when calling gpt function, content is empty), remove tool_choice and tool_calls parameters.
// At the same time, replace role = tool data with role = user, and replace content with name + result.
// This parameter only takes effect when chat agent is openai.
MODEL_COMPATIBLE_OPENAI = false;

// -------------

// -- 模式开关 --
//
// 使用流模式
STREAM_MODE = true;
// 安全模式
// 安全模式 异步模式(polling, 异步webhook)下可关闭
SAFE_MODE = true;
// 调试模式
DEBUG_MODE = false;
// 开发模式
DEV_MODE = false;
/**
* @deprecated 是否发送初始化消息
*/
SEND_INIT_MESSAGE = true;

QSTASH_URL = 'https://qstash.upstash.io';
// qstash token
Expand Down Expand Up @@ -253,7 +245,7 @@ export class GeminiConfig {
// Google Gemini API: Cloudflare AI gateway: https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_name}/google-ai-studio/v1/models
GOOGLE_API_BASE = 'https://generativelanguage.googleapis.com/v1beta';
// Google Gemini Model
GOOGLE_CHAT_MODEL = 'gemini-1.5-flash-latest';
GOOGLE_CHAT_MODEL = 'gemini-1.5-flash-002';
}

// -- Mistral 配置 --
Expand Down Expand Up @@ -314,6 +306,16 @@ export class VertexConfig {
VERTEX_SEARCH_GROUNDING = false;
}

// -- xAI (Grok) configuration --
export class XAIConfig {
    // xAI API key; the agent is disabled when null.
    XAI_API_KEY: string | null = null;
    // xAI API base URL. Fix: the official endpoint is api.x.ai, not api.xai.com
    // (matches the default baseURL of the @ai-sdk/xai provider).
    XAI_API_BASE = 'https://api.x.ai/v1';
    // xAI chat (text) model.
    XAI_CHAT_MODEL = 'grok-beta';
    // xAI vision (multimodal) model.
    XAI_VISION_MODEL = 'grok-vision-beta';
}

export class DefineKeys {
DEFINE_KEYS: string[] = [];
}
Expand All @@ -330,37 +332,12 @@ export class ExtraUserConfig {
// Function to use, currently has duckduckgo and jina_reader
// '["duckduckgo", "jina_reader"]'
USE_TOOLS: string[] = ['duckduckgo', 'jina_reader'];
JINA_API_KEY = [];
JINA_API_KEY: string[] = [];
// if starts with '{agent}:' perfix, the specified agent corresponds to the chat model,
// otherwise use the current agent and the specified model.
// Keep empty to use the current agent chat model as function call model.
TOOL_MODEL = '';

/**
* @deprecated in this version, it is no longer supported
*/
FUNCTION_REPLY_ASAP = true;
PROMPT: Record<string, string> = prompts_default;
MODES: Record<string, FlowStruct> = {
default: { text: {}, image: {}, audio: { workflow: [{ type: 'text' }, {}] } },
dalle: {
text: {
disableHistory: true,
disableTool: true,
workflow: [{ agent: 'openai', model: 'gpt-4o-2024-08-06', prompt: 'dalle' }, { type: 'image' }],
},
},
pk: {
text: {
// isParallel: true,
disableHistory: false,
disableTool: false,
workflow: [{ model: 'gpt-4o-2024-08-06' }, { model: 'chatgpt-4o-latest' }],
},
},
};

CURRENT_MODE = 'default';

// INLINE_AGENTS
// INLINE_AGENTS = ['openai', 'claude', 'google', 'vertex', 'cohere', 'workersai'];
Expand Down
2 changes: 1 addition & 1 deletion src/telegram/command/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import type { RequestTemplate } from '../../plugins/template';
import type { UnionData } from '../utils/utils';
import type { CommandHandler } from './types';
import { ENV } from '../../config/env';
import { log } from '../../log/logger';
import { executeRequest, formatInput } from '../../plugins/template';
import { MessageSender, sendAction } from '../utils/send';
import { loadChatRoleWithContext } from './auth';
Expand All @@ -25,7 +26,6 @@ import {
SystemCommandHandler,
VersionCommandHandler,
} from './system';
import { log } from '../../log/logger';

const SYSTEM_COMMANDS: CommandHandler[] = [
new StartCommandHandler(),
Expand Down
Loading

0 comments on commit 4246d49

Please sign in to comment.