diff --git a/src/backend/bisheng/api/services/assistant_agent.py b/src/backend/bisheng/api/services/assistant_agent.py index 347607ef1..ab75dd0ee 100644 --- a/src/backend/bisheng/api/services/assistant_agent.py +++ b/src/backend/bisheng/api/services/assistant_agent.py @@ -63,6 +63,11 @@ def __init__(self, assistant_info: Assistant, chat_id: str): self.tools: List[BaseTool] = [] self.offline_flows = [] self.agent: ConfigurableAssistant | None = None + self.agent_executor_dict = { + 'ReAct': 'get_react_agent_executor', + 'function call': 'get_openai_functions_agent_executor', + } + self.current_agent_executor = None self.llm: BaseLanguageModel | None = None self.llm_agent_executor = None self.knowledge_skill_path = str(Path(__file__).parent / 'knowledge_skill.json') @@ -281,6 +286,9 @@ async def init_agent(self): # 引入agent执行参数 agent_executor_params = self.get_agent_executor() agent_executor_type = self.llm_agent_executor or agent_executor_params.pop('type') + self.current_agent_executor = agent_executor_type + # 做转换 + agent_executor_type = self.agent_executor_dict.get(agent_executor_type, agent_executor_type) prompt = self.assistant.prompt if self.assistant.model_name.startswith("command-r"): @@ -334,12 +342,6 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks = """ 运行智能体对话 """ - if chat_history: - chat_history.append(HumanMessage(content=query)) - inputs = chat_history - else: - inputs = [HumanMessage(content=query)] - # 假回调,将已下线的技能回调给前端 for one in self.offline_flows: if callback is not None: @@ -348,6 +350,14 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks = 'name': one, }, input_str='flow if offline', run_id=run_id) await callback[0].on_tool_end(output='flow is offline', name=one, run_id=run_id) + if self.current_agent_executor == 'ReAct': + return await self.react_run(query, chat_history, callback) + + if chat_history: + chat_history.append(HumanMessage(content=query)) + inputs = chat_history + 
else: + inputs = [HumanMessage(content=query)] result = await self.agent.ainvoke(inputs, config=RunnableConfig(callbacks=callback)) # 包含了history,将history排除, 默认取最后一个为最终结果 res = [result[-1]] @@ -364,3 +374,15 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks = except Exception as e: logger.error(f"record assistant history error: {str(e)}") return res + + async def react_run(self, query: str, chat_history: List = None, callback: Callbacks = None): + """ react 模式的输入和执行 """ + result = await self.agent.ainvoke({ + 'input': query, + 'chat_history': chat_history + }, config=RunnableConfig(callbacks=callback)) + logger.debug(result) + output = result['agent_outcome'].return_values['output'] + if isinstance(output, dict): + output = output['text'] + return [AIMessage(content=output)] diff --git a/src/backend/bisheng/api/v2/assistant.py b/src/backend/bisheng/api/v2/assistant.py new file mode 100644 index 000000000..eb237948c --- /dev/null +++ b/src/backend/bisheng/api/v2/assistant.py @@ -0,0 +1,6 @@ +# 免登录的助手相关接口 +from fastapi import APIRouter + + +router = APIRouter(prefix='/chat', tags=['AssistantOpenApi']) + diff --git a/src/backend/bisheng/initdb_config.yaml b/src/backend/bisheng/initdb_config.yaml index 1aa6adc5b..fe4dda86f 100644 --- a/src/backend/bisheng/initdb_config.yaml +++ b/src/backend/bisheng/initdb_config.yaml @@ -90,10 +90,9 @@ env: # 智能助手相关配置 gpts: agent_executor: - # 默认agent的executor, 如果模型里有配置优先用模型的 - type: 'get_openai_functions_agent_executor' - interrupt_before_action: False - recursion_limit: 50 + # 默认agent的executor, 支持模型里单独配置对应的agent_executor_type; react/function call + type: 'function call' # 支持function call的模型使用此agent配置 + # type: 'ReAct' # 不支持function call的模型使用此agent配置 # 助手可选大模型列表,第一个为默认模型 llms: - type: 'ChatOpenAI' @@ -110,7 +109,7 @@ gpts: model_name: 'Qwen-1_8B-Chat' host_base_url: 'http://192.168.106.12:9001/v2.1/models/Qwen-1_8B-Chat/infer' temperature: 0.3 - agent_executor_type: 'get_qwen_local_functions_agent_executor' + agent_executor_type: 'ReAct' # 
预置工具所需的配置 tools: # 画图工具配置,引用公共的openai配置