diff --git a/src/backend/bisheng/api/services/assistant_agent.py b/src/backend/bisheng/api/services/assistant_agent.py
index 347607ef1..ab75dd0ee 100644
--- a/src/backend/bisheng/api/services/assistant_agent.py
+++ b/src/backend/bisheng/api/services/assistant_agent.py
@@ -63,6 +63,11 @@ def __init__(self, assistant_info: Assistant, chat_id: str):
         self.tools: List[BaseTool] = []
         self.offline_flows = []
         self.agent: ConfigurableAssistant | None = None
+        self.agent_executor_dict = {
+            'ReAct': 'get_react_agent_executor',
+            'function call': 'get_openai_functions_agent_executor',
+        }
+        self.current_agent_executor = None
         self.llm: BaseLanguageModel | None = None
         self.llm_agent_executor = None
         self.knowledge_skill_path = str(Path(__file__).parent / 'knowledge_skill.json')
@@ -281,6 +286,9 @@ async def init_agent(self):
         # load the agent executor params
         agent_executor_params = self.get_agent_executor()
         agent_executor_type = self.llm_agent_executor or agent_executor_params.pop('type')
+        self.current_agent_executor = agent_executor_type
+        # map the friendly executor name to its factory function name
+        agent_executor_type = self.agent_executor_dict.get(agent_executor_type, agent_executor_type)

         prompt = self.assistant.prompt
         if self.assistant.model_name.startswith("command-r"):
@@ -334,12 +342,6 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks = None):
        """ Run one round of the assistant conversation """
-        if chat_history:
-            chat_history.append(HumanMessage(content=query))
-            inputs = chat_history
-        else:
-            inputs = [HumanMessage(content=query)]
-
         # fake callbacks: report skills that have been taken offline back to the frontend
         for one in self.offline_flows:
             if callback is not None:
@@ -348,6 +350,14 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks = None):
                 await callback[0].on_tool_start({
                     'name': one,
                 }, input_str='flow is offline', run_id=run_id)
                 await callback[0].on_tool_end(output='flow is offline', name=one, run_id=run_id)
+        if self.current_agent_executor == 'ReAct':
+            return await self.react_run(query, chat_history, callback)
+
+        if chat_history:
+            chat_history.append(HumanMessage(content=query))
+            inputs = chat_history
+        else:
+            inputs = [HumanMessage(content=query)]
         result = await self.agent.ainvoke(inputs, config=RunnableConfig(callbacks=callback))
         # the result includes the history; drop it and take the last message as the final answer
         res = [result[-1]]
@@ -364,3 +374,15 @@
         except Exception as e:
             logger.error(f"record assistant history error: {str(e)}")
         return res
+
+    async def react_run(self, query: str, chat_history: List = None, callback: Callbacks = None):
+        """ Build the inputs and run in ReAct mode """
+        result = await self.agent.ainvoke({
+            'input': query,
+            'chat_history': chat_history
+        }, config=RunnableConfig(callbacks=callback))
+        logger.debug(f'react agent result={result}')
+        output = result['agent_outcome'].return_values['output']
+        if isinstance(output, dict):
+            output = output['text']
+        return [AIMessage(content=output)]
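Note: `agent_executor_dict` above decouples the user-facing executor names ('ReAct', 'function call') from the factory function names in bisheng_langchain. A minimal standalone sketch of the lookup behavior (illustrative names, not the project's code):

```python
AGENT_EXECUTOR_DICT = {
    'ReAct': 'get_react_agent_executor',
    'function call': 'get_openai_functions_agent_executor',
}

def resolve_executor_type(configured: str) -> str:
    # Unknown values pass through unchanged, so configs that already
    # name a factory function directly keep working.
    return AGENT_EXECUTOR_DICT.get(configured, configured)

assert resolve_executor_type('ReAct') == 'get_react_agent_executor'
assert resolve_executor_type('get_qwen_local_functions_agent_executor') \
    == 'get_qwen_local_functions_agent_executor'
```

Because unknown values fall through, `run()` keys its ReAct branch off the pre-mapping value (`current_agent_executor`), not the resolved factory name.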
diff --git a/src/backend/bisheng/api/v1/callback.py b/src/backend/bisheng/api/v1/callback.py
index e7953f448..b3759d085 100644
--- a/src/backend/bisheng/api/v1/callback.py
+++ b/src/backend/bisheng/api/v1/callback.py
@@ -35,13 +35,19 @@ def __init__(self, websocket: WebSocket, flow_id: str, chat_id: str, user_id: in
         #     },  # stores the input info of the tool call
         # }

+        # queue for the streamed output
+        self.stream_queue: Queue = kwargs.get('stream_queue')
+
     async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
         logger.debug(f'on_llm_new_token token={token} kwargs={kwargs}')
         resp = ChatResponse(message=token,
                             type='stream',
                             flow_id=self.flow_id,
                             chat_id=self.chat_id)

+        # put streamed tokens into the queue so that, if streaming is interrupted, the content can still be recorded to the database
         await self.websocket.send_json(resp.dict())
+        if self.stream_queue:
+            self.stream_queue.put(token)

     async def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str],
                            **kwargs: Any) -> Any:
@@ -233,10 +239,13 @@ async def on_chat_model_start(self, serialized: Dict[str, Any],

 class StreamingLLMCallbackHandler(BaseCallbackHandler):
     """Callback handler for streaming LLM responses."""

-    def __init__(self, websocket: WebSocket, flow_id: str, chat_id: str):
+    def __init__(self, websocket: WebSocket, flow_id: str, chat_id: str, user_id: int = None, **kwargs: Any):
         self.websocket = websocket
         self.flow_id = flow_id
         self.chat_id = chat_id
+        self.user_id = user_id
+
+        self.stream_queue: Queue = kwargs.get('stream_queue')

     def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
         resp = ChatResponse(message=token,
@@ -248,6 +257,9 @@ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
         coroutine = self.websocket.send_json(resp.dict())
         asyncio.run_coroutine_threadsafe(coroutine, loop)

+        if self.stream_queue:
+            self.stream_queue.put(token)
+
     def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
         log = f'\nThought: {action.log}'
         # if there are line breaks, split them and send them
@@ -386,21 +398,6 @@ async def on_tool_end(self, output: str, **kwargs: Any) -> Any:

 class AsyncGptsDebugCallbackHandler(AsyncGptsLLMCallbackHandler):

-    def __init__(self, websocket: WebSocket, flow_id: str, chat_id: str, user_id: int = None, **kwargs: Any):
-        super().__init__(websocket, flow_id, chat_id, user_id, **kwargs)
-        self.stream_queue: Queue = kwargs.get('stream_queue')
-
-    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
-        logger.debug(f'on_llm_new_token token={token} kwargs={kwargs}')
-        resp = ChatResponse(message=token,
-                            type='stream',
-                            flow_id=self.flow_id,
-                            chat_id=self.chat_id)
-
-        # put streamed tokens into the queue so an interrupted answer can still be recorded to the database
-        await self.websocket.send_json(resp.dict())
-        self.stream_queue.put(token)
-
     @staticmethod
     def parse_tool_category(tool_name) -> (str, str):
         """
diff --git a/src/backend/bisheng/api/v1/endpoints.py b/src/backend/bisheng/api/v1/endpoints.py
index b2bb7d3a6..8a95c4731 100644
--- a/src/backend/bisheng/api/v1/endpoints.py
+++ b/src/backend/bisheng/api/v1/endpoints.py
@@ -83,6 +83,8 @@ def get_config(admin_user: UserPayload = Depends(get_admin_user)):
 @router.post('/config/save')
 def save_config(data: dict, admin_user: UserPayload = Depends(get_admin_user)):
+    if not data.get('data', '').strip():
+        raise HTTPException(status_code=500, detail='配置不能为空')
     try:
         # validate that the payload is well-formed YAML
         _ = yaml.safe_load(data.get('data'))
diff --git a/src/backend/bisheng/api/v2/assistant.py b/src/backend/bisheng/api/v2/assistant.py
new file mode 100644
index 000000000..eb237948c
--- /dev/null
+++ b/src/backend/bisheng/api/v2/assistant.py
@@ -0,0 +1,6 @@
+# assistant endpoints that do not require login
+
+from fastapi import APIRouter
+
+router = APIRouter(prefix='/chat', tags=['AssistantOpenApi'])
+
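Note: the callback handlers above mirror every streamed token into a `queue.Queue` so that a later "stop" can still persist the partial answer. A self-contained sketch of the producer/consumer pattern, with illustrative names only:

```python
from queue import Queue

stream_queue: Queue = Queue()

def on_llm_new_token(token: str) -> None:
    # Producer side: mirror each streamed token into the queue,
    # as the handlers above do alongside the websocket send.
    stream_queue.put(token)

def drain_partial_answer(q: Queue) -> str:
    # Consumer side: non-blocking drain, mirroring clear_stream_queue
    # and the stop handling further down.
    answer = ''
    while not q.empty():
        answer += q.get()
    return answer

for token in ('Hel', 'lo'):
    on_llm_new_token(token)
assert drain_partial_answer(stream_queue) == 'Hello'
```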
-            await self.send_response('answer', 'end', answer, message_id=res.id if res else None)
+            await self.send_response('answer', 'end', '', message_id=res.id if res else None)
         await self.send_response('processing', 'close', '')

+    async def clear_stream_queue(self):
+        while not self.stream_queue.empty():
+            self.stream_queue.get()
+
     async def handle_gpts_message(self, message: Dict[any, any]):
         if not message:
             return
@@ -228,6 +232,8 @@
             await self.stop_handle_message(message)
             return

+        # clear the stream queue so the previous answer does not pollute this one
+        await self.clear_stream_queue()
         inputs = message.get('inputs', {})
         input_msg = inputs.get('input')
         if not input_msg:
diff --git a/src/backend/bisheng/chat/handlers.py b/src/backend/bisheng/chat/handlers.py
index 592e5330a..431beaa23 100644
--- a/src/backend/bisheng/chat/handlers.py
+++ b/src/backend/bisheng/chat/handlers.py
@@ -1,5 +1,6 @@
 import json
 import time
+from queue import Queue
 from typing import Dict

 from bisheng.api.v1.schemas import ChatMessage, ChatResponse
@@ -23,12 +24,16 @@
 class Handler:

-    def __init__(self) -> None:
-        self.handler_dict = {}
-        self.handler_dict['default'] = self.process_message
-        self.handler_dict['autogen'] = self.process_autogen
-        self.handler_dict['auto_file'] = self.process_file
-        self.handler_dict['report'] = self.process_report
+    def __init__(self, stream_queue: Queue) -> None:
+        self.handler_dict = {
+            'default': self.process_message,
+            'autogen': self.process_autogen,
+            'auto_file': self.process_file,
+            'report': self.process_report,
+            'stop': self.process_stop
+        }
+        # records the streamed output
+        self.stream_queue = stream_queue

     async def dispatch_task(self, session: ChatManager, client_id: str, chat_id: str,
                             action: str, payload: dict, user_id):
@@ -39,11 +44,48 @@ async def dispatch_task(self, session: ChatManager, client_id: str, chat_id: str,
             action = 'default'
         if action not in self.handler_dict:
             raise Exception(f'unknown action {action}')
+        if action != 'stop':
+            # clear the stream queue so the previous answer does not pollute this one
+            while not self.stream_queue.empty():
+                self.stream_queue.get()
         await self.handler_dict[action](session, client_id, chat_id, payload, user_id)
         logger.info(f'dispatch_task done timecost={time.time() - start_time}')

         return client_id, chat_id

+    async def process_stop(self, session: ChatManager, client_id: str, chat_id: str, payload: Dict, user_id):
+        key = get_cache_key(client_id, chat_id)
+        langchain_object = session.in_memory_cache.get(key)
+        action = payload.get('action')
+        if isinstance(langchain_object, AutoGenChain):
+            if hasattr(langchain_object, 'stop'):
+                logger.info('receive_human_interactive langchain_object')
+                await langchain_object.stop()
+            else:
+                logger.error(f'act=auto_gen act={action}')
+        else:
+            # stop for a normal skill
+            res = thread_pool.cancel_task([key])  # cancel the task in progress
+            if res[0]:
+                res = ChatResponse(type='end', user_id=user_id, message='')
+                close = ChatResponse(type='close')
+                await session.send_json(client_id, chat_id, res, add=False)
+                await session.send_json(client_id, chat_id, close, add=False)
+        answer = ''
+        # record the streamed content produced before the interruption
+        while not self.stream_queue.empty():
+            answer += self.stream_queue.get()
+        if answer.strip():
+            chat_message = ChatMessage(message=answer,
+                                       category='answer',
+                                       type='end',
+                                       user_id=user_id,
+                                       remark='break_answer',
+                                       is_bot=True)
+            session.chat_history.add_message(client_id, chat_id, chat_message)
+        logger.info('process_stop done')
+
     async def process_report(self,
                              session: ChatManager,
                              client_id: str,
@@ -170,6 +212,7 @@ async def process_message(self,
                 websocket=session.active_connections[get_cache_key(client_id, chat_id)],
                 flow_id=client_id,
                 chat_id=chat_id,
+                stream_queue=self.stream_queue,
             )
         except Exception as e:
@@ -219,8 +262,7 @@ async def process_message(self,
                                   source=int(source))
             await session.send_json(client_id, chat_id, response)
-
-            # end of loop
+        # end of loop
         if is_begin:
             close_resp = ChatResponse(type='close', user_id=user_id)
             await session.send_json(client_id, chat_id, close_resp)
@@ -300,24 +342,7 @@ async def process_autogen(self, session: ChatManager, client_id: str, chat_id: s
         langchain_object = session.in_memory_cache.get(key)
         logger.info(f'receive_human_interactive langchain={langchain_object}')
         action = payload.get('action')
-        if action.lower() == 'stop':
-            if isinstance(langchain_object, AutoGenChain):
-                if hasattr(langchain_object, 'stop'):
-                    logger.info('receive_human_interactive langchain_object')
-                    await langchain_object.stop()
-                else:
-                    logger.error(f'act=auto_gen act={action}')
-            else:
-                # stop for a normal skill
-                res = thread_pool.cancel_task([key])  # cancel the task in progress
-                if res[0]:
-                    message = payload.get('inputs') or '手动停止'
-                    res = ChatResponse(type='end', user_id=user_id, message=message)
-                    close = ChatResponse(type='close')
-                    await session.send_json(client_id, chat_id, res)
-                    await session.send_json(client_id, chat_id, close)
-
-        elif action.lower() == 'continue':
+        if action.lower() == 'continue':
             # for autogen_user conversations, the process wait() needs to be refreshed
             if hasattr(langchain_object, 'input'):
                 await langchain_object.input(payload.get('inputs'))
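Note: "stop" is now a first-class entry in Handler's dispatch table instead of a special case buried inside process_autogen. A minimal sketch of the dispatch pattern (toy names, not the project's API):

```python
import asyncio


class MiniHandler:
    """Toy version of the action dispatch used by Handler above."""

    def __init__(self):
        self.handler_dict = {
            'default': self.process_message,
            'stop': self.process_stop,
        }

    async def dispatch_task(self, action: str, payload: dict):
        # Unknown actions fail loudly, exactly like dispatch_task above.
        if action not in self.handler_dict:
            raise Exception(f'unknown action {action}')
        return await self.handler_dict[action](payload)

    async def process_message(self, payload: dict) -> str:
        return f"answering: {payload.get('input')}"

    async def process_stop(self, payload: dict) -> str:
        return 'stopped'


print(asyncio.run(MiniHandler().dispatch_task('stop', {})))  # -> stopped
```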
diff --git a/src/backend/bisheng/chat/manager.py b/src/backend/bisheng/chat/manager.py
index 7577f2ad7..c154e75a9 100644
--- a/src/backend/bisheng/chat/manager.py
+++ b/src/backend/bisheng/chat/manager.py
@@ -6,6 +6,7 @@
 from collections import defaultdict
 from typing import Any, Dict, List
 from uuid import UUID
+from queue import Queue

 from loguru import logger
 from fastapi import WebSocket, WebSocketDisconnect, status, Request
@@ -81,6 +82,9 @@ def __init__(self):
         # connected clients
         self.active_clients: Dict[str, ChatClient] = {}

+        # records the streamed output of each connection
+        self.stream_queue: Dict[str, Queue] = {}
+
     def update(self):
         if self.cache_manager.current_client_id in self.active_connections:
             self.last_cached_object_dict = self.cache_manager.get_last()
@@ -98,9 +102,11 @@
     async def connect(self, client_id: str, chat_id: str, websocket: WebSocket):
         await websocket.accept()
         self.active_connections[get_cache_key(client_id, chat_id)] = websocket
+        self.stream_queue[get_cache_key(client_id, chat_id)] = Queue()

     def reuse_connect(self, client_id: str, chat_id: str, websocket: WebSocket):
         self.active_connections[get_cache_key(client_id, chat_id)] = websocket
+        self.stream_queue[get_cache_key(client_id, chat_id)] = Queue()

     def disconnect(self, client_id: str, chat_id: str, key: str = None):
         if key:
@@ -305,7 +311,7 @@ async def handle_websocket(
                     # check whether the current loop is idle
                     process_param = {
-                        'autogen_pool': autogen_pool,
+                        'autogen_pool': thread_pool,
                         'user_id': user_id,
                         'payload': payload,
                         'graph_data': gragh_data,
@@ -321,8 +327,7 @@
                 # handle task status
                 complete_normal = await thread_pool.as_completed(key_list)
-                autoComplete = await autogen_pool.as_completed(key_list)
-                complete = complete_normal + autoComplete
+                complete = complete_normal
                 # if async_task and async_task.done():
                 #     logger.debug(f'async_task_complete result={async_task.result}')
                 if complete:
@@ -456,9 +461,9 @@ async def _process_when_payload(self, flow_id: str, chat_id: str,
         if isinstance(self.in_memory_cache.get(langchain_obj_key), AutoGenChain):  # autogen chain
             logger.info(f'autogen_submit {langchain_obj_key}')
-            autogen_pool.submit(key, Handler().dispatch_task, **params)
+            autogen_pool.submit(key, Handler(stream_queue=self.stream_queue[key]).dispatch_task, **params)
         else:
-            thread_pool.submit(key, Handler().dispatch_task, **params)
+            thread_pool.submit(key, Handler(stream_queue=self.stream_queue[key]).dispatch_task, **params)
         status_ = 'init'
         context.update({'status': status_})
         context.update({'payload': {}})  # clean message
@@ -504,6 +509,8 @@ async def preper_action(self, client_id, chat_id, langchain_obj_key, payload,
                 action = 'report'
                 step_resp.intermediate_steps = '文件解析完成,开始生成报告'
             await self.send_json(client_id, chat_id, step_resp)
+        elif payload.get('action') == 'stop':
+            action = 'stop'
         elif 'action' in payload:
             action = 'autogen'
         elif 'clear_history' in payload and payload['clear_history']:
diff --git a/src/backend/bisheng/chat/utils.py b/src/backend/bisheng/chat/utils.py
index c79ed54f8..4bd475298 100644
--- a/src/backend/bisheng/chat/utils.py
+++ b/src/backend/bisheng/chat/utils.py
@@ -23,7 +23,8 @@ async def process_graph(langchain_object,
                         chat_inputs: ChatMessage,
                         websocket: WebSocket,
                         flow_id: str = None,
-                        chat_id: str = None):
+                        chat_id: str = None,
+                        **kwargs):
     langchain_object = try_setting_streaming_options(langchain_object, websocket)
     logger.debug('Loaded langchain object')

@@ -45,7 +46,8 @@
             chat_inputs.message,
             websocket=websocket,
             flow_id=flow_id,
-            chat_id=chat_id)
+            chat_id=chat_id,
+            **kwargs)
         logger.debug('Generated result and intermediate_steps')
         return result, intermediate_steps, source_document
     except Exception as e:
diff --git a/src/backend/bisheng/initdb_config.yaml b/src/backend/bisheng/initdb_config.yaml
index 8120b642c..fe4dda86f 100644
--- a/src/backend/bisheng/initdb_config.yaml
+++ b/src/backend/bisheng/initdb_config.yaml
@@ -16,7 +16,7 @@ minio_conf: &minio_conf
 knowledges:
   # LLM for the knowledge base: summarizes a title for each document, then the title and chunk are stored together in the vector store; if unset, documents are not summarized
   llm:
-    type: "ChatOpenAi"
+    type: "ChatOpenAI"
     model: "gpt-3.5-turbo"
     <<: *openai_conf
 unstructured_api_url: ""  # BISHENG unstructured-data parsing service (OCR, table recognition, layout analysis); optional, but required for source tracing
@@ -68,6 +68,8 @@ llm_request:
 default_operator:
   user: 3
   url: https://bisheng.dataelem.com
+  # whether the login-free API checks permissions
+  api_need_login: false

 # whether a captcha is required
 use_captcha:
@@ -83,14 +85,14 @@ env:
   # a websocket address different from the http one may be configured
   # websocket_url: 192.168.106.120:3003
   office_url: http://IP:8701  # address of the office component; the browser must be able to reach it directly
+  pro: false  # whether to enable the closed-source gateway

 # assistant-related config
 gpts:
   agent_executor:
-    # default agent executor; a model-level setting takes precedence
-    type: 'get_openai_functions_agent_executor'
-    interrupt_before_action: False
-    recursion_limit: 50
+    # default agent executor; each model may override it via agent_executor_type (react / function call)
+    type: 'function call'  # agent config for models that support function call
+    # type: 'ReAct'  # agent config for models that do not support function call
   # LLMs available to the assistant; the first one is the default
   llms:
     - type: 'ChatOpenAI'
@@ -107,7 +109,7 @@ gpts:
       model_name: 'Qwen-1_8B-Chat'
       host_base_url: 'http://192.168.106.12:9001/v2.1/models/Qwen-1_8B-Chat/infer'
       temperature: 0.3
-      agent_executor_type: 'get_qwen_local_functions_agent_executor'
+      agent_executor_type: 'ReAct'
   # config needed by the preset tools
   tools:
     # drawing tool config; reuses the shared openai config
@@ -139,11 +141,13 @@ password_conf:
   max_error_times: 5

 system_login_method:
-  # SSO login
-  SSO_OAuth: true
-  # # LDAP login
-  # LDAP: true
-  #   LDAP server address: XX
-
-  # admin username after switching to SSO/LDAP login
-  admin_username: admin
\ No newline at end of file
+  # SSO login
+  SSO_OAuth: true
+  # # LDAP login
+  # LDAP: true
+  #   LDAP server address: XX
+
+  # admin username after switching to SSO/LDAP login
+  admin_username: admin
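Note: with the config change above, `agent_executor.type` now takes friendly names, and each entry under `llms` may override it via `agent_executor_type`. A hedged sketch of the resolution order, assuming the same mapping as `AssistantAgent.agent_executor_dict` (the YAML snippet is illustrative):

```python
import yaml

conf = yaml.safe_load("""
gpts:
  agent_executor:
    type: 'function call'
  llms:
    - model_name: 'Qwen-1_8B-Chat'
      agent_executor_type: 'ReAct'
""")

mapping = {
    'ReAct': 'get_react_agent_executor',
    'function call': 'get_openai_functions_agent_executor',
}

llm_conf = conf['gpts']['llms'][0]
# A per-model agent_executor_type wins over the global default, matching
# `self.llm_agent_executor or agent_executor_params.pop('type')` above.
chosen = llm_conf.get('agent_executor_type') or conf['gpts']['agent_executor']['type']
print(mapping.get(chosen, chosen))  # -> get_react_agent_executor
```

The spelling must match the mapping keys exactly; the run-time ReAct branch compares against 'ReAct', so the Qwen entry uses that exact casing.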
+  allow_multi_login: true  # whether concurrent logins are allowed
+
diff --git a/src/backend/bisheng/utils/embedding.py b/src/backend/bisheng/utils/embedding.py
index 85105b3f0..2b542e877 100644
--- a/src/backend/bisheng/utils/embedding.py
+++ b/src/backend/bisheng/utils/embedding.py
@@ -13,6 +13,8 @@ def decide_embeddings(model: str) -> Embeddings:
     """embed method"""
     model_list = settings.get_knowledge().get('embeddings')
     params = model_list.get(model)
+    if not params:
+        raise Exception(f'embedding {model} not found in system settings')
     component = params.pop('component', '')
     if model == 'text-embedding-ada-002' or component == 'openai':
         if is_openai_v1() and params.get('openai_proxy'):
diff --git a/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/__init__.py b/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/__init__.py
index ba410c1ee..b8243d62d 100644
--- a/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/__init__.py
+++ b/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/__init__.py
@@ -2,9 +2,11 @@
     get_openai_functions_agent_executor,
     get_qwen_local_functions_agent_executor
 )
+from bisheng_langchain.gpts.agent_types.llm_react_agent import get_react_agent_executor

 __all__ = [
     "get_openai_functions_agent_executor",
-    "get_qwen_local_functions_agent_executor"
+    "get_qwen_local_functions_agent_executor",
+    "get_react_agent_executor"
 ]
\ No newline at end of file
diff --git a/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/llm_react_agent.py b/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/llm_react_agent.py
new file mode 100644
index 000000000..04d43fd4f
--- /dev/null
+++ b/src/bisheng-langchain/bisheng_langchain/gpts/agent_types/llm_react_agent.py
@@ -0,0 +1,171 @@
+import operator
+from typing import Annotated, Sequence, TypedDict, Union
+from langchain.tools import BaseTool
+from langchain_core.agents import AgentAction, AgentFinish
+from langchain_core.messages import BaseMessage
+from langchain_core.language_models import LanguageModelLike
+from langgraph.graph import END, StateGraph
+from langgraph.graph.state import CompiledStateGraph
+from langgraph.prebuilt.tool_executor import ToolExecutor
+from langgraph.utils import RunnableCallable
+from langchain.agents import create_structured_chat_agent
+from bisheng_langchain.gpts.prompts.react_agent_prompt import react_agent_prompt
+
+
+def get_react_agent_executor(
+    tools: list[BaseTool],
+    llm: LanguageModelLike,
+    system_message: str,
+    interrupt_before_action: bool,
+    **kwargs
+):
+    prompt = react_agent_prompt
+    prompt = prompt.partial(assistant_message=system_message)
+    agent = create_structured_chat_agent(llm, tools, prompt)
+    agent_executor = create_agent_executor(agent, tools)
+    return agent_executor
+
+
+def _get_agent_state(input_schema=None):
+    if input_schema is None:
+
+        class AgentState(TypedDict):
+            # The input string
+            input: str
+            # The list of previous messages in the conversation
+            chat_history: Sequence[BaseMessage]
+            # The outcome of a given call to the agent
+            # Needs `None` as a valid type, since this is what this will start as
+            agent_outcome: Union[AgentAction, AgentFinish, None]
+            # List of actions and corresponding observations
+            # Here we annotate this with `operator.add` to indicate that operations to
+            # this state should be ADDED to the existing values (not overwrite it)
+            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
+
+    else:
+
+        class AgentState(input_schema):
+            # The outcome of a given call to the agent
+            # Needs `None` as a valid type, since this is what this will start as
+            agent_outcome: Union[AgentAction, AgentFinish, None]
+            # List of actions and corresponding observations
+            # Here we annotate this with `operator.add` to indicate that operations to
+            # this state should be ADDED to the existing values (not overwrite it)
+            intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
+
+    return AgentState
+
+
+def create_agent_executor(
+    agent_runnable, tools, input_schema=None
+) -> CompiledStateGraph:
+    """This is a helper function for creating a graph that works with LangChain Agents.
+
+    Args:
+        agent_runnable (RunnableLike): The agent runnable.
+        tools (list): A list of tools to be used by the agent.
+        input_schema (dict, optional): The input schema for the agent. Defaults to None.
+
+    Returns:
+        The `CompiledStateGraph` object.
+    """
+
+    if isinstance(tools, ToolExecutor):
+        tool_executor = tools
+    else:
+        tool_executor = ToolExecutor(tools)
+
+    state = _get_agent_state(input_schema)
+
+    # Define logic that will be used to determine which conditional edge to go down
+
+    def should_continue(data):
+        # If the agent outcome is an AgentFinish, then we return `exit` string
+        # This will be used when setting up the graph to define the flow
+        if isinstance(data["agent_outcome"], AgentFinish):
+            return "end"
+        # Otherwise, an AgentAction is returned
+        # Here we return `continue` string
+        # This will be used when setting up the graph to define the flow
+        else:
+            return "continue"
+
+    def run_agent(data, config):
+        agent_outcome = agent_runnable.invoke(data, config)
+        return {"agent_outcome": agent_outcome}
+
+    async def arun_agent(data, config):
+        agent_outcome = await agent_runnable.ainvoke(data, config)
+        return {"agent_outcome": agent_outcome}
+
+    # Define the function to execute tools
+    def execute_tools(data, config):
+        # Get the most recent agent_outcome - this is the key added in the `agent` above
+        agent_action = data["agent_outcome"]
+        if not isinstance(agent_action, list):
+            agent_action = [agent_action]
+        output = tool_executor.batch(agent_action, config, return_exceptions=True)
+        return {
+            "intermediate_steps": [
+                (action, str(out)) for action, out in zip(agent_action, output)
+            ]
+        }
+
+    async def aexecute_tools(data, config):
+        # Get the most recent agent_outcome - this is the key added in the `agent` above
+        agent_action = data["agent_outcome"]
+        if not isinstance(agent_action, list):
+            agent_action = [agent_action]
+        output = await tool_executor.abatch(
+            agent_action, config, return_exceptions=True
+        )
+        return {
+            "intermediate_steps": [
+                (action, str(out)) for action, out in zip(agent_action, output)
+            ]
+        }
+
+    # Define a new graph
+    workflow = StateGraph(state)
+
+    # Define the two nodes we will cycle between
+    workflow.add_node("agent", RunnableCallable(run_agent, arun_agent))
+    workflow.add_node("tools", RunnableCallable(execute_tools, aexecute_tools))
+
+    # Set the entrypoint as `agent`
+    # This means that this node is the first one called
+    workflow.set_entry_point("agent")
+
+    # We now add a conditional edge
+    workflow.add_conditional_edges(
+        # First, we define the start node. We use `agent`.
+        # This means these are the edges taken after the `agent` node is called.
+        "agent",
+        # Next, we pass in the function that will determine which node is called next.
+        should_continue,
+        # Finally we pass in a mapping.
+        # The keys are strings, and the values are other nodes.
+        # END is a special node marking that the graph should finish.
+        # What will happen is we will call `should_continue`, and then the output of that
+        # will be matched against the keys in this mapping.
+        # Based on which one it matches, that node will then be called.
+        {
+            # If `tools`, then we call the tool node.
+            "continue": "tools",
+            # Otherwise we finish.
+            "end": END,
+        },
+    )
+
+    # We now add a normal edge from `tools` to `agent`.
+    # This means that after `tools` is called, `agent` node is called next.
+    workflow.add_edge("tools", "agent")
+
+    # Finally, we compile it!
+    # This compiles it into a LangChain Runnable,
+    # meaning you can use it as you would any other runnable
+    return workflow.compile()
+
+
+if __name__ == "__main__":
+    pass
diff --git a/src/bisheng-langchain/bisheng_langchain/gpts/assistant.py b/src/bisheng-langchain/bisheng_langchain/gpts/assistant.py
index 38b11a87a..56771ab1e 100644
--- a/src/bisheng-langchain/bisheng_langchain/gpts/assistant.py
+++ b/src/bisheng-langchain/bisheng_langchain/gpts/assistant.py
@@ -97,9 +97,9 @@ def __init__(self, yaml_path) -> None:

         # init agent executor
         agent_executor_params = self.assistant_params['agent_executor']
-        agent_executor_type = agent_executor_params.pop('type')
+        self.agent_executor_type = agent_executor_params.pop('type')
         self.assistant = ConfigurableAssistant(
-            agent_executor_type=agent_executor_type,
+            agent_executor_type=self.agent_executor_type,
             tools=tools,
             llm=llm,
             assistant_message=assistant_message,
@@ -119,7 +119,10 @@ def run(self, query, chat_history=[], chat_round=5):
                 inputs.append(HumanMessage(content=chat_history[i]))
                 inputs.append(AIMessage(content=chat_history[i+1]))
         inputs.append(HumanMessage(content=query))
-        result = asyncio.run(self.assistant.ainvoke(inputs))
+        if self.agent_executor_type == 'get_react_agent_executor':
+            result = asyncio.run(self.assistant.ainvoke({"input": inputs[-1].content, "chat_history": inputs[:-1]}))
+        else:
+            result = asyncio.run(self.assistant.ainvoke(inputs))
         return result
@@ -127,14 +130,12 @@

 if __name__ == '__main__':
     from langchain.globals import set_debug

     # set_debug(True)
-    chat_history = []
-    query = "请简要分析中科创达软件股份有限公司2019年聘任、解聘会计师事务的情况。"
-    # chat_history = ['你好', '你好,有什么可以帮助你吗?', '福蓉科技股价多少?', '福蓉科技(股票代码:300049)的当前股价为48.67元。']
-    # query = '去年这个时候的股价是多少?'
-    # bisheng_assistant = BishengAssistant("config/base_scene.yaml")
+    # chat_history = []
+    # query = "600519、300750股价多少?"
+    chat_history = ['你好', '你好,有什么可以帮助你吗?', '福蓉科技股价多少?', '福蓉科技(股票代码:300049)的当前股价为48.67元。']
+    query = '今天是什么时候?去年这个时候的股价是多少?'
+    bisheng_assistant = BishengAssistant("config/base_scene.yaml")
     # bisheng_assistant = BishengAssistant("config/knowledge_scene.yaml")
-    bisheng_assistant = BishengAssistant("config/rag_scene.yaml")
+    # bisheng_assistant = BishengAssistant("config/rag_scene.yaml")
     result = bisheng_assistant.run(query, chat_history=chat_history)
-    for r in result:
-        print(f'------------------')
-        print(type(r), r)
+    print(result)
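Note: the ReAct graph consumes a state dict (`input` plus `chat_history`), while the function-call executor takes a bare message list, which is why `run()` branches above. A standalone sketch of the two invocation shapes (illustrative only; `build_payload` is not part of the project):

```python
from langchain_core.messages import AIMessage, HumanMessage

history = ['你好', '你好,有什么可以帮助你吗?']
query = '福蓉科技股价多少?'

# Rebuild the alternating human/AI message list, as run() does.
inputs = []
for i in range(0, len(history), 2):
    inputs.append(HumanMessage(content=history[i]))
    inputs.append(AIMessage(content=history[i + 1]))
inputs.append(HumanMessage(content=query))

def build_payload(agent_executor_type: str):
    if agent_executor_type == 'get_react_agent_executor':
        # The ReAct graph expects the AgentState keys, not a message list.
        return {'input': inputs[-1].content, 'chat_history': inputs[:-1]}
    return inputs

payload = build_payload('get_react_agent_executor')
print(payload['input'])  # -> 福蓉科技股价多少?
```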
diff --git a/src/bisheng-langchain/bisheng_langchain/gpts/config/base_scene.yaml b/src/bisheng-langchain/bisheng_langchain/gpts/config/base_scene.yaml
index 84ae809a6..d71b18723 100644
--- a/src/bisheng-langchain/bisheng_langchain/gpts/config/base_scene.yaml
+++ b/src/bisheng-langchain/bisheng_langchain/gpts/config/base_scene.yaml
@@ -1,20 +1,20 @@
 assistant:
-  # prompt_type: 'ASSISTANT_PROMPT_DEFAULT'
-  # llm:
-  #   type: 'ChatOpenAI'
-  #   model: 'gpt-4-0125-preview'
-  #   openai_api_key: ''
-  #   openai_proxy: 'http://118.195.232.223:39995'
-  #   temperature: 0.0
-
-  prompt_type: 'ASSISTANT_PROMPT_COHERE'
+  prompt_type: 'ASSISTANT_PROMPT_DEFAULT'
   llm:
     type: 'ChatOpenAI'
-    model: 'command-r-plus-104b'
-    openai_api_base: 'http://34.87.129.78:9100/v1'
+    model: 'gpt-4-0125-preview'
     openai_api_key: ''
-    openai_proxy: ''
-    temperature: 0.3
+    openai_proxy: 'http://118.195.232.223:39995'
+    temperature: 0.0
+
+  # prompt_type: 'ASSISTANT_PROMPT_COHERE'
+  # llm:
+  #   type: 'ChatOpenAI'
+  #   model: 'command-r-plus-104b'
+  #   openai_api_base: 'http://34.87.129.78:9100/v1'
+  #   openai_api_key: ''
+  #   openai_proxy: ''
+  #   temperature: 0.3

   tools:
     - type: "sina_realtime_info"
@@ -39,8 +39,9 @@ assistant:
     - type: macro_china_shrzgm

   agent_executor:
-    type: 'get_openai_functions_agent_executor'
+    # type: 'get_openai_functions_agent_executor'
     # type: 'get_qwen_local_functions_agent_executor'
+    type: 'get_react_agent_executor'
     interrupt_before_action: False
     recursion_limit: 50
diff --git a/src/bisheng-langchain/bisheng_langchain/gpts/prompts/react_agent_prompt.py b/src/bisheng-langchain/bisheng_langchain/gpts/prompts/react_agent_prompt.py
new file mode 100644
index 000000000..75c609e5a
--- /dev/null
+++ b/src/bisheng-langchain/bisheng_langchain/gpts/prompts/react_agent_prompt.py
@@ -0,0 +1,68 @@
+from typing import List, Union
+from langchain_core.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+    MessagesPlaceholder
+)
+from langchain_core.messages import FunctionMessage, SystemMessage, ToolMessage, AIMessage, HumanMessage, ChatMessage
+
+
+system_temp = """
+{assistant_message}
+
+Respond to the human as helpfully and accurately as possible. You have access to the following tools:
+
+{tools}
+
+Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
+
+Valid "action" values: "Final Answer" or {tool_names}
+
+Provide only ONE action per $JSON_BLOB, as shown:
+
+```
+{{{{
+  "action": $TOOL_NAME,
+  "action_input": $INPUT
+}}}}
+```
+
+Follow this format:
+
+Question: input question to answer
+Thought: consider previous and subsequent steps
+Action:
+```
+$JSON_BLOB
+```
+Observation: action result
+... (repeat Thought/Action/Observation N times)
+Thought: I know what to respond
+Action:
+```
+{{{{
+  "action": "Final Answer",
+  "action_input": "Final response to human"
+}}}}
+```
+
+Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB``` then Observation
+"""
+
+human_temp = """Question: {input}
+
+Thought: {agent_scratchpad}
+(reminder to respond in a JSON blob no matter what)"""
+
+
+react_agent_prompt = ChatPromptTemplate(
+    input_variables=['agent_scratchpad', 'input', 'tool_names', 'tools', 'assistant_message'],
+    optional_variables=['chat_history'],
+    input_types={'chat_history': List[Union[AIMessage, HumanMessage, ChatMessage, SystemMessage, FunctionMessage, ToolMessage]]},
+    messages=[
+        SystemMessagePromptTemplate.from_template(system_temp),
+        MessagesPlaceholder(variable_name='chat_history', optional=True),
+        HumanMessagePromptTemplate.from_template(human_temp)
+    ]
+)
\ No newline at end of file
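Note: a quick way to check the rendered layout of the prompt above is to format it with dummy values. This sketch assumes the new module is importable exactly as added in this patch; all values are placeholders:

```python
from bisheng_langchain.gpts.prompts.react_agent_prompt import react_agent_prompt

messages = react_agent_prompt.format_messages(
    assistant_message='You are a helpful stock assistant.',
    tools='sina_realtime_info: fetch realtime stock quotes',
    tool_names='sina_realtime_info',
    input='600519股价多少?',
    agent_scratchpad='',
)
for message in messages:
    # Expect a SystemMessage carrying the tool instructions and a
    # HumanMessage carrying the question plus the scratchpad; the
    # optional chat_history placeholder is simply skipped when absent.
    print(type(message).__name__, repr(message.content[:60]))
```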
diff --git a/src/bisheng-langchain/requirements.txt b/src/bisheng-langchain/requirements.txt
index 3662fe6e7..eb940fce9 100644
--- a/src/bisheng-langchain/requirements.txt
+++ b/src/bisheng-langchain/requirements.txt
@@ -10,7 +10,7 @@
 pydantic==1.10.13
 pymupdf==1.23.8
 shapely==2.0.2
 filetype==1.2.0
-langgraph==0.0.30
+langgraph==0.1.5
 openai==1.14.3
 langchain-openai==0.1.0
 llama-index==0.9.48
diff --git a/src/frontend/public/locales/en/bs.json b/src/frontend/public/locales/en/bs.json
index b2516f57b..37932d7ec 100644
--- a/src/frontend/public/locales/en/bs.json
+++ b/src/frontend/public/locales/en/bs.json
@@ -37,6 +37,7 @@
     "document": "Documentation",
     "logout": "Logout",
     "logoutDescription": "Log out",
+    "logoutContent": "Are you sure you want to log out",
     "forBestExperience": "For the best experience, please access this website on a PC",
     "onlineDocumentation": "Online Documentation",
     "changePwd": "Password"
diff --git a/src/frontend/public/locales/zh/bs.json b/src/frontend/public/locales/zh/bs.json
index 8b52f76dc..a037ebc21 100644
--- a/src/frontend/public/locales/zh/bs.json
+++ b/src/frontend/public/locales/zh/bs.json
@@ -38,6 +38,7 @@
     "document": "文档",
     "logout": "退出",
     "logoutDescription": "退出登录",
+    "logoutContent": "确认退出登录吗",
     "forBestExperience": "为了您的良好体验,请在 PC 端访问该网站",
     "onlineDocumentation": "在线文档",
     "changePwd": "修改密码"
diff --git a/src/frontend/src/components/bs-comp/chatComponent/SourceEntry.tsx b/src/frontend/src/components/bs-comp/chatComponent/SourceEntry.tsx
index f3a81adc8..bb2091ae2 100644
--- a/src/frontend/src/components/bs-comp/chatComponent/SourceEntry.tsx
+++ b/src/frontend/src/components/bs-comp/chatComponent/SourceEntry.tsx
@@ -25,7 +25,7 @@ export default function SourceEntry({ extra, end, source, className = '', onSour
             {(() => {
                 switch (source) {
                     case SourceType.FILE:
-                        return {t('chat.source')};
+                        return {t('chat.source')};
                     case SourceType.NO_PERMISSION:
                         return

{t('chat.noAccess')}

; case SourceType.LINK: diff --git a/src/frontend/src/contexts/locationContext.tsx b/src/frontend/src/contexts/locationContext.tsx index 95bb0a0aa..64f818b52 100644 --- a/src/frontend/src/contexts/locationContext.tsx +++ b/src/frontend/src/contexts/locationContext.tsx @@ -30,6 +30,7 @@ type locationContextType = { extraComponent: any; setExtraComponent: (newState: any) => void; appConfig: any; + reloadConfig: () => void }; //initial value for location context @@ -48,7 +49,8 @@ const initialValue = { setExtraNavigation: () => { }, extraComponent: <>, setExtraComponent: () => { }, - appConfig: { libAccepts: [] } + appConfig: { libAccepts: [] }, + reloadConfig: () => { } }; export const locationContext = createContext(initialValue); @@ -65,8 +67,7 @@ export function LocationProvider({ children }: { children: ReactNode }) { libAccepts: [] }) - // 获取系统配置 - useEffect(() => { + const loadConfig = () => { getAppConfig().then(res => { setAppConfig({ isDev: res.env === 'dev', @@ -80,6 +81,11 @@ export function LocationProvider({ children }: { children: ReactNode }) { chatPrompt: !!res.application_usage_tips }) }) + } + + // 获取系统配置 + useEffect(() => { + loadConfig() }, []) return ( @@ -95,7 +101,8 @@ export function LocationProvider({ children }: { children: ReactNode }) { setExtraNavigation, extraComponent, setExtraComponent, - appConfig + appConfig, + reloadConfig: loadConfig }} > {children} diff --git a/src/frontend/src/layout/MainLayout.tsx b/src/frontend/src/layout/MainLayout.tsx index 99723ec4f..0ff0a5799 100755 --- a/src/frontend/src/layout/MainLayout.tsx +++ b/src/frontend/src/layout/MainLayout.tsx @@ -40,7 +40,7 @@ export default function MainLayout() { const handleLogout = () => { bsConfirm({ title: `${t('prompt')}!`, - desc: `${t('menu.logoutDescription')}?`, + desc: `${t('menu.logoutContent')}?`, okTxt: t('system.confirm'), onOk(next) { captureAndAlertRequestErrorHoc(logoutApi()).then(_ => { diff --git a/src/frontend/src/pages/ChatAppPage/components/ResouceModal.tsx b/src/frontend/src/pages/ChatAppPage/components/ResouceModal.tsx index 973588647..114a66c56 100644 --- a/src/frontend/src/pages/ChatAppPage/components/ResouceModal.tsx +++ b/src/frontend/src/pages/ChatAppPage/components/ResouceModal.tsx @@ -5,6 +5,8 @@ import { useTranslation } from "react-i18next"; import { getSourceChunksApi, splitWordApi } from "../../../controllers/API"; import { downloadFile } from "../../../util/utils"; import FileView, { checkSassUrl } from "./FileView"; +import { QuestionMarkCircledIcon } from "@radix-ui/react-icons"; +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/bs-ui/tooltip"; // 顶部答案区 const Anwser = ({ id, msg, onInit, onAdd }) => { @@ -41,18 +43,43 @@ const Anwser = ({ id, msg, onInit, onAdd }) => { return () => pRef.current?.removeEventListener('click', handleclick) }, []) - return
+ return

} // let timer = null -const ResultPanne = ({ chatId, words, data, onClose, onAdd, children }: { chatId: string, words: string[], data: any, onClose: any, onAdd: any, children: any }) => { +const ResultPanne = ({ chatId, words, data, onClose, onAdd, children, closeDialog }: { chatId: string, words: string[], data: any, onClose: any, onAdd: any, children: any, closeDialog: () => void }) => { const { t } = useTranslation() const [editCustomKey, setEditCustomKey] = useState(false) const inputRef = useRef(null) + // 移动端 + const [collapse, setCollapse] = useState(true) + const [isMobile, setIsMobile] = useState(true) + const [width, setWidth] = useState(window.innerWidth); + const [height, setHeight] = useState(window.innerHeight); + const checkIsMobile = () => { + if (width < 640) { + setIsMobile(true) + } else { + setIsMobile(false) + } + } + useEffect(() => { + const handleResize = () => { + setWidth(window.innerWidth); + setHeight(window.innerHeight); + }; + window.addEventListener("resize", handleResize); + checkIsMobile() + return () => { + window.removeEventListener("resize", handleResize); + } + }, [width]) + // 移动端 e + const handleAddKeyword = (str: string) => { setEditCustomKey(false) if (!str) return @@ -86,61 +113,86 @@ const ResultPanne = ({ chatId, words, data, onClose, onAdd, children }: { chatId setTimeout(() => document.getElementById('taginput')?.focus(), 0); } - return
- {/* left */} -
- {/* label */} -
- {t('chat.filterLabel')} -
?
+ return
+ { + isMobile &&
+ {!collapse && { setCollapse(true) }} className="">收起} + {collapse && { setCollapse(false) }} className="">展开}
-
- {words.map((str, i) =>
{str} onClose(i)}>x
)} - { - editCustomKey ?
{ - if (event.key === "Enter" && !event.shiftKey) { - handleAddKeyword(inputRef.current.value); - } - }} - onBlur={() => { - handleAddKeyword(inputRef.current.value); - }}>
: -
{t('chat.addCustomLabel')}
- } + } + { + isMobile &&
+ 关闭
- {/* files */} -
-

{t('chat.sourceDocumentsLabel')}

- {files.map(_file => - _file.right ?
setFile(_file)} className={`group rounded-xl bg-[#fff] hover-bg-gray-200 flex items-center px-4 mb-2 relative min-h-16 cursor-pointer ${file?.id === _file.id && 'bg-gray-200'}`}> -

{_file.fileName}

- - {_file.score} -
: -
-

是真的马赛克.msk

+ } + {/* left */} + { + (!isMobile || !collapse) &&
+ {/* label */} + {/* 中英 */} +
+
+ {t('chat.filterLabel')} + + + + + + +

{t('chat.tooltipText')}

+
+
+
+
+
+
+ {words.map((str, i) =>
{str} onClose(i)}>x
)} + { + editCustomKey ?
{ + if (event.key === "Enter" && !event.shiftKey) { + handleAddKeyword(inputRef.current.value); + } + }} + onBlur={() => { + handleAddKeyword(inputRef.current.value); + }}>
: +
{t('chat.addCustomLabel')}
+ } +
+ {/* files */} +
+

{t('chat.sourceDocumentsLabel')}

+ {files.map(_file => + _file.right ?
setFile(_file)} className={`group rounded-xl bg-[#fff] dark:bg-[#303134] hover-bg-gray-200 flex items-center px-4 mb-2 relative min-h-16 cursor-pointer ${file?.id === _file.id && 'bg-gray-200'}`}> +

{_file.fileName}

+ {_file.score} -
- )} - {!files.length &&

{t('chat.noMatchedFilesMessage')}

} +
: +
+

是真的马赛克.msk

+ {_file.score} +
+ )} + {!files.length &&

{t('chat.noMatchedFilesMessage')}

} +
-
+ } {/* file pane */} {file && children(file)}
@@ -178,11 +230,11 @@ const ResouceModal = forwardRef((props, ref) => { */} {open &&
- + setOpen(false)}> { (file) => file.fileUrl ? : -
+

{t('chat.fileStorageFailure')}

} diff --git a/src/frontend/src/pages/ChatAppPage/index.tsx b/src/frontend/src/pages/ChatAppPage/index.tsx index fe7251ee4..19aa5c5b2 100755 --- a/src/frontend/src/pages/ChatAppPage/index.tsx +++ b/src/frontend/src/pages/ChatAppPage/index.tsx @@ -11,6 +11,7 @@ import { generateUUID } from "../../utils"; import ChatPanne from "./components/ChatPanne"; import { formatStrTime } from "@/util/utils"; import { SkillIcon, AssistantIcon } from "@/components/bs-icons"; +import { useMessageStore } from "@/components/bs-comp/chatComponent/messageStore"; export default function SkillChatPage() { @@ -61,7 +62,7 @@ export default function SkillChatPage() { // select chat const handleSelectChat = useDebounce(async (chat) => { - console.log('chat.id :>> ', chat); + // console.log('chat.id :>> ', chat); if (chat.chat_id === chatId) return setSelelctChat({ id: chat.flow_id, chatId: chat.chat_id, type: chat.flow_type }) setChatId(chat.chat_id) @@ -125,6 +126,22 @@ const useChatList = () => { const [id, setId] = useState('') const [chatList, setChatList] = useState([]) const chatsRef = useRef(null) + const { chatId, messages } = useMessageStore() + + useEffect(() => { + if (messages.length > 0) { + const latest:any = messages[messages.length - 1] + setChatList(chats => chats.map(chat => (chat.chat_id === chatId && chat.latest_message) + ? { + ...chat, latest_message: { + ...chat.latest_message, + message: latest.message[latest.chatKey] || latest.message + } + } + : chat) + ) + } + }, [messages, chatId]) const pageRef = useRef(0) const onScrollLoad = async () => { diff --git a/src/frontend/src/pages/LogPage/index.tsx b/src/frontend/src/pages/LogPage/index.tsx index 9227a42b3..5e365fdaf 100644 --- a/src/frontend/src/pages/LogPage/index.tsx +++ b/src/frontend/src/pages/LogPage/index.tsx @@ -146,7 +146,7 @@ export default function index() { {t('log.objectType')} {t('log.operationObject')} {t('log.ipAddress')} - {t('log.remark')} + {t('log.remark')} @@ -158,10 +158,10 @@ export default function index() { {transformModule(log.system_id)} {transformEvent(log.event_type)} {transformObjectType(log.object_type)} -
{log.object_name}
+
{log.object_name || '无'}
{log.ip_address} -
{log.note?.replace('编辑后', `\n编辑后`)}
+
{log.note?.replace('编辑后', `\n编辑后`) || '无'}
))} diff --git a/src/frontend/src/pages/SystemPage/components/Config.tsx b/src/frontend/src/pages/SystemPage/components/Config.tsx index ff4c5e742..387230d23 100644 --- a/src/frontend/src/pages/SystemPage/components/Config.tsx +++ b/src/frontend/src/pages/SystemPage/components/Config.tsx @@ -2,12 +2,15 @@ import { useContext, useEffect, useRef, useState } from "react"; import AceEditor from "react-ace"; import { useTranslation } from "react-i18next"; import { Button } from "../../../components/bs-ui/button"; -import { alertContext } from "../../../contexts/alertContext"; import { getSysConfigApi, setSysConfigApi } from "../../../controllers/API/user"; import { captureAndAlertRequestErrorHoc } from "../../../controllers/request"; +import { locationContext } from "@/contexts/locationContext"; +import { useToast } from "@/components/bs-ui/toast/use-toast"; export default function Config() { - const { setErrorData, setSuccessData } = useContext(alertContext); + const { toast, message } = useToast() + const { reloadConfig } = useContext(locationContext) + const [config, setConfig] = useState('') const { t } = useTranslation() @@ -21,15 +24,23 @@ export default function Config() { const handleSave = () => { if (validataRef.current.length) { - return setErrorData({ + return toast({ + variant: 'error', title: `yaml${t('formatError')}`, - list: validataRef.current.map(el => el.text), - }); + description: validataRef.current.map(el => el.text) + }) } captureAndAlertRequestErrorHoc(setSysConfigApi({ data: codeRef.current }).then(res => { - setSuccessData({ title: t('success') }) + message({ + variant: 'success', + title: t('prompt'), + description: t('success') + }) setConfig(codeRef.current) + + // 更新配置信息 + reloadConfig() })) } diff --git a/src/frontend/src/pages/SystemPage/components/CreateUser.tsx b/src/frontend/src/pages/SystemPage/components/CreateUser.tsx index fa0cd61d0..bd8c5921c 100644 --- a/src/frontend/src/pages/SystemPage/components/CreateUser.tsx +++ b/src/frontend/src/pages/SystemPage/components/CreateUser.tsx @@ -1,17 +1,23 @@ -import { Dialog, DialogContent, DialogFooter, DialogHeader, DialogTitle } from "@/components/bs-ui/dialog" import { Button } from "@/components/bs-ui/button" -import { useTranslation } from "react-i18next" -import { Label } from "@/components/bs-ui/label" +import { Dialog, DialogContent, DialogFooter, DialogHeader, DialogTitle } from "@/components/bs-ui/dialog" import { Input } from "@/components/bs-ui/input" -import UserRoleItem from "./UserRoleItem" -import { PlusIcon } from "@radix-ui/react-icons" -import { useState } from "react" -import { generateUUID } from "@/components/bs-ui/utils" +import { Label } from "@/components/bs-ui/label" import { useToast } from "@/components/bs-ui/toast/use-toast" -import { handleEncrypt, PWD_RULE } from "@/pages/LoginPage/utils" -import { copyText } from "@/utils" +import { generateUUID } from "@/components/bs-ui/utils" import { createUserApi } from "@/controllers/API/user" import { captureAndAlertRequestErrorHoc } from "@/controllers/request" +import { handleEncrypt, PWD_RULE } from "@/pages/LoginPage/utils" +import { copyText } from "@/utils" +import { EyeNoneIcon, EyeOpenIcon, PlusIcon } from "@radix-ui/react-icons" +import { useState } from "react" +import { useTranslation } from "react-i18next" +import UserRoleItem from "./UserRoleItem" + +enum inputType { + PASSWORD = 'password', + TEXT = 'text' +} +const EyeIconStyle = 'absolute right-7 cursor-pointer' export default function CreateUser({open, onClose, onSave}) { const 
{ t } = useTranslation() @@ -59,6 +65,11 @@ export default function CreateUser({open, onClose, onSave}) { })) } + const [type, setType] = useState(inputType.PASSWORD) + const handleShowPwd = () => { + type === inputType.PASSWORD ? setType(inputType.TEXT) : setType(inputType.PASSWORD) + } + return onClose(b)}> @@ -68,16 +79,20 @@ export default function CreateUser({open, onClose, onSave}) {
setForm({...form, user_name:e.target.value})} - placeholder="后续使用此用户名进行登录,用户名不可修改"/> + placeholder="后续使用此用户名进行登录,用户名不可修改" className="h-[50px]"/>
- setForm({...form, password:e.target.value})}/> +
+ setForm({...form, password:e.target.value})} className="h-[50px]"/> + {type === inputType.PASSWORD ? + : } +
-
+
{items.map((item, index) => 1}