diff --git a/docs/getting_started/install/environment/environment.md b/docs/getting_started/install/environment/environment.md
index 021bbe861..11aec8d40 100644
--- a/docs/getting_started/install/environment/environment.md
+++ b/docs/getting_started/install/environment/environment.md
@@ -6,7 +6,9 @@ LLM Model Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG
 * LLM_MODEL=vicuna-13b
 
 MODEL_SERVER_ADDRESS
+* MODEL_SERVER=http://127.0.0.1:8000
+
 LIMIT_MODEL_CONCURRENCY
 
 * LIMIT_MODEL_CONCURRENCY=5
@@ -84,21 +86,6 @@ embedding recall max token ,2000
 * WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
 ```
 
-```{admonition} Vector Store SETTINGS
-#### Chroma
-* VECTOR_STORE_TYPE=Chroma
-#### MILVUS
-* VECTOR_STORE_TYPE=Milvus
-* MILVUS_URL=127.0.0.1
-* MILVUS_PORT=19530
-* MILVUS_USERNAME
-* MILVUS_PASSWORD
-* MILVUS_SECURE=
-
-#### WEAVIATE
-* WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
-```
-
 ```{admonition} Multi-GPU Setting
 See https://developer.nvidia.com/blog/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/
 If CUDA_VISIBLE_DEVICES is not configured, all available gpus will be used
diff --git a/pilot/base_modules/agent/plugins_util.py b/pilot/base_modules/agent/plugins_util.py
index cf8fb8df3..b5f12c5eb 100644
--- a/pilot/base_modules/agent/plugins_util.py
+++ b/pilot/base_modules/agent/plugins_util.py
@@ -111,7 +111,7 @@ def load_from_git(cfg: Config):
             print("save file")
             cfg.set_plugins(scan_plugins(cfg.debug_mode))
         else:
-            print("get file faild,response code:", response.status_code)
+            print("get file failed, response code:", response.status_code)
     except Exception as e:
         print("load plugin from git exception!" + str(e))
 
diff --git a/pilot/connections/rdbms/conn_clickhouse.py b/pilot/connections/rdbms/conn_clickhouse.py
index a64950877..b2762556a 100644
--- a/pilot/connections/rdbms/conn_clickhouse.py
+++ b/pilot/connections/rdbms/conn_clickhouse.py
@@ -106,13 +106,13 @@ def get_table_comments(self, db_name):
         return [
             (table_comment[0], table_comment[1]) for table_comment in table_comments
         ]
-    
+
     def table_simple_info(self):
         # group_concat() not supported in clickhouse, use arrayStringConcat+groupArray instead; and quotes need to be escaped
         _sql = f"""
             select concat(TABLE_NAME, \'(\' , arrayStringConcat(groupArray(column_name),\'-\'), \')\') as schema_info
-            from information_schema.COLUMNS where table_schema=\'{self.get_current_db_name()}\' group by TABLE_NAME; """
-    
+            from information_schema.COLUMNS where table_schema=\'{self.get_current_db_name()}\' group by TABLE_NAME; """
+
         cursor = self.session.execute(text(_sql))
         results = cursor.fetchall()
         return results
diff --git a/pilot/openapi/api_v1/api_v1.py b/pilot/openapi/api_v1/api_v1.py
index 23cba7948..d33bd1df1 100644
--- a/pilot/openapi/api_v1/api_v1.py
+++ b/pilot/openapi/api_v1/api_v1.py
@@ -172,7 +172,7 @@ async def test_connect(db_config: DBConfig = Body()):
         CFG.LOCAL_DB_MANAGE.test_connect(db_config)
         return Result.succ(True)
     except Exception as e:
-        return Result.faild(code="E1001", msg=str(e))
+        return Result.failed(code="E1001", msg=str(e))
 
 
 @router.post("/v1/chat/db/summary", response_model=Result[bool])
@@ -305,7 +305,7 @@ async def params_load(
         return Result.succ(get_hist_messages(conv_uid))
     except Exception as e:
         logger.error("excel load error!", e)
-        return Result.faild(code="E000X", msg=f"File Load Error {e}")
+        return Result.failed(code="E000X", msg=f"File Load Error {e}")
 
 
 @router.post("/v1/chat/dialogue/delete")
@@ -352,7 +352,7 @@ async def get_chat_instance(dialogue: ConversationVo = Body()) -> BaseChat:
     if not ChatScene.is_valid_mode(dialogue.chat_mode):
         raise StopAsyncIteration(
-            Result.faild("Unsupported Chat Mode," + dialogue.chat_mode + "!")
+            Result.failed("Unsupported Chat Mode, " + dialogue.chat_mode + "!")
         )
 
     chat_param = {
@@ -430,7 +430,7 @@ async def model_types(controller: BaseModelController = Depends(get_model_controller)):
         return Result.succ(list(types))
 
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"controller model types error {e}")
+        return Result.failed(code="E000X", msg=f"controller model types error {e}")
 
 
 @router.get("/v1/model/supports")
@@ -440,7 +440,7 @@ async def model_supports(worker_manager: WorkerManager = Depends(get_worker_manager)):
         models = await worker_manager.supported_models()
         return Result.succ(FlatSupportedModel.from_supports(models))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"Fetch supportd models error {e}")
+        return Result.failed(code="E000X", msg=f"Fetch supported models error {e}")
 
 
 async def no_stream_generator(chat):
diff --git a/pilot/openapi/api_v1/editor/api_editor_v1.py b/pilot/openapi/api_v1/editor/api_editor_v1.py
index e41998942..86b98c9a0 100644
--- a/pilot/openapi/api_v1/editor/api_editor_v1.py
+++ b/pilot/openapi/api_v1/editor/api_editor_v1.py
@@ -107,7 +107,7 @@ async def get_editor_sql(con_uid: str, round: int):
                 .replace("\n", " ")
             )
             return Result.succ(json.loads(context))
-    return Result.faild(msg="not have sql!")
+    return Result.failed(msg="No SQL found!")
 
 
 @router.post("/v1/editor/sql/run", response_model=Result[SqlRunData])
@@ -116,7 +116,7 @@ async def editor_sql_run(run_param: dict = Body()):
     db_name = run_param["db_name"]
     sql = run_param["sql"]
     if not db_name and not sql:
-        return Result.faild("SQL run param error!")
+        return Result.failed("SQL run param error!")
     conn = CFG.LOCAL_DB_MANAGE.get_connect(db_name)
 
     try:
@@ -169,7 +169,7 @@ async def sql_editor_submit(sql_edit_context: ChatSqlEditContext = Body()):
         )
         history_mem.update(history_messages)
         return Result.succ(None)
-    return Result.faild(msg="Edit Faild!")
+    return Result.failed(msg="Edit Failed!")
 
 
 @router.get("/v1/editor/chart/list", response_model=Result[ChartList])
@@ -191,7 +191,7 @@ async def get_editor_chart_list(con_uid: str):
             charts=json.loads(element["data"]["content"]),
         )
         return Result.succ(chart_list)
-    return Result.faild(msg="Not have charts!")
+    return Result.failed(msg="No charts found!")
 
 
 @router.post("/v1/editor/chart/info", response_model=Result[ChartDetail])
@@ -210,7 +210,7 @@ async def get_editor_chart_info(param: dict = Body()):
             logger.error(
                 "this dashboard dialogue version too old, can't support editor!"
             )
-            return Result.faild(
+            return Result.failed(
                 msg="this dashboard dialogue version too old, can't support editor!"
             )
         for element in last_round["messages"]:
@@ -234,7 +234,7 @@ async def get_editor_chart_info(param: dict = Body()):
             )
 
         return Result.succ(detail)
-    return Result.faild(msg="Can't Find Chart Detail Info!")
+    return Result.failed(msg="Can't Find Chart Detail Info!")
 
 
 @router.post("/v1/editor/chart/run", response_model=Result[ChartRunData])
@@ -244,7 +244,7 @@ async def editor_chart_run(run_param: dict = Body()):
     sql = run_param["sql"]
     chart_type = run_param["chart_type"]
     if not db_name and not sql:
-        return Result.faild("SQL run param error!")
+        return Result.failed("SQL run param error!")
     try:
         dashboard_data_loader: DashboardDataLoader = DashboardDataLoader()
         db_conn = CFG.LOCAL_DB_MANAGE.get_connect(db_name)
@@ -334,7 +334,7 @@ async def chart_editor_submit(chart_edit_context: ChatChartEditContext = Body()):
             )
         except Exception as e:
             logger.error(f"edit chart exception!{str(e)}", e)
-            return Result.faild(msg=f"Edit chart exception!{str(e)}")
+            return Result.failed(msg=f"Edit chart exception!{str(e)}")
         history_mem.update(history_messages)
         return Result.succ(None)
-    return Result.faild(msg="Edit Faild!")
+    return Result.failed(msg="Edit Failed!")
diff --git a/pilot/openapi/api_view_model.py b/pilot/openapi/api_view_model.py
index 60065f2f2..af1aa4b9c 100644
--- a/pilot/openapi/api_view_model.py
+++ b/pilot/openapi/api_view_model.py
@@ -17,11 +17,10 @@ def succ(cls, data: T):
         return Result(success=True, err_code=None, err_msg=None, data=data)
 
     @classmethod
-    def faild(cls, msg):
-        return Result(success=False, err_code="E000X", err_msg=msg, data=None)
-
-    @classmethod
-    def faild(cls, code, msg):
+    def failed(cls, msg, code="E000X"):
+        # Python has no method overloading: the second `faild` definition
+        # silently shadowed the first, so a single classmethod with a
+        # default error code now covers both call styles.
         return Result(success=False, err_code=code, err_msg=msg, data=None)
 
diff --git a/pilot/openapi/base.py b/pilot/openapi/base.py
index 506254ec7..d8c814787 100644
--- a/pilot/openapi/base.py
+++ b/pilot/openapi/base.py
@@ -7,4 +7,4 @@ async def validation_exception_handler(request: Request, exc: RequestValidationError):
     message = ""
     for error in exc.errors():
         message += ".".join(error.get("loc")) + ":" + error.get("msg") + ";"
-    return Result.faild(code="E0001", msg=message)
+    return Result.failed(code="E0001", msg=message)
diff --git a/pilot/scene/base_chat.py b/pilot/scene/base_chat.py
index 86fe8f859..24ec1c928 100644
--- a/pilot/scene/base_chat.py
+++ b/pilot/scene/base_chat.py
@@ -223,7 +223,7 @@ async def stream_call(self):
             span.end()
         except Exception as e:
             print(traceback.format_exc())
-            logger.error("model response parase faild!" + str(e))
+            logger.error("model response parse failed!" + str(e))
             self.current_message.add_view_message(
                 f"""ERROR!{str(e)}\n {ai_response_text} """
             )
diff --git a/pilot/scene/chat_dashboard/data_loader.py b/pilot/scene/chat_dashboard/data_loader.py
index faabe542a..970fc92dd 100644
--- a/pilot/scene/chat_dashboard/data_loader.py
+++ b/pilot/scene/chat_dashboard/data_loader.py
@@ -52,8 +52,8 @@ def get_chart_values_by_data(self, field_names, datas, chart_sql: str):
                 values.append(value_item)
             return field_names, values
         except Exception as e:
-            logger.debug("Prepare Chart Data Faild!" + str(e))
-            raise ValueError("Prepare Chart Data Faild!")
+            logger.debug("Prepare Chart Data Failed!" + str(e))
+            raise ValueError("Prepare Chart Data Failed!")
 
     def get_chart_values_by_db(self, db_name: str, chart_sql: str):
         logger.info(f"get_chart_values_by_db:{db_name},{chart_sql}")
diff --git a/pilot/scene/chat_dashboard/prompt.py b/pilot/scene/chat_dashboard/prompt.py
index 9fed97f8f..7f1dd4090 100644
--- a/pilot/scene/chat_dashboard/prompt.py
+++ b/pilot/scene/chat_dashboard/prompt.py
@@ -42,7 +42,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = False
+PROMPT_NEED_STREAM_OUT = False
 
 prompt = PromptTemplate(
     template_scene=ChatScene.ChatDashboard.value(),
@@ -50,9 +50,9 @@
     response_format=json.dumps(RESPONSE_FORMAT, indent=4),
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=ChatDashboardOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
 CFG.prompt_template_registry.register(prompt, is_default=True)
diff --git a/pilot/scene/chat_data/chat_excel/excel_analyze/prompt.py b/pilot/scene/chat_data/chat_excel/excel_analyze/prompt.py
index 23c86bd4d..c1dfdfee3 100644
--- a/pilot/scene/chat_data/chat_excel/excel_analyze/prompt.py
+++ b/pilot/scene/chat_data/chat_excel/excel_analyze/prompt.py
@@ -51,7 +51,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = True
+PROMPT_NEED_STREAM_OUT = True
 
 # Temperature is a configuration hyperparameter that controls the randomness of language model output.
 # A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -63,9 +63,9 @@
     input_variables=["user_input", "table_name", "disply_type"],
     template_define=_PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=ChatExcelOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
     need_historical_messages=True,
     # example_selector=sql_data_example,
diff --git a/pilot/scene/chat_data/chat_excel/excel_learning/prompt.py b/pilot/scene/chat_data/chat_excel/excel_learning/prompt.py
index aefd96a71..df17aec6b 100644
--- a/pilot/scene/chat_data/chat_excel/excel_learning/prompt.py
+++ b/pilot/scene/chat_data/chat_excel/excel_learning/prompt.py
@@ -67,7 +67,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = False
+PROMPT_NEED_STREAM_OUT = False
 
 # Temperature is a configuration hyperparameter that controls the randomness of language model output.
 # A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -80,9 +80,9 @@
     response_format=json.dumps(RESPONSE_FORMAT_SIMPLE, ensure_ascii=False, indent=4),
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=LearningExcelOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
     # example_selector=sql_data_example,
     temperature=PROMPT_TEMPERATURE,
diff --git a/pilot/scene/chat_db/auto_execute/prompt.py b/pilot/scene/chat_db/auto_execute/prompt.py
index abc889cec..9b4bcb6a5 100644
--- a/pilot/scene/chat_db/auto_execute/prompt.py
+++ b/pilot/scene/chat_db/auto_execute/prompt.py
@@ -33,7 +33,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = False
+PROMPT_NEED_STREAM_OUT = False
 
 # Temperature is a configuration hyperparameter that controls the randomness of language model output.
 # A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -46,9 +46,9 @@
     response_format=json.dumps(RESPONSE_FORMAT_SIMPLE, ensure_ascii=False, indent=4),
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=DbChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
     # example_selector=sql_data_example,
     temperature=PROMPT_TEMPERATURE,
diff --git a/pilot/scene/chat_db/auto_execute/prompt_baichuan.py b/pilot/scene/chat_db/auto_execute/prompt_baichuan.py
index 95aa962fa..4888cbd7f 100644
--- a/pilot/scene/chat_db/auto_execute/prompt_baichuan.py
+++ b/pilot/scene/chat_db/auto_execute/prompt_baichuan.py
@@ -36,7 +36,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = False
+PROMPT_NEED_STREAM_OUT = False
 
 # Temperature is a configuration hyperparameter that controls the randomness of language model output.
 # A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -50,9 +50,9 @@
     template_is_strict=False,
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=DbChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
     # example_selector=sql_data_example,
     temperature=PROMPT_TEMPERATURE,
diff --git a/pilot/scene/chat_db/professional_qa/prompt.py b/pilot/scene/chat_db/professional_qa/prompt.py
index ca4110398..c84f2eb7a 100644
--- a/pilot/scene/chat_db/professional_qa/prompt.py
+++ b/pilot/scene/chat_db/professional_qa/prompt.py
@@ -54,7 +54,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = True
+PROMPT_NEED_STREAM_OUT = True
 
 prompt = PromptTemplate(
     template_scene=ChatScene.ChatWithDbQA.value(),
@@ -62,9 +62,9 @@
     response_format=None,
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
 
diff --git a/pilot/scene/chat_knowledge/inner_db_summary/prompt.py b/pilot/scene/chat_knowledge/inner_db_summary/prompt.py
index 924fab2c6..3f81906c0 100644
--- a/pilot/scene/chat_knowledge/inner_db_summary/prompt.py
+++ b/pilot/scene/chat_knowledge/inner_db_summary/prompt.py
@@ -33,7 +33,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = False
+PROMPT_NEED_STREAM_OUT = False
 
 prompt = PromptTemplate(
     template_scene=ChatScene.InnerChatDBSummary.value(),
@@ -41,9 +41,9 @@
     response_format=json.dumps(RESPONSE_FORMAT, indent=4),
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE + PROMPT_RESPONSE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
diff --git a/pilot/scene/chat_knowledge/v1/prompt.py b/pilot/scene/chat_knowledge/v1/prompt.py
index 394906562..ea55fca5a 100644
--- a/pilot/scene/chat_knowledge/v1/prompt.py
+++ b/pilot/scene/chat_knowledge/v1/prompt.py
@@ -33,7 +33,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = True
+PROMPT_NEED_STREAM_OUT = True
 
 prompt = PromptTemplate(
     template_scene=ChatScene.ChatKnowledge.value(),
@@ -41,9 +41,9 @@
     response_format=None,
     template_define=PROMPT_SCENE_DEFINE,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
diff --git a/pilot/scene/chat_knowledge/v1/prompt_chatglm.py b/pilot/scene/chat_knowledge/v1/prompt_chatglm.py
index 7f66c1e6f..898699e89 100644
--- a/pilot/scene/chat_knowledge/v1/prompt_chatglm.py
+++ b/pilot/scene/chat_knowledge/v1/prompt_chatglm.py
@@ -33,7 +33,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = True
+PROMPT_NEED_STREAM_OUT = True
 
 prompt = PromptTemplate(
     template_scene=ChatScene.ChatKnowledge.value(),
@@ -41,9 +41,9 @@
     response_format=None,
     template_define=None,
     template=_DEFAULT_TEMPLATE,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
diff --git a/pilot/scene/chat_normal/prompt.py b/pilot/scene/chat_normal/prompt.py
index ad0724874..dae412987 100644
--- a/pilot/scene/chat_normal/prompt.py
+++ b/pilot/scene/chat_normal/prompt.py
@@ -11,7 +11,7 @@
 
 PROMPT_SEP = SeparatorStyle.SINGLE.value
 
-PROMPT_NEED_NEED_STREAM_OUT = True
+PROMPT_NEED_STREAM_OUT = True
 
 prompt = PromptTemplate(
     template_scene=ChatScene.ChatNormal.value(),
@@ -19,9 +19,9 @@
     response_format=None,
     template_define=PROMPT_SCENE_DEFINE,
     template=None,
-    stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+    stream_out=PROMPT_NEED_STREAM_OUT,
     output_parser=NormalChatOutputParser(
-        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+        sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
     ),
 )
diff --git a/pilot/server/knowledge/api.py b/pilot/server/knowledge/api.py
index 8e5e52b58..58b290d58 100644
--- a/pilot/server/knowledge/api.py
+++ b/pilot/server/knowledge/api.py
@@ -45,7 +45,7 @@ def space_add(request: KnowledgeSpaceRequest):
         knowledge_space_service.create_knowledge_space(request)
         return Result.succ([])
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"space add error {e}")
+        return Result.failed(code="E000X", msg=f"space add error {e}")
 
 
 @router.post("/knowledge/space/list")
@@ -54,7 +54,7 @@ def space_list(request: KnowledgeSpaceRequest):
     try:
         return Result.succ(knowledge_space_service.get_knowledge_space(request))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"space list error {e}")
+        return Result.failed(code="E000X", msg=f"space list error {e}")
 
 
 @router.post("/knowledge/space/delete")
@@ -63,7 +63,7 @@ def space_delete(request: KnowledgeSpaceRequest):
     try:
         return Result.succ(knowledge_space_service.delete_space(request.name))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"space list error {e}")
+        return Result.failed(code="E000X", msg=f"space delete error {e}")
 
 
 @router.post("/knowledge/{space_name}/arguments")
@@ -72,7 +72,7 @@ def arguments(space_name: str):
     try:
         return Result.succ(knowledge_space_service.arguments(space_name))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"space list error {e}")
+        return Result.failed(code="E000X", msg=f"space arguments error {e}")
 
 
 @router.post("/knowledge/{space_name}/argument/save")
@@ -83,7 +83,7 @@ def arguments_save(space_name: str, argument_request: SpaceArgumentRequest):
             knowledge_space_service.argument_save(space_name, argument_request)
         )
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"space list error {e}")
+        return Result.failed(code="E000X", msg=f"argument save error {e}")
 
 
 @router.post("/knowledge/{space_name}/document/add")
@@ -97,7 +97,7 @@ def document_add(space_name: str, request: KnowledgeDocumentRequest):
         )
         # return Result.succ([])
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document add error {e}")
+        return Result.failed(code="E000X", msg=f"document add error {e}")
 
 
 @router.post("/knowledge/{space_name}/document/list")
@@ -108,7 +108,7 @@ def document_list(space_name: str, query_request: DocumentQueryRequest):
             knowledge_space_service.get_knowledge_documents(space_name, query_request)
         )
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document list error {e}")
+        return Result.failed(code="E000X", msg=f"document list error {e}")
 
 
 @router.post("/knowledge/{space_name}/document/delete")
@@ -119,7 +119,7 @@ def document_delete(space_name: str, query_request: DocumentQueryRequest):
         return Result.succ(
             knowledge_space_service.delete_document(space_name, query_request.doc_name)
         )
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document list error {e}")
+        return Result.failed(code="E000X", msg=f"document delete error {e}")
 
 
 @router.post("/knowledge/{space_name}/document/upload")
@@ -156,9 +156,9 @@ async def document_upload(
                 )
             )
             # return Result.succ([])
-        return Result.faild(code="E000X", msg=f"doc_file is None")
+        return Result.failed(code="E000X", msg="doc_file is None")
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document add error {e}")
+        return Result.failed(code="E000X", msg=f"document add error {e}")
 
 
 @router.post("/knowledge/{space_name}/document/sync")
@@ -170,7 +170,7 @@ def document_sync(space_name: str, request: DocumentSyncRequest):
         )
         return Result.succ([])
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document sync error {e}")
+        return Result.failed(code="E000X", msg=f"document sync error {e}")
 
 
 @router.post("/knowledge/{space_name}/chunk/list")
@@ -179,7 +179,7 @@ def document_list(space_name: str, query_request: ChunkQueryRequest):
     try:
         return Result.succ(knowledge_space_service.get_document_chunks(query_request))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"document chunk list error {e}")
+        return Result.failed(code="E000X", msg=f"document chunk list error {e}")
 
 
 @router.post("/knowledge/{vector_name}/query")
diff --git a/pilot/server/llm_manage/api.py b/pilot/server/llm_manage/api.py
index f5602d3c4..617018642 100644
--- a/pilot/server/llm_manage/api.py
+++ b/pilot/server/llm_manage/api.py
@@ -33,9 +33,9 @@ async def model_params():
             params.append(model_dict)
         return Result.succ(params)
         if not worker_instance:
-            return Result.faild(code="E000X", msg=f"can not find worker manager")
+            return Result.failed(code="E000X", msg="cannot find worker manager")
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"model stop failed {e}")
+        return Result.failed(code="E000X", msg=f"model params error {e}")
 
 
 @router.get("/v1/worker/model/list")
@@ -78,7 +78,7 @@ async def model_list():
 
         return Result.succ(responses)
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"model list error {e}")
+        return Result.failed(code="E000X", msg=f"model list error {e}")
 
 
 @router.post("/v1/worker/model/stop")
@@ -91,11 +91,11 @@ async def model_stop(request: WorkerStartupRequest):
             ComponentType.WORKER_MANAGER_FACTORY, WorkerManagerFactory
         ).create()
         if not worker_manager:
-            return Result.faild(code="E000X", msg=f"can not find worker manager")
+            return Result.failed(code="E000X", msg="cannot find worker manager")
         request.params = {}
         return Result.succ(await worker_manager.model_shutdown(request))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"model stop failed {e}")
+        return Result.failed(code="E000X", msg=f"model stop failed {e}")
 
 
 @router.post("/v1/worker/model/start")
@@ -106,7 +106,7 @@ async def model_start(request: WorkerStartupRequest):
             ComponentType.WORKER_MANAGER_FACTORY, WorkerManagerFactory
         ).create()
         if not worker_manager:
-            return Result.faild(code="E000X", msg=f"can not find worker manager")
+            return Result.failed(code="E000X", msg="cannot find worker manager")
         return Result.succ(await worker_manager.model_startup(request))
     except Exception as e:
-        return Result.faild(code="E000X", msg=f"model start failed {e}")
+        return Result.failed(code="E000X", msg=f"model start failed {e}")
diff --git a/pilot/server/prompt/api.py b/pilot/server/prompt/api.py
index b94546891..0be4140a7 100644
--- a/pilot/server/prompt/api.py
+++ b/pilot/server/prompt/api.py
@@ -11,12 +11,12 @@
 
 @router.post("/prompt/add")
 def prompt_add(request: PromptManageRequest):
-    print(f"/space/add params: {request}")
+    print(f"/prompt/add params: {request}")
     try:
         prompt_manage_service.create_prompt(request)
         return Result.succ([])
     except Exception as e:
-        return Result.faild(code="E010X", msg=f"prompt add error {e}")
+        return Result.failed(code="E010X", msg=f"prompt add error {e}")
 
 
 @router.post("/prompt/list")
@@ -25,7 +25,7 @@ def prompt_list(request: PromptManageRequest):
     try:
         return Result.succ(prompt_manage_service.get_prompts(request))
     except Exception as e:
-        return Result.faild(code="E010X", msg=f"prompt list error {e}")
+        return Result.failed(code="E010X", msg=f"prompt list error {e}")
 
 
 @router.post("/prompt/update")
@@ -34,7 +34,7 @@ def prompt_update(request: PromptManageRequest):
     try:
        return Result.succ(prompt_manage_service.update_prompt(request))
     except Exception as e:
-        return Result.faild(code="E010X", msg=f"prompt update error {e}")
+        return Result.failed(code="E010X", msg=f"prompt update error {e}")
 
 
 @router.post("/prompt/delete")
@@ -43,4 +43,4 @@ def prompt_delete(request: PromptManageRequest):
     try:
         return Result.succ(prompt_manage_service.delete_prompt(request.prompt_name))
     except Exception as e:
-        return Result.faild(code="E010X", msg=f"prompt delete error {e}")
+        return Result.failed(code="E010X", msg=f"prompt delete error {e}")