env param doc bugfix & typos fix (#664)
Aries-ckt authored Nov 9, 2023
2 parents 2fe8ca4 + c4ae0fd commit 8b1c73f
Showing 22 changed files with 80 additions and 93 deletions.
17 changes: 2 additions & 15 deletions docs/getting_started/install/environment/environment.md
@@ -6,7 +6,9 @@ LLM Model Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG
* LLM_MODEL=vicuna-13b
MODEL_SERVER_ADDRESS
* MODEL_SERVER=http://127.0.0.1:8000
LIMIT_MODEL_CONCURRENCY
* LIMIT_MODEL_CONCURRENCY=5
@@ -84,21 +86,6 @@ embedding recall max token ,2000
* WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
```

- ```{admonition} Vector Store SETTINGS
- #### Chroma
- * VECTOR_STORE_TYPE=Chroma
- #### MILVUS
- * VECTOR_STORE_TYPE=Milvus
- * MILVUS_URL=127.0.0.1
- * MILVUS_PORT=19530
- * MILVUS_USERNAME
- * MILVUS_PASSWORD
- * MILVUS_SECURE=
- #### WEAVIATE
- * WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
- ```

```{admonition} Multi-GPU Setting
See https://developer.nvidia.com/blog/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/
If CUDA_VISIBLE_DEVICES is not configured, all available gpus will be used
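The multi-GPU admonition above is terse, so a word of context: `CUDA_VISIBLE_DEVICES` masks which GPUs the CUDA runtime can see, and it must be set before CUDA initializes. A minimal sketch of the behavior, assuming a standard PyTorch install on a machine with at least two GPUs (the device indices are illustrative):

```python
import os

# The mask must be set before CUDA is initialized, i.e. before the first
# torch call that touches the driver.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

import torch

# The process now sees exactly two devices, renumbered as cuda:0 and cuda:1.
print(torch.cuda.device_count())  # 2
```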
2 changes: 1 addition & 1 deletion pilot/base_modules/agent/plugins_util.py
@@ -111,7 +111,7 @@ def load_from_git(cfg: Config):
print("save file")
cfg.set_plugins(scan_plugins(cfg.debug_mode))
else:
print("get file faild,response code:", response.status_code)
print("get file failed,response code:", response.status_code)
except Exception as e:
print("load plugin from git exception!" + str(e))

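The corrected message above still reports HTTP failures via `print`. For reference, `requests` ships a `raise_for_status()` helper for the same check; a hedged sketch, where the URL is a placeholder rather than the plugin source the repo actually fetches:

```python
import requests

response = requests.get("https://example.com/plugin.zip", timeout=30)
try:
    # Raises requests.HTTPError for any 4xx/5xx status instead of
    # continuing silently.
    response.raise_for_status()
except requests.HTTPError as err:
    print("get file failed, response code:", response.status_code, err)
```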
6 changes: 3 additions & 3 deletions pilot/connections/rdbms/conn_clickhouse.py
@@ -106,13 +106,13 @@ def get_table_comments(self, db_name):
return [
(table_comment[0], table_comment[1]) for table_comment in table_comments
]

def table_simple_info(self):
# group_concat() not supported in clickhouse, use arrayStringConcat+groupArray instead; and quotes need to be escaped
_sql = f"""
select concat(TABLE_NAME, \'(\' , arrayStringConcat(groupArray(column_name),\'-\'), \')\') as schema_info
- from information_schema.COLUMNS where table_schema=\'{self.get_current_db_name()}\' group by TABLE_NAME; """
+ from information_schema.COLUMNS where table_schema=\'{self.get_current_db_name()}\' group by TABLE_NAME; """

cursor = self.session.execute(text(_sql))
results = cursor.fetchall()
return results
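The comment in `table_simple_info` deserves a gloss: ClickHouse has no `group_concat`, so the same aggregation is expressed by collecting values with `groupArray` and joining them with `arrayStringConcat`. A side-by-side sketch; the schema name `mydb` is a placeholder:

```python
# MySQL dialect: one row per table, column names joined with '-'.
mysql_sql = """
    SELECT TABLE_NAME,
           GROUP_CONCAT(COLUMN_NAME SEPARATOR '-') AS schema_info
    FROM information_schema.COLUMNS
    WHERE TABLE_SCHEMA = 'mydb'
    GROUP BY TABLE_NAME;
"""

# ClickHouse equivalent, matching the query built above: groupArray
# collects the column names, arrayStringConcat joins them.
clickhouse_sql = """
    SELECT concat(TABLE_NAME, '(',
                  arrayStringConcat(groupArray(column_name), '-'), ')') AS schema_info
    FROM information_schema.COLUMNS
    WHERE table_schema = 'mydb'
    GROUP BY TABLE_NAME;
"""
```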
10 changes: 5 additions & 5 deletions pilot/openapi/api_v1/api_v1.py
@@ -172,7 +172,7 @@ async def test_connect(db_config: DBConfig = Body()):
CFG.LOCAL_DB_MANAGE.test_connect(db_config)
return Result.succ(True)
except Exception as e:
return Result.faild(code="E1001", msg=str(e))
return Result.failed(code="E1001", msg=str(e))


@router.post("/v1/chat/db/summary", response_model=Result[bool])
@@ -305,7 +305,7 @@ async def params_load(
return Result.succ(get_hist_messages(conv_uid))
except Exception as e:
logger.error("excel load error!", e)
return Result.faild(code="E000X", msg=f"File Load Error {e}")
return Result.failed(code="E000X", msg=f"File Load Error {e}")


@router.post("/v1/chat/dialogue/delete")
@@ -352,7 +352,7 @@ async def get_chat_instance(dialogue: ConversationVo = Body()) -> BaseChat:

if not ChatScene.is_valid_mode(dialogue.chat_mode):
raise StopAsyncIteration(
Result.faild("Unsupported Chat Mode," + dialogue.chat_mode + "!")
Result.failed("Unsupported Chat Mode," + dialogue.chat_mode + "!")
)

chat_param = {
@@ -430,7 +430,7 @@ async def model_types(controller: BaseModelController = Depends(get_model_contro
return Result.succ(list(types))

except Exception as e:
return Result.faild(code="E000X", msg=f"controller model types error {e}")
return Result.failed(code="E000X", msg=f"controller model types error {e}")


@router.get("/v1/model/supports")
@@ -440,7 +440,7 @@ async def model_supports(worker_manager: WorkerManager = Depends(get_worker_mana
models = await worker_manager.supported_models()
return Result.succ(FlatSupportedModel.from_supports(models))
except Exception as e:
return Result.faild(code="E000X", msg=f"Fetch supportd models error {e}")
return Result.failed(code="E000X", msg=f"Fetch supportd models error {e}")


async def no_stream_generator(chat):
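Every `faild` to `failed` change in this file follows the same endpoint shape: return `Result.succ(...)` on success and `Result.failed(...)` from the exception handler. A self-contained sketch of that pattern; the route path, the `do_work` helper, and the error code are illustrative, not taken from the repo:

```python
from fastapi import APIRouter

from pilot.openapi.api_view_model import Result  # the view model edited below

router = APIRouter()


def do_work() -> str:
    # Hypothetical stand-in for the real handler logic.
    return "ok"


@router.get("/v1/example")
async def example_endpoint():
    try:
        return Result.succ(do_work())
    except Exception as e:
        # failed() replaces the old misspelled faild() throughout this commit.
        return Result.failed(code="E000X", msg=str(e))
```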
18 changes: 9 additions & 9 deletions pilot/openapi/api_v1/editor/api_editor_v1.py
@@ -107,7 +107,7 @@ async def get_editor_sql(con_uid: str, round: int):
.replace("\n", " ")
)
return Result.succ(json.loads(context))
return Result.faild(msg="not have sql!")
return Result.failed(msg="not have sql!")


@router.post("/v1/editor/sql/run", response_model=Result[SqlRunData])
@@ -116,7 +116,7 @@ async def editor_sql_run(run_param: dict = Body()):
db_name = run_param["db_name"]
sql = run_param["sql"]
if not db_name and not sql:
return Result.faild("SQL run param error!")
return Result.failed("SQL run param error!")
conn = CFG.LOCAL_DB_MANAGE.get_connect(db_name)

try:
@@ -169,7 +169,7 @@ async def sql_editor_submit(sql_edit_context: ChatSqlEditContext = Body()):
)
history_mem.update(history_messages)
return Result.succ(None)
return Result.faild(msg="Edit Faild!")
return Result.failed(msg="Edit Failed!")


@router.get("/v1/editor/chart/list", response_model=Result[ChartList])
@@ -191,7 +191,7 @@ async def get_editor_chart_list(con_uid: str):
charts=json.loads(element["data"]["content"]),
)
return Result.succ(chart_list)
return Result.faild(msg="Not have charts!")
return Result.failed(msg="Not have charts!")


@router.post("/v1/editor/chart/info", response_model=Result[ChartDetail])
@@ -210,7 +210,7 @@ async def get_editor_chart_info(param: dict = Body()):
logger.error(
"this dashboard dialogue version too old, can't support editor!"
)
- return Result.faild(
+ return Result.failed(
msg="this dashboard dialogue version too old, can't support editor!"
)
for element in last_round["messages"]:
@@ -234,7 +234,7 @@ async def get_editor_chart_info(param: dict = Body()):
)

return Result.succ(detail)
return Result.faild(msg="Can't Find Chart Detail Info!")
return Result.failed(msg="Can't Find Chart Detail Info!")


@router.post("/v1/editor/chart/run", response_model=Result[ChartRunData])
@@ -244,7 +244,7 @@ async def editor_chart_run(run_param: dict = Body()):
sql = run_param["sql"]
chart_type = run_param["chart_type"]
if not db_name and not sql:
return Result.faild("SQL run param error!")
return Result.failed("SQL run param error!")
try:
dashboard_data_loader: DashboardDataLoader = DashboardDataLoader()
db_conn = CFG.LOCAL_DB_MANAGE.get_connect(db_name)
@@ -334,7 +334,7 @@ async def chart_editor_submit(chart_edit_context: ChatChartEditContext = Body())
)
except Exception as e:
logger.error(f"edit chart exception!{str(e)}", e)
return Result.faild(msg=f"Edit chart exception!{str(e)}")
return Result.failed(msg=f"Edit chart exception!{str(e)}")
history_mem.update(history_messages)
return Result.succ(None)
return Result.faild(msg="Edit Faild!")
return Result.failed(msg="Edit Failed!")
4 changes: 2 additions & 2 deletions pilot/openapi/api_view_model.py
@@ -17,11 +17,11 @@ def succ(cls, data: T):
return Result(success=True, err_code=None, err_msg=None, data=data)

@classmethod
- def faild(cls, msg):
+ def failed(cls, msg):
return Result(success=False, err_code="E000X", err_msg=msg, data=None)

@classmethod
- def faild(cls, code, msg):
+ def failed(cls, code, msg):
return Result(success=False, err_code=code, err_msg=msg, data=None)


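One observation about the class above: both before and after the rename, the two classmethods share a single name, and Python keeps only the last definition in a class body, so `failed(cls, msg)` is shadowed by `failed(cls, code, msg)`. One-argument call sites such as `Result.failed("SQL run param error!")` would therefore raise a `TypeError` if reached, since the surviving signature requires both `code` and `msg`. A hedged sketch of one way to collapse the pair, using a plain dataclass where the repo uses a pydantic model:

```python
from dataclasses import dataclass
from typing import Generic, Optional, TypeVar

T = TypeVar("T")


@dataclass
class Result(Generic[T]):
    success: bool
    err_code: Optional[str] = None
    err_msg: Optional[str] = None
    data: Optional[T] = None

    @classmethod
    def succ(cls, data: T) -> "Result[T]":
        return cls(success=True, data=data)

    @classmethod
    def failed(cls, msg: str, code: str = "E000X") -> "Result[T]":
        # A single method with a defaulted error code serves both call
        # shapes: failed("boom") and failed(code="E1001", msg="boom").
        return cls(success=False, err_code=code, err_msg=msg)
```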
2 changes: 1 addition & 1 deletion pilot/openapi/base.py
@@ -7,4 +7,4 @@ async def validation_exception_handler(request: Request, exc: RequestValidationE
message = ""
for error in exc.errors():
message += ".".join(error.get("loc")) + ":" + error.get("msg") + ";"
return Result.faild(code="E0001", msg=message)
return Result.failed(code="E0001", msg=message)
2 changes: 1 addition & 1 deletion pilot/scene/base_chat.py
@@ -223,7 +223,7 @@ async def stream_call(self):
span.end()
except Exception as e:
print(traceback.format_exc())
logger.error("model response parase faild!" + str(e))
logger.error("model response parase failed!" + str(e))
self.current_message.add_view_message(
f"""<span style=\"color:red\">ERROR!</span>{str(e)}\n {ai_response_text} """
)
4 changes: 2 additions & 2 deletions pilot/scene/chat_dashboard/data_loader.py
@@ -52,8 +52,8 @@ def get_chart_values_by_data(self, field_names, datas, chart_sql: str):
values.append(value_item)
return field_names, values
except Exception as e:
logger.debug("Prepare Chart Data Faild!" + str(e))
raise ValueError("Prepare Chart Data Faild!")
logger.debug("Prepare Chart Data Failed!" + str(e))
raise ValueError("Prepare Chart Data Failed!")

def get_chart_values_by_db(self, db_name: str, chart_sql: str):
logger.info(f"get_chart_values_by_db:{db_name},{chart_sql}")
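The corrected messages above still re-raise without the original cause attached. For reference, `raise ... from e` chains the underlying exception so the root cause survives in tracebacks; a sketch under that variant, where the function body is a simplified stand-in for the real value mapping:

```python
import logging

logger = logging.getLogger(__name__)


def get_chart_values(rows):
    try:
        return [tuple(row) for row in rows]  # stand-in for the real mapping
    except Exception as e:
        logger.debug("Prepare Chart Data Failed!" + str(e))
        # from e keeps the original exception attached as __cause__.
        raise ValueError("Prepare Chart Data Failed!") from e
```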
6 changes: 3 additions & 3 deletions pilot/scene/chat_dashboard/prompt.py
@@ -42,17 +42,17 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = False
+ PROMPT_NEED_STREAM_OUT = False

prompt = PromptTemplate(
template_scene=ChatScene.ChatDashboard.value(),
input_variables=["input", "table_info", "dialect", "supported_chat_type"],
response_format=json.dumps(RESPONSE_FORMAT, indent=4),
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=ChatDashboardOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
)
CFG.prompt_template_registry.register(prompt, is_default=True)
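The `PROMPT_NEED_NEED_STREAM_OUT` to `PROMPT_NEED_STREAM_OUT` rename above repeats verbatim in the prompt modules that follow; only the flag's value differs per scene (streaming scenes set it to `True`). Doubled words like `NEED_NEED` are easy to catch mechanically; a hypothetical helper, not part of the repo, that flags repeated words inside identifiers:

```python
import re
from pathlib import Path

# Matches identifiers containing a doubled word, e.g. PROMPT_NEED_NEED_STREAM_OUT.
DOUBLED = re.compile(r"\b\w*?([A-Za-z]+)_\1(?:_\w+)?\b")

for path in Path("pilot").rglob("*.py"):
    for lineno, line in enumerate(path.read_text().splitlines(), start=1):
        if DOUBLED.search(line):
            print(f"{path}:{lineno}: {line.strip()}")
```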
6 changes: 3 additions & 3 deletions pilot/scene/chat_data/chat_excel/excel_analyze/prompt.py
@@ -51,7 +51,7 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = True
+ PROMPT_NEED_STREAM_OUT = True

# Temperature is a configuration hyperparameter that controls the randomness of language model output.
# A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -63,9 +63,9 @@
input_variables=["user_input", "table_name", "disply_type"],
template_define=_PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=ChatExcelOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
need_historical_messages=True,
# example_selector=sql_data_example,
6 changes: 3 additions & 3 deletions pilot/scene/chat_data/chat_excel/excel_learning/prompt.py
@@ -67,7 +67,7 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = False
+ PROMPT_NEED_STREAM_OUT = False

# Temperature is a configuration hyperparameter that controls the randomness of language model output.
# A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -80,9 +80,9 @@
response_format=json.dumps(RESPONSE_FORMAT_SIMPLE, ensure_ascii=False, indent=4),
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=LearningExcelOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
# example_selector=sql_data_example,
temperature=PROMPT_TEMPERATURE,
6 changes: 3 additions & 3 deletions pilot/scene/chat_db/auto_execute/prompt.py
@@ -33,7 +33,7 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = False
+ PROMPT_NEED_STREAM_OUT = False

# Temperature is a configuration hyperparameter that controls the randomness of language model output.
# A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -46,9 +46,9 @@
response_format=json.dumps(RESPONSE_FORMAT_SIMPLE, ensure_ascii=False, indent=4),
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=DbChatOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
# example_selector=sql_data_example,
temperature=PROMPT_TEMPERATURE,
6 changes: 3 additions & 3 deletions pilot/scene/chat_db/auto_execute/prompt_baichuan.py
@@ -36,7 +36,7 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = False
+ PROMPT_NEED_STREAM_OUT = False

# Temperature is a configuration hyperparameter that controls the randomness of language model output.
# A high temperature produces more unpredictable and creative results, while a low temperature produces more common and conservative output.
@@ -50,9 +50,9 @@
template_is_strict=False,
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=DbChatOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
# example_selector=sql_data_example,
temperature=PROMPT_TEMPERATURE,
6 changes: 3 additions & 3 deletions pilot/scene/chat_db/professional_qa/prompt.py
@@ -54,17 +54,17 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = True
+ PROMPT_NEED_STREAM_OUT = True

prompt = PromptTemplate(
template_scene=ChatScene.ChatWithDbQA.value(),
input_variables=["input", "table_info"],
response_format=None,
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=NormalChatOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
)

6 changes: 3 additions & 3 deletions pilot/scene/chat_knowledge/inner_db_summary/prompt.py
@@ -33,17 +33,17 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = False
+ PROMPT_NEED_STREAM_OUT = False

prompt = PromptTemplate(
template_scene=ChatScene.InnerChatDBSummary.value(),
input_variables=["db_profile_summary", "db_input", "response"],
response_format=json.dumps(RESPONSE_FORMAT, indent=4),
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE + PROMPT_RESPONSE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=NormalChatOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
)

6 changes: 3 additions & 3 deletions pilot/scene/chat_knowledge/v1/prompt.py
@@ -33,17 +33,17 @@

PROMPT_SEP = SeparatorStyle.SINGLE.value

- PROMPT_NEED_NEED_STREAM_OUT = True
+ PROMPT_NEED_STREAM_OUT = True

prompt = PromptTemplate(
template_scene=ChatScene.ChatKnowledge.value(),
input_variables=["context", "question"],
response_format=None,
template_define=PROMPT_SCENE_DEFINE,
template=_DEFAULT_TEMPLATE,
- stream_out=PROMPT_NEED_NEED_STREAM_OUT,
+ stream_out=PROMPT_NEED_STREAM_OUT,
output_parser=NormalChatOutputParser(
- sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_NEED_STREAM_OUT
+ sep=PROMPT_SEP, is_stream_out=PROMPT_NEED_STREAM_OUT
),
)
