diff --git a/.env.template b/.env.template
index 1183505d9..1a8a7c7e0 100644
--- a/.env.template
+++ b/.env.template
@@ -21,7 +21,7 @@ WEB_SERVER_PORT=7860
 #*******************************************************************#
 #**                        LLM MODELS                             **#
 #*******************************************************************#
-# LLM_MODEL, see /pilot/configs/model_config.LLM_MODEL_CONFIG
+# LLM_MODEL, see dbgpt/configs/model_config.LLM_MODEL_CONFIG
 LLM_MODEL=vicuna-13b-v1.5
 ## LLM model path, by default, DB-GPT will read the model path from LLM_MODEL_CONFIG based on the LLM_MODEL.
 ## Of course you can specify your model path according to LLM_MODEL_PATH
@@ -29,7 +29,7 @@ LLM_MODEL=vicuna-13b-v1.5
 ## 1. environment variable with key: {LLM_MODEL}_MODEL_PATH (Avoid multi-model conflicts)
 ## 2. environment variable with key: MODEL_PATH
 ## 3. environment variable with key: LLM_MODEL_PATH
-## 4. the config in /pilot/configs/model_config.LLM_MODEL_CONFIG
+## 4. the config in dbgpt/configs/model_config.LLM_MODEL_CONFIG
 # LLM_MODEL_PATH=/app/models/vicuna-13b-v1.5
 # LLM_PROMPT_TEMPLATE=vicuna_v1.1
 MODEL_SERVER=http://127.0.0.1:8000
@@ -47,7 +47,7 @@ QUANTIZE_8bit=True
 # PROXYLLM_BACKEND=
 
 ### You can configure parameters for a specific model with {model name}_{config key}=xxx
-### See /pilot/model/parameter.py
+### See dbgpt/model/parameter.py
 ## prompt template for current model
 # llama_cpp_prompt_template=vicuna_v1.1
 ## llama-2-70b must be 8
@@ -86,7 +86,7 @@ KNOWLEDGE_SEARCH_REWRITE=False
 # EMBEDDING_TOKENIZER=all-MiniLM-L6-v2
 # EMBEDDING_TOKEN_LIMIT=8191
 
-## Openai embedding model, See /pilot/model/parameter.py
+## Openai embedding model, See dbgpt/model/parameter.py
 # EMBEDDING_MODEL=proxy_openai
 # proxy_openai_proxy_server_url=https://api.openai.com/v1
 # proxy_openai_proxy_api_key={your-openai-sk}
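
The template comments in the second hunk spell out a four-step lookup order for the model path. A minimal Python sketch of that priority chain follows; the function name `resolve_model_path`, the direct `os.environ` reads, and the inline `LLM_MODEL_CONFIG` stand-in are illustrative assumptions, not DB-GPT's actual code (the real mapping lives in dbgpt/configs/model_config.py):

```python
import os
from typing import Optional

# Illustrative stand-in for dbgpt/configs/model_config.LLM_MODEL_CONFIG,
# which maps model names to their on-disk paths.
LLM_MODEL_CONFIG = {
    "vicuna-13b-v1.5": "/app/models/vicuna-13b-v1.5",
}


def resolve_model_path(llm_model: str) -> Optional[str]:
    """Hypothetical helper mirroring the lookup order documented in .env.template:

    1. {LLM_MODEL}_MODEL_PATH  (per-model key, avoids multi-model conflicts)
    2. MODEL_PATH
    3. LLM_MODEL_PATH
    4. the LLM_MODEL_CONFIG mapping in dbgpt/configs/model_config
    """
    for key in (f"{llm_model}_MODEL_PATH", "MODEL_PATH", "LLM_MODEL_PATH"):
        path = os.environ.get(key)
        if path:
            return path
    return LLM_MODEL_CONFIG.get(llm_model)


# e.g. with no *_MODEL_PATH variables set, this falls through to the config map:
print(resolve_model_path(os.environ.get("LLM_MODEL", "vicuna-13b-v1.5")))
```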
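
The third hunk also documents the `{model name}_{config key}=xxx` convention for per-model overrides (the template's own example is `llama_cpp_prompt_template=vicuna_v1.1`). The helper below is a hedged sketch of how such a key could be read; it is an assumption for illustration, not the parser in dbgpt/model/parameter.py:

```python
import os
from typing import Optional


def model_param(model_name: str, config_key: str,
                default: Optional[str] = None) -> Optional[str]:
    """Hypothetical reader for {model name}_{config key}=xxx overrides.

    Checks the model-scoped key first, then falls back to the bare
    config key, then to the supplied default.
    """
    scoped = os.environ.get(f"{model_name}_{config_key}")
    if scoped is not None:
        return scoped
    return os.environ.get(config_key, default)


# e.g. with llama_cpp_prompt_template=vicuna_v1.1 in the environment:
#   model_param("llama_cpp", "prompt_template")  ->  "vicuna_v1.1"
```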