Commit
chore(llm): add backup key
booboosui committed Jun 28, 2024
1 parent 7635681 commit d6d8395
Showing 4 changed files with 29 additions and 4 deletions.
6 changes: 3 additions & 3 deletions backend/app/pkgs/tools/llm.py
@@ -12,16 +12,16 @@ def chatCompletion(context, fackData="", bill: bool = True):
    message = ""
    success = False
    try:
-       message, total_tokens, success = obj.chatCompletion(context, fackData, bill)
+       message, total_tokens, success = obj.chatCompletion(context, fackData, False, bill)
    except Exception as e:
        print("chatCompletion failed 1 time:" + str(e))
        try:
-           message, total_tokens, success = obj.chatCompletion(context, fackData, bill)
+           message, total_tokens, success = obj.chatCompletion(context, fackData, False, bill)
        except Exception as e:
            print("chatCompletion failed 2 time:" + str(e))
            traceback.print_exc()
            try:
-               message, total_tokens, success = obj.chatCompletion(context, fackData, bill)
+               message, total_tokens, success = obj.chatCompletion(context, fackData, True, bill)
            except Exception as e:
                print("chatCompletion failed 2 time:" + str(e))
                traceback.print_exc()
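The hunk above only threads a new use_backup_keys flag through the existing retry chain: the first two attempts pass False and the final attempt passes True, so the backup pool is consulted only after the primary keys have failed twice. A minimal sketch of that flow, assuming obj is an LLMBase instance and that the return tuple is (message, total_tokens, success) as in the diff; the chat_with_fallback helper name is illustrative, not part of the repository:

def chat_with_fallback(obj, context, fackData="", bill=True):
    # Attempts 1 and 2 use the primary keys; attempt 3 flips use_backup_keys to True.
    for attempt, use_backup in enumerate((False, False, True), start=1):
        try:
            return obj.chatCompletion(context, fackData, use_backup, bill)
        except Exception as e:
            print(f"chatCompletion failed {attempt} time: {e}")
    return "", 0, False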
2 changes: 1 addition & 1 deletion backend/app/pkgs/tools/llm_basic.py
@@ -38,7 +38,7 @@ def get_next_api_key():
        return get_next_api_key()

class LLMBase(LLMInterface):
-   def chatCompletion(self, context, fackData, bill):
+   def chatCompletion(self, context, fackData, use_backup_keys, bill):
        # Test frontend
        if MODE == "FAKE" and len(fackData) > 0:
            time.sleep(5)
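The signature change above adds use_backup_keys to LLMBase.chatCompletion, but the diff does not show how the backup pool is chosen. A hypothetical sketch of key selection under that flag, assuming GPT_KEYS and GPT_KEYS_BACKUP are dicts shaped like the env.yaml.tpl blocks below; the pick_api_key helper and its least-recently-used policy are assumptions, not the repository's get_next_api_key logic:

import time

def pick_api_key(provider, use_backup_keys, gpt_keys, gpt_keys_backup):
    # Select the pool according to the flag introduced in this commit.
    pool = gpt_keys_backup if use_backup_keys else gpt_keys
    entries = pool[provider]["keys"]  # e.g. [{"sk-xxxx": {"count": 0, "timestamp": 0}}]
    # Pick the key with the oldest timestamp and record the use.
    entry = min(entries, key=lambda e: next(iter(e.values()))["timestamp"])
    key, stats = next(iter(entry.items()))
    stats["count"] += 1
    stats["timestamp"] = int(time.time())
    return key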
1 change: 1 addition & 0 deletions backend/config.py
@@ -69,6 +69,7 @@ def read_config(key):
SQLALCHEMY_DATABASE_URI = read_config("SQLALCHEMY_DATABASE_URI")
GPT_KEYS = json.loads(read_config("GPT_KEYS"))
LLM_MODEL = read_config("LLM_MODEL")
+GPT_KEYS_BACKUP = json.loads(read_config("GPT_KEYS_BACKUP"))
MODE = read_config("MODE")
GRADE = read_config("GRADE")
AUTO_LOGIN = read_config("AUTO_LOGIN")
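config.py now parses GPT_KEYS_BACKUP with json.loads at import time, so a missing or empty GPT_KEYS_BACKUP entry in env.yaml will fail as soon as the backend starts. A hedged sketch of a more tolerant load, assuming read_config returns the raw string for a key as it does for GPT_KEYS; the fallback to the primary pool is illustrative only, the commit itself simply requires the new entry to exist:

import json

def load_backup_keys(read_config):
    # Fall back to the primary key pool when no backup pool is configured.
    raw = read_config("GPT_KEYS_BACKUP")
    return json.loads(raw) if raw else json.loads(read_config("GPT_KEYS"))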
24 changes: 24 additions & 0 deletions env.yaml.tpl
@@ -24,6 +24,30 @@ GPT_KEYS: |
    }
  }

+GPT_KEYS_BACKUP: |
+  {
+    "openai": {
+      "keys": [
+        {"sk-xxxx": {"count": 0, "timestamp": 0}}
+      ],
+      "api_type": "open_ai",
+      "api_base": "https://api.openai.com/v1",
+      "api_version": "2020-11-07",
+      "proxy": "None"
+    }
+    ,
+    "azure": {
+      "keys": [
+        {"sk-xxxx": {"count": 0, "timestamp": 0}}
+      ],
+      "api_type": "azure",
+      "api_base": "https://example-gpt.openai.azure.com/",
+      "api_version": "2023-05-15",
+      "deployment_id": "deployment-name",
+      "proxy": "None"
+    }
+  }
+
# Configure the model to use (do not use a model with a context window smaller than 16k tokens). [Note] An OpenAI Plus subscription and API access are separate products: being a Plus member does not by itself let you call the gpt-4 model via the API; consult OpenAI's official documentation for details.
LLM_MODEL: "gpt-3.5-turbo-16k-0613"
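GPT_KEYS_BACKUP mirrors GPT_KEYS: a JSON document embedded in a YAML block scalar, with one entry per provider. A small sketch that checks a filled-in value has the fields the template declares; the field names come from the template above, and the validator itself is not part of the repository:

import json

REQUIRED_FIELDS = {"keys", "api_type", "api_base", "api_version"}

def validate_backup_keys(raw_json: str) -> None:
    # raw_json is the string value of GPT_KEYS_BACKUP read from env.yaml.
    cfg = json.loads(raw_json)
    for provider, settings in cfg.items():
        missing = REQUIRED_FIELDS - settings.keys()
        if missing:
            raise ValueError(f"{provider}: missing fields {sorted(missing)}")
        if not settings["keys"]:
            raise ValueError(f"{provider}: no API keys configured")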
