diff --git a/slack_bot/slack_bot/models/__init__.py b/slack_bot/slack_bot/models/__init__.py
index 6ca66ca8..12898151 100644
--- a/slack_bot/slack_bot/models/__init__.py
+++ b/slack_bot/slack_bot/models/__init__.py
@@ -1,7 +1,7 @@
 from .base import ResponseModel
 from .chat_completion import ChatCompletionAzure, ChatCompletionOpenAI
 from .hello import Hello
-from .llama_index import LlamaIndexGPTAzure, LlamaIndexGPTOpenAI, LlamaIndexHF
+from .llama import LlamaIndexGPTAzure, LlamaIndexGPTOpenAI, LlamaIndexHF
 
 # Please ensure that any models needing OPENAI_API_KEY are named *openai*
 # Please ensure that any models needing OPENAI_AZURE_API_BASE and OPENAI_AZURE_API_KEY are named *azure*
diff --git a/slack_bot/slack_bot/models/llama_index.py b/slack_bot/slack_bot/models/llama.py
similarity index 98%
rename from slack_bot/slack_bot/models/llama_index.py
rename to slack_bot/slack_bot/models/llama.py
index d28c8882..eba2e416 100644
--- a/slack_bot/slack_bot/models/llama_index.py
+++ b/slack_bot/slack_bot/models/llama.py
@@ -361,7 +361,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.temperature = 0.7
         super().__init__(
-            *args, model_name="gpt-3.5-turbo-16k", max_input_size=16384, **kwargs
+            *args, model_name="gpt-3.5-turbo", max_input_size=16384, **kwargs
         )
 
     def _prep_llm(self) -> LLM:
@@ -390,7 +390,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         self.openai_api_version = "2023-03-15-preview"
         self.temperature = 0.7
         super().__init__(
-            *args, model_name="gpt-3.5-turbo-16k", max_input_size=16384, **kwargs
+            *args, model_name="gpt-3.5-turbo", max_input_size=16384, **kwargs
         )
 
     def _prep_llm(self) -> LLM:
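
For reference, a minimal sketch of how the re-exported classes are consumed after the rename. Only the import path and the naming convention come from models/__init__.py in the patch above; the call site itself is illustrative, and the real constructors may require additional arguments.

# Hypothetical call site; only the import path is taken from the patch.
from slack_bot.models import LlamaIndexGPTAzure, LlamaIndexGPTOpenAI

# Per the comments in models/__init__.py, models named *openai* read
# OPENAI_API_KEY from the environment, and models named *azure* read
# OPENAI_AZURE_API_BASE and OPENAI_AZURE_API_KEY.
model = LlamaIndexGPTOpenAI()  # constructor arguments assumed; not shown in the diff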