From b205ca4d2ee20693beccdea08193cee472445c8f Mon Sep 17 00:00:00 2001 From: RohitRathore1 Date: Sat, 24 Feb 2024 09:39:17 +0000 Subject: [PATCH 1/2] make default model of a constant class variable --- .../agentchat/contrib/gpt_assistant_agent.py | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index a6ab3372f87..ed919538e57 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -3,7 +3,6 @@ import json import time import logging -import copy from autogen import OpenAIWrapper from autogen.oai.openai_utils import retrieve_assistants_by_name @@ -21,6 +20,8 @@ class GPTAssistantAgent(ConversableAgent): This agent is unique in its reliance on the OpenAI Assistant for state management, differing from other agents like ConversableAgent. """ + DEFAULT_MODEL_NAME = "gpt-4-0125-preview" + def __init__( self, name="GPT Assistant", @@ -53,26 +54,25 @@ def __init__( - verbose (bool): If set to True, enables more detailed output from the assistant thread. - Other kwargs: Except verbose, others are passed directly to ConversableAgent. 
""" - - self._verbose = kwargs.pop("verbose", False) - super().__init__( - name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs - ) - if llm_config is False: raise ValueError("llm_config=False is not supported for GPTAssistantAgent.") - # Use AutoGen OpenAIWrapper to create a client - model_name = "gpt-4-0125-preview" - openai_client_cfg = copy.deepcopy(llm_config) - # GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list() - # See: https://github.com/microsoft/autogen/pull/1721 - if openai_client_cfg.get("config_list") is not None and len(openai_client_cfg["config_list"]) > 0: - model_name = openai_client_cfg["config_list"][0].pop("model", "gpt-4-0125-preview") - else: - model_name = openai_client_cfg.pop("model", "gpt-4-0125-preview") - - logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name) + openai_client_cfg = {} + # USe the class variable + model_name = GPTAssistantAgent.DEFAULT_MODEL_NAME + + # if llm_config and llm_config.get("config_list") is not None and len(llm_config["config_list"]) > 0: + # openai_client_cfg = llm_config["config_list"][0].copy() + # model_name = openai_client_cfg.pop("model", "gpt-4-1106-preview") + if llm_config and "config_list" in llm_config and len(llm_config["config_list"]) > 0: + openai_client_cfg = llm_config["config_list"][0].copy() + model_name = openai_client_cfg.pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME) + elif llm_config and "model" in llm_config: + model_name = llm_config.pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME) + + # Ensure openai_client_cfg is not None for OpenAIWrapper initialization + if not openai_client_cfg: + openai_client_cfg = {"model": model_name} oai_wrapper = OpenAIWrapper(**openai_client_cfg) if len(oai_wrapper._clients) > 1: @@ -158,6 +158,11 @@ def __init__( # Tools are specified but overwrite_tools is False; do not update the assistant's tools 
logger.warning("overwrite_tools is False. Using existing tools from assistant API.") + self._verbose = kwargs.pop("verbose", False) + super().__init__( + name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs + ) + # lazily create threads self._openai_threads = {} self._unread_index = defaultdict(int) From 8b44b3554a678ceb9ca3190c05bf0796256fb2ec Mon Sep 17 00:00:00 2001 From: RohitRathore1 Date: Mon, 26 Feb 2024 04:51:41 +0000 Subject: [PATCH 2/2] make default model a constant class variable --- .../agentchat/contrib/gpt_assistant_agent.py | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index ed919538e57..c4c73ed2c51 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -3,6 +3,7 @@ import json import time import logging +import copy from autogen import OpenAIWrapper from autogen.oai.openai_utils import retrieve_assistants_by_name @@ -54,25 +55,27 @@ def __init__( - verbose (bool): If set to True, enables more detailed output from the assistant thread. - Other kwargs: Except verbose, others are passed directly to ConversableAgent. 
""" + + self._verbose = kwargs.pop("verbose", False) + super().__init__( + name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs + ) + if llm_config is False: raise ValueError("llm_config=False is not supported for GPTAssistantAgent.") - # Use AutoGen OpenAIWrapper to create a client - openai_client_cfg = {} - # USe the class variable + # Use AutooGen OpenAIWrapper to create a client + openai_client_cfg = copy.deepcopy(llm_config) + # Use the class variable model_name = GPTAssistantAgent.DEFAULT_MODEL_NAME - # if llm_config and llm_config.get("config_list") is not None and len(llm_config["config_list"]) > 0: - # openai_client_cfg = llm_config["config_list"][0].copy() - # model_name = openai_client_cfg.pop("model", "gpt-4-1106-preview") - if llm_config and "config_list" in llm_config and len(llm_config["config_list"]) > 0: - openai_client_cfg = llm_config["config_list"][0].copy() + # GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list() + # See: https://github.com/microsoft/autogen/pull/1721 + if openai_client_cfg.get("config_list") is not None and len(openai_client_cfg["config_list"]) > 0: + model_name = openai_client_cfg["config_list"][0].pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME) + else: model_name = openai_client_cfg.pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME) - elif llm_config and "model" in llm_config: - model_name = llm_config.pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME) - # Ensure openai_client_cfg is not None for OpenAIWrapper initialization - if not openai_client_cfg: - openai_client_cfg = {"model": model_name} + logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name) oai_wrapper = OpenAIWrapper(**openai_client_cfg) if len(oai_wrapper._clients) > 1: @@ -158,11 +161,6 @@ def __init__( # Tools are specified but overwrite_tools is False; do not update the assistant's tools logger.warning("overwrite_tools is 
False. Using existing tools from assistant API.") - self._verbose = kwargs.pop("verbose", False) - super().__init__( - name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs - ) - # lazily create threads self._openai_threads = {} self._unread_index = defaultdict(int)