diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 895eb7b5122..1f1b2ff489f 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -1108,9 +1108,9 @@ def generate_oai_reply(
         extracted_response = self._generate_oai_reply_from_client(
             client, self._oai_system_message + messages, self.client_cache
         )
-        return True, extracted_response
+        return (False, None) if extracted_response is None else (True, extracted_response)
 
-    def _generate_oai_reply_from_client(self, llm_client, messages, cache):
+    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
         # unroll tool_responses
         all_messages = []
         for message in messages:
@@ -1132,8 +1132,8 @@ def _generate_oai_reply_from_client(self, llm_client, messages, cache):
         extracted_response = llm_client.extract_text_or_completion_object(response)[0]
 
         if extracted_response is None:
-            warnings.warn("Extracted_response is None.", UserWarning)
-            return False, None
+            warnings.warn(f"Extracted_response from {response} is None.", UserWarning)
+            return None
         # ensure function and tool calls will be accepted when sent back to the LLM
         if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
             extracted_response = model_dump(extracted_response)
diff --git a/test/agentchat/test_assistant_agent.py b/test/agentchat/test_assistant_agent.py
index 63cfff5e22a..af953d47c8a 100644
--- a/test/agentchat/test_assistant_agent.py
+++ b/test/agentchat/test_assistant_agent.py
@@ -68,18 +68,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
-        },
+        filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
     )
     llm_config = {
         "cache_seed": 42,
@@ -206,8 +195,8 @@ def generate_init_message(self, question) -> str:
 
 
 if __name__ == "__main__":
-    # test_gpt35()
-    test_create_execute_script(human_input_mode="TERMINATE")
+    test_gpt35()
+    # test_create_execute_script(human_input_mode="TERMINATE")
     # when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
     # should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
     # although the max_consecutive_auto_reply is set to 10.
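Note on the `conversable_agent.py` change above: `_generate_oai_reply_from_client` now signals an unusable completion by returning `None`, and `generate_oai_reply` translates that into the `(False, None)` tuple expected by the registered-reply dispatch loop. A minimal caller-side sketch under that contract (the surrounding setup is illustrative, not part of this patch):

```python
# Illustrative only: `agent` is any ConversableAgent with an LLM configured,
# and `messages` is an OpenAI-style list of message dicts.
final, reply = agent.generate_oai_reply(messages=messages)
if final:
    # reply is a str, or a dict carrying tool/function calls
    print(reply)
else:
    # The client produced nothing usable; with this patch the caller sees
    # (False, None) instead of (True, None), so the dispatch loop can fall
    # through to the next registered reply function.
    print("no usable LLM response")
```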
diff --git a/test/agentchat/test_cache_agent.py b/test/agentchat/test_cache_agent.py
index 8da9a919f84..52d5a11e102 100644
--- a/test/agentchat/test_cache_agent.py
+++ b/test/agentchat/test_cache_agent.py
@@ -6,6 +6,7 @@
 import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent
 from autogen.cache import Cache
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST, here
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 from conftest import skip_openai, skip_redis  # noqa: E402
@@ -107,23 +108,11 @@ def test_disk_cache():
 
 
 def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_reply=5, cache=None):
-    KEY_LOC = "notebook"
-    OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
-    here = os.path.abspath(os.path.dirname(__file__))
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
@@ -159,7 +148,7 @@ def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_
 
     # track how long this takes
     user.initiate_chat(assistant, message=coding_task, cache=cache)
-    return user.chat_messages[list(user.chat_messages.keys())[-0]]
+    return user.chat_messages[assistant]
 
 
 def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_auto_reply=5):
@@ -170,16 +159,7 @@ def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
diff --git a/test/oai/test_client_stream.py b/test/oai/test_client_stream.py
index a3bfc6161a6..af03824495c 100644
--- a/test/oai/test_client_stream.py
+++ b/test/oai/test_client_stream.py
@@ -233,7 +233,7 @@ def test_chat_tools_stream() -> None:
     config_list = config_list_from_json(
         env_or_file=OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
+        filter_dict={"tags": ["multitool"]},
     )
     tools = [
         {
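All three test files switch from enumerating model names to tag-based filtering, which assumes the entries in OAI_CONFIG_LIST carry a matching `tags` field. A sketch of how such entries interact with `filter_dict` (the concrete keys, models, and API keys here are illustrative; this relies on `autogen.filter_config` matching list-valued config fields by non-empty intersection):

```python
import autogen

# Hypothetical OAI_CONFIG_LIST contents; "tags" is the field the updated tests filter on.
config_list = [
    {"model": "gpt-3.5-turbo", "api_key": "sk-...", "tags": ["gpt-3.5-turbo"]},
    {"model": "gpt-35-turbo", "api_key": "sk-...", "tags": ["gpt-3.5-turbo"]},  # Azure alias, same tag
    {"model": "gpt-3.5-turbo-16k", "api_key": "sk-...", "tags": ["gpt-3.5-turbo-16k", "multitool"]},
    {"model": "gpt-4", "api_key": "sk-..."},  # untagged: excluded by a tags filter
]

# Keeps the first three entries: their "tags" lists intersect the requested values.
filtered = autogen.filter_config(config_list, filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]})
print([c["model"] for c in filtered])
```

Filtering on tags rather than model names decouples the tests from the ever-growing list of deployment aliases (e.g. Azure's `gpt-35-turbo` vs. OpenAI's `gpt-3.5-turbo`): the config file, not each test, decides which entries qualify.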