return None instead of tuple in _generate_oai_reply_from_client #1644

Merged · 3 commits · Feb 12, 2024
8 changes: 4 additions & 4 deletions autogen/agentchat/conversable_agent.py
@@ -1108,9 +1108,9 @@ def generate_oai_reply(
         extracted_response = self._generate_oai_reply_from_client(
             client, self._oai_system_message + messages, self.client_cache
         )
-        return True, extracted_response
+        return (False, None) if extracted_response is None else (True, extracted_response)

-    def _generate_oai_reply_from_client(self, llm_client, messages, cache):
+    def _generate_oai_reply_from_client(self, llm_client, messages, cache) -> Union[str, Dict, None]:
         # unroll tool_responses
         all_messages = []
         for message in messages:
@@ -1132,8 +1132,8 @@ def _generate_oai_reply_from_client(self, llm_client, messages, cache):
         extracted_response = llm_client.extract_text_or_completion_object(response)[0]

         if extracted_response is None:
-            warnings.warn("Extracted_response is None.", UserWarning)
-            return False, None
+            warnings.warn(f"Extracted_response from {response} is None.", UserWarning)
+            return None
         # ensure function and tool calls will be accepted when sent back to the LLM
         if not isinstance(extracted_response, str) and hasattr(extracted_response, "model_dump"):
             extracted_response = model_dump(extracted_response)
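For context: judging from the diff, _generate_oai_reply_from_client previously returned the tuple (False, None) when extraction failed, which generate_oai_reply then wrapped as (True, (False, None)), i.e. a failure disguised as a valid reply. The helper now returns a bare None, and only the caller builds the (handled, reply) tuple. A minimal runnable sketch of the new contract, with a stub standing in for autogen's OpenAIWrapper (the stub and the simplified signatures are illustrative, not the real API):

from typing import Dict, Tuple, Union

class StubClient:
    # Stands in for autogen's OpenAIWrapper in this sketch.
    def create(self, messages, cache=None):
        return {"choices": []}  # pretend the model returned nothing usable

    def extract_text_or_completion_object(self, response):
        return [None]  # extraction failed

def _generate_oai_reply_from_client(llm_client, messages, cache) -> Union[str, Dict, None]:
    response = llm_client.create(messages=messages, cache=cache)
    extracted = llm_client.extract_text_or_completion_object(response)[0]
    return extracted  # a str, a dict, or a bare None on failure

def generate_oai_reply(client, messages, cache=None) -> Tuple[bool, Union[str, Dict, None]]:
    extracted = _generate_oai_reply_from_client(client, messages, cache)
    # The caller, not the helper, converts failure into the (handled, reply) tuple.
    return (False, None) if extracted is None else (True, extracted)

assert generate_oai_reply(StubClient(), []) == (False, None)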
17 changes: 3 additions & 14 deletions test/agentchat/test_assistant_agent.py
@@ -68,18 +68,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
-        },
+        filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
     )
     llm_config = {
         "cache_seed": 42,
@@ -206,8 +195,8 @@ def generate_init_message(self, question) -> str:


 if __name__ == "__main__":
-    # test_gpt35()
-    test_create_execute_script(human_input_mode="TERMINATE")
+    test_gpt35()
+    # test_create_execute_script(human_input_mode="TERMINATE")
     # when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
     # should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
     # although the max_consecutive_auto_reply is set to 10.
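The filter change above swaps a brittle set of model-name spellings for tag-based selection. Assuming tags work as documented in autogen, each entry in the OAI_CONFIG_LIST can carry a "tags" list, and a filter_dict entry matches a config when the two lists intersect. A small sketch using autogen.filter_config, the in-memory counterpart of the file-based lookup (the config entries here are hypothetical):

import autogen

config_list = [
    {"model": "gpt-4", "api_key": "sk-..."},
    {"model": "gpt-35-turbo", "api_key": "sk-...", "tags": ["gpt-3.5-turbo"]},
    {"model": "gpt-3.5-turbo-16k", "api_key": "sk-...", "tags": ["gpt-3.5-turbo-16k"]},
]

# Keep entries whose "tags" list intersects the requested tags; an Azure
# deployment name no longer has to match any particular model spelling.
filtered = autogen.filter_config(
    config_list, filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]}
)
assert [c["model"] for c in filtered] == ["gpt-35-turbo", "gpt-3.5-turbo-16k"]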
28 changes: 4 additions & 24 deletions test/agentchat/test_cache_agent.py
@@ -6,6 +6,7 @@
 import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent
 from autogen.cache import Cache
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST, here

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 from conftest import skip_openai, skip_redis  # noqa: E402
@@ -107,23 +108,11 @@ def test_disk_cache():


 def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_reply=5, cache=None):
-    KEY_LOC = "notebook"
-    OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
-    here = os.path.abspath(os.path.dirname(__file__))
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
@@ -159,7 +148,7 @@ def run_conversation(cache_seed, human_input_mode="NEVER", max_consecutive_auto_reply=5, cache=None):

     # track how long this takes
     user.initiate_chat(assistant, message=coding_task, cache=cache)
-    return user.chat_messages[list(user.chat_messages.keys())[-0]]
+    return user.chat_messages[assistant]


 def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_auto_reply=5):
@@ -170,16 +159,7 @@ def run_groupchat_conversation(cache, human_input_mode="NEVER", max_consecutive_auto_reply=5):
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": {
-                "gpt-3.5-turbo",
-                "gpt-35-turbo",
-                "gpt-3.5-turbo-16k",
-                "gpt-3.5-turbo-16k-0613",
-                "gpt-3.5-turbo-0301",
-                "chatgpt-35-turbo-0301",
-                "gpt-35-turbo-v0301",
-                "gpt",
-            },
+            "tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"],
         },
     )
     llm_config = {
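Two fixes worth noting in this file. First, KEY_LOC, OAI_CONFIG_LIST, and here are now imported from test_assistant_agent instead of being redefined locally. Second, chat_messages on a ConversableAgent is a dict keyed by the peer agent, so user.chat_messages[assistant] says exactly what it means; the old list(user.chat_messages.keys())[-0] was just index 0 (in Python, -0 == 0), i.e. "whichever agent happens to come first", which only worked by accident. A tiny self-contained illustration (the Agent class and message data here are hypothetical stand-ins):

class Agent:
    def __init__(self, name: str):
        self.name = name

assistant = Agent("assistant")
# chat_messages maps each peer agent to the messages exchanged with it.
chat_messages = {assistant: [{"role": "assistant", "content": "42"}]}

assert -0 == 0  # [-0] never meant "last"; it is simply the first key
fragile = chat_messages[list(chat_messages.keys())[-0]]  # depends on dict order
explicit = chat_messages[assistant]                      # intention-revealing
assert fragile == explicit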
2 changes: 1 addition & 1 deletion test/oai/test_client_stream.py
@@ -233,7 +233,7 @@ def test_chat_tools_stream() -> None:
     config_list = config_list_from_json(
         env_or_file=OAI_CONFIG_LIST,
         file_location=KEY_LOC,
-        filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
+        filter_dict={"tags": ["multitool"]},
     )
     tools = [
         {
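Same idea here: the streaming tool-call test now picks configs tagged "multitool" instead of matching model names. A hedged sketch of how such a tagged entry might be written and loaded; the file contents and the "multitool" tag placement are hypothetical (in CI the tag presumably lives in the shared OAI_CONFIG_LIST):

import json
import tempfile

import autogen

oai_config = json.dumps(
    [
        {"model": "gpt-3.5-turbo", "api_key": "sk-...", "tags": ["multitool"]},
        {"model": "gpt-3.5-turbo-16k", "api_key": "sk-..."},
    ]
)
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    f.write(oai_config)

config_list = autogen.config_list_from_json(
    env_or_file=f.name, filter_dict={"tags": ["multitool"]}
)
assert [c["model"] for c in config_list] == ["gpt-3.5-turbo"]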