print next speaker (#2800)
* print next speaker

* fix test error
sonichi authored and victordibia committed Jul 30, 2024
1 parent 1efa0bd commit 14dd31b
Showing 3 changed files with 28 additions and 23 deletions.
3 changes: 3 additions & 0 deletions autogen/agentchat/groupchat.py

@@ -1051,6 +1051,9 @@ def run_chat(
             try:
                 # select the next speaker
                 speaker = groupchat.select_speaker(speaker, self)
+                if not silent:
+                    iostream = IOStream.get_default()
+                    iostream.print(colored(f"\nNext speaker: {speaker.name}\n", "green"), flush=True)
                 # let the speaker speak
                 reply = speaker.generate_reply(sender=self)
             except KeyboardInterrupt:
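The added lines announce which agent will speak next before its reply is generated, and stay quiet when the chat runs silently. A minimal standalone sketch of the same pattern, assuming autogen's IOStream and a termcolor-style colored() helper (the announce_next_speaker wrapper is hypothetical, for illustration only):

from termcolor import colored

from autogen.io import IOStream


def announce_next_speaker(name: str, silent: bool = False) -> None:
    # Mirrors the inserted lines: announce only when the chat is not silent.
    if not silent:
        iostream = IOStream.get_default()
        iostream.print(colored(f"\nNext speaker: {name}\n", "green"), flush=True)


announce_next_speaker("assistant")  # prints "Next speaker: assistant" in green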
35 changes: 19 additions & 16 deletions test/oai/test_client.py

@@ -7,7 +7,7 @@
 
 import pytest
 
-from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
+from autogen import OpenAIWrapper, config_list_from_json
 from autogen.cache.cache import Cache
 from autogen.oai.client import LEGACY_CACHE_DIR, LEGACY_DEFAULT_CACHE_SEED
 
@@ -104,10 +104,11 @@ def test_chat_completion():
 
 @pytest.mark.skipif(skip, reason="openai>=1 not installed")
 def test_completion():
-    config_list = config_list_openai_aoai(KEY_LOC)
+    config_list = config_list_from_json(
+        env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo-instruct"]}
+    )
     client = OpenAIWrapper(config_list=config_list)
-    model = "gpt-3.5-turbo-instruct"
-    response = client.create(prompt="1+1=", model=model)
+    response = client.create(prompt="1+1=")
     print(response)
     print(client.extract_text_or_completion_object(response))
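The tests now resolve model configs by tag through config_list_from_json instead of config_list_openai_aoai, which also makes the explicit model argument to create() unnecessary. A sketch of that lookup, assuming an OAI_CONFIG_LIST file whose entries carry a matching "tags" field (the api_key shown is a placeholder):

from autogen import OpenAIWrapper, config_list_from_json

# Each OAI_CONFIG_LIST entry is a dict such as:
#   {"model": "gpt-3.5-turbo-instruct", "api_key": "sk-...", "tags": ["gpt-3.5-turbo-instruct"]}
# filter_dict keeps only entries whose "tags" field matches the requested values.
config_list = config_list_from_json(
    env_or_file="OAI_CONFIG_LIST",
    filter_dict={"tags": ["gpt-3.5-turbo-instruct"]},
)
client = OpenAIWrapper(config_list=config_list)
response = client.create(prompt="1+1=")
print(client.extract_text_or_completion_object(response))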
@@ -121,19 +122,21 @@ def test_completion():
     ],
 )
 def test_cost(cache_seed):
-    config_list = config_list_openai_aoai(KEY_LOC)
-    model = "gpt-3.5-turbo-instruct"
+    config_list = config_list_from_json(
+        env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo-instruct"]}
+    )
     client = OpenAIWrapper(config_list=config_list, cache_seed=cache_seed)
-    response = client.create(prompt="1+3=", model=model)
+    response = client.create(prompt="1+3=")
     print(response.cost)
 
 
 @pytest.mark.skipif(skip, reason="openai>=1 not installed")
 def test_usage_summary():
-    config_list = config_list_openai_aoai(KEY_LOC)
+    config_list = config_list_from_json(
+        env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo-instruct"]}
+    )
     client = OpenAIWrapper(config_list=config_list)
-    model = "gpt-3.5-turbo-instruct"
-    response = client.create(prompt="1+3=", model=model, cache_seed=None)
+    response = client.create(prompt="1+3=", cache_seed=None)
 
     # usage should be recorded
     assert client.actual_usage_summary["total_cost"] > 0, "total_cost should be greater than 0"
@@ -148,14 +151,14 @@ def test_usage_summary():
     assert client.total_usage_summary is None, "total_usage_summary should be None"
 
     # actual usage and all usage should be different
-    response = client.create(prompt="1+3=", model=model, cache_seed=42)
+    response = client.create(prompt="1+3=", cache_seed=42)
     assert client.total_usage_summary["total_cost"] > 0, "total_cost should be greater than 0"
     client.clear_usage_summary()
-    response = client.create(prompt="1+3=", model=model, cache_seed=42)
+    response = client.create(prompt="1+3=", cache_seed=42)
     assert client.actual_usage_summary is None, "No actual cost should be recorded"
 
     # check update
-    response = client.create(prompt="1+3=", model=model, cache_seed=42)
+    response = client.create(prompt="1+3=", cache_seed=42)
     assert (
         client.total_usage_summary["total_cost"] == response.cost * 2
     ), "total_cost should be equal to response.cost * 2"
@@ -303,8 +306,8 @@ def test_cache():
     # test_aoai_chat_completion()
     # test_oai_tool_calling_extraction()
     # test_chat_completion()
-    # test_completion()
+    test_completion()
     # # test_cost()
     # test_usage_summary()
-    test_legacy_cache()
-    test_cache()
+    # test_legacy_cache()
+    # test_cache()
13 changes: 6 additions & 7 deletions test/oai/test_client_stream.py

@@ -1,14 +1,13 @@
 #!/usr/bin/env python3 -m pytest
 
-import json
 import os
 import sys
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from unittest.mock import MagicMock
 
 import pytest
 
-from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
+from autogen import OpenAIWrapper, config_list_from_json
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 from conftest import skip_openai  # noqa: E402
@@ -280,11 +279,11 @@ def test_chat_tools_stream() -> None:
 
 @pytest.mark.skipif(skip, reason="openai>=1 not installed")
 def test_completion_stream() -> None:
-    config_list = config_list_openai_aoai(KEY_LOC)
+    config_list = config_list_from_json(
+        env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo-instruct"]}
+    )
     client = OpenAIWrapper(config_list=config_list)
-    # Azure can't have dot in model/deployment name
-    model = "gpt-35-turbo-instruct" if config_list[0].get("api_type") == "azure" else "gpt-3.5-turbo-instruct"
-    response = client.create(prompt="1+1=", model=model, stream=True)
+    response = client.create(prompt="1+1=", stream=True)
     print(response)
     print(client.extract_text_or_completion_object(response))

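With the hard-coded Azure model-name workaround removed, the streaming test relies on the same tag filter. A sketch of streaming through the wrapper, assuming create(stream=True) returns the fully aggregated response once streaming completes (as the test's print calls imply):

from autogen import OpenAIWrapper, config_list_from_json

# Same tag-filtered lookup as the sketches above.
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST", filter_dict={"tags": ["gpt-3.5-turbo-instruct"]})
client = OpenAIWrapper(config_list=config_list)
# Chunks are echoed as they arrive; the return value is the completed response.
response = client.create(prompt="1+1=", stream=True)
print(client.extract_text_or_completion_object(response))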