Skip to content

Commit

Permalink
do model check properly (#1686)
Browse files Browse the repository at this point in the history
* do model check properly

* bug in logging test

* set llm_config
  • Loading branch information
sonichi committed Feb 15, 2024
1 parent d677b47 commit cff9ca9
Show file tree
Hide file tree
Showing 5 changed files with 40 additions and 62 deletions.
12 changes: 7 additions & 5 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,11 +144,13 @@ def __init__(
self.llm_config = self.DEFAULT_CONFIG.copy()
if isinstance(llm_config, dict):
self.llm_config.update(llm_config)
# We still have a default `llm_config` because the user didn't
# specify anything. This won't work, so raise an error to avoid
# an obscure message from the OpenAI service.
if self.llm_config == {}:
raise ValueError("Please specify the value for 'llm_config'.")
if "model" not in self.llm_config and (
not self.llm_config.get("config_list")
or any(not config.get("model") for config in self.llm_config["config_list"])
):
raise ValueError(
"Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
)
self.client = OpenAIWrapper(**self.llm_config)

if logging_enabled():
Expand Down
18 changes: 4 additions & 14 deletions autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,27 +361,15 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
if logging_enabled():
log_new_wrapper(self, locals())
openai_config, extra_kwargs = self._separate_openai_config(base_config)
# This *may* work if the `llm_config` has specified the `model` attribute,
# so just warn here.
if type(config_list) is list and len(config_list) == 0:
logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.")
# If the `llm_config` has no `model` then the call will fail. Abort now.
if "model" not in extra_kwargs:
raise ValueError("Please specify a value for the 'model' in 'llm_config'.")
# It's OK if "model" is not provided in base_config or config_list
# Because one can provide "model" at `create` time.

self._clients: List[ModelClient] = []
self._config_list: List[Dict[str, Any]] = []

if config_list:
config_list = [config.copy() for config in config_list] # make a copy before modifying
for config in config_list:
# We require that each element of `config_list` has a non-empty value
# for `model` specified unless `extra_kwargs` contains "model".
model = None
if "model" in config:
model = config["model"]
if "model" not in extra_kwargs and (model is None or len(model) == 0):
raise ValueError("Please specify a non-empty 'model' value for every item in 'config_list'.")
self._register_default_client(config, openai_config) # could modify the config
self._config_list.append(
{**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
Expand Down Expand Up @@ -610,6 +598,7 @@ def yes_or_no_filter(context, response):

if logging_enabled():
# Log the cache hit
# TODO: log the config_id and pass_filter etc.
log_chat_completion(
invocation_id=invocation_id,
client_id=id(client),
Expand Down Expand Up @@ -671,6 +660,7 @@ def yes_or_no_filter(context, response):
cache.set(key, response)

if logging_enabled():
# TODO: log the config_id and pass_filter etc.
log_chat_completion(
invocation_id=invocation_id,
client_id=id(client),
Expand Down
10 changes: 5 additions & 5 deletions test/agentchat/contrib/test_compressible_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,14 +216,14 @@ def test_mode_terminate():
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_new_compressible_agent_description():
assistant = CompressibleAgent(name="assistant", description="this is a description")
assistant = CompressibleAgent(name="assistant", description="this is a description", llm_config=False)

assert assistant.description == "this is a description", "description is not set correctly"


if __name__ == "__main__":
test_mode_compress()
test_mode_customized()
test_compress_message()
test_mode_terminate()
# test_mode_compress()
# test_mode_customized()
# test_compress_message()
# test_mode_terminate()
test_new_compressible_agent_description()
2 changes: 1 addition & 1 deletion test/agentchat/test_agent_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ def test_groupchat_logging(db_connection):
# Verify chat_completions message
cur.execute(CHAT_COMPLETIONS_QUERY)
rows = cur.fetchall()
assert len(rows) == 2 # max_round - 1
assert len(rows) >= 2 # some config may fail

# Verify group chat manager agent
cur.execute(AGENTS_QUERY)
Expand Down
60 changes: 23 additions & 37 deletions test/agentchat/test_conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,7 +474,7 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a
def test_update_function_signature_and_register_functions() -> None:
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})

def exec_python(cell: str) -> None:
pass
Expand Down Expand Up @@ -618,9 +618,9 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]
def test_register_for_llm():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4"}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4"}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4"}]})

@agent3.register_for_llm()
@agent2.register_for_llm(name="python")
Expand Down Expand Up @@ -691,9 +691,9 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
def test_register_for_llm_api_style_function():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4"}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4"}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4"}]})

@agent3.register_for_llm(api_style="function")
@agent2.register_for_llm(name="python", api_style="function")
Expand Down Expand Up @@ -762,7 +762,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
def test_register_for_llm_without_description():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})

with pytest.raises(ValueError) as e:

Expand All @@ -774,33 +774,33 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:


def test_register_for_llm_without_LLM():
try:
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
):
ConversableAgent(name="agent", llm_config=None)
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify the value for 'llm_config'."


def test_register_for_llm_without_configuration():
try:
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
):
ConversableAgent(name="agent", llm_config={"config_list": []})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."


def test_register_for_llm_without_model_name():
try:
ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify a non-empty 'model' value for every item in 'config_list'."
with pytest.raises(
ValueError,
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
):
ConversableAgent(name="agent", llm_config={"config_list": [{"model": ""}]})


def test_register_for_execution():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
user_proxy_1 = UserProxyAgent(name="user_proxy_1")
user_proxy_2 = UserProxyAgent(name="user_proxy_2")

Expand Down Expand Up @@ -835,7 +835,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]):
def test_register_functions():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
user_proxy = UserProxyAgent(name="user_proxy")

def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
Expand Down Expand Up @@ -1028,20 +1028,6 @@ def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."]
stopwatch_mock.assert_called_once_with(num_seconds="5")


@pytest.mark.skipif(
skip,
reason="do not run if skipping openai",
)
def test_no_llm_config():
# We expect a TypeError when the model isn't specified
with pytest.raises(TypeError, match=r".*Missing required arguments.*"):
agent1 = ConversableAgent(name="agent1", llm_config=False, human_input_mode="NEVER", default_auto_reply="")
agent2 = ConversableAgent(
name="agent2", llm_config={"api_key": "Intentionally left blank."}, human_input_mode="NEVER"
)
agent1.initiate_chat(agent2, message="hi")


if __name__ == "__main__":
# test_trigger()
# test_context()
Expand Down

0 comments on commit cff9ca9

Please sign in to comment.