From 265e8ba52afc90b9bc96af016cf4058bda620a28 Mon Sep 17 00:00:00 2001 From: Gunnar Kudrjavets Date: Mon, 12 Feb 2024 12:38:24 -0800 Subject: [PATCH] Validate llm_config passed to ConversableAgent Based on #1522, this commit implements the additional validation checks in `ConversableAgent`. Add the following validation and `raise ValueError` if: - The `llm_config` is `None` (validated in `ConversableAgent`). - The `llm_config` has no `model` specified and `config_list` is empty (validated in `OpenAIWrapper`). - The `config_list` has at least one entry, but not all the entries have the `model` specified (validated in `OpenAIWrapper`). The rest of the changes are code churn to adjust or add the test cases. --- autogen/agentchat/conversable_agent.py | 23 +++++------------------ autogen/oai/client.py | 14 +++++++++++++- test/agentchat/test_conversable_agent.py | 8 ++++++-- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 86ef3db0d295..8c02cb0dff30 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -136,24 +136,6 @@ def __init__( else (lambda x: content_str(x.get("content")) == "TERMINATE") ) - if llm_config is None: - raise ValueError("Please specify the value for 'llm_config'.") - - if isinstance(llm_config, dict): - config_list = None - if "config_list" in llm_config: - config_list = llm_config["config_list"] - if config_list is None or len(config_list) == 0: - raise ValueError("Please specify at least one configuration in 'llm_config'.") - - # We know that there's at least one entry in the configuration. - # Verify that model is specified as well. 
- model = None - if "model" in llm_config["config_list"][0]: - model = llm_config["config_list"][0]["model"] - if model is None or len(model) == 0: - raise ValueError("Please specify a value for the 'model' in 'llm_config'.") - if llm_config is False: self.llm_config = False self.client = None @@ -161,6 +143,11 @@ def __init__( self.llm_config = self.DEFAULT_CONFIG.copy() if isinstance(llm_config, dict): self.llm_config.update(llm_config) + # We still have a default `llm_config` because the user didn't + # specify anything. This won't work, so raise an error to avoid + # an obscure message from the OpenAI service. + if self.llm_config == self.DEFAULT_CONFIG: + raise ValueError("Please specify the value for 'llm_config'.") self.client = OpenAIWrapper(**self.llm_config) # Initialize standalone client cache object. diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 1c46a01e87fd..c9a9afa06638 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -345,8 +345,13 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base and additional kwargs. """ openai_config, extra_kwargs = self._separate_openai_config(base_config) + # This *may* work if the `llm_config` has specified the `model` attribute, + # so just warn here. if type(config_list) is list and len(config_list) == 0: - logger.warning("openai client was provided with an empty config_list, which may not be intended.") + logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.") + # If the `llm_config` has no `model` then the call will fail. Abort now. 
+ if "model" not in extra_kwargs: + raise ValueError("Please specify a value for the 'model' in 'llm_config'.") self._clients: List[ModelClient] = [] self._config_list: List[Dict[str, Any]] = [] @@ -354,6 +359,13 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base if config_list: config_list = [config.copy() for config in config_list] # make a copy before modifying for config in config_list: + # We require that each element of `config_list` has a non-empty value + # for `model` specified. + model = None + if "model" in config: + model = config["model"] + if model is None or len(model) == 0: + raise ValueError("Please specify a value for the 'model' in 'config_list'.") self._register_default_client(config, openai_config) # could modify the config self._config_list.append( {**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}} diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index fd3da06c4cd7..426a13f8aa2c 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -781,12 +781,16 @@ def test_register_for_llm_without_LLM(): assert e.args[0] == "Please specify the value for 'llm_config'." +def test_register_for_llm_without_configuration_but_with_model_name(): + ConversableAgent(name="agent", llm_config={"model": "gpt-4", "config_list": []}) + + def test_register_for_llm_without_configuration(): try: ConversableAgent(name="agent", llm_config={"config_list": []}) assert False, "Expected ConversableAgent to throw ValueError." except ValueError as e: - assert e.args[0] == "Please specify at least one configuration in 'llm_config'." + assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'." 
def test_register_for_llm_without_model_name(): @@ -794,7 +798,7 @@ def test_register_for_llm_without_model_name(): ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]}) assert False, "Expected ConversableAgent to throw ValueError." except ValueError as e: - assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'." + assert e.args[0] == "Please specify a value for the 'model' in 'config_list'." def test_register_for_execution():