From da6f30571adb05ef4b44782d927ab6c6d2e160ab Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Mon, 12 Feb 2024 12:38:24 -0800
Subject: [PATCH 1/7] Validate llm_config passed to ConversableAgent

Based on #1522, this commit implements the additional validation checks
in `ConversableAgent`.

Add the following validation and `raise ValueError` if:

- The `llm_config` is `None`.
- The `llm_config` is valid, but `config_list` is missing or lacks elements.
- The `config_list` is valid, but no `model` is specified.

The rest of the changes are code churn to adjust or add the test cases.
---
 autogen/agentchat/conversable_agent.py   | 18 +++++++++
 test/agentchat/test_conversable_agent.py | 48 ++++++++++++++----------
 2 files changed, 46 insertions(+), 20 deletions(-)

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index eafa0854f7f..7bb489ca946 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -136,6 +136,24 @@ def __init__(
             else (lambda x: content_str(x.get("content")) == "TERMINATE")
         )
 
+        if llm_config is None:
+            raise ValueError("Please specify the value for 'llm_config'.")
+
+        if isinstance(llm_config, dict):
+            config_list = None
+            if "config_list" in llm_config:
+                config_list = llm_config["config_list"]
+            if config_list is None or len(config_list) == 0:
+                raise ValueError("Please specify at least one configuration in 'llm_config'.")
+
+            # We know that there's at least one entry in the configuration.
+            # Verify that model is specified as well.
+            model = None
+            if "model" in llm_config["config_list"][0]:
+                model = llm_config["config_list"][0]["model"]
+            if model is None or len(model) == 0:
+                raise ValueError("Please specify a value for the 'model' in 'llm_config'.")
+
         if llm_config is False:
             self.llm_config = False
             self.client = None
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index 8ff8038da7e..fd3da06c4cd 100644
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -474,7 +474,7 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a
 def test_update_function_signature_and_register_functions() -> None:
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent = ConversableAgent(name="agent", llm_config={})
+        agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
 
         def exec_python(cell: str) -> None:
             pass
@@ -618,9 +618,9 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]
 def test_register_for_llm():
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent3 = ConversableAgent(name="agent3", llm_config={"config_list": []})
-        agent2 = ConversableAgent(name="agent2", llm_config={"config_list": []})
-        agent1 = ConversableAgent(name="agent1", llm_config={"config_list": []})
+        agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
+        agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
+        agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
 
         @agent3.register_for_llm()
         @agent2.register_for_llm(name="python")
@@ -691,9 +691,9 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
 def test_register_for_llm_api_style_function():
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent3 = ConversableAgent(name="agent3", llm_config={"config_list": []})
-        agent2 = ConversableAgent(name="agent2", llm_config={"config_list": []})
-        agent1 = ConversableAgent(name="agent1", llm_config={"config_list": []})
+        agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
+        agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
+        agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
 
         @agent3.register_for_llm(api_style="function")
         @agent2.register_for_llm(name="python", api_style="function")
@@ -762,7 +762,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
 def test_register_for_llm_without_description():
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent = ConversableAgent(name="agent", llm_config={})
+        agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
 
         with pytest.raises(ValueError) as e:
@@ -774,25 +774,33 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
 
 
 def test_register_for_llm_without_LLM():
-    with pytest.MonkeyPatch.context() as mp:
-        mp.setenv("OPENAI_API_KEY", "mock")
-        agent = ConversableAgent(name="agent", llm_config=None)
-        agent.llm_config = None
-        assert agent.llm_config is None
+    try:
+        ConversableAgent(name="agent", llm_config=None)
+        assert False, "Expected ConversableAgent to throw ValueError."
+    except ValueError as e:
+        assert e.args[0] == "Please specify the value for 'llm_config'."
 
-    with pytest.raises(RuntimeError) as e:
 
-        @agent.register_for_llm(description="run cell in ipython and return the execution result.")
-        def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
-            pass
+def test_register_for_llm_without_configuration():
+    try:
+        ConversableAgent(name="agent", llm_config={"config_list": []})
+        assert False, "Expected ConversableAgent to throw ValueError."
+    except ValueError as e:
+        assert e.args[0] == "Please specify at least one configuration in 'llm_config'."
+
 
-    assert e.value.args[0] == "LLM config must be setup before registering a function for LLM."
+def test_register_for_llm_without_model_name():
+    try:
+        ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
+        assert False, "Expected ConversableAgent to throw ValueError."
+    except ValueError as e:
+        assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."
 def test_register_for_execution():
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent = ConversableAgent(name="agent", llm_config={"config_list": []})
+        agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
 
         user_proxy_1 = UserProxyAgent(name="user_proxy_1")
         user_proxy_2 = UserProxyAgent(name="user_proxy_2")
@@ -827,7 +835,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]):
 def test_register_functions():
     with pytest.MonkeyPatch.context() as mp:
         mp.setenv("OPENAI_API_KEY", "mock")
-        agent = ConversableAgent(name="agent", llm_config={"config_list": []})
+        agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
         user_proxy = UserProxyAgent(name="user_proxy")
 
         def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:

From fa7b0d7e0650c738c9e51ca871fe3a866f141fe2 Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Mon, 12 Feb 2024 12:38:24 -0800
Subject: [PATCH 2/7] Validate llm_config passed to ConversableAgent

Based on #1522, this commit implements the additional validation checks
in `ConversableAgent`.

Add the following validation and `raise ValueError` if:

- The `llm_config` is `None` (validated in `ConversableAgent`).
- The `llm_config` has no `model` specified and `config_list` is empty (validated in `OpenAIWrapper`).
- The `config_list` has at least one entry, but not all the entries have the `model` specified (validated in `OpenAIWrapper`).

The rest of the changes are code churn to adjust or add the test cases.
---
 autogen/agentchat/conversable_agent.py   | 23 +++++------------------
 autogen/oai/client.py                    | 14 +++++++++++++-
 test/agentchat/test_conversable_agent.py |  8 ++++++--
 3 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 7bb489ca946..9733861db7a 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -136,24 +136,6 @@ def __init__(
             else (lambda x: content_str(x.get("content")) == "TERMINATE")
         )
 
-        if llm_config is None:
-            raise ValueError("Please specify the value for 'llm_config'.")
-
-        if isinstance(llm_config, dict):
-            config_list = None
-            if "config_list" in llm_config:
-                config_list = llm_config["config_list"]
-            if config_list is None or len(config_list) == 0:
-                raise ValueError("Please specify at least one configuration in 'llm_config'.")
-
-            # We know that there's at least one entry in the configuration.
-            # Verify that model is specified as well.
-            model = None
-            if "model" in llm_config["config_list"][0]:
-                model = llm_config["config_list"][0]["model"]
-            if model is None or len(model) == 0:
-                raise ValueError("Please specify a value for the 'model' in 'llm_config'.")
-
         if llm_config is False:
             self.llm_config = False
             self.client = None
@@ -161,6 +143,11 @@ def __init__(
             self.llm_config = self.DEFAULT_CONFIG.copy()
             if isinstance(llm_config, dict):
                 self.llm_config.update(llm_config)
+            # We still have a default `llm_config` because the user didn't
+            # specify anything. This won't work, so raise an error to avoid
+            # an obscure message from the OpenAI service.
+            if self.llm_config == self.DEFAULT_CONFIG:
+                raise ValueError("Please specify the value for 'llm_config'.")
             self.client = OpenAIWrapper(**self.llm_config)
 
         # Initialize standalone client cache object.
diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 5970c87126d..66db6e9f3a7 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -345,8 +345,13 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
             and additional kwargs.
         """
         openai_config, extra_kwargs = self._separate_openai_config(base_config)
+        # This *may* work if the `llm_config` has specified the `model` attribute,
+        # so just warn here.
         if type(config_list) is list and len(config_list) == 0:
-            logger.warning("openai client was provided with an empty config_list, which may not be intended.")
+            logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.")
+            # If the `llm_config` has no `model` then the call will fail. Abort now.
+            if "model" not in extra_kwargs:
+                raise ValueError("Please specify a value for the 'model' in 'llm_config'.")
 
         self._clients: List[ModelClient] = []
         self._config_list: List[Dict[str, Any]] = []
@@ -354,6 +359,13 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
         if config_list:
             config_list = [config.copy() for config in config_list]  # make a copy before modifying
             for config in config_list:
+                # We require that each element of `config_list` has a non-empty value
+                # for `model` specified.
+                model = None
+                if "model" in config:
+                    model = config["model"]
+                if model is None or len(model) == 0:
+                    raise ValueError("Please specify a value for the 'model' in 'config_list'.")
                 self._register_default_client(config, openai_config)  # could modify the config
                 self._config_list.append(
                     {**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index fd3da06c4cd..426a13f8aa2 100644
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -781,12 +781,16 @@ def test_register_for_llm_without_LLM():
         assert e.args[0] == "Please specify the value for 'llm_config'."
 
 
+def test_register_for_llm_without_configuration_but_with_model_name():
+    ConversableAgent(name="agent", llm_config={"model": "gpt-4", "config_list": []})
+
+
 def test_register_for_llm_without_configuration():
     try:
         ConversableAgent(name="agent", llm_config={"config_list": []})
         assert False, "Expected ConversableAgent to throw ValueError."
     except ValueError as e:
-        assert e.args[0] == "Please specify at least one configuration in 'llm_config'."
+        assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."
 
 
@@ -794,7 +798,7 @@ def test_register_for_llm_without_model_name():
     try:
         ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
         assert False, "Expected ConversableAgent to throw ValueError."
     except ValueError as e:
-        assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."
+        assert e.args[0] == "Please specify a value for the 'model' in 'config_list'."

From a51088c5a91a7c90cf49f6e92a5f348121973ea8 Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Mon, 12 Feb 2024 12:38:24 -0800
Subject: [PATCH 3/7] Validate llm_config passed to ConversableAgent

Based on #1522, this commit implements the additional validation checks
in `ConversableAgent`.

Add the following validation and `raise ValueError` if:

- The `llm_config` is `None` (validated in `ConversableAgent`).
- The `llm_config` has no `model` specified and `config_list` is empty (validated in `OpenAIWrapper`).
- The `config_list` has at least one entry, but not all the entries have the `model` specified (validated in `OpenAIWrapper`).

The rest of the changes are code churn to adjust or add the test cases.
---
 test/agentchat/contrib/test_web_surfer.py | 4 +++-
 test/agentchat/test_conversable_agent.py  | 4 ----
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/test/agentchat/contrib/test_web_surfer.py b/test/agentchat/contrib/test_web_surfer.py
index abf7903dee7..3e48c7c9387 100644
--- a/test/agentchat/contrib/test_web_surfer.py
+++ b/test/agentchat/contrib/test_web_surfer.py
@@ -51,7 +51,9 @@ def test_web_surfer() -> None:
         mp.setenv("OPENAI_API_KEY", "mock")
         page_size = 4096
         web_surfer = WebSurferAgent(
-            "web_surfer", llm_config={"config_list": []}, browser_config={"viewport_size": page_size}
+            "web_surfer",
+            llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]},
+            browser_config={"viewport_size": page_size},
         )
 
         # Sneak a peak at the function map, allowing us to call the functions for testing here
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index 426a13f8aa2..fe912568f97 100644
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -781,10 +781,6 @@ def test_register_for_llm_without_LLM():
         assert e.args[0] == "Please specify the value for 'llm_config'."
 
 
-def test_register_for_llm_without_configuration_but_with_model_name():
-    ConversableAgent(name="agent", llm_config={"model": "gpt-4", "config_list": []})
-
-
 def test_register_for_llm_without_configuration():
     try:
         ConversableAgent(name="agent", llm_config={"config_list": []})

From 955c5fe9e16063e19b615f6a35728159e525ea0b Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Mon, 12 Feb 2024 12:38:24 -0800
Subject: [PATCH 4/7] Validate llm_config passed to ConversableAgent

Based on #1522, this commit implements the additional validation checks
in `ConversableAgent`.

Add the following validation and `raise ValueError` if:

- The `llm_config` is `None` (validated in `ConversableAgent`).
- The `llm_config` has no `model` specified and `config_list` is empty (validated in `OpenAIWrapper`).
- The `config_list` has at least one entry, but not all the entries have the `model` specified (validated in `OpenAIWrapper`).

The rest of the changes are code churn to adjust or add the test cases.
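
For illustration only (not part of the diffs in this series): a minimal sketch of the
contract these checks enforce at this point in the series, mirroring the test cases
above. The "mock" key and empty `api_key` are placeholders taken straight from the
tests; no request is actually sent to the service.

    import os

    from autogen import ConversableAgent

    os.environ["OPENAI_API_KEY"] = "mock"  # placeholder, as in the tests

    # Accepted: `config_list` has at least one entry with a non-empty `model`.
    agent = ConversableAgent(
        name="agent",
        llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]},
    )

    # Rejected: `llm_config=None` leaves only the default config, so
    # ConversableAgent raises rather than let the OpenAI call fail obscurely.
    try:
        ConversableAgent(name="agent", llm_config=None)
    except ValueError as e:
        print(e.args[0])  # Please specify the value for 'llm_config'.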
---
 autogen/agentchat/assistant_agent.py     | 2 +-
 autogen/agentchat/conversable_agent.py   | 6 +++---
 autogen/oai/client.py                    | 2 +-
 test/agentchat/test_conversable_agent.py | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py
index bdec0fef665..8c4f196f675 100644
--- a/autogen/agentchat/assistant_agent.py
+++ b/autogen/agentchat/assistant_agent.py
@@ -33,7 +33,7 @@ def __init__(
         self,
         name: str,
         system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
-        llm_config: Optional[Union[Dict, Literal[False]]] = None,
+        llm_config: Optional[Union[Dict, Literal[False]]] = False,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Optional[str] = "NEVER",
diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 9733861db7a..8ed86406179 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -78,8 +78,8 @@ def __init__(
         human_input_mode: Optional[str] = "TERMINATE",
         function_map: Optional[Dict[str, Callable]] = None,
         code_execution_config: Union[Dict, Literal[False]] = False,
-        llm_config: Optional[Union[Dict, Literal[False]]] = None,
-        default_auto_reply: Optional[Union[str, Dict, None]] = "",
+        llm_config: Optional[Union[Dict, Literal[False]]] = False,
+        default_auto_reply: Union[str, Dict] = "",
         description: Optional[str] = None,
     ):
         """
@@ -121,7 +121,7 @@ def __init__(
                 Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                 for available options.
                 To disable llm-based auto reply, set to False.
-            default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
+            default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
             description (str): a short description of the agent. This description is used by other agents
                 (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
         """
diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 66db6e9f3a7..9a03c3acee5 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -365,7 +365,7 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
                 if "model" in config:
                     model = config["model"]
                 if model is None or len(model) == 0:
-                    raise ValueError("Please specify a value for the 'model' in 'config_list'.")
+                    raise ValueError("Please specify a non-empty 'model' value for every item in 'config_list'.")
                 self._register_default_client(config, openai_config)  # could modify the config
                 self._config_list.append(
                     {**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py
index fe912568f97..c8449ac388e 100644
--- a/test/agentchat/test_conversable_agent.py
+++ b/test/agentchat/test_conversable_agent.py
@@ -794,7 +794,7 @@ def test_register_for_llm_without_model_name():
         ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
         assert False, "Expected ConversableAgent to throw ValueError."
     except ValueError as e:
-        assert e.args[0] == "Please specify a value for the 'model' in 'config_list'."
+        assert e.args[0] == "Please specify a non-empty 'model' value for every item in 'config_list'."
 def test_register_for_execution():

From e5bb7cd556b55d92304c50005aaadab03237b7a8 Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Mon, 12 Feb 2024 12:38:24 -0800
Subject: [PATCH 5/7] Validate llm_config passed to ConversableAgent

Based on #1522, this commit implements the additional validation checks
in `ConversableAgent`.

Add the following validation and `raise ValueError` if:

- The `llm_config` is `None` (validated in `ConversableAgent`).
- The `llm_config` has no `model` specified and `config_list` is empty (validated in `OpenAIWrapper`).
- The `config_list` has at least one entry, but not all the entries have the `model` specified (validated in `OpenAIWrapper`).

The rest of the changes are code churn to adjust or add the test cases.
---
 autogen/agentchat/conversable_agent.py    | 6 +++---
 autogen/oai/client.py                     | 4 ++--
 test/agentchat/contrib/test_web_surfer.py | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 8ed86406179..8f230781586 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -78,7 +78,7 @@ def __init__(
         human_input_mode: Optional[str] = "TERMINATE",
         function_map: Optional[Dict[str, Callable]] = None,
         code_execution_config: Union[Dict, Literal[False]] = False,
-        llm_config: Optional[Union[Dict, Literal[False]]] = False,
+        llm_config: Optional[Union[Dict, Literal[False]]] = None,
         default_auto_reply: Union[str, Dict] = "",
         description: Optional[str] = None,
     ):
@@ -117,7 +117,7 @@ def __init__(
                 - timeout (Optional, int): The maximum execution time in seconds.
                 - last_n_messages (Experimental, int or str): The number of messages to look back for code execution. If set to 'auto', it will scan backwards through all messages arriving since the agent last spoke, which is typically the last time execution was attempted. (Default: auto)
-            llm_config (dict or False): llm inference configuration.
+            llm_config (dict or False or None): llm inference configuration.
                 Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                 for available options.
                 To disable llm-based auto reply, set to False.
@@ -146,7 +146,7 @@ def __init__(
             # We still have a default `llm_config` because the user didn't
             # specify anything. This won't work, so raise an error to avoid
             # an obscure message from the OpenAI service.
-            if self.llm_config == self.DEFAULT_CONFIG:
+            if self.llm_config == {}:
                 raise ValueError("Please specify the value for 'llm_config'.")
             self.client = OpenAIWrapper(**self.llm_config)
 
diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index 9a03c3acee5..dcd4e785baa 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -360,11 +360,11 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
             config_list = [config.copy() for config in config_list]  # make a copy before modifying
             for config in config_list:
                 # We require that each element of `config_list` has a non-empty value
-                # for `model` specified.
+                # for `model` specified unless `extra_kwargs` contains "model".
                 model = None
                 if "model" in config:
                     model = config["model"]
-                if model is None or len(model) == 0:
+                if "model" not in extra_kwargs and (model is None or len(model) == 0):
                     raise ValueError("Please specify a non-empty 'model' value for every item in 'config_list'.")
                 self._register_default_client(config, openai_config)  # could modify the config
                 self._config_list.append(
diff --git a/test/agentchat/contrib/test_web_surfer.py b/test/agentchat/contrib/test_web_surfer.py
index 3e48c7c9387..7f9b2365908 100644
--- a/test/agentchat/contrib/test_web_surfer.py
+++ b/test/agentchat/contrib/test_web_surfer.py
@@ -52,7 +52,7 @@ def test_web_surfer() -> None:
         page_size = 4096
         web_surfer = WebSurferAgent(
             "web_surfer",
-            llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]},
+            llm_config=False,
             browser_config={"viewport_size": page_size},
         )

From e699b4ba025ff73dc82e85c2d3c47014afffaee4 Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Tue, 13 Feb 2024 20:54:22 -0800
Subject: [PATCH 6/7] Fix the test_web_surfer issue

For anyone reading this: you need to `pip install markdownify` for
`import WebSurferAgent` to succeed; that is needed to run
`test_web_surfer.py` locally.

The test logic needs an `llm_config` that is neither `None` nor `False`.

Let us pray that this works as part of GitHub Actions ...
---
 test/agentchat/contrib/test_web_surfer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/agentchat/contrib/test_web_surfer.py b/test/agentchat/contrib/test_web_surfer.py
index 7f9b2365908..1bc7245a3ce 100644
--- a/test/agentchat/contrib/test_web_surfer.py
+++ b/test/agentchat/contrib/test_web_surfer.py
@@ -52,7 +52,7 @@ def test_web_surfer() -> None:
         page_size = 4096
         web_surfer = WebSurferAgent(
             "web_surfer",
-            llm_config=False,
+            llm_config={"model": "gpt-4", "config_list": []},
             browser_config={"viewport_size": page_size},
         )

From 2ba92b72e2f04131c660902302be84a04a4bb16d Mon Sep 17 00:00:00 2001
From: Gunnar Kudrjavets
Date: Wed, 14 Feb 2024 09:52:26 -0800
Subject: [PATCH 7/7] One more fix for llm_config validation contract

---
 autogen/agentchat/assistant_agent.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py
index 8c4f196f675..df85fa2ed8e 100644
--- a/autogen/agentchat/assistant_agent.py
+++ b/autogen/agentchat/assistant_agent.py
@@ -33,7 +33,7 @@ def __init__(
         self,
         name: str,
         system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
-        llm_config: Optional[Union[Dict, Literal[False]]] = False,
+        llm_config: Optional[Union[Dict, Literal[False]]] = None,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Optional[str] = "NEVER",
@@ -45,7 +45,7 @@ def __init__(
             name (str): agent name.
             system_message (str): system message for the ChatCompletion inference.
                 Please override this attribute if you want to reprogram the agent.
-            llm_config (dict): llm inference configuration.
+            llm_config (dict or False or None): llm inference configuration.
                 Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                 for available options.
             is_termination_msg (function): a function that takes a message in the form of a dictionary
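
A closing illustration for reviewers — a minimal sketch (not part of any patch in
this series) of the final `OpenAIWrapper` contract that patches 2 through 5 converge
on. The "mock" key is a placeholder as in the tests; no request is sent.

    import os

    from autogen import OpenAIWrapper

    os.environ["OPENAI_API_KEY"] = "mock"  # placeholder, as in the tests

    # Rejected: an empty config_list with no top-level `model` raises ValueError.
    try:
        OpenAIWrapper(config_list=[])
    except ValueError as e:
        print(e.args[0])  # Please specify a value for the 'model' in 'llm_config'.

    # Accepted (with a warning): the empty config_list is tolerated because
    # `model` is supplied at the top level, i.e. it lands in `extra_kwargs`.
    client = OpenAIWrapper(config_list=[], model="gpt-4")

    # Rejected: a config_list entry with an empty `model` and no top-level
    # `model` to fall back on.
    try:
        OpenAIWrapper(config_list=[{"model": "", "api_key": ""}])
    except ValueError as e:
        print(e.args[0])  # Please specify a non-empty 'model' value for every item in 'config_list'.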