Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Validate llm_config passed to ConversableAgent (issue #1522) #1654

Merged
merged 8 commits into from
Feb 15, 2024
2 changes: 1 addition & 1 deletion autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def __init__(
self,
name: str,
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
llm_config: Optional[Union[Dict, Literal[False]]] = False,
gunnarku marked this conversation as resolved.
Show resolved Hide resolved
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
Expand Down
11 changes: 8 additions & 3 deletions autogen/agentchat/conversable_agent.py
gunnarku marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ def __init__(
human_input_mode: Optional[str] = "TERMINATE",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Union[Dict, Literal[False]] = False,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
llm_config: Optional[Union[Dict, Literal[False]]] = False,
gunnarku marked this conversation as resolved.
Show resolved Hide resolved
default_auto_reply: Union[str, Dict] = "",
description: Optional[str] = None,
):
"""
Expand Down Expand Up @@ -121,7 +121,7 @@ def __init__(
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
To disable llm-based auto reply, set to False.
default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
"""
Expand All @@ -143,6 +143,11 @@ def __init__(
self.llm_config = self.DEFAULT_CONFIG.copy()
if isinstance(llm_config, dict):
self.llm_config.update(llm_config)
# We still have a default `llm_config` because the user didn't
# specify anything. This won't work, so raise an error to avoid
# an obscure message from the OpenAI service.
if self.llm_config == self.DEFAULT_CONFIG:
raise ValueError("Please specify the value for 'llm_config'.")
gunnarku marked this conversation as resolved.
Show resolved Hide resolved
self.client = OpenAIWrapper(**self.llm_config)

# Initialize standalone client cache object.
Expand Down
14 changes: 13 additions & 1 deletion autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -345,15 +345,27 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
and additional kwargs.
"""
openai_config, extra_kwargs = self._separate_openai_config(base_config)
# This *may* work if the `llm_config` has specified the `model` attribute,
# so just warn here.
if type(config_list) is list and len(config_list) == 0:
logger.warning("openai client was provided with an empty config_list, which may not be intended.")
logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.")
# If the `llm_config` has no `model` then the call will fail. Abort now.
if "model" not in extra_kwargs:
raise ValueError("Please specify a value for the 'model' in 'llm_config'.")

self._clients: List[ModelClient] = []
self._config_list: List[Dict[str, Any]] = []

if config_list:
config_list = [config.copy() for config in config_list] # make a copy before modifying
for config in config_list:
# We require that each element of `config_list` has a non-empty value
gunnarku marked this conversation as resolved.
Show resolved Hide resolved
# for `model` specified.
model = None
if "model" in config:
model = config["model"]
if model is None or len(model) == 0:
raise ValueError("Please specify a non-empty 'model' value for every item in 'config_list'.")
self._register_default_client(config, openai_config) # could modify the config
self._config_list.append(
{**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
Expand Down
4 changes: 3 additions & 1 deletion test/agentchat/contrib/test_web_surfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,9 @@ def test_web_surfer() -> None:
mp.setenv("OPENAI_API_KEY", "mock")
page_size = 4096
web_surfer = WebSurferAgent(
"web_surfer", llm_config={"config_list": []}, browser_config={"viewport_size": page_size}
"web_surfer",
llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]},
browser_config={"viewport_size": page_size},
)

    # Sneak a peek at the function map, allowing us to call the functions for testing here
Expand Down
48 changes: 28 additions & 20 deletions test/agentchat/test_conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,7 +474,7 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a
def test_update_function_signature_and_register_functions() -> None:
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent = ConversableAgent(name="agent", llm_config={})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})

def exec_python(cell: str) -> None:
pass
Expand Down Expand Up @@ -618,9 +618,9 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]
def test_register_for_llm():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": []})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": []})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": []})
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})

@agent3.register_for_llm()
@agent2.register_for_llm(name="python")
Expand Down Expand Up @@ -691,9 +691,9 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
def test_register_for_llm_api_style_function():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": []})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": []})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": []})
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})

@agent3.register_for_llm(api_style="function")
@agent2.register_for_llm(name="python", api_style="function")
Expand Down Expand Up @@ -762,7 +762,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
def test_register_for_llm_without_description():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent = ConversableAgent(name="agent", llm_config={})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})

with pytest.raises(ValueError) as e:

Expand All @@ -774,25 +774,33 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:


def test_register_for_llm_without_LLM():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent = ConversableAgent(name="agent", llm_config=None)
agent.llm_config = None
assert agent.llm_config is None
try:
ConversableAgent(name="agent", llm_config=None)
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify the value for 'llm_config'."

with pytest.raises(RuntimeError) as e:

@agent.register_for_llm(description="run cell in ipython and return the execution result.")
def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
pass
def test_register_for_llm_without_configuration():
try:
ConversableAgent(name="agent", llm_config={"config_list": []})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."


assert e.value.args[0] == "LLM config must be setup before registering a function for LLM."
def test_register_for_llm_without_model_name():
try:
ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
assert False, "Expected ConversableAgent to throw ValueError."
except ValueError as e:
assert e.args[0] == "Please specify a non-empty 'model' value for every item in 'config_list'."


def test_register_for_execution():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent = ConversableAgent(name="agent", llm_config={"config_list": []})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
user_proxy_1 = UserProxyAgent(name="user_proxy_1")
user_proxy_2 = UserProxyAgent(name="user_proxy_2")

Expand Down Expand Up @@ -827,7 +835,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]):
def test_register_functions():
with pytest.MonkeyPatch.context() as mp:
mp.setenv("OPENAI_API_KEY", "mock")
agent = ConversableAgent(name="agent", llm_config={"config_list": []})
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
user_proxy = UserProxyAgent(name="user_proxy")

def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
Expand Down
Loading