Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

support llm_config in AgentOptimizer #2299

Merged
merged 6 commits into from
Apr 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 16 additions & 13 deletions autogen/agentchat/contrib/agent_optimizer.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import copy
import json
from typing import Dict, List, Optional
from typing import Dict, List, Literal, Optional, Union

import autogen
from autogen.code_utils import execute_code
Expand Down Expand Up @@ -172,16 +172,16 @@ class AgentOptimizer:
def __init__(
self,
max_actions_per_step: int,
config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
config_file_location: Optional[str] = "",
llm_config: dict,
optimizer_model: Optional[str] = "gpt-4-1106-preview",
):
"""
(These APIs are experimental and may change in the future.)
Args:
max_actions_per_step (int): the maximum number of actions that the optimizer can take in one step.
config_file_or_env: path or environment of the OpenAI api configs.
config_file_location: the location of the OpenAI config file.
llm_config (dict): llm inference configuration.
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create) for available options.
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
optimizer_model: the model used for the optimizer.
qingyun-wu marked this conversation as resolved.
Show resolved Hide resolved
"""
self.max_actions_per_step = max_actions_per_step
Expand All @@ -199,14 +199,17 @@ def __init__(
self._failure_functions_performance = []
self._best_performance = -1

config_list = autogen.config_list_from_json(
config_file_or_env,
file_location=config_file_location,
filter_dict={"model": [self.optimizer_model]},
assert isinstance(llm_config, dict), "llm_config must be a dict"
llm_config = copy.deepcopy(llm_config)
self.llm_config = llm_config
if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
raise ValueError(
"When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
)
skzhang1 marked this conversation as resolved.
Show resolved Hide resolved
self.llm_config["config_list"] = autogen.filter_config(
llm_config["config_list"], {"model": [self.optimizer_model]}
)
if len(config_list) == 0:
raise RuntimeError("No valid openai config found in the config file or environment variable.")
self._client = autogen.OpenAIWrapper(config_list=config_list)
self._client = autogen.OpenAIWrapper(**self.llm_config)

def record_one_conversation(self, conversation_history: List[Dict], is_satisfied: bool = None):
"""
Expand Down Expand Up @@ -266,7 +269,7 @@ def step(self):
actions_num=action_index,
best_functions=best_functions,
incumbent_functions=incumbent_functions,
accumerated_experience=failure_experience_prompt,
accumulated_experience=failure_experience_prompt,
statistic_informations=statistic_prompt,
)
messages = [{"role": "user", "content": prompt}]
Expand Down
23 changes: 14 additions & 9 deletions notebook/agentchat_agentoptimizer.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
"source": [
"import copy\n",
"import json\n",
"import os\n",
"from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n",
"\n",
"from openai import BadRequestError\n",
Expand Down Expand Up @@ -299,16 +300,22 @@
"metadata": {},
"outputs": [],
"source": [
"config_list = config_list_from_json(env_or_file=\"OAI_CONFIG_LIST\")\n",
"llm_config = {\n",
" \"config_list\": [\n",
" {\n",
" \"model\": \"gpt-4-1106-preview\",\n",
" \"api_type\": \"azure\",\n",
" \"api_key\": os.environ[\"AZURE_OPENAI_API_KEY\"],\n",
" \"base_url\": \"https://ENDPOINT.openai.azure.com/\",\n",
" \"api_version\": \"2023-07-01-preview\",\n",
" }\n",
" ]\n",
"}\n",
"\n",
"assistant = autogen.AssistantAgent(\n",
" name=\"assistant\",\n",
" system_message=\"You are a helpful assistant.\",\n",
" llm_config={\n",
" \"timeout\": 600,\n",
" \"seed\": 42,\n",
" \"config_list\": config_list,\n",
" },\n",
" llm_config=llm_config,\n",
")\n",
"user_proxy = MathUserProxyAgent(\n",
" name=\"mathproxyagent\",\n",
Expand Down Expand Up @@ -361,9 +368,7 @@
"source": [
"EPOCH = 10\n",
"optimizer_model = \"gpt-4-1106-preview\"\n",
"optimizer = AgentOptimizer(\n",
" max_actions_per_step=3, config_file_or_env=\"OAI_CONFIG_LIST\", optimizer_model=optimizer_model\n",
")\n",
"optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config, optimizer_model=optimizer_model)\n",
"for i in range(EPOCH):\n",
" for index, query in enumerate(train_data):\n",
" is_correct = user_proxy.initiate_chat(assistant, answer=query[\"answer\"], problem=query[\"question\"])\n",
Expand Down
31 changes: 15 additions & 16 deletions test/agentchat/contrib/test_agent_optimizer.py
qingyun-wu marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -22,15 +22,13 @@ def test_record_conversation():
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
assistant = AssistantAgent(
"assistant",
system_message="You are a helpful assistant.",
llm_config={
"timeout": 60,
"cache_seed": 42,
"config_list": config_list,
},
)
llm_config = {
"config_list": config_list,
"timeout": 60,
"cache_seed": 42,
}

assistant = AssistantAgent("assistant", system_message="You are a helpful assistant.", llm_config=llm_config)
user_proxy = UserProxyAgent(
name="user_proxy",
human_input_mode="NEVER",
Expand All @@ -43,7 +41,7 @@ def test_record_conversation():
)

user_proxy.initiate_chat(assistant, message=problem)
optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env=OAI_CONFIG_LIST)
optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
optimizer.record_one_conversation(assistant.chat_messages_for_summary(user_proxy), is_satisfied=True)

assert len(optimizer._trial_conversations_history) == 1
Expand All @@ -66,14 +64,15 @@ def test_step():
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
llm_config = {
"config_list": config_list,
"timeout": 60,
"cache_seed": 42,
}
assistant = AssistantAgent(
"assistant",
system_message="You are a helpful assistant.",
llm_config={
"timeout": 60,
"cache_seed": 42,
"config_list": config_list,
},
llm_config=llm_config,
)
user_proxy = UserProxyAgent(
name="user_proxy",
Expand All @@ -86,7 +85,7 @@ def test_step():
max_consecutive_auto_reply=3,
)

optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env=OAI_CONFIG_LIST)
optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
user_proxy.initiate_chat(assistant, message=problem)
optimizer.record_one_conversation(assistant.chat_messages_for_summary(user_proxy), is_satisfied=True)

Expand Down
4 changes: 2 additions & 2 deletions website/blog/2023-12-23-AgentOptimizer/index.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ is_satisfied is a bool value that represents whether the user is satisfied with
Example:

```python
optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env="OAI_CONFIG_LIST")
optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
# ------------ code to solve a problem ------------
# ......
# -------------------------------------------------
Expand Down Expand Up @@ -76,7 +76,7 @@ Moreover, it also includes mechanisms to check whether each update is feasible,
The optimization process is as follows:

```python
optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env="OAI_CONFIG_LIST")
optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
for i in range(EPOCH):
is_correct = user_proxy.initiate_chat(assistant, message = problem)
history = assistant.chat_messages_for_summary(user_proxy)
Expand Down
Loading