raise error when msg is invalid; fix docstr; improve ResponsiveAgent; update doc and packaging; capture ipython output; find code blocks with llm when regex fails. (microsoft#1154)

* autogen.agent -> autogen.agentchat

* bug fix in portfolio

* notebook

* timeout

* timeout

* infer lang; close microsoft#1150

* timeout

* message context

* context handling

* add sender to generate_reply

* clean up the receive function

* move mathchat to contrib

* contrib

* last_message

* Add OptiGuide: agent and notebook

* Optiguide notebook: add figures and URL
1. figures and code point to a remote URL
2. simplify the prompt for the interpreter, because
all information is already in the chat history.

* Update name: Agent -> GenericAgent

* Update notebook

* Rename: GenericAgent -> ResponsiveAgent

* Rebase to autogen.agentchat

* OptiGuide: Comment, style, and notebook updates

* simplify optiguide

* raise error when msg is invalid; fix docstr

* allow return None for generate_reply()

* update_system_message

* test update_system_message

* simplify optiguide

* simplify optiguide

* simplify optiguide

* simplify optiguide

* move test

* add test and fix bug

* doc update

* doc update

* doc update

* color

* optiguide

* prompt

* test danger case

* packaging

* docker

* remove path in traceback

* capture ipython output

* simplify

* find code blocks with llm

* find code with llm

* order

* order

* fix bug in context handling

* print executing msg

* print executing msg

* test find code

* test find code

* disable find_code

* default_auto_reply

* default auto reply

* remove optiguide

* remove -e

---------

Co-authored-by: Beibin Li <beibin79@gmail.com>
sonichi and BeibinLi authored Aug 1, 2023
1 parent dba6ce5 commit 60f4da7
Showing 39 changed files with 1,215 additions and 856 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/openai.yml
@@ -34,6 +34,10 @@ jobs:
pip install -e .[autogen,blendsearch]
python -c "import flaml"
pip install coverage pytest datasets
- name: Install packages for test when needed
if: matrix.python-version == '3.9'
run: |
pip install docker
- name: Install packages for MathChat when needed
if: matrix.python-version != '3.11'
run: |
12 changes: 6 additions & 6 deletions README.md
@@ -66,10 +66,10 @@ Use the following guides to get started with FLAML in .NET:
* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation) package can help you maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
- A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, and filtering. For example, you can optimize LLM generations with your own tuning data, success metrics, and budgets.
```python
from flaml import oai
from flaml import autogen

# perform tuning
config, analysis = oai.Completion.tune(
config, analysis = autogen.Completion.tune(
data=tune_data,
metric="success",
mode="max",
@@ -80,13 +80,13 @@ Use the following guides to get started with FLAML in .NET:
)

# perform inference for a test instance
response = oai.Completion.create(context=test_instance, **config)
response = autogen.Completion.create(context=test_instance, **config)
```
- LLM-driven intelligent agents which can collaboratively perform tasks autonomously or with human feedback, including tasks that require using tools via code.
```python
assistant = AssistantAgent("assistant")
user_proxy = UserProxyAgent("user_proxy")
user_proxy.initiate_chat("Show me the YTD gain of 10 largest technology companies as of today.")
assistant = autogen.AssistantAgent("assistant")
user_proxy = autogen.UserProxyAgent("user_proxy")
user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
```
* With three lines of code, you can start using this economical and fast
AutoML engine as a [scikit-learn style estimator](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML).
1 change: 0 additions & 1 deletion flaml/__init__.py
@@ -2,7 +2,6 @@
from flaml.automl import AutoML, logger_formatter
from flaml.tune.searcher import CFO, BlendSearch, FLOW2, BlendSearchTuner, RandomSearch
from flaml.onlineml.autovw import AutoVW
from flaml.autogen import oai
from flaml.version import __version__


5 changes: 3 additions & 2 deletions flaml/autogen/__init__.py
@@ -1,2 +1,3 @@
DEFAULT_MODEL = "gpt-4"
FAST_MODEL = "gpt-3.5-turbo"
from .oai import *
from .agentchat import *
from .code_utils import DEFAULT_MODEL, FAST_MODEL
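
Since this `__init__.py` now re-exports both the `oai` and `agentchat` APIs, everything is reachable from the top-level `autogen` namespace. A quick sanity-check sketch (the printed values assume the constants kept their previous defaults after moving to `code_utils`):

```python
from flaml import autogen

# Model-name constants are now re-exported from code_utils.
print(autogen.DEFAULT_MODEL)  # expected: "gpt-4"
print(autogen.FAST_MODEL)     # expected: "gpt-3.5-turbo"

# Agent classes are re-exported from agentchat.
print(autogen.AssistantAgent, autogen.UserProxyAgent)
```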
3 changes: 1 addition & 2 deletions flaml/autogen/agentchat/__init__.py
@@ -1,7 +1,6 @@
from .agent import Agent
from .responsive_agent import ResponsiveAgent
from .assistant_agent import AssistantAgent
from .responsive_agent import ResponsiveAgent
from .user_proxy_agent import UserProxyAgent


__all__ = ["Agent", "ResponsiveAgent", "AssistantAgent", "UserProxyAgent"]
4 changes: 2 additions & 2 deletions flaml/autogen/agentchat/agent.py
@@ -38,13 +38,13 @@ def generate_reply(
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = "",
sender: Optional["Agent"] = None,
) -> Union[str, Dict]:
) -> Union[str, Dict, None]:
"""(Abstract method) Generate a reply based on the received messages.
Args:
messages (list[dict]): a list of messages received.
default_reply (str or dict): the default reply if no other reply is generated.
sender: sender of an Agent instance.
Returns:
str or dict: the generated reply.
str or dict or None: the generated reply. If None, no reply is generated.
"""
10 changes: 5 additions & 5 deletions flaml/autogen/agentchat/assistant_agent.py
@@ -27,7 +27,7 @@ def __init__(
self,
name: str,
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
oai_config: Optional[Union[Dict, bool]] = None,
llm_config: Optional[Union[Dict, bool]] = None,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
@@ -37,10 +37,10 @@ def __init__(
"""
Args:
name (str): agent name.
system_message (str): system message for the oai inference.
system_message (str): system message for the ChatCompletion inference.
Please override this attribute if you want to reprogram the agent.
oai_config (dict): oai inference configuration.
Please refer to [oai.Completion.create](/docs/reference/autogen/oai/completion#create)
llm_config (dict): llm inference configuration.
Please refer to [autogen.Completion.create](/docs/reference/autogen/oai/completion#create)
for available options.
is_termination_msg (function): a function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
@@ -58,6 +58,6 @@ def __init__(
max_consecutive_auto_reply,
human_input_mode,
code_execution_config=code_execution_config,
oai_config=oai_config,
llm_config=llm_config,
**kwargs,
)
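
A usage sketch for the renamed parameter (the configuration values and termination marker here are illustrative assumptions, not prescribed by the commit):

```python
from flaml import autogen

assistant = autogen.AssistantAgent(
    name="assistant",
    # llm_config replaces the old oai_config; per the docstring, see
    # autogen.Completion.create for the available options.
    llm_config={"model": "gpt-4", "temperature": 0},
    # Treat a trailing TERMINATE marker as the end of the conversation.
    is_termination_msg=lambda m: str(m.get("content", "")).rstrip().endswith("TERMINATE"),
)
```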
17 changes: 6 additions & 11 deletions flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -136,6 +136,7 @@ def __init__(
Callable[[Dict], bool]
] = _is_termination_msg_mathchat, # terminate if \boxed{} in message
human_input_mode: Optional[str] = "NEVER", # Fully automated
default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,
max_invalid_q_per_step=3, # a parameter needed in MathChat
**kwargs,
):
@@ -153,13 +154,15 @@
the number of auto reply reaches the max_consecutive_auto_reply.
(3) (Default) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
max_invalid_q_per_step (int): (ADDED) the maximum number of invalid queries per step.
**kwargs (dict): other kwargs in [UserProxyAgent](user_proxy_agent#__init__).
"""
super().__init__(
name=name,
is_termination_msg=is_termination_msg,
human_input_mode=human_input_mode,
default_auto_reply=default_auto_reply,
**kwargs,
)

@@ -220,14 +223,6 @@ def execute_one_python_code(self, pycode):
return_code, output, _ = execute_code(pycode, **self._code_execution_config, timeout=5)
is_success = return_code == 0

# Decode the output
if isinstance(output, bytes):
try:
output = output.decode("utf-8")
except UnicodeDecodeError:
is_success = False
output = "The return cannot be decoded."

if not is_success:
# Remove the file information from the error string
pattern = r'File "/[^"]+\.py", line \d+, in .+\n'
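
For reference, the path-stripping step in this hunk can be exercised standalone. The pattern below is copied from the diff; the sample traceback is invented for illustration:

```python
import re

traceback_text = (
    "Traceback (most recent call last):\n"
    '  File "/tmp/code/snippet.py", line 3, in <module>\n'
    "    1 / 0\n"
    "ZeroDivisionError: division by zero\n"
)

# Same pattern as in execute_one_python_code: drop the 'File "...", line N, in ...' rows
# so local file paths are not echoed back to the model.
pattern = r'File "/[^"]+\.py", line \d+, in .+\n'
cleaned = re.sub(pattern, "", traceback_text)
print(cleaned)
```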
@@ -285,11 +280,11 @@ def generate_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = DEFAULT_REPLY,
sender: Optional["Agent"] = None,
) -> Union[str, Dict]:
sender: Optional[Agent] = None,
) -> Union[str, Dict, None]:
"""Generate an auto reply."""
if messages is None:
messages = self._oai_conversations[sender.name]
messages = self._oai_messages[sender.name]
message = messages[-1]
message = message.get("content", "")
code_blocks = extract_code(message)
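
A construction sketch for the new `default_auto_reply` argument (the reply string here is an illustrative stand-in for the module's `DEFAULT_REPLY` constant):

```python
from flaml.autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent

# If neither code execution nor any other reply source produces output,
# the agent now falls back to this message instead of an empty reply.
math_proxy = MathUserProxyAgent(
    name="math_user_proxy",
    human_input_mode="NEVER",
    default_auto_reply="Continue. Please keep solving the problem until you need to query.",
)
```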