Completion -> client #426

Merged · 8 commits · Oct 26, 2023
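This PR replaces the legacy `oai.ChatCompletion` / `autogen.Completion` calls with a per-agent `OpenAIWrapper` client. A minimal sketch of the new call pattern, assuming only the surface shown in this diff (`create` and `extract_text_or_function_call`) and a hypothetical `config_list` of model configs:

```python
from autogen import OpenAIWrapper

# Hypothetical config_list; any list of model/endpoint dicts would do here.
config_list = [{"model": "gpt-4", "api_key": "sk-..."}]

# Build the client once, then reuse it for completions.
client = OpenAIWrapper(config_list=config_list)
response = client.create(messages=[{"role": "user", "content": "2 + 2 = ?"}])

# extract_text_or_function_call returns one entry per choice.
print(client.extract_text_or_function_call(response)[0])
```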
4 changes: 3 additions & 1 deletion .github/workflows/openai.yml
@@ -54,6 +54,7 @@ jobs:
run: |
pip install -e .[retrievechat]
- name: Install packages for Teachable when needed
if: matrix.python-version == '3.11'
run: |
pip install -e .[teachable]
- name: Coverage
@@ -76,7 +77,8 @@ jobs:
OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }}
run: |
pip install nbconvert nbformat ipykernel
coverage run -a -m pytest test/test_with_openai.py
coverage run -a -m pytest test/agentchat/test_py10.py
coverage run -a -m pytest test/agentchat/test_teachable_agent.py
coverage run -a -m pytest test/test_notebook.py
coverage xml
cat "$(pwd)/test/executed_openai_notebook_output.txt"
2 changes: 1 addition & 1 deletion README.md
@@ -113,7 +113,7 @@ Please find more [code examples](https://microsoft.github.io/autogen/docs/Exampl
Autogen also helps maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4. It offers enhanced LLM inference with powerful functionalities like tuning, caching, error handling, and templating. For example, you can optimize generations by LLM with your own tuning data, success metrics, and budgets.

```python
# perform tuning
# perform tuning for openai<1
config, analysis = autogen.Completion.tune(
data=tune_data,
metric="success",
```
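The tuned `config` can then be reused at inference time; a short sketch, assuming the pre-1.0 `autogen.Completion` API this README excerpt documents and a hypothetical `test_instance` context dict:

```python
# Hedged sketch for openai<1: apply the tuned config to a new instance.
response = autogen.Completion.create(context=test_instance, **config)
```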
2 changes: 1 addition & 1 deletion autogen/agentchat/assistant_agent.py
@@ -43,7 +43,7 @@ def __init__(
system_message (str): system message for the ChatCompletion inference.
Please override this attribute if you want to reprogram the agent.
llm_config (dict): llm inference configuration.
Please refer to [Completion.create](/docs/reference/oai/completion#create)
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
is_termination_msg (function): a function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
11 changes: 5 additions & 6 deletions autogen/agentchat/contrib/teachable_agent.py
@@ -40,7 +40,7 @@ def __init__(
system_message (str): system message for the ChatCompletion inference.
human_input_mode (str): This agent should NEVER prompt the human for input.
llm_config (dict or False): llm inference configuration.
Please refer to [Completion.create](/docs/reference/oai/completion#create)
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
To disable llm-based auto reply, set to False.
analyzer_llm_config (dict or False): llm inference configuration passed to TextAnalyzerAgent.
@@ -125,11 +125,8 @@ def _generate_teachable_assistant_reply(
messages = messages.copy()
messages[-1]["content"] = new_user_text

# Generate a response.
msgs = self._oai_system_message + messages
response = oai.ChatCompletion.create(messages=msgs, **self.llm_config)
response_text = oai.ChatCompletion.extract_text_or_function_call(response)[0]
return True, response_text
# Generate a response by reusing existing generate_oai_reply
return self.generate_oai_reply(messages, sender, config)

def learn_from_user_feedback(self):
"""Reviews the user comments from the last chat, and decides what teachings to store as memos."""
@@ -265,6 +262,8 @@ def analyze(self, text_to_analyze, analysis_instructions):
self.send(recipient=self.analyzer, message=analysis_instructions, request_reply=True) # Request the reply.
return self.last_message(self.analyzer)["content"]
else:
# TODO: This is not an encouraged usage pattern. It breaks the conversation-centric design.
# consider using the arg "silent"
# Use the analyzer's method directly, to leave analyzer message out of the printed chat.
return self.analyzer.analyze_text(text_to_analyze, analysis_instructions)

8 changes: 2 additions & 6 deletions autogen/agentchat/contrib/text_analyzer_agent.py
@@ -26,7 +26,7 @@ def __init__(
system_message (str): system message for the ChatCompletion inference.
human_input_mode (str): This agent should NEVER prompt the human for input.
llm_config (dict or False): llm inference configuration.
Please refer to [Completion.create](/docs/reference/oai/completion#create)
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
To disable llm-based auto reply, set to False.
teach_config (dict or None): Additional parameters used by TeachableAgent.
@@ -74,9 +74,5 @@ def analyze_text(self, text_to_analyze, analysis_instructions):
msg_text = "\n".join(
[analysis_instructions, text_to_analyze, analysis_instructions]
) # Repeat the instructions.
messages = self._oai_system_message + [{"role": "user", "content": msg_text}]

# Generate and return the analysis string.
response = oai.ChatCompletion.create(context=None, messages=messages, **self.llm_config)
output_text = oai.ChatCompletion.extract_text_or_function_call(response)[0]
return output_text
return self.generate_oai_reply([{"role": "user", "content": msg_text}], None, None)[1]
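For context, `generate_oai_reply` returns a `(final, reply)` tuple (see its `Tuple[bool, Union[str, Dict, None]]` signature in the `conversable_agent.py` diff below), so the `[1]` above keeps only the reply text. A minimal sketch, with `analyzer` as a hypothetical `TextAnalyzerAgent` instance:

```python
# generate_oai_reply returns (final, reply); index [1] keeps only the reply.
final, reply = analyzer.generate_oai_reply(
    [{"role": "user", "content": "Is this a question? Answer yes or no."}],
    None,  # sender is unused when messages are passed explicitly
    None,  # config=None falls back to the agent's own client
)
```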
38 changes: 23 additions & 15 deletions autogen/agentchat/conversable_agent.py
@@ -4,7 +4,7 @@
import json
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from autogen import oai
from autogen import OpenAIWrapper
from .agent import Agent
from autogen.code_utils import (
DEFAULT_MODEL,
@@ -93,7 +93,7 @@ def __init__(
- timeout (Optional, int): The maximum execution time in seconds.
- last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
llm_config (dict or False): llm inference configuration.
Please refer to [Completion.create](/docs/reference/oai/completion#create)
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
To disable llm-based auto reply, set to False.
default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
@@ -107,10 +107,12 @@
)
if llm_config is False:
self.llm_config = False
self.client = None
else:
self.llm_config = self.DEFAULT_CONFIG.copy()
if isinstance(llm_config, dict):
self.llm_config.update(llm_config)
self.client = OpenAIWrapper(**self.llm_config)

self._code_execution_config = {} if code_execution_config is None else code_execution_config
self.human_input_mode = human_input_mode
@@ -254,8 +256,10 @@ def _message_to_dict(message: Union[Dict, str]):
"""
if isinstance(message, str):
return {"content": message}
else:
elif isinstance(message, dict):
return message
else:
return dict(message)

def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: Agent) -> bool:
"""Append a message to the ChatCompletion conversation.
Expand Down Expand Up @@ -285,6 +289,7 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id:
oai_message["role"] = "function" if message.get("role") == "function" else role
if "function_call" in oai_message:
oai_message["role"] = "assistant" # only messages with role 'assistant' can have a function call.
oai_message["function_call"] = dict(oai_message["function_call"])
self._oai_messages[conversation_id].append(oai_message)
return True

@@ -306,7 +311,7 @@ def send(
- role (str): the role of the message, any role that is not "function"
will be modified to "assistant".
- context (dict): the context of the message, which will be passed to
[Completion.create](../oai/Completion#create).
[OpenAIWrapper.create](../oai/client#create).
For example, one agent can send a message A as:
```python
{
@@ -355,7 +360,7 @@ async def a_send(
- role (str): the role of the message, any role that is not "function"
will be modified to "assistant".
- context (dict): the context of the message, which will be passed to
[Completion.create](../oai/Completion#create).
[OpenAIWrapper.create](../oai/client#create).
For example, one agent can send a message A as:
```python
{
@@ -398,18 +403,21 @@ def _print_received_message(self, message: Union[Dict, str], sender: Agent):
content = message.get("content")
if content is not None:
if "context" in message:
content = oai.ChatCompletion.instantiate(
content = OpenAIWrapper.instantiate(
content,
message["context"],
self.llm_config and self.llm_config.get("allow_format_str_template", False),
)
print(content, flush=True)
if "function_call" in message:
func_print = f"***** Suggested function Call: {message['function_call'].get('name', '(No function name found)')} *****"
function_call = dict(message["function_call"])
func_print = (
f"***** Suggested function Call: {function_call.get('name', '(No function name found)')} *****"
)
print(colored(func_print, "green"), flush=True)
print(
"Arguments: \n",
message["function_call"].get("arguments", "(No arguments found)"),
function_call.get("arguments", "(No arguments found)"),
flush=True,
sep="",
)
@@ -447,7 +455,7 @@ def receive(
This field is only needed to distinguish between "function" or "assistant"/"user".
4. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
5. "context" (dict): the context of the message, which will be passed to
[Completion.create](../oai/Completion#create).
[OpenAIWrapper.create](../oai/client#create).
sender: sender of an Agent instance.
request_reply (bool or None): whether a reply is requested from the sender.
If None, the value is determined by `self.reply_at_receive[sender]`.
@@ -483,7 +491,7 @@ async def a_receive(
This field is only needed to distinguish between "function" or "assistant"/"user".
4. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
5. "context" (dict): the context of the message, which will be passed to
[Completion.create](../oai/Completion#create).
[OpenAIWrapper.create](../oai/client#create).
sender: sender of an Agent instance.
request_reply (bool or None): whether a reply is requested from the sender.
If None, the value is determined by `self.reply_at_receive[sender]`.
@@ -596,17 +604,17 @@ def generate_oai_reply(
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
"""Generate a reply using autogen.oai."""
llm_config = self.llm_config if config is None else config
if llm_config is False:
client = self.client if config is None else config
if client is None:
return False, None
if messages is None:
messages = self._oai_messages[sender]

# TODO: #1143 handle token limit exceeded error
response = oai.ChatCompletion.create(
context=messages[-1].pop("context", None), messages=self._oai_system_message + messages, **llm_config
response = client.create(
context=messages[-1].pop("context", None), messages=self._oai_system_message + messages
)
return True, oai.ChatCompletion.extract_text_or_function_call(response)[0]
return True, client.extract_text_or_function_call(response)[0]

def generate_code_execution_reply(
self,
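Condensing the hunks above: when `llm_config` is not `False`, the agent now owns an `OpenAIWrapper` stored on `self.client`, and `generate_oai_reply` routes through it instead of the static `oai.ChatCompletion`. A hedged sketch, assuming a `ConversableAgent` built with the hypothetical `config_list` from the sketch at the top:

```python
from autogen import ConversableAgent

# llm_config=False would instead set agent.client to None and disable LLM replies.
agent = ConversableAgent(
    name="assistant",
    llm_config={"config_list": config_list},  # config_list as sketched above
)

# The reply is generated through agent.client, an OpenAIWrapper, internally.
final, reply = agent.generate_oai_reply(
    messages=[{"role": "user", "content": "Hello"}], sender=None
)
```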
2 changes: 1 addition & 1 deletion autogen/agentchat/user_proxy_agent.py
@@ -63,7 +63,7 @@ def __init__(
- last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
llm_config (dict or False): llm inference configuration.
Please refer to [Completion.create](/docs/reference/oai/completion#create)
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
Default to false, which disables llm-based auto reply.
system_message (str): system message for ChatCompletion inference.
55 changes: 7 additions & 48 deletions autogen/code_utils.py
@@ -84,49 +84,8 @@ def extract_code(
return extracted


# _FIND_CODE_SYS_MSG = [
# {
# "role": "system",
# "content": """In the following conversation, an assistant suggests code and a user is expected to run it.
# Read the conversation, and then find all the right code blocks for the user to run next in the right order.
# Only return the code blocks that are expected to run.
# Don't include code blocks which have been executed unless the user is requested to run the same block again.
# When the user needs to run multiple blocks in sequence, make sure to output all the blocks to run in a right order.
# If the line beginning with "# filename" is put before a code block, move it into the code block as the first line.
# Make sure to add the right "python" or "sh" identifier if the language identifier is missing for a code block.
# Don't make other changes to the code blocks.
# Don't reply anything else if at least one code block is expected to run.
# If no code block is expeted to run, check whether the task has been successfully finished at full satisfaction.
# If not, reply with the reason why the task is not finished.""",
# },
# ]
# _FIND_CODE_CONFIG = {
# "model": FAST_MODEL,
# }


# def find_code(messages: List[Dict], sys_msg=None, **config) -> Tuple[List[Tuple[str, str]], str]:
# """Find code from a list of messages.

# Args:
# messages (str): The list of messages to find code from.
# sys_msg (Optional, str): The system message to prepend to the messages.
# config (Optional, dict): The configuration for the API call.

# Returns:
# list: A list of tuples, each containing the language and the code.
# str: The generated text by llm.
# """
# params = {**_FIND_CODE_CONFIG, **config}
# if sys_msg is None or not sys_msg[0]["content"]:
# sys_msg = _FIND_CODE_SYS_MSG
# response = oai.ChatCompletion.create(messages=sys_msg + messages, **params)
# content = oai.Completion.extract_text(response)[0]
# return extract_code(content), content


def generate_code(pattern: str = CODE_BLOCK_PATTERN, **config) -> Tuple[str, float]:
"""Generate code.
"""(Deprecated) Generate code.

Args:
pattern (Optional, str): The regular expression pattern for finding the code block.
@@ -151,7 +110,7 @@ def generate_code(pattern: str = CODE_BLOCK_PATTERN, **config) -> Tuple[str, flo


def improve_function(file_name, func_name, objective, **config):
"""(work in progress) Improve the function to achieve the objective."""
"""(Deprecated) Improve the function to achieve the objective."""
params = {**_IMPROVE_FUNCTION_CONFIG, **config}
# read the entire file into a str
with open(file_name, "r") as f:
@@ -172,7 +131,7 @@ def improve_function(file_name, func_name, objective, **config):


def improve_code(files, objective, suggest_only=True, **config):
"""Improve the code to achieve a given objective.
"""(Deprecated) Improve the code to achieve a given objective.

Args:
files (list): A list of file names containing the source code.
@@ -422,7 +381,7 @@ def execute_code(


def generate_assertions(definition: str, **config) -> Tuple[str, float]:
"""Generate assertions for a function.
"""(Deprecated) Generate assertions for a function.

Args:
definition (str): The function definition, including the signature and docstr.
@@ -459,7 +418,7 @@ def eval_function_completions(
timeout: Optional[float] = 3,
use_docker: Optional[bool] = True,
) -> Dict:
"""Select a response from a list of responses for the function completion task (using generated assertions), and/or evaluate if the task is successful using a gold test.
"""(Deprecated) Select a response from a list of responses for the function completion task (using generated assertions), and/or evaluate if the task is successful using a gold test.

Args:
responses (list): The list of responses.
@@ -549,7 +508,7 @@ def __init__(self, assertions):
self.metrics = self.responses = None

def pass_assertions(self, context, response, **_):
"""Check if the response passes the assertions."""
"""(Deprecated) Check if the response passes the assertions."""
responses = oai.Completion.extract_text(response)
metrics = eval_function_completions(responses, context["definition"], assertions=self._assertions)
self._assertions = metrics["assertions"]
@@ -564,7 +523,7 @@ def implement(
configs: Optional[List[Dict]] = None,
assertions: Optional[Union[str, Callable[[str], Tuple[str, float]]]] = generate_assertions,
) -> Tuple[str, float]:
"""Implement a function from a definition.
"""(Deprecated) Implement a function from a definition.

Args:
definition (str): The function definition, including the signature and docstr.
2 changes: 1 addition & 1 deletion autogen/math_utils.py
@@ -9,7 +9,7 @@


def solve_problem(problem: str, **config) -> str:
"""(Experimental) Solve the math problem.
"""(Deprecated) Solve the math problem.

Args:
problem (str): The problem statement.