def resume(
    self,
    messages: Union[List[Dict], str],
    remove_termination_string: Optional[str] = None,
    silent: Optional[bool] = False,
) -> Tuple[ConversableAgent, Dict]:
    """Resumes a group chat using the previous messages as a starting point.

    Requires the agents, group chat, and group chat manager to be established
    as per the original group chat.

    Args:
        messages (Union[List[Dict], str]): The content of the previous chat's messages,
            either as a Json string or a list of message dictionaries.
        remove_termination_string (Optional[str]): Remove the provided string from the
            last message to prevent immediate termination.
        silent (Optional[bool]): (Experimental) whether to print the messages for this
            conversation. Default is False.

    Returns:
        Tuple[ConversableAgent, Dict]: A tuple containing the last agent who spoke and their message.

    Raises:
        Exception: if `messages` is neither a str nor a List[Dict], or if the messages
            fail validation against the configured group chat.
    """

    # Convert messages from string to messages list, if needed
    if isinstance(messages, str):
        messages = self.messages_from_string(messages)
    elif isinstance(messages, list) and all(isinstance(item, dict) for item in messages):
        # Deep copy so resuming never mutates the caller's message history.
        messages = copy.deepcopy(messages)
    else:
        raise Exception("Messages is not of type str or List[Dict]")

    # Clean up the objects, ensuring there are no messages in the agents and group chat

    # Clear agent message history
    for agent in self._groupchat.agents:
        if isinstance(agent, ConversableAgent):
            agent.clear_history()

    # Clear Manager message history
    self.clear_history()

    # Clear GroupChat messages
    self._groupchat.reset()

    # Validation of messages and agents (raises on failure; no need to wrap in
    # a try/except that only re-raises).
    self._valid_resume_messages(messages)

    # Load the messages into the group chat
    for i, message in enumerate(messages):

        if "name" in message:
            message_speaker_agent = self._groupchat.agent_by_name(message["name"])
        else:
            # If there's no name, assign the group chat manager (this is an indication
            # the ChatResult messages was used instead of groupchat.messages as state)
            message_speaker_agent = self
            message["name"] = self.name

        # If it wasn't an agent speaking, it may be the manager
        if not message_speaker_agent and message["name"] == self.name:
            message_speaker_agent = self

        # Add previous messages to each agent (except their own messages and the last
        # message, as we'll kick off the conversation with it)
        if i != len(messages) - 1:
            for agent in self._groupchat.agents:
                if agent.name != message["name"]:
                    # `agent` is already the target; no need to re-look it up by name.
                    self.send(message, agent, request_reply=False, silent=True)

        # Add previous message to the new groupchat, if it's an admin message the name
        # may not match so add the message directly
        if message_speaker_agent:
            self._groupchat.append(message, message_speaker_agent)
        else:
            self._groupchat.messages.append(message)

        # Last speaker agent
        last_speaker_name = message["name"]

        # Last message to check for termination (we could avoid this by ignoring
        # termination check for resume in the future)
        last_message = message

    # Get last speaker as an agent
    previous_last_agent = self._groupchat.agent_by_name(name=last_speaker_name)

    # If we didn't match a last speaker agent, we check that it's the group chat's
    # admin name and assign the manager, if so
    if not previous_last_agent and (
        last_speaker_name == self._groupchat.admin_name or last_speaker_name == self.name
    ):
        previous_last_agent = self

    # Termination removal and check
    self._process_resume_termination(remove_termination_string, messages)

    if not silent:
        iostream = IOStream.get_default()
        iostream.print(
            f"Prepared group chat with {len(messages)} messages, the last speaker is",
            colored(last_speaker_name, "yellow"),
            flush=True,
        )

    # Introductions were already sent in the original chat; don't repeat them on resume.
    self._groupchat.send_introductions = False

    return previous_last_agent, last_message
async def a_resume(
    self,
    messages: Union[List[Dict], str],
    remove_termination_string: Optional[str] = None,
    silent: Optional[bool] = False,
) -> Tuple[ConversableAgent, Dict]:
    """Resumes a group chat using the previous messages as a starting point, asynchronously.

    Requires the agents, group chat, and group chat manager to be established
    as per the original group chat.

    Args:
        messages (Union[List[Dict], str]): The content of the previous chat's messages,
            either as a Json string or a list of message dictionaries.
        remove_termination_string (Optional[str]): Remove the provided string from the
            last message to prevent immediate termination.
        silent (Optional[bool]): (Experimental) whether to print the messages for this
            conversation. Default is False.

    Returns:
        Tuple[ConversableAgent, Dict]: A tuple containing the last agent who spoke and their message.

    Raises:
        Exception: if `messages` is neither a str nor a List[Dict], or if the messages
            fail validation against the configured group chat.
    """

    # Convert messages from string to messages list, if needed
    if isinstance(messages, str):
        messages = self.messages_from_string(messages)
    elif isinstance(messages, list) and all(isinstance(item, dict) for item in messages):
        # Deep copy so resuming never mutates the caller's message history.
        messages = copy.deepcopy(messages)
    else:
        raise Exception("Messages is not of type str or List[Dict]")

    # Clean up the objects, ensuring there are no messages in the agents and group chat

    # Clear agent message history
    for agent in self._groupchat.agents:
        if isinstance(agent, ConversableAgent):
            agent.clear_history()

    # Clear Manager message history
    self.clear_history()

    # Clear GroupChat messages
    self._groupchat.reset()

    # Validation of messages and agents (raises on failure; no need to wrap in
    # a try/except that only re-raises).
    self._valid_resume_messages(messages)

    # Load the messages into the group chat
    for i, message in enumerate(messages):

        if "name" in message:
            message_speaker_agent = self._groupchat.agent_by_name(message["name"])
        else:
            # If there's no name, assign the group chat manager (this is an indication
            # the ChatResult messages was used instead of groupchat.messages as state)
            message_speaker_agent = self
            message["name"] = self.name

        # If it wasn't an agent speaking, it may be the manager
        if not message_speaker_agent and message["name"] == self.name:
            message_speaker_agent = self

        # Add previous messages to each agent (except their own messages and the last
        # message, as we'll kick off the conversation with it)
        if i != len(messages) - 1:
            for agent in self._groupchat.agents:
                if agent.name != message["name"]:
                    # `agent` is already the target; no need to re-look it up by name.
                    await self.a_send(message, agent, request_reply=False, silent=True)

        # Add previous message to the new groupchat, if it's an admin message the name
        # may not match so add the message directly
        if message_speaker_agent:
            self._groupchat.append(message, message_speaker_agent)
        else:
            self._groupchat.messages.append(message)

        # Last speaker agent
        last_speaker_name = message["name"]

        # Last message to check for termination (we could avoid this by ignoring
        # termination check for resume in the future)
        last_message = message

    # Get last speaker as an agent
    previous_last_agent = self._groupchat.agent_by_name(name=last_speaker_name)

    # If we didn't match a last speaker agent, we check that it's the group chat's
    # admin name and assign the manager, if so
    if not previous_last_agent and (
        last_speaker_name == self._groupchat.admin_name or last_speaker_name == self.name
    ):
        previous_last_agent = self

    # Termination removal and check
    self._process_resume_termination(remove_termination_string, messages)

    if not silent:
        iostream = IOStream.get_default()
        iostream.print(
            f"Prepared group chat with {len(messages)} messages, the last speaker is",
            colored(last_speaker_name, "yellow"),
            flush=True,
        )

    # Introductions were already sent in the original chat; don't repeat them on resume.
    self._groupchat.send_introductions = False

    return previous_last_agent, last_message
def _valid_resume_messages(self, messages: List[Dict]):
    """Validates the messages used for resuming.

    Args:
        messages (List[Dict]): list of messages to resume with.

    Raises:
        Exception: if no messages are provided, or if a message names a speaker
            that is not a group chat agent, the group chat's admin, or this manager.
    """
    # Must have messages to start with, otherwise they should run run_chat
    if not messages:
        raise Exception(
            "Cannot resume group chat as no messages were provided. Use GroupChatManager.run_chat or ConversableAgent.initiate_chat to start a new chat."
        )

    # Every named speaker must resolve to a group chat agent, the admin, or the manager.
    for message in messages:
        speaker_name = message.get("name")
        if not speaker_name:
            continue
        speaker_known = (
            self._groupchat.agent_by_name(speaker_name) is not None
            or speaker_name == self._groupchat.admin_name  # ignore group chat's name
            or speaker_name == self.name  # ignore group chat manager's name
        )
        if not speaker_known:
            raise Exception(f"Agent name in message doesn't exist as agent in group chat: {message['name']}")
def _process_resume_termination(self, remove_termination_string: str, messages: List[Dict]):
    """Removes termination string, if required, and checks if termination may occur.

    Args:
        remove_termination_string (str): termination string to remove from the last message.
        messages (List[Dict]): chat messages; only the final message is inspected and
            possibly modified in place.

    Returns:
        None
    """
    last_message = messages[-1]

    # Strip the termination string from the last message so the resumed chat does
    # not immediately terminate (mutates the message dict in place).
    if remove_termination_string:
        content = last_message.get("content")
        if content and remove_termination_string in content:
            last_message["content"] = content.replace(remove_termination_string, "")

    # Warn if the (possibly cleaned) last message still meets termination criteria.
    if self._is_termination_msg and self._is_termination_msg(last_message):
        logger.warning("WARNING: Last message meets termination criteria and this may terminate the chat.")

def messages_from_string(self, message_string: str) -> List[Dict]:
    """Reads the saved state of messages in Json format for resume and returns as a messages list.

    Args:
        message_string: Json string, the saved state.

    Returns:
        List[Dict]: List of messages.

    Raises:
        Exception: if the provided string is not valid JSON.
    """
    try:
        return json.loads(message_string)
    except json.JSONDecodeError:
        raise Exception("Messages string is not a valid JSON string")
def messages_to_string(self, messages: List[Dict]) -> str:
    """Converts the provided messages into a Json string that can be used for resuming the chat.

    The state is made up of a list of messages.

    Args:
        messages (List[Dict]): set of messages to convert to a string.

    Returns:
        str: Json representation of the messages which can be persisted for resuming later.
    """
    # Serialize verbatim; messages_from_string is the inverse of this operation.
    serialized_state = json.dumps(messages)
    return serialized_state
def create_virtual_env(dir_path: str, **env_args) -> SimpleNamespace:
    """Creates a python virtual environment and returns the context.

    Args:
        dir_path (str): Directory path where the env will be created.
        **env_args: Any extra args to pass to the `EnvBuilder`.

    Returns:
        SimpleNamespace: the virtual env context object.
    """
    # NOTE(review): passing ANY env_args suppresses the implicit with_pip=True
    # default — callers customizing the builder must request pip explicitly.
    # Preserved as-is; confirm this is the intended contract.
    builder_kwargs = env_args if env_args else {"with_pip": True}
    builder = venv.EnvBuilder(**builder_kwargs)
    builder.create(dir_path)
    # ensure_directories returns the context (env_dir, env_exe, bin_path, ...).
    return builder.ensure_directories(dir_path)
+ virtual_env_context (Optional[SimpleNamespace]): The virtual environment context to use. work_dir (Union[Path, str]): The working directory for code execution, defaults to the current directory. functions (List[Union[FunctionWithRequirements[Any, A], Callable[..., Any], FunctionWithRequirementsStr]]): A list of callable functions available to the executor. functions_module (str): The module name under which functions are accessible. @@ -105,6 +122,7 @@ def __init__( self._timeout = timeout self._work_dir: Path = work_dir + self._virtual_env_context: Optional[SimpleNamespace] = virtual_env_context self._functions = functions # Setup could take some time so we intentionally wait for the first code block to do it. @@ -196,7 +214,11 @@ def _setup_functions(self) -> None: required_packages = list(set(flattened_packages)) if len(required_packages) > 0: logging.info("Ensuring packages are installed in executor.") - cmd = [sys.executable, "-m", "pip", "install"] + required_packages + if self._virtual_env_context: + py_executable = self._virtual_env_context.env_exe + else: + py_executable = sys.executable + cmd = [py_executable, "-m", "pip", "install"] + required_packages try: result = subprocess.run( cmd, cwd=self._work_dir, capture_output=True, text=True, timeout=float(self._timeout) @@ -269,9 +291,18 @@ def _execute_code_dont_check_setup(self, code_blocks: List[CodeBlock]) -> Comman program = _cmd(lang) cmd = [program, str(written_file.absolute())] + env = os.environ.copy() + + if self._virtual_env_context: + path_with_virtualenv = rf"{self._virtual_env_context.bin_path}{os.pathsep}{env['PATH']}" + env["PATH"] = path_with_virtualenv + if WIN32: + activation_script = os.path.join(self._virtual_env_context.bin_path, "activate.bat") + cmd = [activation_script, "&&", *cmd] + try: result = subprocess.run( - cmd, cwd=self._work_dir, capture_output=True, text=True, timeout=float(self._timeout) + cmd, cwd=self._work_dir, capture_output=True, text=True, 
timeout=float(self._timeout), env=env ) except subprocess.TimeoutExpired: logs_all += "\n" + TIMEOUT_MSG diff --git a/autogen/graph_utils.py b/autogen/graph_utils.py index 88c218fde5e..d36b47a12ed 100644 --- a/autogen/graph_utils.py +++ b/autogen/graph_utils.py @@ -1,5 +1,5 @@ import logging -from typing import Dict, List +from typing import Dict, List, Optional from autogen.agentchat import Agent @@ -110,7 +110,9 @@ def invert_disallowed_to_allowed(disallowed_speaker_transitions_dict: dict, agen return allowed_speaker_transitions_dict -def visualize_speaker_transitions_dict(speaker_transitions_dict: dict, agents: List[Agent]): +def visualize_speaker_transitions_dict( + speaker_transitions_dict: dict, agents: List[Agent], export_path: Optional[str] = None +): """ Visualize the speaker_transitions_dict using networkx. """ @@ -133,4 +135,8 @@ def visualize_speaker_transitions_dict(speaker_transitions_dict: dict, agents: L # Visualize nx.draw(G, with_labels=True, font_weight="bold") - plt.show() + + if export_path is not None: + plt.savefig(export_path) + else: + plt.show() diff --git a/dotnet/AutoGen.sln b/dotnet/AutoGen.sln index 3841b9acf7b..b46b8091cf5 100644 --- a/dotnet/AutoGen.sln +++ b/dotnet/AutoGen.sln @@ -29,9 +29,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Core", "src\AutoGen EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.OpenAI", "src\AutoGen.OpenAI\AutoGen.OpenAI.csproj", "{63445BB7-DBB9-4AEF-9D6F-98BBE75EE1EC}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutoGen.Mistral", "src\AutoGen.Mistral\AutoGen.Mistral.csproj", "{6585D1A4-3D97-4D76-A688-1933B61AEB19}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Mistral", "src\AutoGen.Mistral\AutoGen.Mistral.csproj", "{6585D1A4-3D97-4D76-A688-1933B61AEB19}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutoGen.Mistral.Tests", "test\AutoGen.Mistral.Tests\AutoGen.Mistral.Tests.csproj", 
"{15441693-3659-4868-B6C1-B106F52FF3BA}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Mistral.Tests", "test\AutoGen.Mistral.Tests\AutoGen.Mistral.Tests.csproj", "{15441693-3659-4868-B6C1-B106F52FF3BA}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutoGen.SemanticKernel.Tests", "test\AutoGen.SemanticKernel.Tests\AutoGen.SemanticKernel.Tests.csproj", "{1DFABC4A-8458-4875-8DCB-59F3802DAC65}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -87,6 +89,10 @@ Global {15441693-3659-4868-B6C1-B106F52FF3BA}.Debug|Any CPU.Build.0 = Debug|Any CPU {15441693-3659-4868-B6C1-B106F52FF3BA}.Release|Any CPU.ActiveCfg = Release|Any CPU {15441693-3659-4868-B6C1-B106F52FF3BA}.Release|Any CPU.Build.0 = Release|Any CPU + {1DFABC4A-8458-4875-8DCB-59F3802DAC65}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1DFABC4A-8458-4875-8DCB-59F3802DAC65}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1DFABC4A-8458-4875-8DCB-59F3802DAC65}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1DFABC4A-8458-4875-8DCB-59F3802DAC65}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -104,6 +110,7 @@ Global {63445BB7-DBB9-4AEF-9D6F-98BBE75EE1EC} = {18BF8DD7-0585-48BF-8F97-AD333080CE06} {6585D1A4-3D97-4D76-A688-1933B61AEB19} = {18BF8DD7-0585-48BF-8F97-AD333080CE06} {15441693-3659-4868-B6C1-B106F52FF3BA} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64} + {1DFABC4A-8458-4875-8DCB-59F3802DAC65} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {93384647-528D-46C8-922C-8DB36A382F0B} diff --git a/dotnet/eng/MetaInfo.props b/dotnet/eng/MetaInfo.props index 4c354d8fee2..8aff3c60226 100644 --- a/dotnet/eng/MetaInfo.props +++ b/dotnet/eng/MetaInfo.props @@ -1,7 +1,7 @@ - 0.0.12 + 0.0.13 AutoGen https://microsoft.github.io/autogen-for-net/ https://github.com/microsoft/autogen diff --git 
a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs index 8be026552e3..320afd0de67 100644 --- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs +++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MiddlewareAgentCodeSnippet.cs @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // MiddlewareAgentCodeSnippet.cs -using AutoGen.Core; using System.Text.Json; +using AutoGen.Core; using AutoGen.OpenAI; using FluentAssertions; diff --git a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MistralAICodeSnippet.cs b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MistralAICodeSnippet.cs index cd49810dc6c..0ce1d840d36 100644 --- a/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MistralAICodeSnippet.cs +++ b/dotnet/sample/AutoGen.BasicSamples/CodeSnippet/MistralAICodeSnippet.cs @@ -2,8 +2,8 @@ // MistralAICodeSnippet.cs #region using_statement -using AutoGen.Mistral; using AutoGen.Core; +using AutoGen.Mistral; using AutoGen.Mistral.Extension; using FluentAssertions; #endregion using_statement @@ -83,4 +83,4 @@ public async Task MistralAIChatAgentGetWeatherToolUsageAsync() reply.GetContent().Should().Be("The weather in Seattle is sunny."); #endregion send_message_with_function_call } -} \ No newline at end of file +} diff --git a/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs b/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs index 8797bda8313..3ee363bfc06 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example01_AssistantAgent.cs @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// Example01_AssistantAgent.cs -using AutoGen.Core; using AutoGen; using AutoGen.BasicSample; +using AutoGen.Core; using FluentAssertions; /// diff --git a/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs b/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs index f20b0848a3e..c2957f32da7 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example02_TwoAgent_MathChat.cs @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Example02_TwoAgent_MathChat.cs -using AutoGen.Core; using AutoGen; using AutoGen.BasicSample; +using AutoGen.Core; using FluentAssertions; public static class Example02_TwoAgent_MathChat { diff --git a/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs b/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs index bfb8d71095b..57b9ea76dcb 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example03_Agent_FunctionCall.cs @@ -2,8 +2,8 @@ // Example03_Agent_FunctionCall.cs using AutoGen; -using AutoGen.Core; using AutoGen.BasicSample; +using AutoGen.Core; using FluentAssertions; /// diff --git a/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs b/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs index d9489e522e6..c5d9a01f971 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example04_Dynamic_GroupChat_Coding_Task.cs @@ -2,8 +2,8 @@ // Example04_Dynamic_GroupChat_Coding_Task.cs using AutoGen; -using AutoGen.Core; using AutoGen.BasicSample; +using AutoGen.Core; using AutoGen.DotnetInteractive; using AutoGen.OpenAI; using FluentAssertions; diff --git a/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs 
b/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs index 89e6f45f898..6584baa5fae 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example07_Dynamic_GroupChat_Calculate_Fibonacci.cs @@ -5,8 +5,8 @@ using System.Text.Json; using AutoGen; using AutoGen.BasicSample; -using AutoGen.DotnetInteractive; using AutoGen.Core; +using AutoGen.DotnetInteractive; using AutoGen.OpenAI; using FluentAssertions; diff --git a/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs b/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs index 8b20dbf33a8..4c8794de961 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs @@ -62,4 +62,4 @@ public static async Task RunAsync() tokenCounterMiddleware.GetCompletionTokenCount().Should().BeGreaterThan(0); #endregion chat_with_agent } -} \ No newline at end of file +} diff --git a/dotnet/sample/AutoGen.BasicSamples/Example15_GPT4V_BinaryDataImageMessage.cs b/dotnet/sample/AutoGen.BasicSamples/Example15_GPT4V_BinaryDataImageMessage.cs index 7a3422cb863..f376342ed85 100644 --- a/dotnet/sample/AutoGen.BasicSamples/Example15_GPT4V_BinaryDataImageMessage.cs +++ b/dotnet/sample/AutoGen.BasicSamples/Example15_GPT4V_BinaryDataImageMessage.cs @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
-// Example15_ImageMessage.cs +// Example15_GPT4V_BinaryDataImageMessage.cs using AutoGen.Core; using AutoGen.OpenAI; diff --git a/dotnet/src/AutoGen.Mistral/DTOs/ChatCompletionResponse.cs b/dotnet/src/AutoGen.Mistral/DTOs/ChatCompletionResponse.cs index 13e29e7139b..ff241f8d340 100644 --- a/dotnet/src/AutoGen.Mistral/DTOs/ChatCompletionResponse.cs +++ b/dotnet/src/AutoGen.Mistral/DTOs/ChatCompletionResponse.cs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. All rights reserved. // ChatCompletionResponse.cs using System.Collections.Generic; diff --git a/dotnet/src/AutoGen.Mistral/DTOs/Error.cs b/dotnet/src/AutoGen.Mistral/DTOs/Error.cs index 8bddcfc776c..77eb2d341fb 100644 --- a/dotnet/src/AutoGen.Mistral/DTOs/Error.cs +++ b/dotnet/src/AutoGen.Mistral/DTOs/Error.cs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. All rights reserved. // Error.cs using System.Text.Json.Serialization; diff --git a/dotnet/src/AutoGen.Mistral/DTOs/Model.cs b/dotnet/src/AutoGen.Mistral/DTOs/Model.cs index 70a4b3c997d..915d2f737ec 100644 --- a/dotnet/src/AutoGen.Mistral/DTOs/Model.cs +++ b/dotnet/src/AutoGen.Mistral/DTOs/Model.cs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. All rights reserved. 
// Model.cs using System; diff --git a/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj b/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj index be2fa0a574b..3bd96f93b68 100644 --- a/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj +++ b/dotnet/src/AutoGen.SemanticKernel/AutoGen.SemanticKernel.csproj @@ -3,9 +3,6 @@ netstandard2.0 AutoGen.SemanticKernel - - - $(NoWarn);SKEXP0110 diff --git a/dotnet/src/AutoGen.SemanticKernel/Extension/KernelExtension.cs b/dotnet/src/AutoGen.SemanticKernel/Extension/KernelExtension.cs index f1589ab09e6..8eb11934da3 100644 --- a/dotnet/src/AutoGen.SemanticKernel/Extension/KernelExtension.cs +++ b/dotnet/src/AutoGen.SemanticKernel/Extension/KernelExtension.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // KernelExtension.cs +using System.Linq; using Microsoft.SemanticKernel; namespace AutoGen.SemanticKernel.Extension; @@ -11,4 +12,37 @@ public static SemanticKernelAgent ToSemanticKernelAgent(this Kernel kernel, stri { return new SemanticKernelAgent(kernel, name, systemMessage, settings); } + + /// + /// Convert a to a + /// + /// kernel function metadata + public static FunctionContract ToFunctionContract(this KernelFunctionMetadata metadata) + { + return new FunctionContract() + { + Name = metadata.Name, + Description = metadata.Description, + Parameters = metadata.Parameters.Select(p => p.ToFunctionParameterContract()).ToList(), + ReturnType = metadata.ReturnParameter.ParameterType, + ReturnDescription = metadata.ReturnParameter.Description, + ClassName = metadata.PluginName, + }; + } + + /// + /// Convert a to a + /// + /// kernel parameter metadata + public static FunctionParameterContract ToFunctionParameterContract(this KernelParameterMetadata metadata) + { + return new FunctionParameterContract() + { + Name = metadata.Name, + Description = metadata.Description, + DefaultValue = metadata.DefaultValue, + IsRequired = metadata.IsRequired, + ParameterType 
= metadata.ParameterType, + }; + } } diff --git a/dotnet/src/AutoGen.SemanticKernel/Middleware/KernelPluginMiddleware.cs b/dotnet/src/AutoGen.SemanticKernel/Middleware/KernelPluginMiddleware.cs new file mode 100644 index 00000000000..628915a0302 --- /dev/null +++ b/dotnet/src/AutoGen.SemanticKernel/Middleware/KernelPluginMiddleware.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// KernelPluginMiddleware.cs + +using System; +using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using AutoGen.SemanticKernel.Extension; +using Microsoft.SemanticKernel; + +namespace AutoGen.SemanticKernel; + +/// +/// A middleware that consumes +/// +public class KernelPluginMiddleware : IMiddleware +{ + private readonly KernelPlugin _kernelPlugin; + private readonly FunctionCallMiddleware _functionCallMiddleware; + public string? Name => nameof(KernelPluginMiddleware); + + public KernelPluginMiddleware(Kernel kernel, KernelPlugin kernelPlugin) + { + _kernelPlugin = kernelPlugin; + var functionContracts = kernelPlugin.Select(k => k.Metadata.ToFunctionContract()); + var functionMap = kernelPlugin.ToDictionary(kv => kv.Metadata.Name, kv => InvokeFunctionPartial(kernel, kv)); + _functionCallMiddleware = new FunctionCallMiddleware(functionContracts, functionMap, Name); + } + + public Task InvokeAsync(MiddlewareContext context, IAgent agent, CancellationToken cancellationToken = default) + { + return _functionCallMiddleware.InvokeAsync(context, agent, cancellationToken); + } + + private async Task InvokeFunctionAsync(Kernel kernel, KernelFunction function, string arguments) + { + var kernelArguments = new KernelArguments(); + var parameters = function.Metadata.Parameters; + var jsonObject = JsonSerializer.Deserialize(arguments) ?? 
new JsonObject(); + foreach (var parameter in parameters) + { + var parameterName = parameter.Name; + if (jsonObject.ContainsKey(parameterName)) + { + var parameterType = parameter.ParameterType ?? throw new ArgumentException($"Missing parameter type for {parameterName}"); + var parameterValue = jsonObject[parameterName]; + var parameterObject = parameterValue.Deserialize(parameterType); + kernelArguments.Add(parameterName, parameterObject); + } + else + { + if (parameter.DefaultValue != null) + { + kernelArguments.Add(parameterName, parameter.DefaultValue); + } + else if (parameter.IsRequired) + { + throw new ArgumentException($"Missing required parameter: {parameterName}"); + } + } + } + var result = await function.InvokeAsync(kernel, kernelArguments); + + return result.ToString(); + } + + private Func> InvokeFunctionPartial(Kernel kernel, KernelFunction function) + { + return async (string args) => + { + var result = await InvokeFunctionAsync(kernel, function, args); + return result.ToString(); + }; + } +} diff --git a/dotnet/test/AutoGen.Mistral.Tests/MistralClientAgentTests.cs b/dotnet/test/AutoGen.Mistral.Tests/MistralClientAgentTests.cs index bcd5f1309fa..2b6839dd0ef 100644 --- a/dotnet/test/AutoGen.Mistral.Tests/MistralClientAgentTests.cs +++ b/dotnet/test/AutoGen.Mistral.Tests/MistralClientAgentTests.cs @@ -92,7 +92,7 @@ public async Task MistralAgentFunctionCallMessageTest() new TextMessage(Role.User, "what's the weather in Seattle?"), new ToolCallMessage(this.GetWeatherFunctionContract.Name!, weatherFunctionArgumets, from: agent.Name), new ToolCallResultMessage(functionCallResult, this.GetWeatherFunctionContract.Name!, weatherFunctionArgumets), - ]; + ]; var reply = await agent.SendAsync(chatHistory: chatHistory); diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromMethod.approved.txt 
b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromMethod.approved.txt new file mode 100644 index 00000000000..677831d412b --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromMethod.approved.txt @@ -0,0 +1,24 @@ +[ + { + "Name": "_ItCreateFunctionContractsFromMethod_b__2_0", + "Description": "", + "Parameters": [], + "ReturnType": "System.String, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "ReturnDescription": "" + }, + { + "Name": "_ItCreateFunctionContractsFromMethod_b__2_1", + "Description": "", + "Parameters": [ + { + "Name": "message", + "Description": "", + "ParameterType": "System.String, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "IsRequired": true, + "DefaultValue": "" + } + ], + "ReturnType": "System.String, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "ReturnDescription": "" + } +] \ No newline at end of file diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromPrompt.approved.txt b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromPrompt.approved.txt new file mode 100644 index 00000000000..428f53572f1 --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromPrompt.approved.txt @@ -0,0 +1,8 @@ +[ + { + "Name": "sayHello", + "Description": "Generic function, unknown purpose", + "Parameters": [], + "ReturnDescription": "" + } +] \ No newline at end of file diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromTestPlugin.approved.txt 
b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromTestPlugin.approved.txt new file mode 100644 index 00000000000..ee835b1ba08 --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/ApprovalTests/KernelFunctionExtensionTests.ItCreateFunctionContractsFromTestPlugin.approved.txt @@ -0,0 +1,26 @@ +[ + { + "ClassName": "test_plugin", + "Name": "GetState", + "Description": "Gets the state of the light.", + "Parameters": [], + "ReturnType": "System.String, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "ReturnDescription": "" + }, + { + "ClassName": "test_plugin", + "Name": "ChangeState", + "Description": "Changes the state of the light.'", + "Parameters": [ + { + "Name": "newState", + "Description": "new state", + "ParameterType": "System.Boolean, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "IsRequired": true, + "DefaultValue": "" + } + ], + "ReturnType": "System.String, System.Private.CoreLib, Version=8.0.0.0, Culture=neutral, PublicKeyToken=7cec85d7bea7798e", + "ReturnDescription": "" + } +] \ No newline at end of file diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/AutoGen.SemanticKernel.Tests.csproj b/dotnet/test/AutoGen.SemanticKernel.Tests/AutoGen.SemanticKernel.Tests.csproj new file mode 100644 index 00000000000..b6d03ddc4af --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/AutoGen.SemanticKernel.Tests.csproj @@ -0,0 +1,27 @@ + + + + $(TestTargetFramework) + enable + false + $(NoWarn);SKEXP0110 + True + + + + + + + + + + + + + + + + + + + diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionExtensionTests.cs b/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionExtensionTests.cs new file mode 100644 index 00000000000..c898c98b3c0 --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionExtensionTests.cs @@ -0,0 +1,104 @@ +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// KernelFunctionExtensionTests.cs + +using System.ComponentModel; +using ApprovalTests; +using ApprovalTests.Namers; +using ApprovalTests.Reporters; +using AutoGen.SemanticKernel.Extension; +using FluentAssertions; +using Microsoft.SemanticKernel; +using Newtonsoft.Json; +using Xunit; + +namespace AutoGen.SemanticKernel.Tests; + +public class TestPlugin +{ + public bool IsOn { get; set; } = false; + + [KernelFunction] + [Description("Gets the state of the light.")] + public string GetState() => this.IsOn ? "on" : "off"; + + [KernelFunction] + [Description("Changes the state of the light.'")] + public string ChangeState( + [Description("new state")] bool newState) + { + this.IsOn = newState; + var state = this.GetState(); + + // Print the state to the console + Console.ForegroundColor = ConsoleColor.DarkBlue; + Console.WriteLine($"[Light is now {state}]"); + Console.ResetColor(); + + return $"The status of the light is now {state}"; + } +} +public class KernelFunctionExtensionTests +{ + private readonly JsonSerializerSettings _serializerSettings = new JsonSerializerSettings + { + Formatting = Formatting.Indented, + NullValueHandling = NullValueHandling.Ignore, + StringEscapeHandling = StringEscapeHandling.Default, + }; + + [Fact] + [UseReporter(typeof(DiffReporter))] + [UseApprovalSubdirectory("ApprovalTests")] + public void ItCreateFunctionContractsFromTestPlugin() + { + var kernel = new Kernel(); + var plugin = kernel.ImportPluginFromType("test_plugin"); + + var functionContracts = plugin.Select(f => f.Metadata.ToFunctionContract()).ToList(); + + functionContracts.Count.Should().Be(2); + var json = JsonConvert.SerializeObject(functionContracts, _serializerSettings); + + Approvals.Verify(json); + } + + [Fact] + [UseReporter(typeof(DiffReporter))] + [UseApprovalSubdirectory("ApprovalTests")] + public void ItCreateFunctionContractsFromMethod() + { + var kernel = new Kernel(); + var sayHelloFunction = 
KernelFunctionFactory.CreateFromMethod(() => "Hello, World!"); + var echoFunction = KernelFunctionFactory.CreateFromMethod((string message) => message); + + var functionContracts = new[] + { + sayHelloFunction.Metadata.ToFunctionContract(), + echoFunction.Metadata.ToFunctionContract(), + }; + + var json = JsonConvert.SerializeObject(functionContracts, _serializerSettings); + + functionContracts.Length.Should().Be(2); + Approvals.Verify(json); + } + + [Fact] + [UseReporter(typeof(DiffReporter))] + [UseApprovalSubdirectory("ApprovalTests")] + public void ItCreateFunctionContractsFromPrompt() + { + var kernel = new Kernel(); + var sayHelloFunction = KernelFunctionFactory.CreateFromPrompt("Say {{hello}}, World!", functionName: "sayHello"); + + var functionContracts = new[] + { + sayHelloFunction.Metadata.ToFunctionContract(), + }; + + var json = JsonConvert.SerializeObject(functionContracts, _serializerSettings); + + functionContracts.Length.Should().Be(1); + Approvals.Verify(json); + } +} diff --git a/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionMiddlewareTests.cs b/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionMiddlewareTests.cs new file mode 100644 index 00000000000..f560419e8c8 --- /dev/null +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/KernelFunctionMiddlewareTests.cs @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// KernelFunctionMiddlewareTests.cs + +using AutoGen.Core; +using AutoGen.OpenAI; +using AutoGen.OpenAI.Extension; +using AutoGen.Tests; +using Azure.AI.OpenAI; +using FluentAssertions; +using Microsoft.SemanticKernel; + +namespace AutoGen.SemanticKernel.Tests; + +public class KernelFunctionMiddlewareTests +{ + [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT")] + public async Task ItRegisterKernelFunctionMiddlewareFromTestPluginTests() + { + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable."); + var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new Exception("Please set AZURE_OPENAI_API_KEY environment variable."); + var openaiClient = new OpenAIClient(new Uri(endpoint), new Azure.AzureKeyCredential(key)); + + var kernel = new Kernel(); + var plugin = kernel.ImportPluginFromType(); + var kernelFunctionMiddleware = new KernelPluginMiddleware(kernel, plugin); + + var agent = new OpenAIChatAgent(openaiClient, "assistant", modelName: "gpt-35-turbo-16k") + .RegisterMessageConnector() + .RegisterMiddleware(kernelFunctionMiddleware); + + var reply = await agent.SendAsync("what's the status of the light?"); + reply.GetContent().Should().Be("off"); + reply.Should().BeOfType>(); + if (reply is AggregateMessage aggregateMessage) + { + var toolCallMessage = aggregateMessage.Message1; + toolCallMessage.ToolCalls.Should().HaveCount(1); + toolCallMessage.ToolCalls[0].FunctionName.Should().Be("GetState"); + + var toolCallResultMessage = aggregateMessage.Message2; + toolCallResultMessage.ToolCalls.Should().HaveCount(1); + toolCallResultMessage.ToolCalls[0].Result.Should().Be("off"); + } + + reply = await agent.SendAsync("change the status of the light to on"); + reply.GetContent().Should().Be("The status of the light is now on"); + reply.Should().BeOfType>(); + if (reply is AggregateMessage aggregateMessage1) + { + var toolCallMessage = aggregateMessage1.Message1; + toolCallMessage.ToolCalls.Should().HaveCount(1); + toolCallMessage.ToolCalls[0].FunctionName.Should().Be("ChangeState"); + + var toolCallResultMessage = aggregateMessage1.Message2; + toolCallResultMessage.ToolCalls.Should().HaveCount(1); + } + } + + [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT")] + public async Task ItRegisterKernelFunctionMiddlewareFromMethodTests() + { + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable."); + var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new Exception("Please set AZURE_OPENAI_API_KEY environment variable."); + var openaiClient = new OpenAIClient(new Uri(endpoint), new Azure.AzureKeyCredential(key)); + + var kernel = new Kernel(); + var getWeatherMethod = kernel.CreateFunctionFromMethod((string location) => $"The weather in {location} is sunny.", functionName: "GetWeather", description: "Get the weather for a location."); + var createPersonObjectMethod = kernel.CreateFunctionFromMethod((string name, string email, int age) => new Person(name, email, age), functionName: "CreatePersonObject", description: "Creates a person object."); + var plugin = kernel.ImportPluginFromFunctions("plugin", [getWeatherMethod, createPersonObjectMethod]); + var kernelFunctionMiddleware = new KernelPluginMiddleware(kernel, plugin); + + var agent = new OpenAIChatAgent(openaiClient, "assistant", modelName: "gpt-35-turbo-16k") + .RegisterMessageConnector() + .RegisterMiddleware(kernelFunctionMiddleware); + + var reply = await agent.SendAsync("what's the weather in Seattle?"); + reply.GetContent().Should().Be("The weather in Seattle is sunny."); + reply.Should().BeOfType>(); + if (reply is AggregateMessage getWeatherMessage) + { + var toolCallMessage = getWeatherMessage.Message1; + toolCallMessage.ToolCalls.Should().HaveCount(1); + toolCallMessage.ToolCalls[0].FunctionName.Should().Be("GetWeather"); + + var toolCallResultMessage = getWeatherMessage.Message2; + toolCallResultMessage.ToolCalls.Should().HaveCount(1); + } + + reply = await agent.SendAsync("Create a person object with name: John, email: 12345@gmail.com, age: 30"); + reply.GetContent().Should().Be("Name: John, Email: 12345@gmail.com, Age: 30"); + reply.Should().BeOfType>(); + if (reply is AggregateMessage createPersonObjectMessage) + { + var toolCallMessage = createPersonObjectMessage.Message1; + 
toolCallMessage.ToolCalls.Should().HaveCount(1); + toolCallMessage.ToolCalls[0].FunctionName.Should().Be("CreatePersonObject"); + + var toolCallResultMessage = createPersonObjectMessage.Message2; + toolCallResultMessage.ToolCalls.Should().HaveCount(1); + } + } +} + +public class Person +{ + public Person(string name, string email, int age) + { + this.Name = name; + this.Email = email; + this.Age = age; + } + + public string Name { get; set; } + public string Email { get; set; } + public int Age { get; set; } + + public override string ToString() + { + return $"Name: {this.Name}, Email: {this.Email}, Age: {this.Age}"; + } +} diff --git a/dotnet/test/AutoGen.Tests/SemanticKernelAgentTest.cs b/dotnet/test/AutoGen.SemanticKernel.Tests/SemanticKernelAgentTest.cs similarity index 98% rename from dotnet/test/AutoGen.Tests/SemanticKernelAgentTest.cs rename to dotnet/test/AutoGen.SemanticKernel.Tests/SemanticKernelAgentTest.cs index 0fcf5a6abe6..14c27cb48a7 100644 --- a/dotnet/test/AutoGen.Tests/SemanticKernelAgentTest.cs +++ b/dotnet/test/AutoGen.SemanticKernel.Tests/SemanticKernelAgentTest.cs @@ -1,18 +1,16 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // SemanticKernelAgentTest.cs -using System; -using System.Linq; -using System.Threading.Tasks; -using AutoGen.SemanticKernel; +using AutoGen.Core; using AutoGen.SemanticKernel.Extension; +using AutoGen.Tests; using FluentAssertions; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel.Connectors.OpenAI; -namespace AutoGen.Tests; +namespace AutoGen.SemanticKernel.Tests; public partial class SemanticKernelAgentTest { diff --git a/dotnet/test/AutoGen.Tests/GlobalUsing.cs b/dotnet/test/AutoGen.Tests/GlobalUsing.cs index d00ae3ce4fc..d66bf001ed5 100644 --- a/dotnet/test/AutoGen.Tests/GlobalUsing.cs +++ b/dotnet/test/AutoGen.Tests/GlobalUsing.cs @@ -1,4 +1,4 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. -// globalUsing.cs +// GlobalUsing.cs global using AutoGen.Core; diff --git a/dotnet/website/articles/Installation.md b/dotnet/website/articles/Installation.md index 59699a957d6..a31998e5d93 100644 --- a/dotnet/website/articles/Installation.md +++ b/dotnet/website/articles/Installation.md @@ -4,6 +4,12 @@ AutoGen.Net provides the following packages, you can choose to install one or more of them based on your needs: +> [!Note] +> The `AutoGen.DotnetInteractive` has a dependency on `Microsoft.DotNet.Interactive.VisualStudio` which is not available on nuget.org. To restore the dependency, you need to add the following package source to your project: +> ```bash +> https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json +> ``` + - `AutoGen`: The one-in-all package. This package has dependencies over `AutoGen.Core`, `AutoGen.OpenAI`, `AutoGen.LMStudio`, `AutoGen.SemanticKernel` and `AutoGen.SourceGenerator`. - `AutoGen.Core`: The core package, this package provides the abstraction for message type, agent and group chat. - `AutoGen.OpenAI`: This package provides the integration agents over openai models. diff --git a/dotnet/website/articles/MistralChatAgent-count-token-usage.md b/dotnet/website/articles/MistralChatAgent-count-token-usage.md index b7f025aa11d..261845cf615 100644 --- a/dotnet/website/articles/MistralChatAgent-count-token-usage.md +++ b/dotnet/website/articles/MistralChatAgent-count-token-usage.md @@ -4,7 +4,7 @@ The following example shows how to create a `MistralAITokenCounterMiddleware` @A To collect the token usage for the entire chat session, one easy solution is simply collect all the responses from agent and sum up the token usage for each response. To collect all the agent responses, we can create a middleware which simply saves all responses to a list and register it with the agent. 
To get the token usage information for each response, because in the example we are using @AutoGen.Mistral.MistralClientAgent, we can simply get the token usage from the response object. > [!NOTE] -> You can find the complete example in the [Example13_OpenAIAgent_JsonMode](https://github.com/microsoft/autogen/tree/dotnet/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs). +> You can find the complete example in the [Example13_OpenAIAgent_JsonMode](https://github.com/microsoft/autogen/tree/main/dotnet/sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs). - Step 1: Adding using statement [!code-csharp[](../../sample/AutoGen.BasicSamples/Example14_MistralClientAgent_TokenCount.cs?name=using_statements)] diff --git a/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md b/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md index 2072c0f99a8..8321fc87a5c 100644 --- a/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md +++ b/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md @@ -1,7 +1,7 @@ The following example shows how to connect to third-party OpenAI API using @AutoGen.OpenAI.OpenAIChatAgent. > [!NOTE] -> You can find the complete code of this example in [Example16_OpenAIChatAgent_ConnectToThirdPartyBackend](https://github.com/microsoft/autogen/tree/dotnet/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs). +> You can find the complete code of this example in [Example16_OpenAIChatAgent_ConnectToThirdPartyBackend](https://github.com/microsoft/autogen/tree/main/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs). ## Overview A lot of LLM applications/platforms support spinning up a chat server that is compatible with OpenAI API, such as LM Studio, Ollama, Mistral etc. This means that you can connect to these servers using the @AutoGen.OpenAI.OpenAIChatAgent. 
diff --git a/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md b/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md index 4d69340f585..a822cb04633 100644 --- a/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md +++ b/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md @@ -9,7 +9,7 @@ JSON mode is a new feature in OpenAI which allows you to instruct model to alway ## How to enable JSON mode in OpenAIChatAgent. > [!NOTE] -> You can find the complete example in the [Example13_OpenAIAgent_JsonMode](https://github.com/microsoft/autogen/tree/dotnet/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs). +> You can find the complete example in the [Example13_OpenAIAgent_JsonMode](https://github.com/microsoft/autogen/tree/main/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs). To enable JSON mode for @AutoGen.OpenAI.OpenAIChatAgent, set `responseFormat` to `ChatCompletionsResponseFormat.JsonObject` when creating the agent. Note that when enabling JSON mode, you also need to instruct the agent to output JSON format in its system message. diff --git a/dotnet/website/articles/Run-dotnet-code.md b/dotnet/website/articles/Run-dotnet-code.md index e3d8fa78a0b..2cc247d37e3 100644 --- a/dotnet/website/articles/Run-dotnet-code.md +++ b/dotnet/website/articles/Run-dotnet-code.md @@ -19,6 +19,12 @@ For example, in data analysis scenario, agent can resolve tasks like "What is th ## How to run dotnet code snippet? The built-in feature of running dotnet code snippet is provided by [dotnet-interactive](https://github.com/dotnet/interactive). To run dotnet code snippet, you need to install the following package to your project, which provides the intergraion with dotnet-interactive: +> [!Note] +> The `AutoGen.DotnetInteractive` has a dependency on `Microsoft.DotNet.Interactive.VisualStudio` which is not available on nuget.org. 
To restore the dependency, you need to add the following package source to your project: +> ```bash +> https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json +> ``` + ```xml ``` diff --git a/dotnet/website/update.md b/dotnet/website/update.md index 3d905c0ab11..5b18a3f504b 100644 --- a/dotnet/website/update.md +++ b/dotnet/website/update.md @@ -1,8 +1,16 @@ -##### Update -- [API Breaking Change] Update the return type of `IStreamingAgent.GenerateStreamingReplyAsync` from `Task>` to `IAsyncEnumerable` -- [API Breaking Change] Update the return type of `IStreamingMiddleware.InvokeAsync` from `Task>` to `IAsyncEnumerable` -- [API Breaking Change] Mark `RegisterReply`, `RegisterPreProcess` and `RegisterPostProcess` as obsolete. You can replace them with `RegisterMiddleware` -- Fix [Issue 2609](https://github.com/microsoft/autogen/issues/2609) +##### Update on 0.0.13 (2024-05-09) +###### New features +- [Issue 2593](https://github.com/microsoft/autogen/issues/2593) Consume SK plugins in Agent. +- [Issue 1893](https://github.com/microsoft/autogen/issues/1893) Support inline-data in ImageMessage +- [Issue 2481](https://github.com/microsoft/autogen/issues/2481) Introduce `ChatCompletionAgent` to `AutoGen.SemanticKernel` +###### API Breaking Changes +- [Issue 2470](https://github.com/microsoft/autogen/issues/2470) Update the return type of `IStreamingAgent.GenerateStreamingReplyAsync` from `Task>` to `IAsyncEnumerable` +- [Issue 2470](https://github.com/microsoft/autogen/issues/2470) Update the return type of `IStreamingMiddleware.InvokeAsync` from `Task>` to `IAsyncEnumerable` +- Mark `RegisterReply`, `RegisterPreProcess` and `RegisterPostProcess` as obsolete. 
You can replace them with `RegisterMiddleware` + +###### Bug Fixes +- Fix [Issue 2609](https://github.com/microsoft/autogen/issues/2609) Constructor of conversableAgentConfig does not accept LMStudioConfig as ConfigList + ##### Update on 0.0.12 (2024-04-22) - Add AutoGen.Mistral package to support Mistral.AI models ##### Update on 0.0.11 (2024-04-10) diff --git a/notebook/agentchat_agentoptimizer.ipynb b/notebook/agentchat_agentoptimizer.ipynb index 13df0a0d77a..7177703ab06 100644 --- a/notebook/agentchat_agentoptimizer.ipynb +++ b/notebook/agentchat_agentoptimizer.ipynb @@ -53,7 +53,7 @@ "source": [ "# MathUserProxy with function_call\n", "\n", - "This agent is a customized MathUserProxy inherits from its [partent class](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/math_user_proxy_agent.py).\n", + "This agent is a customized MathUserProxy inherits from its [parent class](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/math_user_proxy_agent.py).\n", "\n", "It supports using both function_call and python to solve math problems.\n" ] diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py index a4689bd539f..8dc3dc77746 100755 --- a/test/agentchat/test_groupchat.py +++ b/test/agentchat/test_groupchat.py @@ -1,14 +1,16 @@ #!/usr/bin/env python3 -m pytest import builtins +import io import json +import logging from typing import Any, Dict, List, Optional from unittest import mock import pytest import autogen -from autogen import Agent, GroupChat +from autogen import Agent, AssistantAgent, GroupChat, GroupChatManager from autogen.exception_utils import AgentNameConflict, UndefinedNextAgent @@ -1766,6 +1768,204 @@ def test_select_speaker_auto_messages(): ) +def test_manager_messages_to_string(): + """In this test we test the conversion of messages to a JSON string""" + messages = [ + { + "content": "You are an expert at finding the next speaker.", + "role": "system", + }, + { + "content": "Let's 
get this meeting started. First the Product_Manager will create 3 new product ideas.", + "name": "Chairperson", + "role": "assistant", + }, + ] + + groupchat = GroupChat(messages=messages, agents=[]) + manager = GroupChatManager(groupchat) + + # Convert the messages List[Dict] to a JSON string + converted_string = manager.messages_to_string(messages) + + # The conversion should match the original messages + assert json.loads(converted_string) == messages + + +def test_manager_messages_from_string(): + """In this test we test the conversion of a JSON string of messages to a messages List[Dict]""" + messages_str = r"""[{"content": "You are an expert at finding the next speaker.", "role": "system"}, {"content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.", "name": "Chairperson", "role": "assistant"}]""" + + groupchat = GroupChat(messages=[], agents=[]) + manager = GroupChatManager(groupchat) + + # Convert the messages List[Dict] to a JSON string + messages = manager.messages_from_string(messages_str) + + # The conversion should match the original messages + assert messages_str == json.dumps(messages) + + +def test_manager_resume_functions(): + """Tests functions within the resume chat functionality""" + + # Setup + coder = AssistantAgent(name="Coder", llm_config=None) + groupchat = GroupChat(messages=[], agents=[coder]) + manager = GroupChatManager(groupchat) + + # Tests that messages are indeed passed in + with pytest.raises(Exception): + manager._valid_resume_messages(messages=[]) + + # Tests that the messages passed in match the agents of the group chat + messages = [ + { + "content": "You are an expert at finding the next speaker.", + "role": "system", + }, + { + "content": "Let's get this meeting started. 
First the Product_Manager will create 3 new product ideas.", + "name": "Chairperson", + "role": "assistant", + }, + ] + + # Chairperson does not exist as an agent + with pytest.raises(Exception): + manager._valid_resume_messages(messages) + + messages = [ + { + "content": "You are an expert at finding the next speaker.", + "role": "system", + }, + { + "content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.", + "name": "Coder", + "role": "assistant", + }, + ] + + # Coder does exist as an agent, no error + manager._valid_resume_messages(messages) + + # Tests termination message replacement + final_msg = ( + "Let's get this meeting started. First the Product_Manager will create 3 new product ideas. TERMINATE this." + ) + messages = [ + { + "content": "You are an expert at finding the next speaker.", + "role": "system", + }, + { + "content": final_msg, + "name": "Coder", + "role": "assistant", + }, + ] + + manager._process_resume_termination(remove_termination_string="TERMINATE", messages=messages) + + # TERMINATE should be removed + assert messages[-1]["content"] == final_msg.replace("TERMINATE", "") + + # Check if the termination string doesn't exist there's no replacing of content + final_msg = ( + "Let's get this meeting started. First the Product_Manager will create 3 new product ideas. TERMINATE this." 
+ ) + messages = [ + { + "content": "You are an expert at finding the next speaker.", + "role": "system", + }, + { + "content": final_msg, + "name": "Coder", + "role": "assistant", + }, + ] + + manager._process_resume_termination(remove_termination_string="THE-END", messages=messages) + + # It should not be changed + assert messages[-1]["content"] == final_msg + + # Test that it warns that the termination condition would match + manager._is_termination_msg = lambda x: x.get("content", "").find("TERMINATE") >= 0 + + # Attach a handler to the logger so we can check the log output + log_stream = io.StringIO() + handler = logging.StreamHandler(log_stream) + logger = logging.getLogger() # Get the root logger + logger.addHandler(handler) + + # We should get a warning that TERMINATE is still in the messages + manager._process_resume_termination(remove_termination_string="THE-END", messages=messages) + + # Get the logged output and check that the warning was provided. + log_output = log_stream.getvalue() + + assert "WARNING: Last message meets termination criteria and this may terminate the chat." 
in log_output + + +def test_manager_resume_returns(): + """Tests the return resume chat functionality""" + + # Test the return agent and message is correct + coder = AssistantAgent(name="Coder", llm_config=None) + groupchat = GroupChat(messages=[], agents=[coder]) + manager = GroupChatManager(groupchat) + messages = [ + { + "content": "You are an expert at coding.", + "role": "system", + }, + { + "content": "Let's get coding, should I use Python?", + "name": "Coder", + "role": "assistant", + }, + ] + + return_agent, return_message = manager.resume(messages=messages) + + assert return_agent == coder + assert return_message == messages[-1] + + # Test when no agent provided, the manager will be returned + messages = [{"content": "You are an expert at coding.", "role": "system", "name": "chat_manager"}] + + return_agent, return_message = manager.resume(messages=messages) + + assert return_agent == manager + assert return_message == messages[-1] + + +def test_manager_resume_messages(): + """Tests that the messages passed into resume are the correct format""" + + coder = AssistantAgent(name="Coder", llm_config=None) + groupchat = GroupChat(messages=[], agents=[coder]) + manager = GroupChatManager(groupchat) + messages = 1 + + # Only acceptable messages types are JSON str and List[Dict] + + # Try a number + with pytest.raises(Exception): + return_agent, return_message = manager.resume(messages=messages) + + # Try an empty string + with pytest.raises(Exception): + return_agent, return_message = manager.resume(messages="") + + # Try a message starter string, which isn't valid + with pytest.raises(Exception): + return_agent, return_message = manager.resume(messages="Let's get this conversation started.") + + if __name__ == "__main__": # test_func_call_groupchat() # test_broadcast() @@ -1784,7 +1984,12 @@ def test_select_speaker_auto_messages(): # test_role_for_select_speaker_messages() # test_select_speaker_message_and_prompt_templates() # 
test_speaker_selection_agent_name_match() - test_speaker_selection_auto_process_result() - test_speaker_selection_validate_speaker_name() - test_select_speaker_auto_messages() + # test_speaker_selection_auto_process_result() + # test_speaker_selection_validate_speaker_name() + # test_select_speaker_auto_messages() + test_manager_messages_to_string() + test_manager_messages_from_string() + test_manager_resume_functions() + test_manager_resume_returns() + test_manager_resume_messages() # pass diff --git a/test/coding/test_commandline_code_executor.py b/test/coding/test_commandline_code_executor.py index 0a0ded71e6c..4daf2e21bcb 100644 --- a/test/coding/test_commandline_code_executor.py +++ b/test/coding/test_commandline_code_executor.py @@ -2,12 +2,13 @@ import sys import tempfile import uuid +import venv from pathlib import Path import pytest from autogen.agentchat.conversable_agent import ConversableAgent -from autogen.code_utils import decide_use_docker, is_docker_running +from autogen.code_utils import WIN32, decide_use_docker, is_docker_running from autogen.coding.base import CodeBlock, CodeExecutor from autogen.coding.docker_commandline_code_executor import DockerCommandLineCodeExecutor from autogen.coding.factory import CodeExecutorFactory @@ -393,3 +394,20 @@ def test_silent_pip_install(cls, lang: str) -> None: code_blocks = [CodeBlock(code=code, language=lang)] code_result = executor.execute_code_blocks(code_blocks) assert code_result.exit_code == error_exit_code and "ERROR: " in code_result.output + + +def test_local_executor_with_custom_python_env(): + with tempfile.TemporaryDirectory() as temp_dir: + env_builder = venv.EnvBuilder(with_pip=True) + env_builder.create(temp_dir) + env_builder_context = env_builder.ensure_directories(temp_dir) + + executor = LocalCommandLineCodeExecutor(work_dir=temp_dir, virtual_env_context=env_builder_context) + code_blocks = [ + # 
https://stackoverflow.com/questions/1871549/how-to-determine-if-python-is-running-inside-a-virtualenv + CodeBlock(code="import sys; print(sys.prefix != sys.base_prefix)", language="python"), + ] + execution = executor.execute_code_blocks(code_blocks) + + assert execution.exit_code == 0 + assert execution.output.strip() == "True" diff --git a/test/test_code_utils.py b/test/test_code_utils.py index d6084f9b029..ade3855ad85 100755 --- a/test/test_code_utils.py +++ b/test/test_code_utils.py @@ -5,6 +5,7 @@ import tempfile import unittest from io import StringIO +from types import SimpleNamespace from unittest.mock import patch import pytest @@ -15,6 +16,7 @@ UNKNOWN, check_can_use_docker_or_throw, content_str, + create_virtual_env, decide_use_docker, execute_code, extract_code, @@ -500,6 +502,20 @@ def test_can_use_docker_or_throw(): check_can_use_docker_or_throw(True) +def test_create_virtual_env(): + with tempfile.TemporaryDirectory() as temp_dir: + venv_context = create_virtual_env(temp_dir) + assert isinstance(venv_context, SimpleNamespace) + assert venv_context.env_name == os.path.split(temp_dir)[1] + + +def test_create_virtual_env_with_extra_args(): + with tempfile.TemporaryDirectory() as temp_dir: + venv_context = create_virtual_env(temp_dir, with_pip=False) + assert isinstance(venv_context, SimpleNamespace) + assert venv_context.env_name == os.path.split(temp_dir)[1] + + def _test_improve(): try: import openai diff --git a/website/docs/FAQ.mdx b/website/docs/FAQ.mdx index d2a4b3b2a32..5a0adece6b0 100644 --- a/website/docs/FAQ.mdx +++ b/website/docs/FAQ.mdx @@ -277,3 +277,22 @@ To resolve this issue, you need to upgrade your Autogen library to version 0.2.2 ```python pip install --upgrade autogen ``` + +## None of the devcontainers are building due to "Hash sum mismatch", what should I do? + +This is an intermittent issue that appears to be caused by some combination of mirror and proxy issues. 
+If it arises, try to replace the `apt-get update` step with the following: + +```bash +RUN echo "Acquire::http::Pipeline-Depth 0;" > /etc/apt/apt.conf.d/99custom && \ + echo "Acquire::http::No-Cache true;" >> /etc/apt/apt.conf.d/99custom && \ + echo "Acquire::BrokenProxy true;" >> /etc/apt/apt.conf.d/99custom + +RUN apt-get clean && \ + rm -r /var/lib/apt/lists/* && \ + apt-get update -o Acquire::CompressionTypes::Order::=gz && \ + apt-get -y update && \ + apt-get install sudo git npm # and whatever packages need to be installed in this specific version of the devcontainer +``` + +This is a combination of StackOverflow suggestions [here](https://stackoverflow.com/a/48777773/2114580) and [here](https://stackoverflow.com/a/76092743/2114580). diff --git a/website/docs/topics/code-execution/cli-code-executor.ipynb b/website/docs/topics/code-execution/cli-code-executor.ipynb index 69df79754d0..11649b15a58 100644 --- a/website/docs/topics/code-execution/cli-code-executor.ipynb +++ b/website/docs/topics/code-execution/cli-code-executor.ipynb @@ -126,6 +126,35 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using a Python virtual environment\n", + "\n", + "By default, the LocalCommandLineCodeExecutor executes code and installs dependencies within the same Python environment as the AutoGen code. 
You have the option to specify a Python virtual environment to prevent polluting the base Python environment.\n", + "\n", + "### Example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen.code_utils import create_virtual_env\n", + "from autogen.coding import CodeBlock, LocalCommandLineCodeExecutor\n", + "\n", + "venv_dir = \".venv\"\n", + "venv_context = create_virtual_env(venv_dir)\n", + "\n", + "executor = LocalCommandLineCodeExecutor(virtual_env_context=venv_context)\n", + "print(\n", + " executor.execute_code_blocks(code_blocks=[CodeBlock(language=\"python\", code=\"import sys; print(sys.executable)\")])\n", + ")" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/website/docs/topics/groupchat/resuming_groupchat.ipynb b/website/docs/topics/groupchat/resuming_groupchat.ipynb new file mode 100644 index 00000000000..bc56cb3cd35 --- /dev/null +++ b/website/docs/topics/groupchat/resuming_groupchat.ipynb @@ -0,0 +1,760 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Resuming a GroupChat\n", + "\n", + "In GroupChat, we can resume a previous group chat by passing the messages from that conversation to the GroupChatManager's `resume` function (or `a_resume` for asynchronous workflows). This prepares the GroupChat, GroupChatManager, and group chat's agents for resuming. An agent's `initiate_chat` can then be called to resume the chat.\n", + "\n", + "The `resume` function returns the last agent in the messages as well as the last message itself. 
These can be used to run the `initiate_chat`.\n", + "\n", + "To resume, the agents, GroupChat, and GroupChatManager objects must exist and match the original group chat.\n", + "\n", + "The messages passed into the `resume` function can be passed in as a JSON string or a `List[Dict]` of messages, typically from the ChatResult's `chat_history` of the previous conversation or the GroupChat's `messages` property. Use the GroupChatManager's `messages_to_string` function to retrieve a JSON string that can be used for resuming:\n", + "\n", + "```text\n", + "# Save chat messages for resuming later on using the chat history\n", + "messages_json = mygroupchatmanager.messages_to_string(previous_chat_result.chat_history)\n", + "\n", + "# Alternatively you can use the GroupChat's messages property\n", + "messages_json = mygroupchatmanager.messages_to_string(mygroupchatmanager.groupchat.messages)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "An example of the JSON string:\n", + "```json\n", + "[{\"content\": \"Find the latest paper about gpt-4 on arxiv and find its potential applications in software.\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Plan:\\n1. **Engineer**: Search for the latest paper on GPT-4 on arXiv.\\n2. **Scientist**: Read the paper and summarize the key findings and potential applications of GPT-4.\\n3. **Engineer**: Identify potential software applications where GPT-4 can be utilized based on the scientist's summary.\\n4. **Scientist**: Provide insights on the feasibility and impact of implementing GPT-4 in the identified software applications.\\n5. **Engineer**: Develop a prototype or proof of concept to demonstrate how GPT-4 can be integrated into the selected software application.\\n6. **Scientist**: Evaluate the prototype, provide feedback, and suggest any improvements or modifications.\\n7. 
**Engineer**: Make necessary revisions based on the scientist's feedback and finalize the integration of GPT-4 into the software application.\\n8. **Admin**: Review the final software application with GPT-4 integration and approve for further development or implementation.\\n\\nFeedback from admin and critic is needed for further refinement of the plan.\", \"role\": \"user\", \"name\": \"Planner\"}, {\"content\": \"Agree\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. I will keep you updated on our progress.\", \"role\": \"user\", \"name\": \"Planner\"}]\n", + "```\n", + "\n", + "When preparing for resuming, the messages will be validated against the groupchat's agents to make sure that the messages can be assigned to them. Messages will be allocated to the agents and then the last speaker and message will be returned for use in `initiate_chat`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Continuing a terminated conversation\n", + "If the previous group chat terminated and the resuming group chat has the same termination condition (such as if the message contains \"TERMINATE\") then the conversation will terminate when resuming as the terminate check occurs with the message passed in to `initiate_chat`.\n", + "\n", + "If the termination condition is based on a string within the message, you can pass in that string in the `remove_termination_string` parameter of the `resume` function and it will be removed. 
If the termination condition is more complicated, you will need to adjust the messages accordingly before calling `resume`.\n", + "\n", + "The `resume` function will then check if the last message provided still meets the termination condition and warns you, if so." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example of resuming a GroupChat\n", + "\n", + "Start with the LLM config. This can differ from the original group chat." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "import autogen\n", + "\n", + "# Put your api key in the environment variable OPENAI_API_KEY\n", + "config_list = [\n", + " {\n", + " \"model\": \"gpt-4-0125-preview\",\n", + " \"api_key\": os.environ[\"OPENAI_API_KEY\"],\n", + " }\n", + "]\n", + "\n", + "gpt4_config = {\n", + " \"cache_seed\": 42, # change the cache_seed for different trials\n", + " \"temperature\": 0,\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create the group chat objects, they should have the same `name` as the original group chat." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# Create Agents, GroupChat, and GroupChatManager in line with the original group chat\n", + "\n", + "planner = autogen.AssistantAgent(\n", + " name=\"Planner\",\n", + " system_message=\"\"\"Planner. Suggest a plan. 
Revise the plan based on feedback from admin and critic, until admin approval.\n", + "The plan may involve an engineer who can write code and a scientist who doesn't write code.\n", + "Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.\n", + "\"\"\",\n", + " llm_config=gpt4_config,\n", + ")\n", + "\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"Admin\",\n", + " system_message=\"A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.\",\n", + " code_execution_config=False,\n", + ")\n", + "\n", + "engineer = autogen.AssistantAgent(\n", + " name=\"Engineer\",\n", + " llm_config=gpt4_config,\n", + " system_message=\"\"\"Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor.\n", + "Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor.\n", + "If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\n", + "\"\"\",\n", + ")\n", + "scientist = autogen.AssistantAgent(\n", + " name=\"Scientist\",\n", + " llm_config=gpt4_config,\n", + " system_message=\"\"\"Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. 
You don't write code.\"\"\",\n", + ")\n", + "\n", + "executor = autogen.UserProxyAgent(\n", + " name=\"Executor\",\n", + " system_message=\"Executor. Execute the code written by the engineer and report the result.\",\n", + " human_input_mode=\"NEVER\",\n", + " code_execution_config={\n", + " \"last_n_messages\": 3,\n", + " \"work_dir\": \"paper\",\n", + " \"use_docker\": False,\n", + " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n", + ")\n", + "\n", + "groupchat = autogen.GroupChat(\n", + " agents=[user_proxy, engineer, scientist, planner, executor],\n", + " messages=[],\n", + " max_round=10,\n", + ")\n", + "\n", + "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the previous messages (from a JSON string or messages `List[Dict]`)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# Messages in a JSON string\n", + "previous_state = r\"\"\"[{\"content\": \"Find the latest paper about gpt-4 on arxiv and find its potential applications in software.\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Plan:\\n1. **Engineer**: Search for the latest paper on GPT-4 on arXiv.\\n2. **Scientist**: Read the paper and summarize the key findings and potential applications of GPT-4.\\n3. **Engineer**: Identify potential software applications where GPT-4 can be utilized based on the scientist's summary.\\n4. **Scientist**: Provide insights on the feasibility and impact of implementing GPT-4 in the identified software applications.\\n5. **Engineer**: Develop a prototype or proof of concept to demonstrate how GPT-4 can be integrated into the selected software application.\\n6. **Scientist**: Evaluate the prototype, provide feedback, and suggest any improvements or modifications.\\n7. 
**Engineer**: Make necessary revisions based on the scientist's feedback and finalize the integration of GPT-4 into the software application.\\n8. **Admin**: Review the final software application with GPT-4 integration and approve for further development or implementation.\\n\\nFeedback from admin and critic is needed for further refinement of the plan.\", \"role\": \"user\", \"name\": \"Planner\"}, {\"content\": \"Agree\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. I will keep you updated on our progress.\", \"role\": \"user\", \"name\": \"Planner\"}]\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Resume the group chat using the last agent and last message." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prepared group chat with 4 messages, the last speaker is \u001b[33mPlanner\u001b[0m\n", + "\u001b[33mPlanner\u001b[0m (to chat_manager):\n", + "\n", + "Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. 
I will keep you updated on our progress.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", + "\n", + "```python\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "\n", + "# Define the URL for the arXiv search\n", + "url = \"https://arxiv.org/search/?query=GPT-4&searchtype=all&source=header\"\n", + "\n", + "# Send a GET request to the URL\n", + "response = requests.get(url)\n", + "\n", + "# Parse the HTML content of the page\n", + "soup = BeautifulSoup(response.content, 'html.parser')\n", + "\n", + "# Find the first paper related to GPT-4\n", + "paper = soup.find('li', class_='arxiv-result')\n", + "if paper:\n", + " title = paper.find('p', class_='title').text.strip()\n", + " authors = paper.find('p', class_='authors').text.strip()\n", + " abstract = paper.find('p', class_='abstract').text.strip().replace('\\n', ' ')\n", + " link = paper.find('p', class_='list-title').find('a')['href']\n", + " print(f\"Title: {title}\\nAuthors: {authors}\\nAbstract: {abstract}\\nLink: {link}\")\n", + "else:\n", + " print(\"No GPT-4 papers found on arXiv.\")\n", + "```\n", + "This script searches for the latest paper on GPT-4 on arXiv, extracts the title, authors, abstract, and link to the paper, and prints this information.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", + "\u001b[33mExecutor\u001b[0m (to chat_manager):\n", + "\n", + "exitcode: 0 (execution succeeded)\n", + "Code output: \n", + "Title: Smurfs: Leveraging Multiple Proficiency Agents with Context-Efficiency for Tool Planning\n", + "Authors: Authors:\n", + "Junzhi Chen, \n", + " \n", + " Juhao Liang, \n", + " \n", + " Benyou Wang\n", + "Abstract: Abstract: …scenarios. 
Notably, Smurfs outmatches the ChatGPT-ReACT in the ToolBench I2 and I3 benchmark with a remarkable 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%. Furthermore, through comprehensive ablation studies, we dissect the contribution of the core components of the multi-agent… ▽ More The emergence of large language models (LLMs) has opened up unprecedented possibilities for automating complex tasks that are often comparable to human performance. Despite their capabilities, LLMs still encounter difficulties in completing tasks that require high levels of accuracy and complexity due to their inherent limitations in handling multifaceted problems single-handedly. This paper introduces \"Smurfs\", a cutting-edge multi-agent framework designed to revolutionize the application of LLMs. By transforming a conventional LLM into a synergistic multi-agent ensemble, Smurfs enhances task decomposition and execution without necessitating extra training. This is achieved through innovative prompting strategies that allocate distinct roles within the model, thereby facilitating collaboration among specialized agents. The framework gives access to external tools to efficiently solve complex tasks. Our empirical investigation, featuring the mistral-7b-instruct model as a case study, showcases Smurfs' superior capability in intricate tool utilization scenarios. Notably, Smurfs outmatches the ChatGPT-ReACT in the ToolBench I2 and I3 benchmark with a remarkable 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%. Furthermore, through comprehensive ablation studies, we dissect the contribution of the core components of the multi-agent framework to its overall efficacy. This not only verifies the effectiveness of the framework, but also sets a route for future exploration of multi-agent LLM systems. 
△ Less\n", + "Link: https://arxiv.org/abs/2405.05955\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mScientist\u001b[0m (to chat_manager):\n", + "\n", + "Based on the abstract of the paper titled \"Smurfs: Leveraging Multiple Proficiency Agents with Context-Efficiency for Tool Planning,\" the key findings and potential applications of GPT-4 can be summarized as follows:\n", + "\n", + "### Key Findings:\n", + "- The paper introduces \"Smurfs,\" a multi-agent framework that enhances the capabilities of large language models (LLMs) like GPT-4 by transforming them into a synergistic multi-agent ensemble. This approach allows for better task decomposition and execution without additional training.\n", + "- Smurfs utilize innovative prompting strategies to allocate distinct roles within the model, facilitating collaboration among specialized agents and giving access to external tools for solving complex tasks.\n", + "- In the ToolBench I2 and I3 benchmark, Smurfs outperformed ChatGPT-ReACT with an 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%.\n", + "- Comprehensive ablation studies were conducted to understand the contribution of the core components of the multi-agent framework to its overall efficacy.\n", + "\n", + "### Potential Applications in Software:\n", + "- **Tool Planning and Automation**: Smurfs can be applied to software that requires complex tool planning and automation, enhancing the software's ability to perform tasks that involve multiple steps or require the use of external tools.\n", + "- **Collaborative Systems**: The multi-agent ensemble approach can be utilized in developing collaborative systems where different components or agents work together to complete tasks more efficiently than a single agent could.\n", + "- **Enhanced Problem-Solving**: Software that involves complex problem-solving can benefit from Smurfs by leveraging the 
specialized capabilities of different agents within the ensemble, leading to more accurate and efficient solutions.\n", + "- **Task Decomposition**: Applications that require breaking down complex tasks into simpler sub-tasks can use the Smurfs framework to improve task decomposition and execution, potentially leading to better performance and outcomes.\n", + "\n", + "The integration of GPT-4 with the Smurfs framework presents a novel approach to enhancing the capabilities of LLMs in software applications, particularly in areas that require complex task planning, execution, and problem-solving.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", + "\n", + "Given the scientist's summary on the potential applications of GPT-4 as enhanced by the Smurfs framework, we can identify several software applications where GPT-4 can be utilized effectively:\n", + "\n", + "1. **Project Management Tools**: Integration of GPT-4 with Smurfs can revolutionize project management software by automating complex planning tasks, optimizing resource allocation, and providing actionable insights for project execution.\n", + "\n", + "2. **Code Generation and Software Development**: Leveraging GPT-4 in IDEs (Integrated Development Environments) or other software development tools can enhance code generation capabilities, provide context-aware suggestions, and automate debugging processes.\n", + "\n", + "3. **Customer Support and Chatbots**: GPT-4 can be used to power advanced customer support chatbots that understand complex queries, provide accurate information, and automate problem-solving for customer issues.\n", + "\n", + "4. **Educational Platforms**: In educational software, GPT-4 can personalize learning experiences, automate content generation, and provide interactive tutoring services.\n", + "\n", + "5. 
**Healthcare Applications**: GPT-4 can assist in healthcare applications by analyzing medical data, providing diagnostic support, and offering personalized healthcare advice.\n", + "\n", + "6. **Creative Writing and Content Generation**: Software tools for creative writing and content generation can benefit from GPT-4's capabilities to produce original content, assist in storytelling, and generate ideas.\n", + "\n", + "7. **Business Intelligence and Analytics**: GPT-4 can enhance business intelligence software by automating data analysis, generating reports, and providing insights based on large datasets.\n", + "\n", + "8. **Security and Threat Analysis**: In cybersecurity applications, GPT-4 can be used to analyze threats, automate security protocols, and provide recommendations for threat mitigation.\n", + "\n", + "These applications demonstrate the versatility and potential impact of integrating GPT-4 into various software solutions, offering opportunities for automation, enhanced efficiency, and improved user experiences across different domains.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mAdmin\u001b[0m (to chat_manager):\n", + "\n", + "Approve\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mScientist\u001b[0m (to chat_manager):\n", + "\n", + "With the approval from the admin, the plan to explore and integrate GPT-4 into various software applications, leveraging its enhanced capabilities through the Smurfs framework, is set to proceed. This initiative promises to bring significant advancements in automation, efficiency, and user experience across a wide range of software applications, from project management tools to healthcare applications. 
The next steps involve detailed planning and execution for the development of prototypes or proof of concepts for selected applications, followed by evaluation, feedback, and finalization of GPT-4 integration into these software solutions.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", + "\n", + "Given the approval and the insights provided, the next steps involve detailed planning for the development and integration of GPT-4 into selected software applications. This process will include:\n", + "\n", + "1. **Selection of Specific Applications**: Based on the potential applications identified, select one or more specific software applications for prototype development. This selection will be based on factors such as feasibility, potential impact, and available resources.\n", + "\n", + "2. **Prototype Development**: Develop a prototype or proof of concept for the selected application(s). This will involve designing the architecture, integrating GPT-4 with the Smurfs framework, and implementing the necessary functionalities to demonstrate the application of GPT-4 in the software.\n", + "\n", + "3. **Evaluation and Feedback**: Once the prototype is developed, it will be evaluated to assess its performance, usability, and effectiveness in leveraging GPT-4's capabilities. Feedback will be gathered from potential users and stakeholders to identify areas for improvement.\n", + "\n", + "4. **Revisions and Finalization**: Based on the feedback received, necessary revisions and improvements will be made to the prototype. This step may involve refining the integration of GPT-4, optimizing the software's performance, and enhancing user experience.\n", + "\n", + "5. **Implementation and Deployment**: After finalizing the prototype, the next step will involve planning for the full-scale implementation and deployment of the software application with GPT-4 integration. 
This will include addressing any scalability, security, and maintenance considerations.\n", + "\n", + "6. **Continuous Improvement**: Post-deployment, it will be important to monitor the software's performance and user feedback continuously. This will enable ongoing improvements and updates to ensure that the software remains effective and relevant.\n", + "\n", + "This structured approach will ensure that the integration of GPT-4 into software applications is carried out effectively, leading to innovative solutions that harness the full potential of GPT-4 and the Smurfs framework.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mAdmin\u001b[0m (to chat_manager):\n", + "\n", + "Approve\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mEngineer\u001b[0m (to chat_manager):\n", + "\n", + "With the final approval from the admin, the project to integrate GPT-4 into selected software applications, leveraging its capabilities through the Smurfs framework, is officially set to move forward. This marks the beginning of an innovative journey towards developing advanced software solutions that can automate complex tasks, enhance efficiency, and improve user experiences across various domains. The focus will now shift to the execution phase, where detailed planning, development, and iterative improvements will bring these concepts to life. 
This initiative promises to showcase the transformative potential of GPT-4 in the software industry, setting new benchmarks for what is possible with artificial intelligence.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# Prepare the group chat for resuming\n", + "last_agent, last_message = manager.resume(messages=previous_state)\n", + "\n", + "# Resume the chat using the last agent and message\n", + "result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#1, Admin: Find the latest paper about gpt-4 on arxiv and find its potential applications i ...\n", + "#2, Planner: Plan: 1. **Engineer**: Search for the latest paper on GPT-4 on arXiv. 2. **Scien ...\n", + "#3, Admin: Agree \n", + "#4, Planner: Great! Let's proceed with the plan outlined earlier. 
I will start by searching f ...\n", + "#5, Engineer: ```python import requests from bs4 import BeautifulSoup # Define the URL for th ...\n", + "#6, Executor: exitcode: 0 (execution succeeded) Code output: Title: Smurfs: Leveraging Multip ...\n", + "#7, Scientist: Based on the abstract of the paper titled \"Smurfs: Leveraging Multiple Proficien ...\n", + "#8, Engineer: Given the scientist's summary on the potential applications of GPT-4 as enhanced ...\n", + "#9, Admin: Approve \n", + "#10, Scientist: With the approval from the admin, the plan to explore and integrate GPT-4 into v ...\n", + "#11, Engineer: Given the approval and the insights provided, the next steps involve detailed pl ...\n", + "#12, Admin: Approve \n", + "#13, Engineer: With the final approval from the admin, the project to integrate GPT-4 into sele ...\n" + ] + } + ], + "source": [ + "# Output the final chat history showing the original 4 messages and resumed messages\n", + "for i, message in enumerate(groupchat.messages):\n", + " print(\n", + " f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n", + " f\"{'...' if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example of resuming a terminated GroupChat\n", + "\n", + "This example shows how to resume a group chat that was terminated with a termination string.\n", + "\n", + "The termination string is `TERMINATE`" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [], + "source": [ + "# Previous state with TERMINATE in the last message\n", + "previous_state = r\"\"\"[{\"content\": \"Let's get this meeting started. We'll have a set order of speakers. First the Product_Manager will create 3 new product ideas. Then the Chief_Marketing_Officer will speak and talk about the idea they have chosen to move forward with. 
Then the Digital_Marketer will create 3 marketing strategies for that idea. We MUST stick to the speaking order which is Product_Manager first, Chief_Marketing_Officer second, Digital_Marketer third, and finally the Chief_Marketing_Officer will speak and end the meeting.\", \"role\": \"user\", \"name\": \"Chairperson\"}, {\"content\": \"Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present three new product ideas for our luxury car models: 1. 'EcoLux': A hybrid electric-gasoline vehicle that combines the best of both worlds, offering exceptional fuel efficiency and reduced carbon footprint. 2. 'AeroSport': A high-performance sports car with advanced aerodynamics, designed to deliver unparalleled speed and agility on the track or on the open road. 3. 'SmartDrive': An intelligent driver-assistance system that uses AI-powered sensors and cameras to anticipate and respond to potential hazards, ensuring a safer driving experience. Now it's your turn, Chief_Marketing_Officer! Which of these ideas do you think has the most potential?\", \"role\": \"user\", \"name\": \"Chief_Marketing_Officer\"}, {\"content\": \"Thank you for presenting those innovative product ideas, Product_Manager. After careful consideration, I believe 'EcoLux' has the most potential. With the growing concern about climate change and environmental sustainability, a hybrid electric-gasoline vehicle that offers exceptional fuel efficiency and reduced carbon footprint could be a game-changer in the luxury car market. Additionally, it aligns with our company's commitment to innovation and responsibility. Now it's your turn, Digital_Marketer! Can you come up with three marketing strategies for 'EcoLux'?\", \"role\": \"user\", \"name\": \"Product_Manager\"}, {\"content\": \"Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 
'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. TERMINATE\", \"role\": \"user\", \"name\": \"Digital_Marketer\"}]\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create the group chat objects, they should have the same `name` as the original group chat." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 87, + "metadata": {}, + "outputs": [], + "source": [ + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"Chairperson\",\n", + " system_message=\"The chairperson for the meeting.\",\n", + " code_execution_config={},\n", + " human_input_mode=\"TERMINATE\",\n", + ")\n", + "\n", + "cmo = autogen.AssistantAgent(\n", + " name=\"Chief_Marketing_Officer\",\n", + " # system_message is used in the select speaker message\n", + " description=\"The head of the marketing department working with the product manager and digital marketer to execute a strong marketing campaign for your car company.\",\n", + " # description is used to prompt the LLM as this agent\n", + " system_message=\"You, Jane titled Chief_Marketing_Officer, or CMO, are the head of the marketing department and your objective is to guide your team to producing and marketing unique ideas for your luxury car models. Don't include your name at the start of your response or speak for any other team member, let them come up with their own ideas and strategies, speak just for yourself as the head of marketing. When yourself, the Product_Manager, and the Digital_Marketer have spoken and the meeting is finished, say TERMINATE to conclude the meeting.\",\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", + " llm_config=gpt4_config,\n", + ")\n", + "\n", + "pm = autogen.AssistantAgent(\n", + " name=\"Product_Manager\",\n", + " # system_message is used in the select speaker message\n", + " description=\"Product head for the luxury model cars product line in the car company. Always coming up with new product enhancements for the cars.\",\n", + " # description is used to prompt the LLM as this agent\n", + " system_message=\"You, Alice titled Product_Manager, are always coming up with new product enhancements for the luxury car models you look after. Review the meeting so far and respond with the answer to your current task. 
Don't include your name at the start of your response and don't speak for anyone else, leave the Chairperson to pick the next person to speak.\",\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", + " llm_config=gpt4_config,\n", + ")\n", + "\n", + "digital = autogen.AssistantAgent(\n", + " name=\"Digital_Marketer\",\n", + " # system_message is used in the select speaker message\n", + " description=\"A seasoned digital marketer who comes up with online marketing strategies that highlight the key features of the luxury car models.\",\n", + " # description is used to prompt the LLM as this agent\n", + " system_message=\"You, Elizabeth titled Digital_Marketer, are a senior online marketing specialist who comes up with marketing strategies that highlight the key features of the luxury car models. Review the meeting so far and respond with the answer to your current task. Don't include your name at the start of your response and don't speak for anyone else, leave the Chairperson to pick the next person to speak.\",\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n", + " llm_config=gpt4_config,\n", + ")\n", + "\n", + "# Customised message, this is always the first message in the context\n", + "my_speaker_select_msg = \"\"\"You are a chairperson for a marketing meeting for this car manufacturer where multiple members of the team will speak.\n", + "The job roles of the team at the meeting, and their responsibilities, are:\n", + "{roles}\"\"\"\n", + "\n", + "# Customised prompt, this is always the last message in the context\n", + "my_speaker_select_prompt = \"\"\"Read the above conversation.\n", + "Then select ONLY THE NAME of the next job role from {agentlist} to speak. 
Do not explain why.\"\"\"\n", + "\n", + "groupchat = autogen.GroupChat(\n", + " agents=[user_proxy, cmo, pm, digital],\n", + " messages=[],\n", + " max_round=10,\n", + " select_speaker_message_template=my_speaker_select_msg,\n", + " select_speaker_prompt_template=my_speaker_select_prompt,\n", + " max_retries_for_selecting_speaker=2, # New\n", + " select_speaker_auto_verbose=False, # New\n", + ")\n", + "\n", + "manager = autogen.GroupChatManager(\n", + " groupchat=groupchat,\n", + " llm_config=gpt4_config,\n", + " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\", \"\"),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Prepare the resumption of the group chat without removing the termination condition. A warning will show. Then attempting to resume the chat will terminate immediately." + ] + }, + { + "cell_type": "code", + "execution_count": 88, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Last message meets termination criteria and this may terminate the chat. Set ignore_initial_termination_check=False to avoid checking termination at the start of the chat.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prepared group chat with 4 messages, the last speaker is \u001b[33mDigital_Marketer\u001b[0m\n" + ] + } + ], + "source": [ + "# Prepare the group chat for resuming WITHOUT removing the TERMINATE message\n", + "last_agent, last_message = manager.resume(messages=previous_state)" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mDigital_Marketer\u001b[0m (to chat_manager):\n", + "\n", + "Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 
'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# Resume and it will terminate immediately\n", + "result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This time, we will remove the termination message, by using the `remove_termination_string` parameter, and then resume." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prepared group chat with 4 messages, the last speaker is \u001b[33mDigital_Marketer\u001b[0m\n" + ] + } + ], + "source": [ + "# Prepare the group chat for resuming WITH removal of TERMINATE message\n", + "last_agent, last_message = manager.resume(messages=previous_state, remove_termination_string=\"TERMINATE\")" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mDigital_Marketer\u001b[0m (to chat_manager):\n", + "\n", + "Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. 
\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n", + "\n", + "Thank you, Digital_Marketer, for those comprehensive and innovative marketing strategies. Each strategy you've outlined aligns perfectly with our vision for EcoLux, emphasizing its eco-friendly features, technological innovation, and luxury appeal. The 'Green Revolution' Campaign will leverage the power of social media and influencers to reach our target audience effectively. The 'Fuel for Thought' Content Series will educate and engage potential customers on the importance of sustainability in the luxury automotive sector. Lastly, the 'EcoLux Experience' Event Marketing will provide an immersive experience that showcases the unique value proposition of EcoLux. \n", + "\n", + "I believe these strategies will collectively create a strong market presence for EcoLux, appealing to both luxury car enthusiasts and environmentally conscious consumers. Let's proceed with these strategies and ensure that every touchpoint communicates EcoLux's commitment to luxury, innovation, and sustainability. \n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# Resume the chat using the last agent and message\n", + "result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the conversation continued; the Chief_Marketing_Officer spoke and then terminated the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#1, Chairperson: Let's get this meeting started. We'll have a set order of speakers. 
First the Pr ...\n", + "#2, Chief_Marketing_Officer: Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present ...\n", + "#3, Product_Manager: Thank you for presenting those innovative product ideas, Product_Manager. After ...\n", + "#4, Digital_Marketer: Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three ...\n", + "#5, Chief_Marketing_Officer: Thank you, Digital_Marketer, for those comprehensive and innovative marketing st ...\n" + ] + } + ], + "source": [ + "# Output the final chat history showing the original 4 messages and the resumed message\n", + "for i, message in enumerate(groupchat.messages):\n", + " print(\n", + " f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n", + " f\"{'...' if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example of resuming a terminated GroupChat with a new message and agent\n", + "\n", + "Rather than continuing a group chat by using the last message, we can resume a group chat using a new message.\n", + "\n", + "**IMPORTANT**: To remain in a group chat, use the GroupChatManager to initiate the chat, otherwise you can continue with an agent-to-agent conversation by using another agent to initiate the chat.\n", + "\n", + "We'll continue with the previous example by using the messages from that conversation and resuming it with a new conversation in the agent 'meeting'.\n", + "\n", + "We start by preparing the group chat by using the messages from the previous chat." + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Last message meets termination criteria and this may terminate the chat. 
Set ignore_initial_termination_check=False to avoid checking termination at the start of the chat.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prepared group chat with 5 messages, the last speaker is \u001b[33mChief_Marketing_Officer\u001b[0m\n" + ] + } + ], + "source": [ + "# Prepare the group chat for resuming using the previous messages. We don't need to remove the TERMINATE string as we aren't using the last message for resuming.\n", + "last_agent, last_message = manager.resume(messages=groupchat.messages)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's continue the meeting with a new topic." + ] + }, + { + "cell_type": "code", + "execution_count": 94, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mchat_manager\u001b[0m (to Chief_Marketing_Officer):\n", + "\n", + "Team, let's now think of a name for the next vehicle that embodies that idea. Chief_Marketing_Officer and Product_manager can you both suggest one and then we can conclude.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n", + "\n", + "Given the focus on sustainability and luxury, I suggest the name \"VerdeVogue\" for our next vehicle. \"Verde\" reflects the green, eco-friendly aspect of the car, while \"Vogue\" emphasizes its stylish and trendsetting nature in the luxury market. This name encapsulates the essence of combining environmental responsibility with high-end design and performance. 
\n", + "\n", + "Now, I'd like to hear the Product_Manager's suggestion.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mProduct_Manager\u001b[0m (to chat_manager):\n", + "\n", + "For our next vehicle, I propose the name \"EcoPrestige.\" This name highlights the vehicle's eco-friendly nature and its luxurious, prestigious status in the market. \"Eco\" emphasizes our commitment to sustainability and environmental responsibility, while \"Prestige\" conveys the car's high-end quality, sophistication, and the elite status it offers to its owners. This name perfectly blends our goals of offering a sustainable luxury vehicle that doesn't compromise on performance or style.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n", + "\n", + "Thank you, Product_Manager, for your suggestion. Both \"VerdeVogue\" and \"EcoPrestige\" capture the essence of our new vehicle's eco-friendly luxury. As we move forward, we'll consider these names carefully to ensure our branding aligns perfectly with our product's unique value proposition and market positioning. \n", + "\n", + "This concludes our meeting. Thank you, everyone, for your valuable contributions. TERMINATE.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# Resume the chat using a different agent and message\n", + "result = manager.initiate_chat(\n", + " recipient=cmo,\n", + " message=\"Team, let's now think of a name for the next vehicle that embodies that idea. 
Chief_Marketing_Officer and Product_manager can you both suggest one and then we can conclude.\",\n", + " clear_history=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 95, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#1, Chairperson: Let's get this meeting started. We'll have a set order of speakers. First the Pr ...\n", + "#2, Chief_Marketing_Officer: Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present ...\n", + "#3, Product_Manager: Thank you for presenting those innovative product ideas, Product_Manager. After ...\n", + "#4, Digital_Marketer: Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three ...\n", + "#5, Chief_Marketing_Officer: Given the focus on sustainability and luxury, I suggest the name \"VerdeVogue\" for ...\n", + "#6, Product_Manager: For our next vehicle, I propose the name \"EcoPrestige.\" This name highlights the ...\n", + "#7, Chief_Marketing_Officer: Thank you, Product_Manager, for your suggestion. Both \"VerdeVogue\" and \"EcoPrest ...\n" + ] + } + ], + "source": [ + "# Output the final chat history showing the original 4 messages and the resumed message\n", + "for i, message in enumerate(groupchat.messages):\n", + " print(\n", + " f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n", + " f\"{'...' 
if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n", + " )" + ] + } + ], + "metadata": { + "front_matter": { + "description": "Custom Speaker Selection Function", + "tags": [ + "orchestration", + "group chat" + ] + }, + "kernelspec": { + "display_name": "autogen", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}