diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py
index b5ec7de90c7..c1601ea9ba8 100644
--- a/autogen/agentchat/assistant_agent.py
+++ b/autogen/agentchat/assistant_agent.py
@@ -38,7 +38,7 @@ def __init__(
         llm_config: Optional[Union[Dict, Literal[False]]] = None,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "NEVER",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
         description: Optional[str] = None,
         **kwargs,
     ):
diff --git a/autogen/agentchat/contrib/compressible_agent.py b/autogen/agentchat/contrib/compressible_agent.py
index cbedb17ceed..bea4058b94a 100644
--- a/autogen/agentchat/contrib/compressible_agent.py
+++ b/autogen/agentchat/contrib/compressible_agent.py
@@ -1,8 +1,7 @@
-import asyncio
 import copy
 import inspect
 import logging
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
 from warnings import warn
 
 from autogen import Agent, ConversableAgent, OpenAIWrapper
@@ -60,7 +59,7 @@ def __init__(
         system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "NEVER",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
         function_map: Optional[Dict[str, Callable]] = None,
         code_execution_config: Optional[Union[Dict, bool]] = False,
         llm_config: Optional[Union[Dict, bool]] = None,
diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py
index d2b6b7cde00..699caeb85b3 100644
--- a/autogen/agentchat/contrib/math_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -1,7 +1,7 @@
 import os
 import re
 from time import sleep
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
 
 from pydantic import BaseModel, Extra, root_validator
 
@@ -136,7 +136,7 @@ def __init__(
         is_termination_msg: Optional[
             Callable[[Dict], bool]
         ] = _is_termination_msg_mathchat,  # terminate if \boxed{} in message
-        human_input_mode: Optional[str] = "NEVER",  # Fully automated
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",  # Fully automated
         default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,
         max_invalid_q_per_step=3,  # a parameter needed in MathChat
         **kwargs,
diff --git a/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py
index 1ece138963f..ea81de6dff1 100644
--- a/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py
@@ -1,4 +1,4 @@
-from typing import Callable, Dict, List, Optional
+from typing import Callable, Dict, List, Literal, Optional
 
 from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
 from autogen.agentchat.contrib.vectordb.utils import (
@@ -23,7 +23,7 @@ class QdrantRetrieveUserProxyAgent(RetrieveUserProxyAgent):
     def __init__(
         self,
         name="RetrieveChatAgent",  # default set to RetrieveChatAgent
-        human_input_mode: Optional[str] = "ALWAYS",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         retrieve_config: Optional[Dict] = None,  # config for the retrieve agent
         **kwargs,
diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
index 261ab2062ef..59a4abccb1d 100644
--- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -1,7 +1,7 @@
 import hashlib
 import os
 import re
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
 
 from IPython import get_ipython
 
@@ -92,7 +92,7 @@ class RetrieveUserProxyAgent(UserProxyAgent):
     def __init__(
         self,
         name="RetrieveChatAgent",  # default set to RetrieveChatAgent
-        human_input_mode: Optional[str] = "ALWAYS",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         retrieve_config: Optional[Dict] = None,  # config for the retrieve agent
         **kwargs,
diff --git a/autogen/agentchat/contrib/society_of_mind_agent.py b/autogen/agentchat/contrib/society_of_mind_agent.py
index 97cf6aee1a5..2f6be5088a4 100644
--- a/autogen/agentchat/contrib/society_of_mind_agent.py
+++ b/autogen/agentchat/contrib/society_of_mind_agent.py
@@ -1,8 +1,6 @@
 # ruff: noqa: E722
 import copy
-import json
 import traceback
-from dataclasses import dataclass
 from typing import Callable, Dict, List, Literal, Optional, Tuple, Union
 
 from autogen import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper
@@ -36,7 +34,7 @@ def __init__(
         response_preparer: Optional[Union[str, Callable]] = None,
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "TERMINATE",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
         function_map: Optional[Dict[str, Callable]] = None,
         code_execution_config: Union[Dict, Literal[False]] = False,
         llm_config: Optional[Union[Dict, Literal[False]]] = False,
diff --git a/autogen/agentchat/contrib/text_analyzer_agent.py b/autogen/agentchat/contrib/text_analyzer_agent.py
index e917cca574f..62345156a53 100644
--- a/autogen/agentchat/contrib/text_analyzer_agent.py
+++ b/autogen/agentchat/contrib/text_analyzer_agent.py
@@ -1,6 +1,5 @@
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
-from autogen import oai
 from autogen.agentchat.agent import Agent
 from autogen.agentchat.assistant_agent import ConversableAgent
 
@@ -17,7 +16,7 @@ def __init__(
         self,
         name="analyzer",
         system_message: Optional[str] = system_message,
-        human_input_mode: Optional[str] = "NEVER",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
         llm_config: Optional[Union[Dict, bool]] = None,
         **kwargs,
     ):
diff --git a/autogen/agentchat/contrib/web_surfer.py b/autogen/agentchat/contrib/web_surfer.py
index 1a54aeebe15..af07be6d343 100644
--- a/autogen/agentchat/contrib/web_surfer.py
+++ b/autogen/agentchat/contrib/web_surfer.py
@@ -34,7 +34,7 @@ def __init__(
         description: Optional[str] = DEFAULT_DESCRIPTION,
         is_termination_msg: Optional[Callable[[Dict[str, Any]], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "TERMINATE",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
         function_map: Optional[Dict[str, Callable]] = None,
         code_execution_config: Union[Dict, Literal[False]] = False,
         llm_config: Optional[Union[Dict, Literal[False]]] = None,
@@ -111,7 +111,9 @@ def _create_summarizer_client(self, summarizer_llm_config: Dict[str, Any], llm_c
         self.summarizer_llm_config = summarizer_llm_config  # type: ignore[assignment]
 
         # Create the summarizer client
-        self.summarization_client = None if self.summarizer_llm_config is False else OpenAIWrapper(**self.summarizer_llm_config)  # type: ignore[arg-type]
+        self.summarization_client = (
+            None if self.summarizer_llm_config is False else OpenAIWrapper(**self.summarizer_llm_config)
+        )  # type: ignore[arg-type]
 
     def _register_functions(self) -> None:
         """Register the functions for the inner assistant and user proxy."""
@@ -250,7 +252,7 @@ def _answer_from_page(
         def _summarize_page(
             url: Annotated[
                 Optional[str], "[Optional] The url of the page to summarize. (Defaults to current page)"
-            ] = None
+            ] = None,
         ) -> str:
             return _answer_from_page(url=url, question=None)
 
diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 3e01139a1cb..48f11d526cc 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -609,7 +609,6 @@ def _auto_select_speaker(
 
         # Registered reply function for checking_agent, checks the result of the response for agent names
         def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Union[str, Dict, None]]:
-
             # The number of retries left, starting at max_retries_for_selecting_speaker
             nonlocal attempts_left
             nonlocal attempt
@@ -708,7 +707,6 @@ async def a_auto_select_speaker(
 
         # Registered reply function for checking_agent, checks the result of the response for agent names
        def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Union[str, Dict, None]]:
-
             # The number of retries left, starting at max_retries_for_selecting_speaker
             nonlocal attempts_left
             nonlocal attempt
@@ -782,7 +780,6 @@ def _validate_speaker_name(
             mentions = self._mentioned_agents(select_name, agents)
 
             if len(mentions) == 1:
-
                 # Success on retry, we have just one name mentioned
                 selected_agent_name = next(iter(mentions))
 
@@ -864,17 +861,14 @@ def _process_speaker_selection_result(self, result, last_speaker: ConversableAge
         Used by auto_select_speaker and a_auto_select_speaker."""
         if len(result.chat_history) > 0:
-
             # Use the final message, which will have the selected agent or reason for failure
             final_message = result.chat_history[-1]["content"]
 
             if "[AGENT SELECTED]" in final_message:
-
                 # Have successfully selected an agent, return it
                 return self.agent_by_name(final_message.replace("[AGENT SELECTED]", ""))
 
             else:  # "[AGENT SELECTION FAILED]"
-
                 # Failed to select an agent, so we'll select the next agent in the list
                 next_agent = self.next_agent(last_speaker, agents)
 
@@ -945,7 +939,7 @@ def __init__(
         name: Optional[str] = "chat_manager",
         # unlimited consecutive auto reply by default
         max_consecutive_auto_reply: Optional[int] = sys.maxsize,
-        human_input_mode: Optional[str] = "NEVER",
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
         system_message: Optional[Union[str, List]] = "Group chat manager.",
         silent: bool = False,
         **kwargs,
@@ -1207,7 +1201,6 @@ def resume(
 
         # Load the messages into the group chat
         for i, message in enumerate(messages):
-
             if "name" in message:
                 message_speaker_agent = self._groupchat.agent_by_name(message["name"])
             else:
@@ -1312,7 +1305,6 @@ async def a_resume(
 
         # Load the messages into the group chat
         for i, message in enumerate(messages):
-
             if "name" in message:
                 message_speaker_agent = self._groupchat.agent_by_name(message["name"])
             else:
diff --git a/notebook/agentchat_agentoptimizer.ipynb b/notebook/agentchat_agentoptimizer.ipynb
index ac82932da97..7de418b5ee7 100644
--- a/notebook/agentchat_agentoptimizer.ipynb
+++ b/notebook/agentchat_agentoptimizer.ipynb
@@ -34,7 +34,7 @@
     "import copy\n",
     "import json\n",
     "import os\n",
-    "from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n",
+    "from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union\n",
     "\n",
     "from openai import BadRequestError\n",
     "\n",
@@ -119,7 +119,7 @@
     "        self,\n",
     "        name: Optional[str] = \"MathChatAgent\",\n",
     "        is_termination_msg: Optional[Callable[[Dict], bool]] = is_termination_msg_mathchat,\n",
-    "        human_input_mode: Optional[str] = \"NEVER\",\n",
+    "        human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n",
     "        default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,\n",
     "        max_invalid_q_per_step=3,\n",
     "        **kwargs,\n",
diff --git a/website/docs/topics/task_decomposition.ipynb b/website/docs/topics/task_decomposition.ipynb
index 8001c37ea30..e4c24c9004e 100644
--- a/website/docs/topics/task_decomposition.ipynb
+++ b/website/docs/topics/task_decomposition.ipynb
@@ -1780,7 +1780,7 @@
     "        llm_config: Optional[Union[Dict, Literal[False]]] = None,\n",
     "        is_termination_msg: Optional[Callable[[Dict], bool]] = None,\n",
     "        max_consecutive_auto_reply: Optional[int] = None,\n",
-    "        human_input_mode: Optional[str] = \"NEVER\",\n",
+    "        human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n",
     "        code_execution_config: Optional[Union[Dict, Literal[False]]] = False,\n",
     "        description: Optional[\n",
     "            str\n",