Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixes human_input_mode annotations #2864

Merged
merged 6 commits into the base branch from the contributor's branch
Jun 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def __init__(
llm_config: Optional[Union[Dict, Literal[False]]] = None,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
description: Optional[str] = None,
**kwargs,
):
Expand Down
5 changes: 2 additions & 3 deletions autogen/agentchat/contrib/compressible_agent.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
import asyncio
import copy
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
from warnings import warn

from autogen import Agent, ConversableAgent, OpenAIWrapper
Expand Down Expand Up @@ -60,7 +59,7 @@ def __init__(
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Optional[Union[Dict, bool]] = False,
llm_config: Optional[Union[Dict, bool]] = None,
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/math_user_proxy_agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
import re
from time import sleep
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union

from pydantic import BaseModel, Extra, root_validator

Expand Down Expand Up @@ -136,7 +136,7 @@ def __init__(
is_termination_msg: Optional[
Callable[[Dict], bool]
] = _is_termination_msg_mathchat, # terminate if \boxed{} in message
human_input_mode: Optional[str] = "NEVER", # Fully automated
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER", # Fully automated
default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,
max_invalid_q_per_step=3, # a parameter needed in MathChat
**kwargs,
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Callable, Dict, List, Optional
from typing import Callable, Dict, List, Literal, Optional

from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
from autogen.agentchat.contrib.vectordb.utils import (
Expand All @@ -23,7 +23,7 @@ class QdrantRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def __init__(
self,
name="RetrieveChatAgent", # default set to RetrieveChatAgent
human_input_mode: Optional[str] = "ALWAYS",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
retrieve_config: Optional[Dict] = None, # config for the retrieve agent
**kwargs,
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/retrieve_user_proxy_agent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import hashlib
import os
import re
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union

from IPython import get_ipython

Expand Down Expand Up @@ -92,7 +92,7 @@ class RetrieveUserProxyAgent(UserProxyAgent):
def __init__(
self,
name="RetrieveChatAgent", # default set to RetrieveChatAgent
human_input_mode: Optional[str] = "ALWAYS",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
retrieve_config: Optional[Dict] = None, # config for the retrieve agent
**kwargs,
Expand Down
4 changes: 1 addition & 3 deletions autogen/agentchat/contrib/society_of_mind_agent.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
# ruff: noqa: E722
import copy
import json
import traceback
from dataclasses import dataclass
from typing import Callable, Dict, List, Literal, Optional, Tuple, Union

from autogen import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper
Expand Down Expand Up @@ -36,7 +34,7 @@ def __init__(
response_preparer: Optional[Union[str, Callable]] = None,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "TERMINATE",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Union[Dict, Literal[False]] = False,
llm_config: Optional[Union[Dict, Literal[False]]] = False,
Expand Down
5 changes: 2 additions & 3 deletions autogen/agentchat/contrib/text_analyzer_agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing import Any, Dict, List, Literal, Optional, Tuple, Union

from autogen import oai
from autogen.agentchat.agent import Agent
from autogen.agentchat.assistant_agent import ConversableAgent

Expand All @@ -17,7 +16,7 @@ def __init__(
self,
name="analyzer",
system_message: Optional[str] = system_message,
human_input_mode: Optional[str] = "NEVER",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
llm_config: Optional[Union[Dict, bool]] = None,
**kwargs,
):
Expand Down
8 changes: 5 additions & 3 deletions autogen/agentchat/contrib/web_surfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def __init__(
description: Optional[str] = DEFAULT_DESCRIPTION,
is_termination_msg: Optional[Callable[[Dict[str, Any]], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "TERMINATE",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Union[Dict, Literal[False]] = False,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
Expand Down Expand Up @@ -111,7 +111,9 @@ def _create_summarizer_client(self, summarizer_llm_config: Dict[str, Any], llm_c
self.summarizer_llm_config = summarizer_llm_config # type: ignore[assignment]

# Create the summarizer client
self.summarization_client = None if self.summarizer_llm_config is False else OpenAIWrapper(**self.summarizer_llm_config) # type: ignore[arg-type]
self.summarization_client = (
None if self.summarizer_llm_config is False else OpenAIWrapper(**self.summarizer_llm_config)
) # type: ignore[arg-type]

def _register_functions(self) -> None:
"""Register the functions for the inner assistant and user proxy."""
Expand Down Expand Up @@ -250,7 +252,7 @@ def _answer_from_page(
def _summarize_page(
url: Annotated[
Optional[str], "[Optional] The url of the page to summarize. (Defaults to current page)"
] = None
] = None,
) -> str:
return _answer_from_page(url=url, question=None)

Expand Down
10 changes: 1 addition & 9 deletions autogen/agentchat/groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -609,7 +609,6 @@ def _auto_select_speaker(

# Registered reply function for checking_agent, checks the result of the response for agent names
def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Union[str, Dict, None]]:

# The number of retries left, starting at max_retries_for_selecting_speaker
nonlocal attempts_left
nonlocal attempt
Expand Down Expand Up @@ -708,7 +707,6 @@ async def a_auto_select_speaker(

# Registered reply function for checking_agent, checks the result of the response for agent names
def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Union[str, Dict, None]]:

# The number of retries left, starting at max_retries_for_selecting_speaker
nonlocal attempts_left
nonlocal attempt
Expand Down Expand Up @@ -782,7 +780,6 @@ def _validate_speaker_name(
mentions = self._mentioned_agents(select_name, agents)

if len(mentions) == 1:

# Success on retry, we have just one name mentioned
selected_agent_name = next(iter(mentions))

Expand Down Expand Up @@ -864,17 +861,14 @@ def _process_speaker_selection_result(self, result, last_speaker: ConversableAge

Used by auto_select_speaker and a_auto_select_speaker."""
if len(result.chat_history) > 0:

# Use the final message, which will have the selected agent or reason for failure
final_message = result.chat_history[-1]["content"]

if "[AGENT SELECTED]" in final_message:

# Have successfully selected an agent, return it
return self.agent_by_name(final_message.replace("[AGENT SELECTED]", ""))

else: # "[AGENT SELECTION FAILED]"

# Failed to select an agent, so we'll select the next agent in the list
next_agent = self.next_agent(last_speaker, agents)

Expand Down Expand Up @@ -945,7 +939,7 @@ def __init__(
name: Optional[str] = "chat_manager",
# unlimited consecutive auto reply by default
max_consecutive_auto_reply: Optional[int] = sys.maxsize,
human_input_mode: Optional[str] = "NEVER",
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
system_message: Optional[Union[str, List]] = "Group chat manager.",
silent: bool = False,
**kwargs,
Expand Down Expand Up @@ -1207,7 +1201,6 @@ def resume(

# Load the messages into the group chat
for i, message in enumerate(messages):

if "name" in message:
message_speaker_agent = self._groupchat.agent_by_name(message["name"])
else:
Expand Down Expand Up @@ -1312,7 +1305,6 @@ async def a_resume(

# Load the messages into the group chat
for i, message in enumerate(messages):

if "name" in message:
message_speaker_agent = self._groupchat.agent_by_name(message["name"])
else:
Expand Down
4 changes: 2 additions & 2 deletions notebook/agentchat_agentoptimizer.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
"import copy\n",
"import json\n",
"import os\n",
"from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n",
"from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union\n",
"\n",
"from openai import BadRequestError\n",
"\n",
Expand Down Expand Up @@ -119,7 +119,7 @@
" self,\n",
" name: Optional[str] = \"MathChatAgent\",\n",
" is_termination_msg: Optional[Callable[[Dict], bool]] = is_termination_msg_mathchat,\n",
" human_input_mode: Optional[str] = \"NEVER\",\n",
" human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n",
" default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,\n",
" max_invalid_q_per_step=3,\n",
" **kwargs,\n",
Expand Down
2 changes: 1 addition & 1 deletion website/docs/topics/task_decomposition.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -1780,7 +1780,7 @@
" llm_config: Optional[Union[Dict, Literal[False]]] = None,\n",
" is_termination_msg: Optional[Callable[[Dict], bool]] = None,\n",
" max_consecutive_auto_reply: Optional[int] = None,\n",
" human_input_mode: Optional[str] = \"NEVER\",\n",
" human_input_mode: Literal[\"ALWAYS\", \"NEVER\", \"TERMINATE\"] = \"NEVER\",\n",
" code_execution_config: Optional[Union[Dict, Literal[False]]] = False,\n",
" description: Optional[\n",
" str\n",
Expand Down
Loading