From 0746465ac67ee8f5a0d52cce07974e5f2e61b00b Mon Sep 17 00:00:00 2001
From: Jack Gerrits
Date: Wed, 27 Mar 2024 10:01:23 -0400
Subject: [PATCH 1/3] Mark cache as a protocol and update type hints to
 reflect this

---
 autogen/agentchat/chat.py                   |  2 +-
 .../contrib/capabilities/generate_images.py |  6 +--
 autogen/agentchat/conversable_agent.py      | 14 ++---
 autogen/cache/__init__.py                   |  4 +-
 autogen/cache/abstract_cache_base.py        | 52 ++++---------------
 autogen/cache/cache.py                      |  2 +-
 autogen/oai/__init__.py                     |  2 -
 autogen/oai/client.py                       |  4 +-
 8 files changed, 26 insertions(+), 60 deletions(-)

diff --git a/autogen/agentchat/chat.py b/autogen/agentchat/chat.py
index 9bbfac36fd2..bd56cf2f579 100644
--- a/autogen/agentchat/chat.py
+++ b/autogen/agentchat/chat.py
@@ -150,7 +150,7 @@ def initiate_chats(chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
             - "recipient": the recipient agent.
             - "clear_history" (bool): whether to clear the chat history with the agent. Default is True.
             - "silent" (bool or None): (Experimental) whether to print the messages in this conversation. Default is False.
-            - "cache" (Cache or None): the cache client to use for this conversation. Default is None.
+            - "cache" (AbstractCache or None): the cache client to use for this conversation. Default is None.
             - "max_turns" (int or None): maximum number of turns for the chat. If None, the chat will continue until a termination condition is met. Default is None.
             - "summary_method" (str or callable): a string or callable specifying the method to get a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
             - "summary_args" (dict): a dictionary of arguments to be passed to the summary_method. Default is {}.

diff --git a/autogen/agentchat/contrib/capabilities/generate_images.py b/autogen/agentchat/contrib/capabilities/generate_images.py
index 778e7256558..d16121ddb9a 100644
--- a/autogen/agentchat/contrib/capabilities/generate_images.py
+++ b/autogen/agentchat/contrib/capabilities/generate_images.py
@@ -5,7 +5,7 @@
 from PIL.Image import Image

 from autogen import Agent, ConversableAgent, code_utils
-from autogen.cache import Cache
+from autogen.cache import AbstractCache
 from autogen.agentchat.contrib import img_utils
 from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability
 from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
@@ -142,7 +142,7 @@ class ImageGeneration(AgentCapability):
     def __init__(
         self,
         image_generator: ImageGenerator,
-        cache: Optional[Cache] = None,
+        cache: Optional[AbstractCache] = None,
         text_analyzer_llm_config: Optional[Dict] = None,
         text_analyzer_instructions: str = PROMPT_INSTRUCTIONS,
         verbosity: int = 0,
@@ -151,7 +151,7 @@ def __init__(
         """
         Args:
             image_generator (ImageGenerator): The image generator you would like to use to generate images.
-            cache (None or Cache): The cache client to use to store and retrieve generated images. If None,
+            cache (None or AbstractCache): The cache client to use to store and retrieve generated images. If None,
                 no caching will be used.
             text_analyzer_llm_config (Dict or None): The LLM config for the text analyzer. If None,
                 the LLM config will be retrieved from the agent you're adding the ability to.
diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index eb9d1c9cbcb..2b051fc0694 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -15,7 +15,7 @@
 from autogen.exception_utils import InvalidCarryOverType, SenderRequired

 from .._pydantic import model_dump
-from ..cache.cache import Cache
+from ..cache.cache import AbstractCache
 from ..code_utils import (
     UNKNOWN,
     check_can_use_docker_or_throw,
@@ -865,7 +865,7 @@ def initiate_chat(
         recipient: "ConversableAgent",
         clear_history: bool = True,
         silent: Optional[bool] = False,
-        cache: Optional[Cache] = None,
+        cache: Optional[AbstractCache] = None,
         max_turns: Optional[int] = None,
         summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
         summary_args: Optional[dict] = {},
@@ -882,7 +882,7 @@ def initiate_chat(
             recipient: the recipient agent.
             clear_history (bool): whether to clear the chat history with the agent. Default is True.
             silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
-            cache (Cache or None): the cache client to be used for this conversation. Default is None.
+            cache (AbstractCache or None): the cache client to be used for this conversation. Default is None.
             max_turns (int or None): the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from
                 [max_consecutive_auto_reply](#max_consecutive_auto_reply) which is the maximum number of consecutive auto replies; and it is also different from [max_rounds in GroupChat](./groupchat#groupchat-objects) which is the maximum number of rounds in a group chat session.
                 If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.
@@ -1007,7 +1007,7 @@ async def a_initiate_chat(
         recipient: "ConversableAgent",
         clear_history: bool = True,
         silent: Optional[bool] = False,
-        cache: Optional[Cache] = None,
+        cache: Optional[AbstractCache] = None,
         max_turns: Optional[int] = None,
         summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
         summary_args: Optional[dict] = {},
@@ -1073,7 +1073,7 @@ def _summarize_chat(
         summary_method,
         summary_args,
         recipient: Optional[Agent] = None,
-        cache: Optional[Cache] = None,
+        cache: Optional[AbstractCache] = None,
     ) -> str:
         """Get a chat summary from an agent participating in a chat.

@@ -1141,7 +1141,7 @@ def _relfection_with_llm_as_summary(sender, recipient, summary_args):
         return summary

     def _reflection_with_llm(
-        self, prompt, messages, llm_agent: Optional[Agent] = None, cache: Optional[Cache] = None
+        self, prompt, messages, llm_agent: Optional[Agent] = None, cache: Optional[AbstractCache] = None
     ) -> str:
         """Get a chat summary using reflection with an llm client based on the conversation history.

@@ -1149,7 +1149,7 @@ def _reflection_with_llm(
             prompt (str): The prompt (in this method it is used as system prompt) used to get the summary.
             messages (list): The messages generated as part of a chat conversation.
             llm_agent: the agent with an llm client.
-            cache (Cache or None): the cache client to be used for this conversation.
+            cache (AbstractCache or None): the cache client to be used for this conversation.
""" system_msg = [ { diff --git a/autogen/cache/__init__.py b/autogen/cache/__init__.py index 0eb8cfa71c8..febfa8c7c5d 100644 --- a/autogen/cache/__init__.py +++ b/autogen/cache/__init__.py @@ -1,3 +1,3 @@ -from .cache import Cache +from .cache import Cache, AbstractCache -__all__ = ["Cache"] +__all__ = ["Cache", "AbstractCache"] diff --git a/autogen/cache/abstract_cache_base.py b/autogen/cache/abstract_cache_base.py index 233702e777a..811a89a6271 100644 --- a/autogen/cache/abstract_cache_base.py +++ b/autogen/cache/abstract_cache_base.py @@ -1,6 +1,5 @@ -from abc import ABC, abstractmethod from types import TracebackType -from typing import Any, Optional, Type +from typing import Any, Optional, Protocol, Type import sys if sys.version_info >= (3, 11): @@ -9,23 +8,17 @@ from typing_extensions import Self -class AbstractCache(ABC): +class AbstractCache(Protocol): """ - Abstract base class for cache implementations. - - This class defines the basic interface for cache operations. + This protocol defines the basic interface for cache operations. Implementing classes should provide concrete implementations for these methods to handle caching mechanisms. """ - @abstractmethod def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]: """ Retrieve an item from the cache. - Abstract method that must be implemented by subclasses to - retrieve an item from the cache. - Args: key (str): The key identifying the item in the cache. default (optional): The default value to return if the key is not found. @@ -33,53 +26,34 @@ def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]: Returns: The value associated with the key if found, else the default value. - - Raises: - NotImplementedError: If the subclass does not implement this method. """ + ... - @abstractmethod def set(self, key: str, value: Any) -> None: """ Set an item in the cache. - Abstract method that must be implemented by subclasses to - store an item in the cache. - Args: key (str): The key under which the item is to be stored. value: The value to be stored in the cache. - - Raises: - NotImplementedError: If the subclass does not implement this method. """ + ... - @abstractmethod def close(self) -> None: """ - Close the cache. - - Abstract method that should be implemented by subclasses to - perform any necessary cleanup, such as closing network connections or + Close the cache. Perform any necessary cleanup, such as closing network connections or releasing resources. - - Raises: - NotImplementedError: If the subclass does not implement this method. """ + ... - @abstractmethod def __enter__(self) -> Self: """ Enter the runtime context related to this object. - The with statement will bind this method’s return value to the target(s) + The with statement will bind this method's return value to the target(s) specified in the as clause of the statement, if any. - - Raises: - NotImplementedError: If the subclass does not implement this method. """ - - @abstractmethod + ... def __exit__( self, exc_type: Optional[Type[BaseException]], @@ -89,15 +63,9 @@ def __exit__( """ Exit the runtime context and close the cache. - Abstract method that should be implemented by subclasses to handle - the exit from a with statement. It is responsible for resource - release and cleanup. - Args: exc_type: The exception type if an exception was raised in the context. exc_value: The exception value if an exception was raised in the context. traceback: The traceback if an exception was raised in the context. 
-
-        Raises:
-            NotImplementedError: If the subclass does not implement this method.
         """
+        ...
\ No newline at end of file

diff --git a/autogen/cache/cache.py b/autogen/cache/cache.py
index 1cb507ad79a..31bbfa13529 100644
--- a/autogen/cache/cache.py
+++ b/autogen/cache/cache.py
@@ -14,7 +14,7 @@
     from typing_extensions import Self


-class Cache:
+class Cache(AbstractCache):
     """
     A wrapper class for managing cache configuration and instances.

diff --git a/autogen/oai/__init__.py b/autogen/oai/__init__.py
index 9e8437cecc7..65934ec44c5 100644
--- a/autogen/oai/__init__.py
+++ b/autogen/oai/__init__.py
@@ -9,7 +9,6 @@
     config_list_from_dotenv,
     filter_config,
 )
-from autogen.cache.cache import Cache

 __all__ = [
     "OpenAIWrapper",
@@ -23,5 +22,4 @@
     "config_list_from_json",
     "config_list_from_dotenv",
     "filter_config",
-    "Cache",
 ]

diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index ff7f8d37880..f288ece3961 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -10,7 +10,7 @@
 from pydantic import BaseModel
 from typing import Protocol

-from autogen.cache.cache import Cache
+from autogen.cache import Cache
 from autogen.io.base import IOStream
 from autogen.oai.openai_utils import get_key, is_valid_api_key, OAI_PRICE1K
 from autogen.token_count_utils import count_token
@@ -517,7 +517,7 @@ def create(self, **config: Any) -> ModelClient.ModelClientResponseProtocol:
                 The actual prompt will be:
                 "Complete the following sentence: Today I feel".
                 More examples can be found at [templating](/docs/Use-Cases/enhanced_inference#templating).
-            - cache (Cache | None): A Cache object to use for response cache. Default to None.
+            - cache (AbstractCache | None): A cache object to use for the response cache. Defaults to None.
                 Note that the cache argument overrides the legacy cache_seed argument:
                 if this argument is provided, then the cache_seed argument is ignored.
                 If this argument is not provided or None, then the cache_seed argument is used.

From e65c63e8f3a0157ab0906a4cc2401b9f98155966 Mon Sep 17 00:00:00 2001
From: Jack Gerrits
Date: Wed, 27 Mar 2024 10:01:53 -0400
Subject: [PATCH 2/3] int

---
 autogen/cache/abstract_cache_base.py      | 3 ++-
 test/agentchat/contrib/test_web_surfer.py | 1 -
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/autogen/cache/abstract_cache_base.py b/autogen/cache/abstract_cache_base.py
index 811a89a6271..ebf1cecfa40 100644
--- a/autogen/cache/abstract_cache_base.py
+++ b/autogen/cache/abstract_cache_base.py
@@ -54,6 +54,7 @@ def __enter__(self) -> Self:
         specified in the as clause of the statement, if any.
         """
         ...
+
     def __exit__(
         self,
         exc_type: Optional[Type[BaseException]],
@@ -68,4 +69,4 @@ def __exit__(
             exc_value: The exception value if an exception was raised in the context.
             traceback: The traceback if an exception was raised in the context.
         """
-        ...
\ No newline at end of file
+        ...
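
The practical effect of the `Protocol` switch in the hunks above is that cache implementations no longer have to inherit from `AbstractCache`: any class whose `get`, `set`, `close`, `__enter__`, and `__exit__` methods match the declared signatures satisfies the type structurally. Below is a minimal sketch of a conforming dict-backed cache; the `DictCache` name and its body are illustrative only and are not part of this patch series:

```python
from types import TracebackType
from typing import Any, Dict, Optional, Type

from autogen.cache import AbstractCache  # exported by patch 1


class DictCache:
    """In-memory cache; satisfies AbstractCache structurally, with no subclassing."""

    def __init__(self) -> None:
        self._store: Dict[str, Any] = {}

    def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
        # Return the cached value, or `default` when the key is absent.
        return self._store.get(key, default)

    def set(self, key: str, value: Any) -> None:
        self._store[key] = value

    def close(self) -> None:
        # Nothing to release for an in-memory dict; clear it for good measure.
        self._store.clear()

    def __enter__(self) -> "DictCache":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        self.close()


# Type-checks against the protocol even though DictCache never names it:
cache: AbstractCache = DictCache()
```

Under the previous ABC, `DictCache` would have had to subclass `AbstractCache` explicitly; with the protocol, conformance is purely structural.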
diff --git a/test/agentchat/contrib/test_web_surfer.py b/test/agentchat/contrib/test_web_surfer.py
index 71325fc9c15..e53342fd8e0 100755
--- a/test/agentchat/contrib/test_web_surfer.py
+++ b/test/agentchat/contrib/test_web_surfer.py
@@ -6,7 +6,6 @@
 import pytest
 from autogen import UserProxyAgent, config_list_from_json
 from autogen.oai.openai_utils import filter_config
-from autogen.cache import Cache

 sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
 from conftest import MOCK_OPEN_AI_API_KEY, skip_openai  # noqa: E402

From 3692139106a878fb9f69dc370dae4e4d2d547dcf Mon Sep 17 00:00:00 2001
From: Jack Gerrits
Date: Wed, 27 Mar 2024 12:53:00 -0400
Subject: [PATCH 3/3] undo init change

---
 autogen/oai/__init__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/autogen/oai/__init__.py b/autogen/oai/__init__.py
index 65934ec44c5..9e8437cecc7 100644
--- a/autogen/oai/__init__.py
+++ b/autogen/oai/__init__.py
@@ -9,6 +9,7 @@
     config_list_from_dotenv,
     filter_config,
 )
+from autogen.cache.cache import Cache

 __all__ = [
     "OpenAIWrapper",
@@ -22,4 +23,5 @@
     "config_list_from_json",
     "config_list_from_dotenv",
     "filter_config",
+    "Cache",
 ]
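
For existing callers, nothing observable changes: `Cache` now subclasses `AbstractCache` explicitly (patch 1, `cache.py`), so it is accepted anywhere the new `Optional[AbstractCache]` hints appear, and patch 3 preserves the legacy `autogen.oai.Cache` import path. A hedged usage sketch follows, assuming the existing `Cache.disk()` factory keeps its current signature; the agents and the `cache_seed` value are deliberately minimal placeholders:

```python
from autogen import ConversableAgent
from autogen.cache import Cache

# Placeholder agents; any configured ConversableAgent pair would do.
assistant = ConversableAgent("assistant", llm_config=False)
user = ConversableAgent("user", llm_config=False, human_input_mode="NEVER")

# Cache.disk() returns a Cache, which satisfies the AbstractCache-typed
# `cache` parameter on initiate_chat; the context manager closes the cache
# on exit via the protocol's __exit__.
with Cache.disk(cache_seed=42) as cache:  # cache_seed value is illustrative
    user.initiate_chat(assistant, message="What is 2 + 2?", max_turns=1, cache=cache)
```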