diff --git a/.gitignore b/.gitignore index 836acf2a8a2..70bb7c9bf1d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ node_modules/ *.log # Python virtualenv -.venv +.venv* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/README.md b/README.md index 85b5f806d24..3fe3139f873 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,15 @@ The easiest way to start playing is 2. Copy OAI_CONFIG_LIST_sample to ./notebook folder, name to OAI_CONFIG_LIST, and set the correct configuration. 3. Start playing with the notebooks! +## Using existing docker image +Install docker, save your oai key into an environment variable name OPENAI_API_KEY, and then run the following. +``` +docker pull yuandongtian/autogen:latest +docker run -it -e OPENAI_API_KEY=$OPENAI_API_KEY -p 8081:8081 docker.io/yuandongtian/autogen:latest +``` + +Then open `http://localhost:8081/` in your browser to use AutoGen. The UI is from `./samples/apps/autogen-assistant`. See docker hub [link](https://hub.docker.com/r/yuandongtian/autogen) for more details. ## Installation diff --git a/autogen/_pydantic.py b/autogen/_pydantic.py new file mode 100644 index 00000000000..ef0cad66e74 --- /dev/null +++ b/autogen/_pydantic.py @@ -0,0 +1,110 @@ +from typing import Any, Dict, Optional, Tuple, Type, Union, get_args + +from pydantic import BaseModel +from pydantic.version import VERSION as PYDANTIC_VERSION +from typing_extensions import get_origin + +__all__ = ("JsonSchemaValue", "model_dump", "model_dump_json", "type2schema") + +PYDANTIC_V1 = PYDANTIC_VERSION.startswith("1.") + +if not PYDANTIC_V1: + from pydantic import TypeAdapter + from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref + from pydantic.json_schema import JsonSchemaValue + + def type2schema(t: Optional[Type]) -> JsonSchemaValue: + """Convert a type to a JSON schema + + Args: + t (Type): The type to convert + + Returns: + JsonSchemaValue: The JSON schema + """ + return TypeAdapter(t).json_schema() + + def model_dump(model: BaseModel) -> Dict[str, Any]: + """Convert a pydantic model to a dict + + Args: + model (BaseModel): The model to convert + + Returns: + Dict[str, Any]: The dict representation of the model + + """ + return model.model_dump() + + def model_dump_json(model: BaseModel) -> str: + """Convert a pydantic model to a JSON string + + Args: + model (BaseModel): The model to convert + + Returns: + str: The JSON string representation of the model + """ + return model.model_dump_json() + + +# Remove this once we drop support for pydantic 1.x +else: # pragma: no cover + from pydantic import schema_of + from pydantic.typing import evaluate_forwardref as evaluate_forwardref + + JsonSchemaValue = Dict[str, Any] + + def type2schema(t: Optional[Type]) -> JsonSchemaValue: + """Convert a type to a JSON schema + + Args: + t (Type): The type to convert + + Returns: + JsonSchemaValue: The JSON schema + """ + if PYDANTIC_V1: + if t is None: + return {"type": "null"} + elif get_origin(t) is Union: + return {"anyOf": [type2schema(tt) for tt in get_args(t)]} + elif get_origin(t) in [Tuple, tuple]: + prefixItems = [type2schema(tt) for tt in get_args(t)] + return { + "maxItems": len(prefixItems), + "minItems": len(prefixItems), + "prefixItems": prefixItems, + "type": "array", + } + + d = schema_of(t) + if "title" in d: + d.pop("title") + if "description" in d: + d.pop("description") + + return d + + def model_dump(model: BaseModel) -> Dict[str, Any]: + """Convert a pydantic model to a dict + + Args: + model (BaseModel): The model to 
convert + + Returns: + Dict[str, Any]: The dict representation of the model + + """ + return model.dict() + + def model_dump_json(model: BaseModel) -> str: + """Convert a pydantic model to a JSON string + + Args: + model (BaseModel): The model to convert + + Returns: + str: The JSON string representation of the model + """ + return model.json() diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py index ce7836da166..ffb2e598d65 100644 --- a/autogen/agentchat/assistant_agent.py +++ b/autogen/agentchat/assistant_agent.py @@ -70,7 +70,7 @@ def __init__( **kwargs, ) - # Update the provided desciption if None, and we are using the default system_message, + # Update the provided description if None, and we are using the default system_message, # then use the default description. if description is None: if system_message == self.DEFAULT_SYSTEM_MESSAGE: diff --git a/autogen/agentchat/contrib/compressible_agent.py b/autogen/agentchat/contrib/compressible_agent.py index dc8c80a01ff..72f31b6f3ea 100644 --- a/autogen/agentchat/contrib/compressible_agent.py +++ b/autogen/agentchat/contrib/compressible_agent.py @@ -225,7 +225,7 @@ def _manage_history_on_token_limit(self, messages, token_used, max_token_allowed # 1. mode = "TERMINATE", terminate the agent if no token left. if self.compress_config["mode"] == "TERMINATE": if max_token_allowed - token_used <= 0: - # Teminate if no token left. + # Terminate if no token left. print( colored( f'Warning: Terminate Agent "{self.name}" due to no token left for oai reply. max token for {model}: {max_token_allowed}, existing token count: {token_used}', @@ -320,7 +320,7 @@ def on_oai_token_limit( cmsg["role"] = "user" sender._oai_messages[self][i] = cmsg - # sucessfully compressed, return False, None for generate_oai_reply to be called with the updated messages + # successfully compressed, return False, None for generate_oai_reply to be called with the updated messages return False, None return final, None @@ -332,7 +332,7 @@ def compress_messages( """Compress a list of messages into one message. The first message (the initial prompt) will not be compressed. - The rest of the messages will be compressed into one message, the model is asked to distinuish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN. + The rest of the messages will be compressed into one message, the model is asked to distinguish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN. Check out the compress_sys_msg. TODO: model used in compression agent is different from assistant agent: For example, if original model used by is gpt-4; we start compressing at 70% of usage, 70% of 8092 = 5664; and we use gpt 3.5 here max_toke = 4096, it will raise error. choosinng model automatically? 
diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index 2fd431e99a2..351d94cf167 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -203,7 +203,7 @@ def _get_run_response(self, thread, run): new_messages.append( { "role": msg.role, - "content": f"Recieved file id={content.image_file.file_id}", + "content": f"Received file id={content.image_file.file_id}", } ) return new_messages @@ -219,7 +219,7 @@ def _get_run_response(self, thread, run): } logger.info( - "Intermediate executing(%s, Sucess: %s) : %s", + "Intermediate executing(%s, Success: %s) : %s", tool_response["name"], is_exec_success, tool_response["content"], diff --git a/autogen/agentchat/contrib/img_utils.py b/autogen/agentchat/contrib/img_utils.py index a8e1a96876a..4fc08f8f357 100644 --- a/autogen/agentchat/contrib/img_utils.py +++ b/autogen/agentchat/contrib/img_utils.py @@ -26,7 +26,7 @@ def get_image_data(image_file: str, use_b64=True) -> bytes: return content -def llava_formater(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]: +def llava_formatter(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]: """ Formats the input prompt by replacing image tags and returns the new prompt along with image locations. diff --git a/autogen/agentchat/contrib/llava_agent.py b/autogen/agentchat/contrib/llava_agent.py index 641c5fa06c5..65c39fd1e20 100644 --- a/autogen/agentchat/contrib/llava_agent.py +++ b/autogen/agentchat/contrib/llava_agent.py @@ -10,7 +10,7 @@ from regex import R from autogen.agentchat.agent import Agent -from autogen.agentchat.contrib.img_utils import get_image_data, llava_formater +from autogen.agentchat.contrib.img_utils import get_image_data, llava_formatter from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent from autogen.code_utils import content_str @@ -162,7 +162,7 @@ def llava_call(prompt: str, llm_config: dict) -> str: Makes a call to the LLaVA service to generate text based on a given prompt """ - prompt, images = llava_formater(prompt, order_image_tokens=False) + prompt, images = llava_formatter(prompt, order_image_tokens=False) for im in images: if len(im) == 0: diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py index a432211fad4..67c86daf05d 100644 --- a/autogen/agentchat/contrib/math_user_proxy_agent.py +++ b/autogen/agentchat/contrib/math_user_proxy_agent.py @@ -4,6 +4,7 @@ from typing import Any, Callable, Dict, List, Optional, Union, Tuple from time import sleep +from autogen._pydantic import PYDANTIC_V1 from autogen.agentchat import Agent, UserProxyAgent from autogen.code_utils import UNKNOWN, extract_code, execute_code, infer_lang from autogen.math_utils import get_answer @@ -384,7 +385,8 @@ class WolframAlphaAPIWrapper(BaseModel): class Config: """Configuration for this pydantic object.""" - extra = Extra.forbid + if PYDANTIC_V1: + extra = Extra.forbid @root_validator(skip_on_failure=True) def validate_environment(cls, values: Dict) -> Dict: @@ -395,8 +397,8 @@ def validate_environment(cls, values: Dict) -> Dict: try: import wolframalpha - except ImportError: - raise ImportError("wolframalpha is not installed. " "Please install it with `pip install wolframalpha`") + except ImportError as e: + raise ImportError("wolframalpha is not installed. 
Please install it with `pip install wolframalpha`") from e client = wolframalpha.Client(wolfram_alpha_appid) values["wolfram_client"] = client diff --git a/autogen/agentchat/contrib/multimodal_conversable_agent.py b/autogen/agentchat/contrib/multimodal_conversable_agent.py index 9a8c36591e9..e6f3720186c 100644 --- a/autogen/agentchat/contrib/multimodal_conversable_agent.py +++ b/autogen/agentchat/contrib/multimodal_conversable_agent.py @@ -16,9 +16,14 @@ def colored(x, *args, **kwargs): from autogen.code_utils import content_str DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant.""" +DEFAULT_MODEL = "gpt-4-vision-preview" class MultimodalConversableAgent(ConversableAgent): + DEFAULT_CONFIG = { + "model": DEFAULT_MODEL, + } + def __init__( self, name: str, diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 493a83da8a5..d627450251e 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1,14 +1,15 @@ import asyncio import copy import functools +import inspect import json import logging from collections import defaultdict -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union - -from autogen import OpenAIWrapper -from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang +from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union +from .. import OpenAIWrapper +from ..code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang +from ..function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str from .agent import Agent try: @@ -19,8 +20,12 @@ def colored(x, *args, **kwargs): return x +__all__ = ("ConversableAgent",) + logger = logging.getLogger(__name__) +F = TypeVar("F", bound=Callable[..., Any]) + class ConversableAgent(Agent): """(In preview) A class for generic conversable agents which can be configured as assistant or user proxy. @@ -676,7 +681,7 @@ def generate_code_execution_reply( else: messages_to_scan += 1 - # iterate through the last n messages reversly + # iterate through the last n messages reversely # if code blocks are found, execute the code blocks and return the output # if no code blocks are found, continue for i in range(min(len(messages), messages_to_scan)): @@ -1292,7 +1297,7 @@ def update_function_signature(self, func_sig: Union[str, Dict], is_remove: None) Args: func_sig (str or dict): description/name of the function to update/remove to the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions - is_remove: whether removing the funciton from llm_config with name 'func_sig' + is_remove: whether removing the function from llm_config with name 'func_sig' """ if not self.llm_config: @@ -1330,3 +1335,157 @@ def can_execute_function(self, name: str) -> bool: def function_map(self) -> Dict[str, Callable]: """Return the function map.""" return self._function_map + + def _wrap_function(self, func: F) -> F: + """Wrap the function to dump the return value to json. + + Handles both sync and async functions. + + Args: + func: the function to be wrapped. + + Returns: + The wrapped function. 
+ """ + + @load_basemodels_if_needed + @functools.wraps(func) + def _wrapped_func(*args, **kwargs): + retval = func(*args, **kwargs) + + return serialize_to_str(retval) + + @load_basemodels_if_needed + @functools.wraps(func) + async def _a_wrapped_func(*args, **kwargs): + retval = await func(*args, **kwargs) + return serialize_to_str(retval) + + wrapped_func = _a_wrapped_func if inspect.iscoroutinefunction(func) else _wrapped_func + + # needed for testing + wrapped_func._origin = func + + return wrapped_func + + def register_for_llm( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + ) -> Callable[[F], F]: + """Decorator factory for registering a function to be used by an agent. + + It's return value is used to decorate a function to be registered to the agent. The function uses type hints to + specify the arguments and return type. The function name is used as the default name for the function, + but a custom name can be provided. The function description is used to describe the function in the + agent's configuration. + + Args: + name (optional(str)): name of the function. If None, the function name will be used (default: None). + description (optional(str)): description of the function (default: None). It is mandatory + for the initial decorator, but the following ones can omit it. + + Returns: + The decorator for registering a function to be used by an agent. + + Examples: + ``` + @user_proxy.register_for_execution() + @agent2.register_for_llm() + @agent1.register_for_llm(description="This is a very useful function") + def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int, c=3.14) -> str: + return a + str(b * c) + ``` + + """ + + def _decorator(func: F) -> F: + """Decorator for registering a function to be used by an agent. + + Args: + func: the function to be registered. + + Returns: + The function to be registered, with the _description attribute set to the function description. + + Raises: + ValueError: if the function description is not provided and not propagated by a previous decorator. + RuntimeError: if the LLM config is not set up before registering a function. + + """ + # name can be overwriten by the parameter, by default it is the same as function name + if name: + func._name = name + elif not hasattr(func, "_name"): + func._name = func.__name__ + + # description is propagated from the previous decorator, but it is mandatory for the first one + if description: + func._description = description + else: + if not hasattr(func, "_description"): + raise ValueError("Function description is required, none found.") + + # get JSON schema for the function + f = get_function_schema(func, name=func._name, description=func._description) + + # register the function to the agent if there is LLM config, raise an exception otherwise + if self.llm_config is None: + raise RuntimeError("LLM config must be setup before registering a function for LLM.") + + self.update_function_signature(f, is_remove=False) + + return func + + return _decorator + + def register_for_execution( + self, + name: Optional[str] = None, + ) -> Callable[[F], F]: + """Decorator factory for registering a function to be executed by an agent. + + It's return value is used to decorate a function to be registered to the agent. + + Args: + name (optional(str)): name of the function. If None, the function name will be used (default: None). + + Returns: + The decorator for registering a function to be used by an agent. 
+ + Examples: + ``` + @user_proxy.register_for_execution() + @agent2.register_for_llm() + @agent1.register_for_llm(description="This is a very useful function") + def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int, c=3.14): + return a + str(b * c) + ``` + + """ + + def _decorator(func: F) -> F: + """Decorator for registering a function to be used by an agent. + + Args: + func: the function to be registered. + + Returns: + The function to be registered, with the _description attribute set to the function description. + + Raises: + ValueError: if the function description is not provided and not propagated by a previous decorator. + + """ + # name can be overwriten by the parameter, by default it is the same as function name + if name: + func._name = name + elif not hasattr(func, "_name"): + func._name = func.__name__ + + self.register_function({func._name: self._wrap_function(func)}) + + return func + + return _decorator diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 5b12a97e6b1..03251eccfb7 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -64,8 +64,10 @@ def agent_by_name(self, name: str) -> Agent: """Returns the agent with a given name.""" return self.agents[self.agent_names.index(name)] - def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent: + def next_agent(self, agent: Agent, agents: Optional[List[Agent]] = None) -> Agent: """Return the next agent in the list.""" + if agents is None: + agents = self.agents # What index is the agent? (-1 if not present) idx = self.agent_names.index(agent.name) if agent.name in self.agent_names else -1 @@ -79,20 +81,26 @@ def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent: if self.agents[(offset + i) % len(self.agents)] in agents: return self.agents[(offset + i) % len(self.agents)] - def select_speaker_msg(self, agents: List[Agent]) -> str: + def select_speaker_msg(self, agents: Optional[List[Agent]] = None) -> str: """Return the system message for selecting the next speaker. This is always the *first* message in the context.""" + if agents is None: + agents = self.agents return f"""You are in a role play game. The following roles are available: {self._participant_roles(agents)}. Read the following conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.""" - def select_speaker_prompt(self, agents: List[Agent]) -> str: + def select_speaker_prompt(self, agents: Optional[List[Agent]] = None) -> str: """Return the floating system prompt selecting the next speaker. This is always the *last* message in the context.""" + if agents is None: + agents = self.agents return f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role." 
- def manual_select_speaker(self, agents: List[Agent]) -> Union[Agent, None]: + def manual_select_speaker(self, agents: Optional[List[Agent]] = None) -> Union[Agent, None]: """Manually select the next speaker.""" + if agents is None: + agents = self.agents print("Please select the next speaker from the following list:") _n_agents = len(agents) diff --git a/autogen/function_utils.py b/autogen/function_utils.py new file mode 100644 index 00000000000..05493cc3df5 --- /dev/null +++ b/autogen/function_utils.py @@ -0,0 +1,330 @@ +import functools +import inspect +import json +from logging import getLogger +from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union + +from pydantic import BaseModel, Field +from typing_extensions import Annotated, Literal, get_args, get_origin + +from ._pydantic import JsonSchemaValue, evaluate_forwardref, model_dump, model_dump_json, type2schema + +logger = getLogger(__name__) + +T = TypeVar("T") + + +def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: + """Get the type annotation of a parameter. + + Args: + annotation: The annotation of the parameter + globalns: The global namespace of the function + + Returns: + The type annotation of the parameter + """ + if isinstance(annotation, str): + annotation = ForwardRef(annotation) + annotation = evaluate_forwardref(annotation, globalns, globalns) + return annotation + + +def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: + """Get the signature of a function with type annotations. + + Args: + call: The function to get the signature for + + Returns: + The signature of the function with type annotations + """ + signature = inspect.signature(call) + globalns = getattr(call, "__globals__", {}) + typed_params = [ + inspect.Parameter( + name=param.name, + kind=param.kind, + default=param.default, + annotation=get_typed_annotation(param.annotation, globalns), + ) + for param in signature.parameters.values() + ] + typed_signature = inspect.Signature(typed_params) + return typed_signature + + +def get_typed_return_annotation(call: Callable[..., Any]) -> Any: + """Get the return annotation of a function. 
+ + Args: + call: The function to get the return annotation for + + Returns: + The return annotation of the function + """ + signature = inspect.signature(call) + annotation = signature.return_annotation + + if annotation is inspect.Signature.empty: + return None + + globalns = getattr(call, "__globals__", {}) + return get_typed_annotation(annotation, globalns) + + +def get_param_annotations(typed_signature: inspect.Signature) -> Dict[int, Union[Annotated[Type, str], Type]]: + """Get the type annotations of the parameters of a function + + Args: + typed_signature: The signature of the function with type annotations + + Returns: + A dictionary of the type annotations of the parameters of the function + """ + return { + k: v.annotation for k, v in typed_signature.parameters.items() if v.annotation is not inspect.Signature.empty + } + + +class Parameters(BaseModel): + """Parameters of a function as defined by the OpenAI API""" + + type: Literal["object"] = "object" + properties: Dict[str, JsonSchemaValue] + required: List[str] + + +class Function(BaseModel): + """A function as defined by the OpenAI API""" + + description: Annotated[str, Field(description="Description of the function")] + name: Annotated[str, Field(description="Name of the function")] + parameters: Annotated[Parameters, Field(description="Parameters of the function")] + + +def get_parameter_json_schema( + k: str, v: Union[Annotated[Type, str], Type], default_values: Dict[str, Any] +) -> JsonSchemaValue: + """Get a JSON schema for a parameter as defined by the OpenAI API + + Args: + k: The name of the parameter + v: The type of the parameter + default_values: The default values of the parameters of the function + + Returns: + A Pydanitc model for the parameter + """ + + def type2description(k: str, v: Union[Annotated[Type, str], Type]) -> str: + # handles Annotated + if hasattr(v, "__metadata__"): + return v.__metadata__[0] + else: + return k + + schema = type2schema(v) + if k in default_values: + dv = default_values[k] + schema["default"] = dv + + schema["description"] = type2description(k, v) + + return schema + + +def get_required_params(typed_signature: inspect.Signature) -> List[str]: + """Get the required parameters of a function + + Args: + signature: The signature of the function as returned by inspect.signature + + Returns: + A list of the required parameters of the function + """ + return [k for k, v in typed_signature.parameters.items() if v.default == inspect.Signature.empty] + + +def get_default_values(typed_signature: inspect.Signature) -> Dict[str, Any]: + """Get default values of parameters of a function + + Args: + signature: The signature of the function as returned by inspect.signature + + Returns: + A dictionary of the default values of the parameters of the function + """ + return {k: v.default for k, v in typed_signature.parameters.items() if v.default != inspect.Signature.empty} + + +def get_parameters( + required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]], default_values: Dict[str, Any] +) -> Parameters: + """Get the parameters of a function as defined by the OpenAI API + + Args: + required: The required parameters of the function + hints: The type hints of the function as returned by typing.get_type_hints + + Returns: + A Pydantic model for the parameters of the function + """ + return Parameters( + properties={ + k: get_parameter_json_schema(k, v, default_values) + for k, v in param_annotations.items() + if v is not inspect.Signature.empty + }, + required=required, + ) 
+ + +def get_missing_annotations(typed_signature: inspect.Signature, required: List[str]) -> Tuple[Set[str], Set[str]]: + """Get the missing annotations of a function + + Ignores the parameters with default values as they are not required to be annotated, but logs a warning. + Args: + typed_signature: The signature of the function with type annotations + required: The required parameters of the function + + Returns: + A set of the missing annotations of the function + """ + all_missing = {k for k, v in typed_signature.parameters.items() if v.annotation is inspect.Signature.empty} + missing = all_missing.intersection(set(required)) + unannotated_with_default = all_missing.difference(missing) + return missing, unannotated_with_default + + +def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: + """Get a JSON schema for a function as defined by the OpenAI API + + Args: + f: The function to get the JSON schema for + name: The name of the function + description: The description of the function + + Returns: + A JSON schema for the function + + Raises: + TypeError: If the function is not annotated + + Examples: + ``` + def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: + pass + + get_function_schema(f, description="function f") + + # {'type': 'function', + # 'function': {'description': 'function f', + # 'name': 'f', + # 'parameters': {'type': 'object', + # 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, + # 'b': {'type': 'int', 'description': 'b'}, + # 'c': {'type': 'float', 'description': 'Parameter c'}}, + # 'required': ['a']}}} + ``` + + """ + typed_signature = get_typed_signature(f) + required = get_required_params(typed_signature) + default_values = get_default_values(typed_signature) + param_annotations = get_param_annotations(typed_signature) + return_annotation = get_typed_return_annotation(f) + missing, unannotated_with_default = get_missing_annotations(typed_signature, required) + + if return_annotation is None: + logger.warning( + f"The return type of the function '{f.__name__}' is not annotated. Although annotating it is " + + "optional, the function should return either a string, a subclass of 'pydantic.BaseModel'." + ) + + if unannotated_with_default != set(): + unannotated_with_default_s = [f"'{k}'" for k in sorted(unannotated_with_default)] + logger.warning( + f"The following parameters of the function '{f.__name__}' with default values are not annotated: " + + f"{', '.join(unannotated_with_default_s)}." + ) + + if missing != set(): + missing_s = [f"'{k}'" for k in sorted(missing)] + raise TypeError( + f"All parameters of the function '{f.__name__}' without default values must be annotated. 
" + + f"The annotations are missing for the following parameters: {', '.join(missing_s)}" + ) + + fname = name if name else f.__name__ + + parameters = get_parameters(required, param_annotations, default_values=default_values) + + function = Function( + description=description, + name=fname, + parameters=parameters, + ) + + return model_dump(function) + + +def get_load_param_if_needed_function(t: Any) -> Optional[Callable[[T, Type], BaseModel]]: + """Get a function to load a parameter if it is a Pydantic model + + Args: + t: The type annotation of the parameter + + Returns: + A function to load the parameter if it is a Pydantic model, otherwise None + + """ + if get_origin(t) is Annotated: + return get_load_param_if_needed_function(get_args(t)[0]) + + def load_base_model(v: Dict[str, Any], t: Type[BaseModel]) -> BaseModel: + return t(**v) + + return load_base_model if isinstance(t, type) and issubclass(t, BaseModel) else None + + +def load_basemodels_if_needed(func: Callable[..., Any]) -> Callable[..., Any]: + """A decorator to load the parameters of a function if they are Pydantic models + + Args: + func: The function with annotated parameters + + Returns: + A function that loads the parameters before calling the original function + + """ + # get the type annotations of the parameters + typed_signature = get_typed_signature(func) + param_annotations = get_param_annotations(typed_signature) + + # get functions for loading BaseModels when needed based on the type annotations + kwargs_mapping = {k: get_load_param_if_needed_function(t) for k, t in param_annotations.items()} + + # remove the None values + kwargs_mapping = {k: f for k, f in kwargs_mapping.items() if f is not None} + + # a function that loads the parameters before calling the original function + @functools.wraps(func) + def load_parameters_if_needed(*args, **kwargs): + # load the BaseModels if needed + for k, f in kwargs_mapping.items(): + kwargs[k] = f(kwargs[k], param_annotations[k]) + + # call the original function + return func(*args, **kwargs) + + return load_parameters_if_needed + + +def serialize_to_str(x: Any) -> str: + if isinstance(x, str): + return x + elif isinstance(x, BaseModel): + return model_dump_json(x) + else: + return json.dumps(x) diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 14abb63ad6c..a6569ec0240 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -6,6 +6,7 @@ import logging import inspect from flaml.automl.logger import logger_formatter +from pydantic import ValidationError from autogen.oai.openai_utils import get_key, oai_price1k from autogen.token_count_utils import count_token @@ -13,7 +14,7 @@ TOOL_ENABLED = False try: import openai - from openai import OpenAI, APIError + from openai import OpenAI, APIError, __version__ as OPENAIVERSION from openai.types.chat import ChatCompletion from openai.types.chat.chat_completion import ChatCompletionMessage, Choice from openai.types.completion import Completion @@ -136,7 +137,7 @@ def _separate_create_config(self, config): return create_config, extra_kwargs def _client(self, config, openai_config): - """Create a client with the given config to overrdie openai_config, + """Create a client with the given config to override openai_config, after removing extra kwargs. 
""" openai_config = {**openai_config, **{k: v for k, v in config.items() if k in self.openai_kwargs}} @@ -244,7 +245,7 @@ def yes_or_no_filter(context, response): try: response.cost except AttributeError: - # update atrribute if cost is not calculated + # update attribute if cost is not calculated response.cost = self.cost(response) cache.set(key, response) self._update_usage_summary(response, use_cache=True) @@ -329,15 +330,27 @@ def _completions_create(self, client, params): ), ) for i in range(len(response_contents)): - response.choices.append( - Choice( + if OPENAIVERSION >= "1.5": # pragma: no cover + # OpenAI versions 1.5.0 and above + choice = Choice( index=i, finish_reason=finish_reasons[i], message=ChatCompletionMessage( role="assistant", content=response_contents[i], function_call=None ), + logprobs=None, ) - ) + else: + # OpenAI versions below 1.5.0 + choice = Choice( + index=i, + finish_reason=finish_reasons[i], + message=ChatCompletionMessage( + role="assistant", content=response_contents[i], function_call=None + ), + ) + + response.choices.append(choice) else: # If streaming is not enabled or using functions, send a regular chat completion request # Functions are not supported, so ensure streaming is disabled @@ -349,7 +362,7 @@ def _completions_create(self, client, params): def _update_usage_summary(self, response: ChatCompletion | Completion, use_cache: bool) -> None: """Update the usage summary. - Usage is calculated no mattter filter is passed or not. + Usage is calculated no matter filter is passed or not. """ def update_usage(usage_summary): diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py index ec99c9f24eb..e1eeb53021b 100644 --- a/autogen/oai/completion.py +++ b/autogen/oai/completion.py @@ -430,7 +430,7 @@ def _eval(cls, config: dict, prune=True, eval_only=False): if previous_num_completions: n_tokens_list[i] += n_output_tokens responses_list[i].extend(responses) - # Assumption 1: assuming requesting n1, n2 responses separatively then combining them + # Assumption 1: assuming requesting n1, n2 responses separately then combining them # is the same as requesting (n1+n2) responses together else: n_tokens_list.append(n_output_tokens) diff --git a/notebook/agentchat_autobuild.ipynb b/notebook/agentchat_autobuild.ipynb index 02c592b5bb3..886ae60fa0f 100644 --- a/notebook/agentchat_autobuild.ipynb +++ b/notebook/agentchat_autobuild.ipynb @@ -8,10 +8,10 @@ }, "source": [ "# AutoBuild\n", - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", + "AutoGen offers conversable agents powered by LLMs, tools, or humans, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", "Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n", "\n", - "In this notebook, we introduce a new class, `AgentBuilder`, to help user build an automatic task solving process powered by multi-agent system. Specifically, in `build()`, we prompt a LLM to create multiple participant agent and initialize a group chat, and specify whether this task need programming to solve. AgentBuilder also support open-source LLMs by [vLLM](https://docs.vllm.ai/en/latest/index.html) and [Fastchat](https://github.com/lm-sys/FastChat). 
Check the supported model list [here](https://docs.vllm.ai/en/latest/models/supported_models.html)." + "In this notebook, we introduce a new class, `AgentBuilder`, to help users build an automatic task-solving process powered by a multi-agent system. Specifically, in `build()`, we prompt an LLM to create multiple participant agents, initialize a group chat, and specify whether this task need programming to solve. AgentBuilder also supports open-source LLMs by [vLLM](https://docs.vllm.ai/en/latest/index.html) and [Fastchat](https://github.com/lm-sys/FastChat). Check the supported model list [here](https://docs.vllm.ai/en/latest/models/supported_models.html)." ] }, { @@ -23,7 +23,7 @@ "source": [ "## Requirement\n", "\n", - "AutoBuild need the latest version of AutoGen.\n", + "AutoGen requires `Python>=3.8`. AutoBuild need the latest version of AutoGen.\n", "You can install AutoGen by the following command:" ] }, @@ -47,7 +47,7 @@ }, "source": [ "## Step 1: prepare configuration\n", - "Prepare a `config_path` for assistant agent to limit the choice of LLM you want to use in this task. This config can be a path of json file or a name of environment variable. A `default_llm_config` is also required for initialize the specific config of LLMs like seed, temperature, etc..." + "Prepare a `config_path` for assistant agent to limit the choice of LLM you want to use in this task. This config can be a path to a json file or a name of an environment variable. A `default_llm_config` is also required to initialize the specific configurations of LLMs like seed, temperature, etc..." ] }, { @@ -77,7 +77,7 @@ }, "source": [ "## Step 2: create a AgentBuilder\n", - "Create a `AgentBuilder` with the specified `config_path`. AgentBuilder will use GPT-4 in default to complete the whole process, you can also change the `builder_model` to other OpenAI model if you want. You can also specify a OpenAI or open-source LLM as agent backbone, see blog for more details." + "Create an `AgentBuilder` with the specified `config_path`. AgentBuilder will use GPT-4 in default to complete the whole process, you can also change the `builder_model` to other OpenAI models. You can also specify an OpenAI or open-source LLM as the agent backbone, see [blog](https://microsoft.github.io/autogen/blog/2023/07/14/Local-LLMs/) for more details." ] }, { @@ -107,7 +107,7 @@ "source": [ "## Step 3: specify a building task\n", "\n", - "Specify a building task with a general description. Building task will help build manager (a LLM) decide what agents should be build." + "Specify a building task with a general description. A building task will help the build manager (an LLM) decide what agents should be built." ] }, { @@ -181,7 +181,7 @@ }, "source": [ "## Step 5: execute task\n", - "Let agents generated in `build()` to complete the task collaboratively in a group chat." + "Let agents generated in `build()` complete the task collaboratively in a group chat." ] }, { @@ -417,7 +417,7 @@ }, "source": [ "## Step 6 (Optional): clear all agents and prepare for the next task\n", - "You can clear all agents generated in this task by the following code if your task is completed or the next task is largely different from the current task. If the agent's backbone is an open-source LLM, this process will also shutdown the endpoint server. If necessary, you can use `recycle_endpoint=False` to retain the previous open-source LLMs' endpoint server." 
+ "You can clear all agents generated in this task with the following code if your task is complete or the next task is significantly different from the current one. If the agent's backbone is an open-source LLM, this process will also shutdown the endpoint server. If necessary, you can use `recycle_endpoint=False` to retain the previous open-source LLMs' endpoint server." ] }, { @@ -453,7 +453,7 @@ "source": [ "## Save & load configs\n", "\n", - "You can save all necessary information of the built group chat agents. Here is a case for those agents generated in the above task:\n", + "You can save all necessary information of the built group chat agents. The following information is for those agents generated in the above task:\n", "```json\n", "{\n", " \"building_task\": \"Find a paper on arxiv by programming, and analysis its application in some domain. For example, find a latest paper about gpt-4 on arxiv and find its potential applications in software.\",\n", @@ -480,7 +480,7 @@ " }\n", "}\n", "```\n", - "These information will be saved in JSON format. You can provide a specific filename, otherwise, AgentBuilder will save config to the current path with a generated filename 'save_config_TASK_MD5.json'." + "This information will be saved in JSON format. You can provide a specific filename; otherwise, AgentBuilder will save the config to the current path with a generated filename 'save_config_TASK_MD5.json'." ] }, { @@ -514,7 +514,7 @@ "collapsed": false }, "source": [ - "After that, you can load the saved config and skip the building process. AgentBuilder will create agents with those information without prompting the builder manager." + "After that, you can load the saved config and skip the building process. AgentBuilder will create agents with the config information without prompting the builder manager." ] }, { @@ -874,7 +874,7 @@ "## Use OpenAI Assistant\n", "\n", "[The Assistants API](https://platform.openai.com/docs/assistants/overview) allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries.\n", - "AutoBuild also support assistant api by adding `use_oai_assistant=True` to `build()`." + "AutoBuild also supports assistants api by adding `use_oai_assistant=True` to `build()`." 
] }, { diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 3ea8171054f..da15be2124a 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "# %pip install \"pyautogen~=0.2.0b2\"" + "# %pip install \"pyautogen~=0.2.2\"" ] }, { @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -133,13 +133,7 @@ "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", "Arguments: \n", "{\n", - " \"cell\": \"import matplotlib.pyplot as plt\\n\n", - "# Initialize an empty figure and axis\\n\n", - "fig, ax = plt.subplots()\\n\n", - "# Create the chatboxes for messages\\n\n", - "ax.text(0.5, 0.6, 'Agent1: Hi!', bbox=dict(facecolor='red', alpha=0.5))\\n\n", - "ax.text(0.5, 0.5, 'Agent2: Hello!', bbox=dict(facecolor='blue', alpha=0.5))\\n\n", - "plt.axis('off')\"\n", + " \"cell\": \"import matplotlib.pyplot as plt\\nimport matplotlib.patches as patches\\n\\n# Create a figure to draw\\nfig, ax = plt.subplots(figsize=(8, 5))\\n\\n# Set plot limits to avoid text spilling over\\nax.set_xlim(0, 2)\\nax.set_ylim(0, 2)\\n\\n# Hide axes\\nax.axis('off')\\n\\n# Draw two agents\\nhead_radius = 0.1\\n\\n# Agent A\\nax.add_patch(patches.Circle((0.5, 1.5), head_radius, color='blue'))\\n# Agent B\\nax.add_patch(patches.Circle((1.5, 1.5), head_radius, color='green'))\\n\\n# Example dialog\\nbbox_props = dict(boxstyle=\\\"round,pad=0.3\\\", ec=\\\"black\\\", lw=1, fc=\\\"white\\\")\\nax.text(0.5, 1.3, \\\"Hello, how are you?\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\nax.text(1.5, 1.3, \\\"I'm fine, thanks!\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\n\"\n", "}\n", "\u001b[32m*******************************************\u001b[0m\n", "\n", @@ -151,18 +145,18 @@ { "data": { "text/plain": [ - "(0.0, 1.0, 0.0, 1.0)" + "Text(1.5, 1.3, \"I'm fine, thanks!\")" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAUuklEQVR4nO3dfZBVhZ3n4W/zotCoKLQY2sUYXwqJEUIQjGWhZLRYiDOzSbRiZWISqmICyZjZZK1Z45g3zIs6GUeTsqLUDpFyMo6aGRNTEV0lhjdfojINRhTUlAJBQLrBhk6roPT+IdNrj0kEBDv6e56q/qPPuefc371UcT997rnnNnR1dXUFACirT28PAAD0LjEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKK5fbw8AvHna29vT2dnZ22O87TU2Nmbw4MG9PQbsMjEARbS3t+fqb34z21tbe3uUt73+TU05/6tfFQS8ZYgBKKKzszPbW1vzkYEDc2hjY2+P87a1sbMzt7S2prOzUwzwliEGoJhDGxsz/MADe3uMt7fnn+/tCWC3OIEQAIoTAwBQnBgAgOLEAMAfMGfp0hx82WW9PQbsc2IA6OG+NWvS95JLcuYNN/TaDE8/91waZs7M0vXreyxf/uyzOevmm3PkVVelYebMXHX//Xu0/2k//Wk+dOONr1k+/+mn0zBzZp574YUkyTnHH5/Hv/CFHrf5xvz5ee+11+7R/cKfKjEA9DC7pSVfmDAhC1etyjNbt/b2OD10bt+eow4+OJedcUbeccAB+/z+Bvbvn2GDBu3z+4He5qOFQLeObdty0/Lleegzn8n6jo7MWbo0fzdxYo/b/Gzlylxw551Z096ek0eMyLQxYzLt1luz+cILc/CAAUmSxatX56Jf/CIPPfNMmhob8+Hjjsulp5+eQfvtlyQ58qqr8tlx4/Lkpk358aOP5pABA/KVU0/NZ8eNS5K863vfS5KMnTUrSXLaO9+Z+dOmZfzhh2f84YcnSb48b94+fz7mLF2aL95xR5778pf3+X1Bb3JkAOh28/LlOa6pKSObmnLu6NH5YUtLurq6utc/tXlzzr755nxo5MgsmzEj08eNy8V3391jH7/ZtClTfvSjnDVqVB6eMSM3nX12Fq9enfNvv73H7a64776c2NyclunT8/nx4/O5227Lyp1XR3zgvPOSJPM+8Ymsu+CC3HLOObv8GOYsXZqGmTP39CmAkhwZALrNbmnJuSeckCSZcswxaX/xxSxYtSqTjjwySTJryZKMbGrKdydPTpKMbGrKI88+m28vWtS9j0sXL87HTzghX3z/+5Mkxw4dmu9PnZrT5szJNWeemQH9Xvlv54PHHpvPjx+fJLnwlFNy5f3355dPP52RTU05dOeh+aGNjbv9dsDg/ffPyKFDX/d2P3/88Rzwne/0WPbyq8IHKhEDQJJkZWtrHli7Nj/Z+Vd4vz59cs7xx2d2S0t3DKxsa8v45uYe203Yedj+Py3bsCEPb9iQf/n1r7uXdSXZ0dWVpzZvzqhDD02SjB42rHt9Q0ND3nHAAXn2d797w4/jw6NG5cOjRr3u7T7wrnflmjPP7LHsV7/9bc79yU/e8AzwViMGgCSvHBV4aceONF9xRfeyriT79+2bq6dOzeCd5wO8no5t2zJ93Lj8zUknvWbdEa+6Vn//vn17rGvIK8HwZhnUv3+OGTKkx7Lfbtnypt0//CkRA0Be2rEj1y9blismT87ko4/use5DN96Yf33kkcw48cSMHDo0c594osf6B9eu7fH7+4YPz6MbN77mhXZ37LczFF7esWOP97GvfGPSpHxj0qTeHgP2KicQAvn5449n8wsv5NNjx+Y9w4b1+Dlr1KjMbmlJkkwfNy4rWltz4V135fG2tty8fHnmLFuW5JW/7JNX3v+/d82anD93bpauX58n2tpy64oVOX/u3F2eZ9igQRnYr1/uePLJbOjoSPvOz/1ve/nlLF2/PkvXr8+2l1/O2i1bsnT9+jy5aVP3tj957LEcd/XVe+eJ+T0umjcvH77ppn22f+gNYgDI7JaWnHHUUb/3rYCz3v3uPPTMM3l4w4a865BD8m8f/WhuWbEio6+5Jtc89FAu3vnRw/13nhg4+rDDsmDatDze1paJ112XsbNm5Wvz56d5N74psV+fPvn+1KmZtWRJmv/xH/M/dl4g6JmtWzN21qyMnTUr6zo68g/33Zexs2blvJ/9rHvb9hdfzMq2tjfydPxR6zo68tTmzfts/9AbGrq6nD4LFaxbty6zLroo04cO3atfYfzthQtz7ZIlWfOlL+21fb6Vrdu6NbPa2jL90kszfPjw3h4HdolzBoDd8oMHH8z45uYMbWzMPatX57v33pvzJ0zo7bGAN0AMALvliba2fGvhwmx6/vkcMXhwLjj55Fz0X65SCLy1iAFgt1w5ZUqunDKlt8cA9iInEAJAcWIAAIoTAwBQnHMGoJiNnZ29PcLbmueXtyIxAEU0Njamf1NTbmltTZ5/vrfHeVvr39SUxsbG3h4DdpmLDkEh7e3t6fSX6z7X2NiYwa/6Uib4UycGAKA4JxACQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBACjO5YihEFcgfOtyVUP2JTEARbS3t+eb37w6ra3be3sU9kBTU/989avnCwL2CTEARXR2dqa1dXsGDvxIGhsP7e1x2A2dnRvT2npLOjs7xQD7hBiAYhobD82BBw7v7THYTb5okn3JCYQAUJwYAIDixAAAFCcGAN6AmTMbsmLFT5Mkzz33dG
bObMj69Ut7dSbYXWIA6GHNmvtyySV9c8MNZ/baDH/oRXXJkv+T666bmMsvPySXX35Irr/+jKxd+8Bu73/OnEm5444vvmb50qVzctllB+/Z0G/AH5oH3ixiAOihpWV2Jkz4QlatWpitW5/p7XF6WLVqft7zno/lU5/6ZT796fsyePCI/PM/T86WLWt7ezR4S/PRQqDbtm0dWb78pnzmMw+lo2N9li6dk4kT/67HbVau/FnuvPOCtLevyYgRJ2fMmGm59dZpufDCzRkw4OAkyerVi/OLX1yUZ555KI2NTTnuuA/n9NMvzX77DUqSXHXVkRk37rPZtOnJPProjzNgwCE59dSvZNy4zyZJvve9dyVJZs0amyR55ztPy7Rp8/ORj/xLj1n+4i/+KY8++u956qlfZMyYT+6T52TFiluzYMHMbNz4aA48sDljxnwqp556cfr02bX/Pp9+ekHuuutvs2HDsgwcOCRjxnwqf/Zn39rl7eHN4MgA0G358pvT1HRcmppGZvToc9PS8sN0dXV1r9+8+ancfPPZGTnyQ5kxY1nGjZueu+++uMc+Nm36TX70oykZNeqszJjxcM4++6asXr04t99+fo/b3XffFWluPjHTp7dk/PjP57bbPpfW1pVJkvPOe+XQ/yc+MS8XXLAu55xzy++dd/v2zuzYsT0DBw7pXjZ//jdy1VVH7o2nI6tWLcpPf/rJnHTS/8xf//Wj+fM/n5Vly+Zk4cJv79L2W7aszQ03fDDNzeMzY8aynHnmNWlpmZ2FC7+1V+aDvUWaAt1aWmbnhBPOTZIcc8yUvPhie1atWpAjj5yUJFmyZFaamkZm8uTvJkmamkbm2WcfyaJF///FcfHiS3PCCR/P+9//xSTJ0KHHZurU72fOnNNy5pnXpF+/AUmSY4/9YMaP/3yS5JRTLsz991+Zp5/+ZZqaRmbQoFeukNjYODQHHPCOPzjvvHkX5sADm3PUUWd0L2tsbMqQIUe/7mN98MEf5D/+4596LNux46Xu+ZJkwYKZOeWUL+e97/1UkuSQQ47KBz7wzdx11//OpElf36X7OOigEfngB69OQ0NDmpqOy9atz2TevAtz2mlfS0ODv8f40yAGgCRJa+vKrF37QM455ydJkj59+uX4489JS8vs7hhoa1uZ5ubxPbY7/PAJPX7fsGFZNmx4OL/+9asP6Xelq2tHNm9+KoceOipJMmzY6O61DQ0NOeCAd+R3v3t2l+ddvPiyPPLIjZk2bX6PF/AJE87PhAnn/5EtXzF69MczcWLPoxqPPXZLFi36To/HsmbNPT1ip6vr5bz00gvZvr0z/fs3/tH7aG19LCNGnJyGhobuZSNGnJJt2zqyZctvM3jwEa87J7wZxACQ5JWjAjt2vJQrrmh+1dKu9O27f6ZOvToDBuzaNfG3bevIuHHTc9JJf/Oada9+8evbt/9/WduQrq4du3Qf9977D1m8+LJ88pPzcthho19/g99j//0HZ8iQY3osGzRoWI/ft23ryKRJMzNq1Edes/2rAwTe6sQAkB07XsqyZddn8uQrcvTRk3usu/HGD+WRR/41J544I0OHjswTT8ztsX7t2gd7/D58+PuyceOjr3mh3R19++63c66XX7Punnv+PosWfTvnnvt/09x84h7fx64YPvx9aW1ducePpalpVB577N/T1dXVfXRgzZp7st9+B+agg/5b9+2mTZu/N8aFPeYNKyCPP/7zvPDC5owd++kMG/aeHj+jRp2VlpbZSZJx46antXVF7rrrwrS1PZ7ly2/OsmVzdu7llRe7U065MGvW3Ju5c8/P+vVL09b2RFasuDVz577+ofv/NGjQsPTrNzBPPnlHOjo25IUX2pMkixdfnl/+8qv5y7/8YQ4++Mh0dKxPR8f6bNvW0b3tAw9cneuvP32vPC+nnvq1PPzw9Zk/f2aefXZ5Nm58LI88cmPuvvsru7T9+PGfz5Yta3L77V9Ia+uKrFhxa+bP/3pOPvl/9Thf4PrrT8+iRZfulZlhTzgyAKSlZXaOOuqM3/tWwLvffVbuvffvs2HDwznssNH56Ef/LXfeeUF+9avvZcSIkzNx4sW57bbPpV+//ZMkhx02OtOmLcjdd1+c666bmK6urgwZcnSOP/6cXZ6nT59+mTr1+1mw4JLMn/+1HHHExEybNj8PPXRNXn55W37847N73P60076eSZO+kSTp7GzNpk2/2fMn41WOOea/52Mf+3kWLrwk99xzefr27Z+mpuMydux5u7T9QQcdnr/6q7m5666/zbXXjsnAgUMyduync+qpPWNi06bfZNiwE/bKzLAnGrpe/bkh4G1r3bp1ueiiWRk6dPpe/QrjhQu/nSVLrs2XvrRmr+2TnrZuXZe2tlm59NLpGT7c10+z9zkyAOyWBx/8QZqbx6excWhWr74n99773V06ex/40yUGgN3S1vZEFi78Vp5/flMGDz4iJ598QSZOvKi3xwLeADEA7JYpU67MlClX9vYYwF7k0wQAUJwYAIDixAAAFOecASims3Njb4/AbvJvxr4mBqCIxsbGNDX1T2vrLXn++d6eht3V1NQ/jY1//IuRYE+56BAU0t7ens7Ozt4egz3Q2NiYwYN37cuiYHeJAQAozgmEAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTA
wBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcf8PWgarshV+kfQAAAAASUVORK5CYII=", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAGVCAYAAABuPkCWAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAeYElEQVR4nO3dfbDWdZ3/8de5QxQRwszwHmNFgXO4U1HSkhtpbH6TZogmK+uyiU6xOVvZaqvrNLujdLOWld1gOSZQuobZjWWIwoRSoImJN+iaQjeYYgqCIjfnXL8/znASuREUuDjn83jMnBk4h+v7/VzcvHme7/W9vt+aSqVSCQAAxait9gIAANi9BCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBh6qu9AMpSqSTLlydLlmz6sXJlsm5dsn59669raEg6dUq6dUuOOGLTjwMOSGpqqrN+gDeqVCpZ/uryLFmxZJOPlWtXZl3zuqxvbh1sDXUN6VTXKd326pYjuh+xyccB+xyQGoON3UgAskutXp3ce28ye3Yyc2by+OPJ2rV//3pdXVJbm7S0tH5UKq2fr6lp/fzGrzU3//0xe+2VHHNMMnp0MmJEctJJSZcuu/d5AeVavW517v3jvZn9zOzMfHpmHl/+eNY2/32w1dXUpbamNi2VlrRUWlJJ62CrSU1qa2rbvtZc+ftg26turxxzwDEZfeTojOg1IicddlK6dDLY2HVqKpWN/+XCzvHEE8n06clddyX3398ab/X1yYYNO3c/G7dZV5ccd1xy6qnJuHFJnz47dz8AT7zwRKYvmp67/nBX7l92f5orzamvrc+Glp072DZus66mLscddFxOfc+pGdc4Ln3eabCxcwlAdooNG5Kf/jT5+teTOXNao+z1R+12h437HD48mTQp+dCHWiMR4K3Y0LIhP33ip/n6gq9nzpI5qaup2+So3e6wcZ/DjxieScdPyof6fCj1tQYbb58A5G158cXkG99IvvnN5LnnqhN+b7RxDQcemHz8460x2KNHddcEtB8vrnkx31jwjXzz/m/muVeeq0r4vdHGNRzY5cB8/LiPZ9Lxk9Jjb4ONt04A8pZs2JBMmZJ87nPJqlWt5+ntiWprk65dk6uvTi64wBFBYOs2tGzIlN9Nyefu/lxWrVuVlsqeOdhqa2rTtVPXXD3y6lww5AJHBHlLBCA7bNGi5Lzzkt//vtor2TEDBiRTpyaNjdVeCbCnWfTcopz34/Py++fa12AbcOCATP3w1DQeaLCxY1wHkO1WqSTXXJMMHpw88ki1V7PjHn20de3XXPP3dxsDZatUKrnmN9dk8JTBeeT59jfYHl3+aAZPGZxrfnNNHM9hRzgCyHapVJLPfjb58pervZKd45JLki98wfUEoWSVSiWfveuz+fJvOsZgu2TYJfnCqC+4niDbRQDyppqbk4kTkxtuqPZKdq4JE1rPY6yrq/ZKgN2tuaU5E38+MTcs7FiDbcKgCZny/6akrtZgY9sEINvU3JyMHZv8+Mcd72XTmprkzDOTW24RgVCS5pbmjP3R2Pz48R+3XaS5o6hJTc485szcMuYWEcg2OQeQbfrqV5Pbbut48Ze0PqcZM5Jrr632SoDd6au//Wpue/y2Dhd/SVJJJTMen5Fr5xtsbJsjgGzVE08kTU2t9+jtyDp1Sh5+2B1EoARPvPBEmr7dlHXNHXuwdarrlIcvetgdRNgqRwDZoubmZPz4Pff6fjtTS0vrc632BayBXau5pTnjfzx+j72+387UUmnJ+NvHp7nFYGPLBCBbNHVqsmDBzr9/755ow4bW5zp1arVXAuxKUx+emgXLFuz0+/fuiTa0bMiCvyzI1IcNNrZMALJFP/xhWW+MqK1Nbr652qsAdqUfLvph6mrKGWy1NbW5+RGDjS1zDiCbWbEiOeCAMo7+vV59fbJ8edK9e7VXAuxsK15bkQO+dEARR/9er762PssvWZ7unbtXeynsYRwBZDM//3l58Ze0Puc77qj2KoBd4edP/ry4+EtaXwq+40mDjc0JQDYzd27r0bDSNDS0Pneg45m7dG7qa8sbbA21DZn7R4ONzQlANrNyZRnv/n2j5ubW5w50PCvXrizi3b9v1Fxpzsq1BhubE4BsZt26jnnh5zdTqSRr11Z7FcCusK55XUo85b1SqWTtBoONzQlANtO1a+u7YktTW5vst1+1VwHsCl336pramvIGW21Nbfbby2Bjc+X9a+BNHX54tVdQHTU1yWGHVXsVwK5weLcyB1tNanJYN4ONzQlANnP66WXeFWPDhuSMM6q9CmBXOL3P6WmulDfYNlQ25Iyjz6j2MtgDCUA2M3hwcvDB1V7F7nfIIcmgQdVeBbArDO45OAd3LW+wHbLfIRn0boONzQlANlNTk4wdW9alYOrrW59zTU21VwLsCjU1NRnbb2xRl4Kpr63P2L5jU2OwsQUCkC266KKyYqi2NrnwwmqvAtiVLjr2otSknMFWW1ObC4812NgyAcgWHXVUctVV1V7F7nPVVa3PGei4jtr/qFw1spzBdtWIq3LU/gYbW+ZewGxVc3MybFjy4IMd99Zw9fXJkCHJffcldeXcIx6K1dzSnGHfG5YH//pgh701XH1tfYb0HJL7JtyXulqDjS1zBJCtqqtLbrqpNZI64nUB6+pan9v3vy/+oBR1tXW56cM3pb62vkNeF7Cupi71tfX5/hnfF39sU8f7289O1adPcvfdSZcuHSuS6uqSffZJ7rmn9TkC5ejzzj65e/zd6dLQJXU1HWew1dXUZZ+GfXLP+HvS550GG9vmJWC2y8MPJyNGtN4rt72/HFxfn3Tr1hp/TU3VXg1QLQ8/93BGfH9EVq5d2
[… base64-encoded PNG data omitted: the notebook's matplotlib drawing of two chatting agents …]", "text/plain": [ - "
" + "
" ] }, "metadata": {}, @@ -175,45 +169,30 @@ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "(0.0, 1.0, 0.0, 1.0)\n", + "Text(1.5, 1.3, \"I'm fine, thanks!\")\n", "\u001b[32m***************************************************\u001b[0m\n", "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "The drawing of two agents with example dialog has been executed, but as instructed, `plt.show()` has not been added, so the image will not be displayed here. However, the script created a matplotlib figure with two agents represented by circles, one blue and one green, along with example dialog text in speech bubbles.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", "--------------------------------------------------------------------------------\n" ] } ], "source": [ "llm_config = {\n", - " \"functions\": [\n", - " {\n", - " \"name\": \"python\",\n", - " \"description\": \"run cell in ipython and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"cell\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid Python cell to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"cell\"],\n", - " },\n", - " },\n", - " {\n", - " \"name\": \"sh\",\n", - " \"description\": \"run a shell script and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"script\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid shell script to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"script\"],\n", - " },\n", - " },\n", - " ],\n", " \"config_list\": config_list,\n", " \"timeout\": 120,\n", "}\n", @@ -234,8 +213,12 @@ "\n", "# define functions according to the function description\n", "from IPython import get_ipython\n", + "from typing_extensions import Annotated\n", + "\n", "\n", - "def exec_python(cell):\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", + "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " ipython = get_ipython()\n", " result = ipython.run_cell(cell)\n", " log = str(result.result)\n", @@ -245,23 +228,27 @@ " log += f\"\\n{result.error_in_exec}\"\n", " return log\n", "\n", - "def exec_sh(script):\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", + "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", "\n", - "# register the functions\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"python\": exec_python,\n", - " \"sh\": exec_sh,\n", - " }\n", - ")\n", "\n", "# start the conversation\n", "user_proxy.initiate_chat(\n", " chatbot,\n", " message=\"Draw two agents chatting with each other with an example dialog. 
Don't add plt.show().\",\n", - ")\n" + ")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ab081090", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -280,7 +267,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 49f61afec26..3864c4899fc 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -132,9 +132,7 @@ "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -151,9 +149,7 @@ "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -178,52 +174,10 @@ "# define functions according to the function description\n", "import time\n", "\n", - "# An example async function\n", - "async def timer(num_seconds):\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " # should print to stdout\n", - " return \"Timer is done!\"\n", - "\n", - "# An example sync function \n", - "def stopwatch(num_seconds):\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " return \"Stopwatch is done!\"\n", - "\n", "llm_config = {\n", - " \"functions\": [\n", - " {\n", - " \"name\": \"timer\",\n", - " \"description\": \"create a timer for N seconds\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"num_seconds\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Number of seconds in the timer.\",\n", - " }\n", - " },\n", - " \"required\": [\"num_seconds\"],\n", - " },\n", - " },\n", - " {\n", - " \"name\": \"stopwatch\",\n", - " \"description\": \"create a stopwatch for N seconds\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"num_seconds\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Number of seconds in the stopwatch.\",\n", - " }\n", - " },\n", - " \"required\": [\"num_seconds\"],\n", - " },\n", - " },\n", - " ],\n", " \"config_list\": config_list,\n", "}\n", + "\n", "coder = autogen.AssistantAgent(\n", " name=\"chatbot\",\n", " system_message=\"For coding tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", @@ -240,21 +194,35 @@ " code_execution_config={\"work_dir\": \"coding\"},\n", ")\n", "\n", - "# register the functions\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"timer\": timer,\n", - " \"stopwatch\": stopwatch,\n", - " }\n", - ")\n", + "from typing_extensions import Annotated\n", + "\n", + "# An example async function\n", + "@user_proxy.register_for_execution()\n", + "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", + "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " # should print to stdout\n", + " return \"Timer is done!\"\n", + "\n", + "\n", + "# An example sync function\n", + "@user_proxy.register_for_execution()\n", + "@coder.register_for_llm(description=\"create a stopwatch for N seconds\")\n", + "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " return \"Stopwatch is done!\"\n", + "\n", + "\n", "# start the conversation\n", - "# 'await' is used to pause and resume code execution for async IO operations. \n", + "# 'await' is used to pause and resume code execution for async IO operations.\n", "# Without 'await', an async function returns a coroutine object but doesn't execute the function.\n", "# With 'await', the async function is executed and the current function is paused until the awaited function returns a result.\n", "await user_proxy.a_initiate_chat(\n", " coder,\n", " message=\"Create a timer for 5 seconds and then a stopwatch for 5 seconds.\",\n", - ")\n" + ")" ] }, { @@ -268,62 +236,36 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "2472f95c", "metadata": {}, "outputs": [], "source": [ - "\n", - "\n", - "# Add a function for robust group chat termination\n", - "def terminate_group_chat(message):\n", - " return f\"[GROUPCHAT_TERMINATE] {message}\"\n", - "\n", - "# update LLM config\n", - "llm_config[\"functions\"].append(\n", - " {\n", - " \"name\": \"terminate_group_chat\",\n", - " \"description\": \"terminate the group chat\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"message\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Message to be sent to the group chat.\",\n", - " }\n", - " },\n", - " \"required\": [\"message\"],\n", - " },\n", - " }\n", - ")\n", - "\n", - "# redefine the coder agent so that it uses the new llm_config\n", - "coder = autogen.AssistantAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For coding tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "# register the new function with user proxy agent\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"terminate_group_chat\": terminate_group_chat,\n", - " }\n", - ")\n", "markdownagent = autogen.AssistantAgent(\n", " name=\"Markdown_agent\",\n", " system_message=\"Respond in markdown only\",\n", " llm_config=llm_config,\n", ")\n", + "\n", + "# Add a function for robust group chat termination\n", + "@user_proxy.register_for_execution()\n", + "@markdownagent.register_for_llm()\n", + "@coder.register_for_llm(description=\"terminate the group chat\")\n", + "def terminate_group_chat(message: Annotated[str, \"Message to be sent to the group chat.\"]) -> str:\n", + " return f\"[GROUPCHAT_TERMINATE] {message}\"\n", + "\n", + "\n", "groupchat = autogen.GroupChat(agents=[user_proxy, coder, markdownagent], messages=[], max_round=12)\n", - "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config,\n", - " is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n", - " )" + "manager = autogen.GroupChatManager(\n", + " groupchat=groupchat,\n", + " llm_config=llm_config,\n", + " is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n", + ")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "e2c9267a", "metadata": {}, "outputs": [ @@ -340,25 +282,21 @@ "4) when 1-3 are done, terminate the group chat\n", "\n", "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", @@ -366,14 +304,16 @@ "\u001b[32m**************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -388,19 +328,18 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", - "```markdown\n", - "# Results \n", + "The results are as follows:\n", + "\n", + "- Timer: Completed 
after `5 seconds`.\n", + "- Stopwatch: Recorded time of `5 seconds`.\n", + "\n", + "**Timer and Stopwatch Summary:**\n", + "Both the timer and stopwatch were set for `5 seconds` and have now concluded successfully. \n", "\n", - "1. Timer: The timer for 5 seconds has completed.\n", - "2. Stopwatch: The stopwatch for 5 seconds has completed.\n", - "```\n", - "By the way, step 3 is done now. Moving on to step 4.\n", + "Now, let's proceed to terminate the group chat as requested.\n", "\u001b[32m***** Suggested function Call: terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"message\": \"The tasks have been completed. Terminating the group chat now.\"\n", - "}\n", + "{\"message\":\"All tasks have been completed. The group chat will now be terminated. Goodbye!\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -409,7 +348,7 @@ "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] The tasks have been completed. Terminating the group chat now.\n", + "[GROUPCHAT_TERMINATE] All tasks have been completed. The group chat will now be terminated. Goodbye!\n", "\u001b[32m*****************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" @@ -417,13 +356,23 @@ } ], "source": [ - "await user_proxy.a_initiate_chat(manager,\n", - " message=\"\"\"\n", + "await user_proxy.a_initiate_chat(\n", + " manager,\n", + " message=\"\"\"\n", "1) Create a timer for 5 seconds.\n", "2) a stopwatch for 5 seconds.\n", "3) Pretty print the result as md.\n", - "4) when 1-3 are done, terminate the group chat\"\"\")\n" + "4) when 1-3 are done, terminate the group chat\"\"\",\n", + ")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d074e51", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -442,7 +391,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb new file mode 100644 index 00000000000..c388db936ac --- /dev/null +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -0,0 +1,551 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "ae1f50ec", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "9a71fa36", + "metadata": {}, + "source": [ + "# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n", + "\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "\n", + "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. 
The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", + "\n", + "## Requirements\n", + "\n", + "AutoGen requires `Python>=3.8`. To run this notebook example, please install `pyautogen`:\n", + "```bash\n", + "pip install pyautogen\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "2b803c17", + "metadata": {}, + "outputs": [], + "source": [ + "# %pip install \"pyautogen~=0.2.2\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "5ebd2397", + "metadata": {}, + "source": [ + "## Set your API Endpoint\n", + "\n", + "The [`config_list_from_json`](https://microsoft.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "dca301a4", + "metadata": {}, + "outputs": [], + "source": [ + "import autogen\n", + "\n", + "config_list = autogen.config_list_from_json(\n", + " \"OAI_CONFIG_LIST\",\n", + " filter_dict={\n", + " \"model\": [\"gpt-4\", \"gpt-3.5-turbo\", \"gpt-3.5-turbo-16k\"],\n", + " },\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "92fde41f", + "metadata": {}, + "source": [ + "It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the models with matching names are kept in the list based on the filter condition.\n", + "\n", + "The config list looks like the following:\n", + "```python\n", + "config_list = [\n", + " {\n", + " 'model': 'gpt-4',\n", + " 'api_key': '',\n", + " },\n", + " {\n", + " 'model': 'gpt-3.5-turbo',\n", + " 'api_key': '',\n", + " 'base_url': '',\n", + " 'api_type': 'azure',\n", + " 'api_version': '2023-08-01-preview',\n", + " },\n", + " {\n", + " 'model': 'gpt-3.5-turbo-16k',\n", + " 'api_key': '',\n", + " 'base_url': '',\n", + " 'api_type': 'azure',\n", + " 'api_version': '2023-08-01-preview',\n", + " },\n", + "]\n", + "```\n", + "\n", + "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/notebook/oai_openai_utils.ipynb) for full code examples of the different methods." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2b9526e7", + "metadata": {}, + "source": [ + "## Making Function Calls\n", + "\n", + "In this example, we demonstrate function call execution with `AssistantAgent` and `UserProxyAgent`. With the default system prompt of `AssistantAgent`, we allow the LLM assistant to perform tasks with code, and the `UserProxyAgent` would extract code blocks from the LLM response and execute them. With the new \"function_call\" feature, we define functions and specify the description of the function in the OpenAI config for the `AssistantAgent`. 
Then we register the functions in `UserProxyAgent`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9fb85afb", + "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + ")\n", + "\n", + "from typing import Literal\n", + "\n", + "from typing_extensions import Annotated\n", + "\n", + "CurrencySymbol = Literal[\"USD\", \"EUR\"]\n", + "\n", + "\n", + "def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:\n", + " if base_currency == quote_currency:\n", + " return 1.0\n", + " elif base_currency == \"USD\" and quote_currency == \"EUR\":\n", + " return 1 / 1.1\n", + " elif base_currency == \"EUR\" and quote_currency == \"USD\":\n", + " return 1.1\n", + " else:\n", + " raise ValueError(f\"Unknown currencies {base_currency}, {quote_currency}\")\n", + "\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "def currency_calculator(\n", + " base_amount: Annotated[float, \"Amount of currency in base_currency\"],\n", + " base_currency: Annotated[CurrencySymbol, \"Base currency\"] = \"USD\",\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", + ") -> str:\n", + " quote_amount = exchange_rate(base_currency, quote_currency) * base_amount\n", + " return f\"{quote_amount} {quote_currency}\"" + ] + }, + { + "cell_type": "markdown", + "id": "39464dc3", + "metadata": {}, + "source": [ + "The decorator `@chatbot.register_for_llm()` reads the annotated signature of the function `currency_calculator` and generates the following JSON schema used by OpenAI API to suggest calling the function. We can check the JSON schema generated as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3e52bbfe", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'description': 'Currency exchange calculator.',\n", + " 'name': 'currency_calculator',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'base_amount': {'type': 'number',\n", + " 'description': 'Amount of currency in base_currency'},\n", + " 'base_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'default': 'USD',\n", + " 'description': 'Base currency'},\n", + " 'quote_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'default': 'EUR',\n", + " 'description': 'Quote currency'}},\n", + " 'required': ['base_amount']}}]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chatbot.llm_config[\"functions\"]" + ] + }, + { + "cell_type": "markdown", + "id": "662bd12a", + "metadata": {}, + "source": [ + "The decorator `@user_proxy.register_for_execution()` maps the name of the function to be proposed by OpenAI API to the actual implementation. 
The mapped function is wrapped because we also automatically handle serialization of the function's output as follows:\n", + "\n", + "- strings are untouched, and\n", + "\n", + "- objects of the Pydantic BaseModel type are serialized to JSON.\n", + "\n", + "We can check the correctness of the function map by using the `._origin` property of the wrapped function as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "bd943369", + "metadata": {}, + "outputs": [], + "source": [ + "assert user_proxy.function_map[\"currency_calculator\"]._origin == currency_calculator" + ] + }, + { + "cell_type": "markdown", + "id": "8a3a09c9", + "metadata": {}, + "source": [ + "Finally, we can use this function to accurately calculate exchange amounts:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d5518947", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 123.45 USD in EUR?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base_amount\":123.45,\"base_currency\":\"USD\",\"quote_currency\":\"EUR\"}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", + "112.22727272727272 EUR\n", + "\u001b[32m****************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "123.45 USD is equivalent to approximately 112.23 EUR.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 USD in EUR?\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "bd9d61cf", + "metadata": {}, + "source": [ + "### Pydantic models" + ] + }, + { + "cell_type": "markdown", + "id": "2d79fec0", + "metadata": {}, + "source": [ + "We can also use Pydantic Base models to rewrite the function as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7b3d8b58", + "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For currency exchange tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + ")\n", + "\n", + "from typing import Literal\n", + "\n", + "from pydantic import BaseModel, Field\n", + "from typing_extensions import Annotated\n", + "\n", + "class Currency(BaseModel):\n", + " currency: Annotated[CurrencySymbol, Field(..., description=\"Currency symbol\")]\n", + " amount: Annotated[float, Field(0, description=\"Amount of currency\", ge=0)]\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "def currency_calculator(\n", + " base: Annotated[Currency, \"Base currency: amount and currency symbol\"],\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency symbol\"] = \"USD\",\n", + ") -> Currency:\n", + " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", + " return Currency(amount=quote_amount, currency=quote_currency)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "971ed0d5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'description': 'Currency exchange calculator.',\n", + " 'name': 'currency_calculator',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'base': {'properties': {'currency': {'description': 'Currency symbol',\n", + " 'enum': ['USD', 'EUR'],\n", + " 'title': 'Currency',\n", + " 'type': 'string'},\n", + " 'amount': {'default': 0,\n", + " 'description': 'Amount of currency',\n", + " 'minimum': 0.0,\n", + " 'title': 'Amount',\n", + " 'type': 'number'}},\n", + " 'required': ['currency'],\n", + " 'title': 'Currency',\n", + " 'type': 'object',\n", + " 'description': 'Base currency: amount and currency symbol'},\n", + " 'quote_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'default': 'USD',\n", + " 'description': 'Quote currency symbol'}},\n", + " 'required': ['base']}}]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chatbot.llm_config[\"functions\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "ab081090", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 112.23 Euros in US Dollars?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base\":{\"currency\":\"EUR\",\"amount\":112.23},\"quote_currency\":\"USD\"}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", + "{\"currency\":\"USD\",\"amount\":123.45300000000002}\n", + 
"\u001b[32m****************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "112.23 Euros is equivalent to approximately 123.45 US Dollars.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 112.23 Euros in US Dollars?\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0064d9cd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 123.45 US Dollars in Euros?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base\":{\"currency\":\"USD\",\"amount\":123.45},\"quote_currency\":\"EUR\"}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", + "{\"currency\":\"EUR\",\"amount\":112.22727272727272}\n", + "\u001b[32m****************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "123.45 US Dollars is approximately 112.23 Euros.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 US Dollars in Euros?\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "06137f23", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flaml_dev", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebook/oai_client_cost.ipynb 
b/notebook/oai_client_cost.ipynb index 50d3dfdc67b..857ee327a55 100644 --- a/notebook/oai_client_cost.ipynb +++ b/notebook/oai_client_cost.ipynb @@ -59,7 +59,7 @@ "config_list = autogen.config_list_from_json(\n", " \"OAI_CONFIG_LIST\",\n", " filter_dict={\n", - " \"model\": [\"gpt-3.5-turbo\"],\n", + " \"model\": [\"gpt-3.5-turbo\", \"gpt-35-turbo\"],\n", " },\n", ")" ] diff --git a/samples/apps/autogen-assistant/README.md b/samples/apps/autogen-assistant/README.md deleted file mode 100644 index ab2dbbfbb1b..00000000000 --- a/samples/apps/autogen-assistant/README.md +++ /dev/null @@ -1,120 +0,0 @@ -# AutoGen Assistant - -![ARA](./docs/ara_stockprices.png) - -AutoGen Assistant is an Autogen-powered AI app (user interface) that can converse with you to help you conduct research, write and execute code, run saved skills, create new skills (explicitly and by demonstration), and adapt in response to your interactions. - -### Capabilities / Roadmap - -Some of the capabilities supported by the app frontend include the following: - -- [x] Select from a list of agents (current support for two agent workflows - `UserProxyAgent` and `AssistantAgent`) -- [x] Modify agent configuration (e.g. temperature, model, agent system message, etc.) and chat with updated agent configurations. -- [x] View agent messages and output files in the UI from agent runs. - [ ] Support for more complex agent workflows (e.g. `GroupChat` workflows) -- [ ] Improved user experience (e.g., streaming intermediate model output, better summarization of agent responses, etc) - -Project Structure: - -- _autogenra/_ code for the backend classes and web api (FastAPI) -- _frontend/_ code for the webui, built with Gatsby and Tailwind - -### Installation - -1. **Install from PyPi** - - We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Assistant: - - ```bash - pip install autogenra - ``` - -2. **Install from Source** - - > Note: This approach requires some familiarity with building interfaces in React. - - If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: - - - Clone the AutoGen Assistant repository and install its Python dependencies: - - ```bash - pip install -e . - ``` - - - Navigate to the `samples/apps/autogen-assistant/frontend` directory, install dependencies, and build the UI: - - ```bash - npm install -g gatsby-cli - npm install --global yarn - yarn install - yarn build - ``` - - - For Windows users, you may need alternative commands to build the frontend. - - ```bash - gatsby clean && rmdir /s /q ..\\autogenra\\web\\ui && (set \"PREFIX_PATH_VALUE=\" || ver>nul) && gatsby build --prefix-paths && xcopy /E /I /Y public ..\\autogenra\\web\\ui - ``` - - Navigate to the `samples/apps/autogen-assistant` directory and install the `autogenra` library in your current Python environment: - - ```bash - pip install -e . - ``` - -### Running the Application - -Once installed, run the web UI by entering the following in your terminal: - -```bash -autogenra ui --port 8081 -``` - -This will start the application on the specified port. Open your web browser and go to `http://localhost:8081/` to begin using AutoGen Assistant. 
- -Now that you have AutoGen Assistant installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills. - -## Capabilities - -This demo focuses on the research assistant use case with some generalizations: - -- **Skills**: The agent is provided with a list of skills that it can leverage while attempting to address a user's query. Each skill is a python function that may be in any file in a folder made available to the agents. We separate the concept of global skills available to all agents `backend/files/global_utlis_dir` and user level skills `backend/files/user/<user_id>/utils_dir`, relevant in a multi-user environment. Agents are aware of skills as they are appended to the system message. A list of example skills is available in the `backend/global_utlis_dir` folder. Modify the file or create a new file with a function in the same directory to create new global skills. - -- **Conversation Persistence**: Conversation history is persisted in an sqlite database `database.sqlite`. - -- **Default Agent Workflow**: The default is a sample workflow with two agents - a user proxy agent and an assistant agent. - -## Example Usage - -Let us use a simple query demonstrating the capabilities of the research assistant. - -``` -Plot a chart of NVDA and TESLA stock price YTD. Save the result to a file named nvda_tesla.png -``` - -The agents respond by _writing and executing code_ to create a python program to generate the chart with the stock prices. - -> Note that there could be multiple turns between the `AssistantAgent` and the `UserProxyAgent` to produce and execute the code in order to complete the task. - -![ARA](./docs/ara_stockprices.png) - -> Note: You can also view the debug console that generates useful information to see how the agents are interacting in the background. - - - -## FAQ - -**Q: How can I add more skills to the AutoGen Assistant?** -A: You can extend the capabilities of your agents by adding new Python functions. The AutoGen Assistant interface also lets you directly paste functions that can be reused in the agent workflow. - -**Q: Where can I adjust the agent configurations and settings?** -A: You can modify agent configurations directly from the UI or by editing the default configurations in the `utils.py` file under the `get_default_agent_config()` method (assuming you are building your own UI). - -**Q: If I want to reset the conversation with an agent, how do I go about it?** -A: To reset your conversation history, you can delete the `database.sqlite` file. If you need to clear user-specific data, remove the relevant `autogenra/web/files/user/` folder. - -**Q: Is it possible to view the output and messages generated by the agents during interactions?** -A: Yes, you can view the generated messages in the debug console of the web UI, providing insights into the agent interactions. Alternatively, you can inspect the `database.sqlite` file for a comprehensive record of messages. - -## Acknowledgements - -AutoGen Assistant is based on the [AutoGen](https://microsoft.github.io/autogen) project. It is adapted in October 2023 from a research prototype (original credits: Gagan Bansal, Adam Fourney, Victor Dibia, Piali Choudhury, Saleema Amershi, Ahmed Awadallah, Chi Wang)
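The **Skills** bullet in the README diff above boils down to: a skill is just a plain Python function in a directory the agents can see, and it is appended to the agents' system message so they know it exists. A minimal global skill could look like the sketch below; the file name, function name, and body are illustrative, not taken from the repository:

```python
# fetch_page_title.py - a hypothetical global skill; everything here is illustrative.
from typing import Optional

import requests
from bs4 import BeautifulSoup


def fetch_page_title(url: str) -> Optional[str]:
    """Return the <title> text of a web page, or None if it cannot be fetched."""
    try:
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            return None
        soup = BeautifulSoup(response.text, "html.parser")
        # soup.title is None when the page has no <title> element
        if soup.title and soup.title.string:
            return soup.title.string.strip()
        return None
    except requests.RequestException:
        # Treat any network failure as "no title available"
        return None
```

Per the README, creating such a file in the global skills directory is all it takes for the function to become available to every agent.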
diff --git a/samples/apps/autogen-assistant/autogenra/__init__.py b/samples/apps/autogen-assistant/autogenra/__init__.py deleted file mode 100644 index db6a9e38f35..00000000000 --- a/samples/apps/autogen-assistant/autogenra/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .autogenflow import * -from .autogenchat import * -from .datamodel import * diff --git a/samples/apps/autogen-assistant/autogenra/utils/dbutils.py b/samples/apps/autogen-assistant/autogenra/utils/dbutils.py deleted file mode 100644 index 7ed163da317..00000000000 --- a/samples/apps/autogen-assistant/autogenra/utils/dbutils.py +++ /dev/null @@ -1,325 +0,0 @@ -import json -import logging -import sqlite3 -import threading -import os -from typing import Any, List, Dict, Tuple -from ..datamodel import Gallery, Message, Session - - -MESSAGES_TABLE_SQL = """ - CREATE TABLE IF NOT EXISTS messages ( - user_id TEXT NOT NULL, - session_id TEXT, - root_msg_id TEXT NOT NULL, - msg_id TEXT, - role TEXT NOT NULL, - content TEXT NOT NULL, - metadata TEXT, - timestamp DATETIME, - UNIQUE (user_id, root_msg_id, msg_id) - ) - """ - -SESSIONS_TABLE_SQL = """ - CREATE TABLE IF NOT EXISTS sessions ( - session_id TEXT NOT NULL, - user_id TEXT NOT NULL, - timestamp DATETIME NOT NULL, - flow_config TEXT, - UNIQUE (user_id, session_id) - ) - """ - -SKILLS_TABLE_SQL = """ - CREATE TABLE IF NOT EXISTS sessions ( - session_id TEXT NOT NULL, - user_id TEXT NOT NULL, - timestamp DATETIME NOT NULL, - flow_config TEXT, - UNIQUE (user_id, session_id) - ) - """ -GALLERY_TABLE_SQL = """ - CREATE TABLE IF NOT EXISTS gallery ( - gallery_id TEXT NOT NULL, - session TEXT, - messages TEXT, - tags TEXT, - timestamp DATETIME NOT NULL, - UNIQUE ( gallery_id) - ) - """ - - -lock = threading.Lock() -logger = logging.getLogger() - - -class DBManager: - """ - A database manager class that handles the creation and interaction with an SQLite database. - """ - - def __init__(self, path: str = "database.sqlite", **kwargs: Any) -> None: - """ - Initializes the DBManager object, creates a database if it does not exist, and establishes a connection. - - Args: - path (str): The file path to the SQLite database file. - **kwargs: Additional keyword arguments to pass to the sqlite3.connect method. - """ - self.path = path - # check if the database exists, if not create it - if not os.path.exists(self.path): - logger.info("Creating database") - self.init_db(path=self.path, **kwargs) - - try: - self.conn = sqlite3.connect(self.path, check_same_thread=False, **kwargs) - self.cursor = self.conn.cursor() - except Exception as e: - logger.error("Error connecting to database: %s", e) - raise e - - def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: - """ - Initializes the database by creating necessary tables. - - Args: - path (str): The file path to the SQLite database file. - **kwargs: Additional keyword arguments to pass to the sqlite3.connect method. 
- """ - # Connect to the database (or create a new one if it doesn't exist) - self.conn = sqlite3.connect(path, check_same_thread=False, **kwargs) - self.cursor = self.conn.cursor() - - # Create the table with the specified columns, appropriate data types, and a UNIQUE constraint on (root_msg_id, msg_id) - self.cursor.execute(MESSAGES_TABLE_SQL) - - # Create a sessions table - self.cursor.execute(SESSIONS_TABLE_SQL) - - # Create a skills - self.cursor.execute(SKILLS_TABLE_SQL) - - # Create a gallery table - self.cursor.execute(GALLERY_TABLE_SQL) - - # Commit the changes and close the connection - self.conn.commit() - - def query(self, query: str, args: Tuple = (), return_json: bool = False) -> List[Dict[str, Any]]: - """ - Executes a given SQL query and returns the results. - - Args: - query (str): The SQL query to execute. - args (Tuple): The arguments to pass to the SQL query. - return_json (bool): If True, the results will be returned as a list of dictionaries. - - Returns: - List[Dict[str, Any]]: The result of the SQL query. - """ - try: - with lock: - self.cursor.execute(query, args) - result = self.cursor.fetchall() - self.commit() - if return_json: - result = [dict(zip([key[0] for key in self.cursor.description], row)) for row in result] - return result - except Exception as e: - logger.error("Error running query with query %s and args %s: %s", query, args, e) - raise e - - def commit(self) -> None: - """ - Commits the current transaction to the database. - """ - self.conn.commit() - - def close(self) -> None: - """ - Closes the database connection. - """ - self.conn.close() - - -def save_message(message: Message, dbmanager: DBManager) -> None: - """ - Save a message in the database using the provided database manager. - - :param message: The Message object containing message data - :param dbmanager: The DBManager instance used to interact with the database - """ - query = "INSERT INTO messages (user_id, root_msg_id, msg_id, role, content, metadata, timestamp, session_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?)" - args = ( - message.user_id, - message.root_msg_id, - message.msg_id, - message.role, - message.content, - message.metadata, - message.timestamp, - message.session_id, - ) - dbmanager.query(query=query, args=args) - - -def load_messages(user_id: str, session_id: str, dbmanager: DBManager) -> List[dict]: - """ - Load messages for a specific user and session from the database, sorted by timestamp. - - :param user_id: The ID of the user whose messages are to be loaded - :param session_id: The ID of the session whose messages are to be loaded - :param dbmanager: The DBManager instance to interact with the database - - :return: A list of dictionaries, each representing a message - """ - query = "SELECT * FROM messages WHERE user_id = ? AND session_id = ?" - args = (user_id, session_id) - result = dbmanager.query(query=query, args=args, return_json=True) - # Sort by timestamp ascending - result = sorted(result, key=lambda k: k["timestamp"], reverse=False) - return result - - -def get_sessions(user_id: str, dbmanager: DBManager) -> List[dict]: - """ - Load sessions for a specific user from the database, sorted by timestamp. - - :param user_id: The ID of the user whose sessions are to be loaded - :param dbmanager: The DBManager instance to interact with the database - :return: A list of dictionaries, each representing a session - """ - query = "SELECT * FROM sessions WHERE user_id = ?" 
- args = (user_id,) - result = dbmanager.query(query=query, args=args, return_json=True) - # Sort by timestamp ascending - result = sorted(result, key=lambda k: k["timestamp"], reverse=True) - for row in result: - row["flow_config"] = json.loads(row["flow_config"]) - return result - - -def create_session(user_id: str, session: Session, dbmanager: DBManager) -> List[dict]: - """ - Create a new session for a specific user in the database. - - :param user_id: The ID of the user whose session is to be created - :param dbmanager: The DBManager instance to interact with the database - :return: A list of dictionaries, each representing a session - """ - - query = "INSERT INTO sessions (user_id, session_id, timestamp, flow_config) VALUES (?, ?, ?,?)" - args = (session.user_id, session.session_id, session.timestamp, json.dumps(session.flow_config.dict())) - dbmanager.query(query=query, args=args) - sessions = get_sessions(user_id=user_id, dbmanager=dbmanager) - - return sessions - - -def publish_session(session: Session, dbmanager: DBManager, tags: List[str] = []) -> Gallery: - """ - Publish a session to the gallery table in the database. Fetches the session messages first, then saves session and messages object to the gallery database table. - :param session: The Session object containing session data - :param dbmanager: The DBManager instance used to interact with the database - :param tags: A list of tags to associate with the session - :return: A gallery object containing the session and messages objects - """ - - messages = load_messages(user_id=session.user_id, session_id=session.session_id, dbmanager=dbmanager) - gallery_item = Gallery(session=session, messages=messages, tags=tags) - query = "INSERT INTO gallery (gallery_id, session, messages, tags, timestamp) VALUES (?, ?, ?, ?,?)" - args = ( - gallery_item.id, - json.dumps(gallery_item.session.dict()), - json.dumps([message.dict() for message in gallery_item.messages]), - json.dumps(gallery_item.tags), - gallery_item.timestamp, - ) - dbmanager.query(query=query, args=args) - return gallery_item - - -def get_gallery(gallery_id, dbmanager: DBManager) -> List[Gallery]: - """ - Load gallery items from the database, sorted by timestamp. If gallery_id is provided, only the gallery item with the matching gallery_id will be returned. - - :param gallery_id: The ID of the gallery item to be loaded - :param dbmanager: The DBManager instance to interact with the database - :return: A list of Gallery objects - """ - - if gallery_id: - query = "SELECT * FROM gallery WHERE gallery_id = ?" - args = (gallery_id,) - else: - query = "SELECT * FROM gallery" - args = () - result = dbmanager.query(query=query, args=args, return_json=True) - # Sort by timestamp ascending - result = sorted(result, key=lambda k: k["timestamp"], reverse=True) - gallery = [] - for row in result: - gallery_item = Gallery( - id=row["gallery_id"], - session=Session(**json.loads(row["session"])), - messages=[Message(**message) for message in json.loads(row["messages"])], - tags=json.loads(row["tags"]), - timestamp=row["timestamp"], - ) - gallery.append(gallery_item) - return gallery - - -def delete_user_sessions(user_id: str, session_id: str, dbmanager: DBManager, delete_all: bool = False) -> List[dict]: - """ - Delete a specific session or all sessions for a user from the database. 
- - :param user_id: The ID of the user whose session is to be deleted - :param session_id: The ID of the specific session to be deleted (ignored if delete_all is True) - :param dbmanager: The DBManager instance to interact with the database - :param delete_all: If True, all sessions for the user will be deleted - :return: A list of the remaining sessions if not all were deleted, otherwise an empty list - """ - if delete_all: - query = "DELETE FROM sessions WHERE user_id = ?" - args = (user_id,) - dbmanager.query(query=query, args=args) - return [] - else: - query = "DELETE FROM sessions WHERE user_id = ? AND session_id = ?" - args = (user_id, session_id) - dbmanager.query(query=query, args=args) - sessions = get_sessions(user_id=user_id, dbmanager=dbmanager) - - return sessions - - -def delete_message( - user_id: str, msg_id: str, session_id: str, dbmanager: DBManager, delete_all: bool = False -) -> List[dict]: - """ - Delete a specific message or all messages for a user and session from the database. - - :param user_id: The ID of the user whose messages are to be deleted - :param msg_id: The ID of the specific message to be deleted (ignored if delete_all is True) - :param session_id: The ID of the session whose messages are to be deleted - :param dbmanager: The DBManager instance to interact with the database - :param delete_all: If True, all messages for the user will be deleted - :return: A list of the remaining messages if not all were deleted, otherwise an empty list - """ - - if delete_all: - query = "DELETE FROM messages WHERE user_id = ? AND session_id = ?" - args = (user_id, session_id) - dbmanager.query(query=query, args=args) - return [] - else: - query = "DELETE FROM messages WHERE user_id = ? AND msg_id = ? AND session_id = ?" - args = (user_id, msg_id, session_id) - dbmanager.query(query=query, args=args) - messages = load_messages(user_id=user_id, session_id=session_id, dbmanager=dbmanager) - return messages diff --git a/samples/apps/autogen-assistant/autogenra/version.py b/samples/apps/autogen-assistant/autogenra/version.py deleted file mode 100644 index 005ced7ab62..00000000000 --- a/samples/apps/autogen-assistant/autogenra/version.py +++ /dev/null @@ -1,2 +0,0 @@ -VERSION = "0.0.09a" -APP_NAME = "autogenra"
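The `dbutils` module deleted above exposes a small persistence API around `DBManager`. As a rough sketch of how these helpers fit together (assuming `Message` accepts constructor fields mirroring the columns in `MESSAGES_TABLE_SQL`; the real `datamodel` module is not shown in this diff), saving and reloading a conversation looked roughly like this:

```python
from datetime import datetime

# Sketch only: Message's fields are inferred from the INSERT in save_message;
# the actual datamodel module is not part of this diff.
db = DBManager(path="database.sqlite")  # creates the tables on first run
message = Message(
    user_id="user-1",
    session_id="session-1",
    root_msg_id="root-1",
    msg_id="msg-1",
    role="user",
    content="Plot a chart of NVDA and TESLA stock price YTD.",
    metadata=None,
    timestamp=datetime.now(),
)
save_message(message, db)  # runs the INSERT INTO messages query
history = load_messages(user_id="user-1", session_id="session-1", dbmanager=db)
db.close()
```

`load_messages` returns the rows as dictionaries sorted by timestamp in ascending order, per its docstring above.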
- """ - try: - # Send a GET request to the URL - response = requests.get(url) - # Check for successful access to the webpage - if response.status_code == 200: - # Parse the HTML content of the page using BeautifulSoup - soup = BeautifulSoup(response.text, "html.parser") - # Extract the content of the tag - body_content = soup.find("body") - # Return all the text in the body tag, stripping leading/trailing whitespaces - return " ".join(body_content.stripped_strings) if body_content else None - else: - # Return None if the status code isn't 200 (success) - return None - except requests.RequestException: - # Return None if any request-related exception is caught - return None diff --git a/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py b/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py deleted file mode 100644 index 3a4359245af..00000000000 --- a/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -import re -import json -import hashlib - - -def search_arxiv(query, max_results=10): - """ - Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead. - - Args: - query (str): The search query. - max_results (int, optional): The maximum number of search results to return. Defaults to 10. - - Returns: - jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url' - - Example: - >>> results = search_arxiv("attention is all you need") - >>> print(results) - """ - - import arxiv - - key = hashlib.md5(("search_arxiv(" + str(max_results) + ")" + query).encode("utf-8")).hexdigest() - # Create the cache if it doesn't exist - cache_dir = ".cache" - if not os.path.isdir(cache_dir): - os.mkdir(cache_dir) - - fname = os.path.join(cache_dir, key + ".cache") - - # Cache hit - if os.path.isfile(fname): - fh = open(fname, "r", encoding="utf-8") - data = json.loads(fh.read()) - fh.close() - return data - - # Normalize the query, removing operator keywords - query = re.sub(r"[^\s\w]", " ", query.lower()) - query = re.sub(r"\s(and|or|not)\s", " ", " " + query + " ") - query = re.sub(r"[^\s\w]", " ", query.lower()) - query = re.sub(r"\s+", " ", query).strip() - - search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance) - - jresults = list() - for result in search.results(): - r = dict() - r["entry_id"] = result.entry_id - r["updated"] = str(result.updated) - r["published"] = str(result.published) - r["title"] = result.title - r["authors"] = [str(a) for a in result.authors] - r["summary"] = result.summary - r["comment"] = result.comment - r["journal_ref"] = result.journal_ref - r["doi"] = result.doi - r["primary_category"] = result.primary_category - r["categories"] = result.categories - r["links"] = [str(link) for link in result.links] - r["pdf_url"] = result.pdf_url - jresults.append(r) - - if len(jresults) > max_results: - jresults = jresults[0:max_results] - - # Save to cache - fh = open(fname, "w") - fh.write(json.dumps(jresults)) - fh.close() - return jresults diff --git a/samples/apps/autogen-assistant/autogenra/web/skills/global/generate_images.py b/samples/apps/autogen-assistant/autogenra/web/skills/global/generate_images.py deleted file mode 100644 index 7090730be49..00000000000 --- 
diff --git a/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py b/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py
deleted file mode 100644
index 3a4359245af..00000000000
--- a/samples/apps/autogen-assistant/autogenra/web/skills/global/find_papers_arxiv.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import re
-import json
-import hashlib
-
-
-def search_arxiv(query, max_results=10):
-    """
-    Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.
-
-    Args:
-        query (str): The search query.
-        max_results (int, optional): The maximum number of search results to return. Defaults to 10.
-
-    Returns:
-        jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'
-
-    Example:
-        >>> results = search_arxiv("attention is all you need")
-        >>> print(results)
-    """
-
-    import arxiv
-
-    key = hashlib.md5(("search_arxiv(" + str(max_results) + ")" + query).encode("utf-8")).hexdigest()
-    # Create the cache if it doesn't exist
-    cache_dir = ".cache"
-    if not os.path.isdir(cache_dir):
-        os.mkdir(cache_dir)
-
-    fname = os.path.join(cache_dir, key + ".cache")
-
-    # Cache hit
-    if os.path.isfile(fname):
-        fh = open(fname, "r", encoding="utf-8")
-        data = json.loads(fh.read())
-        fh.close()
-        return data
-
-    # Normalize the query, removing operator keywords
-    query = re.sub(r"[^\s\w]", " ", query.lower())
-    query = re.sub(r"\s(and|or|not)\s", " ", " " + query + " ")
-    query = re.sub(r"[^\s\w]", " ", query.lower())
-    query = re.sub(r"\s+", " ", query).strip()
-
-    search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)
-
-    jresults = list()
-    for result in search.results():
-        r = dict()
-        r["entry_id"] = result.entry_id
-        r["updated"] = str(result.updated)
-        r["published"] = str(result.published)
-        r["title"] = result.title
-        r["authors"] = [str(a) for a in result.authors]
-        r["summary"] = result.summary
-        r["comment"] = result.comment
-        r["journal_ref"] = result.journal_ref
-        r["doi"] = result.doi
-        r["primary_category"] = result.primary_category
-        r["categories"] = result.categories
-        r["links"] = [str(link) for link in result.links]
-        r["pdf_url"] = result.pdf_url
-        jresults.append(r)
-
-    if len(jresults) > max_results:
-        jresults = jresults[0:max_results]
-
-    # Save to cache
-    fh = open(fname, "w")
-    fh.write(json.dumps(jresults))
-    fh.close()
-    return jresults
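`search_arxiv` memoizes results in a local `.cache` directory keyed by an MD5 of the query string, so repeated identical searches hit disk instead of the API. A usage sketch (assumes the third-party `arxiv` package is installed; the query is illustrative):

```
# Usage sketch; requires `pip install arxiv` at call time.
results = search_arxiv("tool use in large language models", max_results=5)
for paper in results:
    # Each entry is a plain dict with title/authors/summary/pdf_url fields.
    print(paper["title"], "->", paper["pdf_url"])
```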
diff --git a/samples/apps/autogen-assistant/autogenra/web/skills/global/generate_images.py b/samples/apps/autogen-assistant/autogenra/web/skills/global/generate_images.py
deleted file mode 100644
index 7090730be49..00000000000
--- a/samples/apps/autogen-assistant/autogenra/web/skills/global/generate_images.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# filename: generate_images.py
-
-from typing import List
-import uuid
-import requests  # to perform HTTP requests
-from pathlib import Path
-
-from openai import OpenAI
-
-
-def generate_and_save_images(query: str, image_size: str = "1024x1024") -> List[str]:
-    """
-    Function to paint, draw or illustrate images based on the user's query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.
-
-    :param query: A natural language description of the image to be generated.
-    :param image_size: The size of the image to be generated. (default is "1024x1024")
-    :return: A list of filenames for the saved images.
-    """
-
-    client = OpenAI()  # Initialize the OpenAI client
-    response = client.images.generate(model="dall-e-3", prompt=query, n=1, size=image_size)  # Generate images
-
-    # List to store the file names of saved images
-    saved_files = []
-
-    # Check if the response is successful
-    if response.data:
-        for image_data in response.data:
-            # Generate a random UUID as the file name
-            file_name = str(uuid.uuid4()) + ".png"  # Assuming the image is a PNG
-            file_path = Path(file_name)
-
-            img_url = image_data.url
-            img_response = requests.get(img_url)
-            if img_response.status_code == 200:
-                # Write the binary content to a file
-                with open(file_path, "wb") as img_file:
-                    img_file.write(img_response.content)
-                print(f"Image saved to {file_path}")
-                saved_files.append(str(file_path))
-            else:
-                print(f"Failed to download the image from {img_url}")
-    else:
-        print("No image data found in the response!")
-
-    # Return the list of saved files
-    return saved_files
-
-
-# Example usage of the function:
-# generate_and_save_images("A cute baby sea otter")
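The deleted skill already carries a usage hint in its trailing comment; spelled out as a runnable sketch (assumes `OPENAI_API_KEY` is set in the environment and the v1 `openai` client is installed; the prompt is illustrative):

```
import os

# Guard: the OpenAI client reads the key from the environment.
assert os.environ.get("OPENAI_API_KEY"), "set OPENAI_API_KEY first"

saved = generate_and_save_images("A watercolor lighthouse at dusk")
for path in saved:
    print("saved:", path)  # local .png filenames returned by the skill
```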
diff --git a/samples/apps/autogen-assistant/docs/ara_stockprices.png b/samples/apps/autogen-assistant/docs/ara_stockprices.png
deleted file mode 100644
index 6bd17465e32..00000000000
Binary files a/samples/apps/autogen-assistant/docs/ara_stockprices.png and /dev/null differ
diff --git a/samples/apps/autogen-assistant/frontend/src/components/views/ra/agents.tsx b/samples/apps/autogen-assistant/frontend/src/components/views/ra/agents.tsx
deleted file mode 100644
index 1add93a8b8e..00000000000
--- a/samples/apps/autogen-assistant/frontend/src/components/views/ra/agents.tsx
+++ /dev/null
@@ -1,332 +0,0 @@
-import { AdjustmentsVerticalIcon } from "@heroicons/react/24/outline";
-import { Modal, Select, Slider } from "antd";
-import * as React from "react";
-import { ControlRowView, GroupView, ModelSelector } from "../../atoms";
-import { IAgentFlowSpec, IFlowConfig, IModelConfig } from "../../types";
-import TextArea from "antd/es/input/TextArea";
-import { useConfigStore } from "../../../hooks/store";
-import debounce from "lodash.debounce";
-import { getModels } from "../../utils";
-
-const FlowView = ({
-  title,
-  flowSpec,
-  setFlowSpec,
-}: {
-  title: string;
-  flowSpec: IAgentFlowSpec;
-  setFlowSpec: (newFlowSpec: IAgentFlowSpec) => void;
-}) => {
-  // Local state for the FlowView component
-  const [localFlowSpec, setLocalFlowSpec] =
-    React.useState<IAgentFlowSpec>(flowSpec);
-
-  // Event handlers for updating local state and propagating changes
-
-  const onControlChange = (value: any, key: string) => {
-    const updatedFlowSpec = {
-      ...localFlowSpec,
-      config: { ...localFlowSpec.config, [key]: value },
-    };
-    setLocalFlowSpec(updatedFlowSpec);
-    setFlowSpec(updatedFlowSpec);
-  };
-
-  const onDebouncedControlChange = React.useCallback(
-    debounce((value: any, key: string) => {
-      onControlChange(value, key);
-    }, 3000),
-    [onControlChange]
-  );
-  const modelConfigs = getModels();
-  return (
-    <>
-      {title}
- - { - onControlChange(value, "max_consecutive_auto_reply"); - }} - /> - } - /> - - { - onControlChange(value, "human_input_mode"); - }} - options={ - [ - { label: "NEVER", value: "NEVER" }, - { label: "TERMINATE", value: "TERMINATE" }, - { label: "ALWAYS", value: "ALWAYS" }, - ] as any - } - /> - } - /> - - { - onDebouncedControlChange(e.target.value, "system_message"); - }} - /> - } - /> - - {flowSpec.config.llm_config && ( - { - const llm_config = { - ...flowSpec.config.llm_config, - config_list, - }; - onControlChange(llm_config, "llm_config"); - }} - /> - } - /> - )} - - - ); -}; - -const AgentsControlView = ({ - flowConfig, - setFlowConfig, - selectedConfig, - setSelectedConfig, - flowConfigs, - setFlowConfigs, -}: { - flowConfig: IFlowConfig; - setFlowConfig: (newFlowConfig: IFlowConfig) => void; - selectedConfig: number; - setSelectedConfig: (index: number) => void; - flowConfigs: IFlowConfig[]; - setFlowConfigs: (newFlowConfigs: IFlowConfig[]) => void; -}) => { - const [isModalVisible, setIsModalVisible] = React.useState(false); - - // Function to update a specific flowConfig by index - const updateFlowConfigs = (index: number, newFlowConfig: IFlowConfig) => { - const updatedFlowConfigs = [...flowConfigs]; - updatedFlowConfigs[index] = newFlowConfig; - setFlowConfigs(updatedFlowConfigs); - }; - - React.useEffect(() => { - updateFlowConfigs(selectedConfig, flowConfig); - }, [flowConfig]); - - const FlowConfigViewer = ({ - flowConfig, - setFlowConfig, - }: { - flowConfig: IFlowConfig; - setFlowConfig: (newFlowConfig: IFlowConfig) => void; - }) => { - // Local state for sender and receiver FlowSpecs - const [senderFlowSpec, setSenderFlowSpec] = React.useState( - flowConfig.sender - ); - const [receiverFlowSpec, setReceiverFlowSpec] = - React.useState(flowConfig.receiver); - - // Update the local state and propagate changes to the parent component - const updateSenderFlowSpec = (newFlowSpec: IAgentFlowSpec) => { - setSenderFlowSpec(newFlowSpec); - setFlowConfig({ ...flowConfig, sender: newFlowSpec }); - }; - - const updateReceiverFlowSpec = (newFlowSpec: IAgentFlowSpec) => { - setReceiverFlowSpec(newFlowSpec); - setFlowConfig({ ...flowConfig, receiver: newFlowSpec }); - }; - - return ( - <> -
{flowConfig.name}
-
-
-
- -
-
-
- -
-
- - ); - }; - - return ( -
- - - AutoGen Agent Settings - - } - open={isModalVisible} - onCancel={() => { - setIsModalVisible(false); - }} - onOk={() => { - setIsModalVisible(false); - }} - > - { - setSelectedConfig(value); - setFlowConfig(flowConfigs[value]); - }} - options={ - flowConfigs.map((config, index) => { - return { label: config.name, value: index }; - }) as any - } - /> - } - /> - - - -

- {" "} - Learn more about AutoGen Agent parameters{" "} - - here - - . -

-
- -
{ - setIsModalVisible(true); - }} - className="text-right flex-1 -mt-1 text-accent" - > - Settings{" "} - -
-
- ); -}; - -const AgentsView = () => { - const flowConfigs = useConfigStore((state) => state.flowConfigs); - const setFlowConfigs = useConfigStore((state) => state.setFlowConfigs); - - const flowConfig = useConfigStore((state) => state.flowConfig); - const setFlowConfig = useConfigStore((state) => state.setFlowConfig); - - const [selectedConfig, setSelectedConfig] = React.useState(0); - // const [flowConfig, setFlowConfig] = React.useState( - // flowConfigs[selectedConfig] - // ); - - return ( -
-
Agents
-
- {" "} - Select or create an agent workflow.{" "} -
-
-
Agent Workflow
-
- -
-
- - updateNewModelConfig("api_version", e.target.value)} + />
); @@ -841,3 +861,528 @@ export const PdfViewer = ({ url }: { url: string }) => { ); }; + +export const AgentFlowSpecView = ({ + title = "Agent Specification", + flowSpec, + setFlowSpec, +}: { + title: string; + flowSpec: IAgentFlowSpec; + setFlowSpec: (newFlowSpec: IAgentFlowSpec) => void; + editMode?: boolean; +}) => { + // Local state for the FlowView component + const [localFlowSpec, setLocalFlowSpec] = + React.useState(flowSpec); + + // Event handlers for updating local state and propagating changes + + const onControlChange = (value: any, key: string) => { + const updatedFlowSpec = { + ...localFlowSpec, + config: { ...localFlowSpec.config, [key]: value }, + }; + setLocalFlowSpec(updatedFlowSpec); + setFlowSpec(updatedFlowSpec); + }; + + const onDebouncedControlChange = React.useCallback( + debounce((value: any, key: string) => { + onControlChange(value, key); + }, 3000), + [onControlChange] + ); + + const llm_config = localFlowSpec.config.llm_config || { config_list: [] }; + + return ( + <> +
{title}
+ + { + onControlChange(e.target.value, "name"); + }} + /> + } + /> + + { + const updatedFlowSpec = { + ...localFlowSpec, + description: e.target.value, + }; + setLocalFlowSpec(updatedFlowSpec); + setFlowSpec(updatedFlowSpec); + }} + /> + } + /> + + { + onControlChange(value, "max_consecutive_auto_reply"); + }} + /> + } + /> + + { + onControlChange(value, "human_input_mode"); + }} + options={ + [ + { label: "NEVER", value: "NEVER" }, + { label: "TERMINATE", value: "TERMINATE" }, + { label: "ALWAYS", value: "ALWAYS" }, + ] as any + } + /> + } + /> + + {llm_config && ( + { + onDebouncedControlChange(e.target.value, "system_message"); + }} + /> + } + /> + )} + + {llm_config && ( + { + const llm_config = { + ...flowSpec.config.llm_config, + config_list, + }; + onControlChange(llm_config, "llm_config"); + }} + /> + } + /> + )} + + { + { + const updatedFlowSpec = { + ...localFlowSpec, + skills, + }; + setLocalFlowSpec(updatedFlowSpec); + setFlowSpec(updatedFlowSpec); + }} + /> + } + /> + } + + + ); +}; + +interface SkillSelectorProps { + skills: ISkill[]; + setSkills: (skills: ISkill[]) => void; + className?: string; +} + +export const SkillSelector: React.FC = ({ + skills, + setSkills, + className, +}) => { + const [isModalVisible, setIsModalVisible] = useState(false); + const [showSkillModal, setShowSkillModal] = React.useState(false); + const [newSkill, setNewSkill] = useState(null); + + const [localSkills, setLocalSkills] = useState(skills); + const [selectedSkill, setSelectedSkill] = useState(null); + + const handleRemoveSkill = (index: number) => { + const updatedSkills = localSkills.filter((_, i) => i !== index); + setLocalSkills(updatedSkills); + setSkills(updatedSkills); + }; + + const handleAddSkill = () => { + if (newSkill) { + const updatedSkills = [...localSkills, newSkill]; + setLocalSkills(updatedSkills); + setSkills(updatedSkills); + setNewSkill(null); + } + }; + + useEffect(() => { + if (selectedSkill) { + setShowSkillModal(true); + } + }, [selectedSkill]); + + return ( + <> + { + setShowSkillModal(false); + setSelectedSkill(null); + }} + onCancel={() => { + setShowSkillModal(false); + setSelectedSkill(null); + }} + > + {selectedSkill && ( +
+
{selectedSkill.file_name}
+ +
+ )} +
+ +
+ {localSkills.map((skill, index) => ( +
+ { + setSelectedSkill(skill); + }} + className=" inline-block " + > + {skill.title} + + handleRemoveSkill(index)} + className="ml-1 text-primary hover:text-accent duration-300 w-4 h-4 inline-block" + /> +
+ ))} + +
{ + setIsModalVisible(true); + }} + > + add +
+
+ + setIsModalVisible(false)} + footer={[ + , + , + ]} + > + + + + ); +}; + +export const SkillLoader = ({ + skill, + setSkill, +}: { + skill: ISkill | null; + setSkill: (skill: ISkill | null) => void; +}) => { + const [skills, setSkills] = useState([]); + const [loading, setLoading] = useState(false); + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + const serverUrl = getServerUrl(); + const { user } = React.useContext(appContext); + const listSkillsUrl = `${serverUrl}/skills?user_id=${user?.email}`; + + const fetchSkills = () => { + setError(null); + setLoading(true); + // const fetch; + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + + const onSuccess = (data: any) => { + if (data && data.status) { + message.success(data.message); + setSkills(data.data); + if (data.data.length > 0) { + setSkill(data.data[0]); + } + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(listSkillsUrl, payLoad, onSuccess, onError); + }; + + useEffect(() => { + fetchSkills(); + }, []); + + const skillOptions = skills.map((skill: ISkill, index: number) => ({ + label: skill.title, + value: index, + })); + return ( +
+ + + {skills && ( + <> + { + setNewSkillTitle(e.target.value); + }} + />