Skip to content

Commit

Permalink
Merge branch 'main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
qingyun-wu authored Dec 25, 2023
2 parents 93f159c + 70cc1f4 commit 02c46c3
Show file tree
Hide file tree
Showing 126 changed files with 5,621 additions and 2,005 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ node_modules/
*.log

# Python virtualenv
.venv
.venv*

# Byte-compiled / optimized / DLL files
__pycache__/
Expand Down
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,15 @@ The easiest way to start playing is
2. Copy OAI_CONFIG_LIST_sample to ./notebook folder, name to OAI_CONFIG_LIST, and set the correct configuration.
3. Start playing with the notebooks!

## Using existing docker image
Install Docker, save your OpenAI API key into an environment variable named OPENAI_API_KEY, and then run the following.

```
docker pull yuandongtian/autogen:latest
docker run -it -e OPENAI_API_KEY=$OPENAI_API_KEY -p 8081:8081 docker.io/yuandongtian/autogen:latest
```

Then open `http://localhost:8081/` in your browser to use AutoGen. The UI is from `./samples/apps/autogen-assistant`. See docker hub [link](https://hub.docker.com/r/yuandongtian/autogen) for more details.

## Installation

Expand Down
110 changes: 110 additions & 0 deletions autogen/_pydantic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
from typing import Any, Dict, Optional, Tuple, Type, Union, get_args

from pydantic import BaseModel
from pydantic.version import VERSION as PYDANTIC_VERSION
from typing_extensions import get_origin

__all__ = ("JsonSchemaValue", "model_dump", "model_dump_json", "type2schema")

PYDANTIC_V1 = PYDANTIC_VERSION.startswith("1.")

if not PYDANTIC_V1:
    from pydantic import TypeAdapter
    from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref
    from pydantic.json_schema import JsonSchemaValue

    def type2schema(t: Optional[Type]) -> JsonSchemaValue:
        """Convert a type to a JSON schema.

        Args:
            t (Type): The type to convert

        Returns:
            JsonSchemaValue: The JSON schema
        """
        return TypeAdapter(t).json_schema()

    def model_dump(model: BaseModel) -> Dict[str, Any]:
        """Convert a pydantic model to a dict.

        Args:
            model (BaseModel): The model to convert

        Returns:
            Dict[str, Any]: The dict representation of the model
        """
        return model.model_dump()

    def model_dump_json(model: BaseModel) -> str:
        """Convert a pydantic model to a JSON string.

        Args:
            model (BaseModel): The model to convert

        Returns:
            str: The JSON string representation of the model
        """
        return model.model_dump_json()


# Remove this once we drop support for pydantic 1.x
else:  # pragma: no cover
    from pydantic import schema_of
    from pydantic.typing import evaluate_forwardref as evaluate_forwardref

    # Pydantic v1 has no JsonSchemaValue type; alias it to a plain dict.
    JsonSchemaValue = Dict[str, Any]

    def type2schema(t: Optional[Type]) -> JsonSchemaValue:
        """Convert a type to a JSON schema.

        Args:
            t (Type): The type to convert

        Returns:
            JsonSchemaValue: The JSON schema
        """
        # NOTE: this branch only runs under pydantic v1 (PYDANTIC_V1 is True
        # here by construction), so no runtime version re-check is needed.
        # Handle the cases pydantic v1's schema_of cannot express directly.
        if t is None:
            return {"type": "null"}
        elif get_origin(t) is Union:
            return {"anyOf": [type2schema(tt) for tt in get_args(t)]}
        elif get_origin(t) in [Tuple, tuple]:
            prefixItems = [type2schema(tt) for tt in get_args(t)]
            return {
                "maxItems": len(prefixItems),
                "minItems": len(prefixItems),
                "prefixItems": prefixItems,
                "type": "array",
            }

        d = schema_of(t)
        # schema_of embeds the type's name/docstring; drop them so the output
        # is closer to what pydantic v2's TypeAdapter produces.
        d.pop("title", None)
        d.pop("description", None)
        return d

    def model_dump(model: BaseModel) -> Dict[str, Any]:
        """Convert a pydantic model to a dict.

        Args:
            model (BaseModel): The model to convert

        Returns:
            Dict[str, Any]: The dict representation of the model
        """
        return model.dict()

    def model_dump_json(model: BaseModel) -> str:
        """Convert a pydantic model to a JSON string.

        Args:
            model (BaseModel): The model to convert

        Returns:
            str: The JSON string representation of the model
        """
        return model.json()
2 changes: 1 addition & 1 deletion autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def __init__(
**kwargs,
)

# Update the provided desciption if None, and we are using the default system_message,
# Update the provided description if None, and we are using the default system_message,
# then use the default description.
if description is None:
if system_message == self.DEFAULT_SYSTEM_MESSAGE:
Expand Down
6 changes: 3 additions & 3 deletions autogen/agentchat/contrib/compressible_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def _manage_history_on_token_limit(self, messages, token_used, max_token_allowed
# 1. mode = "TERMINATE", terminate the agent if no token left.
if self.compress_config["mode"] == "TERMINATE":
if max_token_allowed - token_used <= 0:
# Teminate if no token left.
# Terminate if no token left.
print(
colored(
f'Warning: Terminate Agent "{self.name}" due to no token left for oai reply. max token for {model}: {max_token_allowed}, existing token count: {token_used}',
Expand Down Expand Up @@ -320,7 +320,7 @@ def on_oai_token_limit(
cmsg["role"] = "user"
sender._oai_messages[self][i] = cmsg

# sucessfully compressed, return False, None for generate_oai_reply to be called with the updated messages
# successfully compressed, return False, None for generate_oai_reply to be called with the updated messages
return False, None
return final, None

Expand All @@ -332,7 +332,7 @@ def compress_messages(
"""Compress a list of messages into one message.
The first message (the initial prompt) will not be compressed.
The rest of the messages will be compressed into one message, the model is asked to distinuish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN.
The rest of the messages will be compressed into one message, the model is asked to distinguish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN.
Check out the compress_sys_msg.
TODO: the model used by the compression agent may differ from the assistant agent's. For example, if the original model is gpt-4, we start compressing at 70% of usage, and 70% of 8092 = 5664; if we use gpt-3.5 here with max_token = 4096, it will raise an error. Choose the compression model automatically?
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/gpt_assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def _get_run_response(self, thread, run):
new_messages.append(
{
"role": msg.role,
"content": f"Recieved file id={content.image_file.file_id}",
"content": f"Received file id={content.image_file.file_id}",
}
)
return new_messages
Expand All @@ -219,7 +219,7 @@ def _get_run_response(self, thread, run):
}

logger.info(
"Intermediate executing(%s, Sucess: %s) : %s",
"Intermediate executing(%s, Success: %s) : %s",
tool_response["name"],
is_exec_success,
tool_response["content"],
Expand Down
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/img_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def get_image_data(image_file: str, use_b64=True) -> bytes:
return content


def llava_formater(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]:
def llava_formatter(prompt: str, order_image_tokens: bool = False) -> Tuple[str, List[str]]:
"""
Formats the input prompt by replacing image tags and returns the new prompt along with image locations.
Expand Down
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/llava_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from regex import R

from autogen.agentchat.agent import Agent
from autogen.agentchat.contrib.img_utils import get_image_data, llava_formater
from autogen.agentchat.contrib.img_utils import get_image_data, llava_formatter
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from autogen.code_utils import content_str

Expand Down Expand Up @@ -162,7 +162,7 @@ def llava_call(prompt: str, llm_config: dict) -> str:
Makes a call to the LLaVA service to generate text based on a given prompt
"""

prompt, images = llava_formater(prompt, order_image_tokens=False)
prompt, images = llava_formatter(prompt, order_image_tokens=False)

for im in images:
if len(im) == 0:
Expand Down
8 changes: 5 additions & 3 deletions autogen/agentchat/contrib/math_user_proxy_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
from time import sleep

from autogen._pydantic import PYDANTIC_V1
from autogen.agentchat import Agent, UserProxyAgent
from autogen.code_utils import UNKNOWN, extract_code, execute_code, infer_lang
from autogen.math_utils import get_answer
Expand Down Expand Up @@ -384,7 +385,8 @@ class WolframAlphaAPIWrapper(BaseModel):
class Config:
"""Configuration for this pydantic object."""

extra = Extra.forbid
if PYDANTIC_V1:
extra = Extra.forbid

@root_validator(skip_on_failure=True)
def validate_environment(cls, values: Dict) -> Dict:
Expand All @@ -395,8 +397,8 @@ def validate_environment(cls, values: Dict) -> Dict:
try:
import wolframalpha

except ImportError:
raise ImportError("wolframalpha is not installed. " "Please install it with `pip install wolframalpha`")
except ImportError as e:
raise ImportError("wolframalpha is not installed. Please install it with `pip install wolframalpha`") from e
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client

Expand Down
5 changes: 5 additions & 0 deletions autogen/agentchat/contrib/multimodal_conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,14 @@ def colored(x, *args, **kwargs):
from autogen.code_utils import content_str

DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant."""
DEFAULT_MODEL = "gpt-4-vision-preview"


class MultimodalConversableAgent(ConversableAgent):
DEFAULT_CONFIG = {
"model": DEFAULT_MODEL,
}

def __init__(
self,
name: str,
Expand Down
Loading

0 comments on commit 02c46c3

Please sign in to comment.