
Improve initialization efficiency of AgentScope (#353)
DavdGao authored Jul 23, 2024
1 parent 83537a1 commit 527d274
Showing 6 changed files with 29 additions and 38 deletions.
4 changes: 2 additions & 2 deletions src/agentscope/agents/agent.py
@@ -21,7 +21,7 @@


class _AgentMeta(ABCMeta):
"""The meta-class for agent.
"""The metaclass for agent.
1. record the init args into `_init_settings` field.
2. register class name into `registry` field.
@@ -232,7 +232,7 @@ def get_agent_class(cls, agent_class_name: str) -> Type[AgentBase]:
ValueError: Agent class name not exits.
Returns:
-            Type[AgentBase]: the AgentBase sub-class.
+            Type[AgentBase]: the AgentBase subclass.
"""
if agent_class_name not in cls._registry:
raise ValueError(f"Agent class <{agent_class_name}> not found.")
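Note: the `_AgentMeta` docstring above describes a registry metaclass that records constructor arguments and registers every subclass by name. As a rough, self-contained sketch of that pattern (illustrative only, not AgentScope's actual implementation; all names below are stand-ins):

from abc import ABCMeta


class _RegistryMeta(ABCMeta):
    """Illustrative stand-in for _AgentMeta."""

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, "_registry"):
            cls._registry = {}  # created once, on the base class
        else:
            cls._registry[name] = cls  # each subclass registers itself
        super().__init__(name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        # Record the init args so the instance can later be re-created
        # (e.g. in another process), mirroring the `_init_settings` field.
        instance._init_settings = {"args": args, "kwargs": kwargs}
        return instance


class AgentBase(metaclass=_RegistryMeta):
    def __init__(self, name: str) -> None:
        self.name = name


class EchoAgent(AgentBase):
    pass


assert AgentBase._registry["EchoAgent"] is EchoAgent
assert EchoAgent("demo")._init_settings == {"args": ("demo",), "kwargs": {}}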
28 changes: 11 additions & 17 deletions src/agentscope/models/litellm_model.py
@@ -10,11 +10,6 @@
from ..message import Msg
from ..utils.tools import _convert_to_str

-try:
-    import litellm
-except ImportError:
-    litellm = None


class LiteLLMWrapperBase(ModelWrapperBase, ABC):
"""The model wrapper based on LiteLLM API."""
@@ -59,16 +54,6 @@ def __init__(

super().__init__(config_name=config_name)

-        if litellm is None:
-            raise ImportError(
-                "Cannot import litellm package in current python environment."
-                "You should try:"
-                "1. Install litellm by `pip install litellm`"
-                "2. If you still have import error, you should try to "
-                "update the openai to higher version, e.g. "
-                "by running `pip install openai==1.25.1",
-            )

self.model_name = model_name
self.generate_args = generate_args or {}
self._register_default_metrics()
@@ -86,7 +71,7 @@ def format(

class LiteLLMChatWrapper(LiteLLMWrapperBase):
"""The model wrapper based on litellm chat API.
-    To use the LiteLLM wrapper, environent variables must be set.
+    To use the LiteLLM wrapper, environment variables must be set.
Different model_name could be using different environment variables.
For example:
- for model_name: "gpt-3.5-turbo", you need to set "OPENAI_API_KEY"
@@ -207,6 +192,15 @@ def __call__(
"and 'content' key for LiteLLM API.",
)

+        # Import litellm only when it is used
+        try:
+            import litellm
+        except ImportError as e:
+            raise ImportError(
+                "Cannot find litellm in current environment, please "
+                "install it by `pip install litellm`.",
+            ) from e

# step3: forward to generate response
if stream is None:
stream = self.stream
@@ -295,7 +289,7 @@ def format(
Note that the format function might not be the optimal way to construct
prompt for every model, but a common way to do so.
Developers are encouraged to implement their own prompt
-        engineering strategies if have strong performance concerns.
+        engineering strategies if they have strong performance concerns.
Args:
args (`Union[Msg, Sequence[Msg]]`):
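Note: the hunk above is the heart of this commit. The module-level `import litellm` is removed, and the import is deferred into `__call__`, so merely importing or initializing AgentScope no longer pays for litellm's heavy import. A minimal sketch of the same deferred-import pattern (the wrapper class below is hypothetical, not the actual `LiteLLMChatWrapper`):

class LazyLiteLLMWrapper:
    """Sketch of the deferred-import pattern used in this commit."""

    def __init__(self, model_name: str) -> None:
        # No third-party import here: constructing the wrapper
        # (e.g. during agentscope.init) stays fast.
        self.model_name = model_name

    def __call__(self, prompt: str) -> str:
        # The expensive import happens only on first actual use.
        try:
            import litellm
        except ImportError as e:
            raise ImportError(
                "Cannot find litellm in current environment, please "
                "install it by `pip install litellm`.",
            ) from e
        response = litellm.completion(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
        )
        return response.choices[0].message.content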
18 changes: 8 additions & 10 deletions src/agentscope/models/openai_model.py
@@ -14,11 +14,6 @@
from ..message import Msg
from ..utils.tools import _convert_to_str, _to_openai_image_url

-try:
-    import openai
-except ImportError:
-    openai = None

from ..utils.token_utils import get_openai_max_length
from ..constants import _DEFAULT_API_BUDGET

@@ -66,14 +61,17 @@ def __init__(

super().__init__(config_name=config_name)

-        if openai is None:
-            raise ImportError(
-                "Cannot find openai package in current python environment.",
-            )

self.model_name = model_name
self.generate_args = generate_args or {}

+        try:
+            import openai
+        except ImportError as e:
+            raise ImportError(
+                "Cannot find openai package, please install it by "
+                "`pip install openai`",
+            ) from e

self.client = openai.OpenAI(
api_key=api_key,
organization=organization,
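Note: the same deferral is applied to `openai` here, moved from module import time to wrapper construction, where the client is actually needed. One way to verify the effect of these changes (a suggested check, not part of the commit) is to time the top-level import in a fresh interpreter; CPython's `python -X importtime -c "import agentscope"` also prints a per-module breakdown:

import importlib
import time

# Run in a fresh interpreter so no modules are cached.
start = time.perf_counter()
importlib.import_module("agentscope")
print(f"import agentscope took {time.perf_counter() - start:.3f}s")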
8 changes: 4 additions & 4 deletions src/agentscope/rpc/rpc_agent_client.py
@@ -97,7 +97,7 @@ def is_alive(self) -> bool:
"""Check if the agent server is alive.
Returns:
-            bool: Indecate whether the server is alive.
+            bool: Indicate whether the server is alive.
"""

try:
@@ -151,7 +151,7 @@ def create_agent(
agent_id (`str`): agent_id of the created agent.
Returns:
-            bool: Indecate whether the creation is successful
+            bool: Indicate whether the creation is successful
"""
try:
with grpc.insecure_channel(f"{self.host}:{self.port}") as channel:
@@ -190,7 +190,7 @@ def delete_agent(
agent_id (`str`): id of the agent to be deleted.
Returns:
-            bool: Indecate whether the deletion is successful
+            bool: Indicate whether the deletion is successful
"""
with grpc.insecure_channel(f"{self.host}:{self.port}") as channel:
stub = RpcAgentStub(channel)
@@ -370,7 +370,7 @@ def call_in_thread(
Args:
client (`RpcAgentClient`): The rpc client.
-        x (`str`): The value of the reqeust.
+        value (`str`): The value of the request.
func_name (`str`): The name of the function being called.
Returns:
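Note: these hunks only fix docstring typos, but the surrounding code shows the client's idiom of opening a short-lived gRPC channel per call. A small sketch of that idiom as a reachability probe (illustrative only; the real `is_alive` goes through the generated `RpcAgentStub`):

import grpc


def is_alive(host: str, port: int, timeout: float = 3.0) -> bool:
    """Probe whether a gRPC server is reachable."""
    try:
        # A fresh channel per call, as in the code above; the context
        # manager closes it when the block exits.
        with grpc.insecure_channel(f"{host}:{port}") as channel:
            grpc.channel_ready_future(channel).result(timeout=timeout)
            return True
    except grpc.FutureTimeoutError:
        return False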
2 changes: 1 addition & 1 deletion src/agentscope/strategy/mixture_of_agent.py
@@ -45,7 +45,7 @@ def __init__(
in each round.
Can take both config name of model or model instance as input.
We encourage using different models to get better diversity.
-            Emperically, responses generated by heterogeneous models
+            Empirically, responses generated by heterogeneous models
contribute more than those produced by the same model.
rounds (`int`):
The number of processing rounds to refine the responses.
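Note: for readers unfamiliar with the strategy this docstring belongs to, a mixture-of-agents run lets several (ideally heterogeneous) reference models answer, folds their responses back into the prompt, and repeats for the configured number of rounds before a final model aggregates. A bare-bones sketch of that control flow (the callables are placeholders, not AgentScope's model wrappers):

from typing import Callable, List


def mixture_of_agents(
    question: str,
    reference_models: List[Callable[[str], str]],
    aggregator: Callable[[str], str],
    rounds: int = 1,
) -> str:
    """Sketch of the mixture-of-agents control flow."""
    context = question
    for _ in range(rounds):
        # Heterogeneous models tend to give more diverse answers here.
        answers = [model(context) for model in reference_models]
        # Fold the answers back in so the next round can refine them.
        context = question + "\n\nPrevious answers:\n" + "\n".join(answers)
    # A final model synthesizes the refined answers into one response.
    return aggregator(context)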
7 changes: 3 additions & 4 deletions tests/litellm_test.py
@@ -17,10 +17,9 @@ def setUp(self) -> None:
{"role": "assistant", "content": "How can I assist you?"},
]

@patch("agentscope.models.litellm_model.litellm")
@patch("litellm.completion")
def test_chat(self, mock_litellm: MagicMock) -> None:
"""
Test chat"""
"""Test chat"""
mock_response = MagicMock()
mock_response.model_dump.return_value = {
"choices": [
@@ -36,7 +35,7 @@ def test_chat(self, mock_litellm: MagicMock) -> None:
0
].message.content = "Hello, this is a mocked response!"

-        mock_litellm.completion.return_value = mock_response
+        mock_litellm.return_value = mock_response

agentscope.init(
model_configs={
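Note: the test change is a direct consequence of the lazy import. `litellm` is no longer a module-level attribute of `agentscope.models.litellm_model`, so patching `agentscope.models.litellm_model.litellm` would fail; the mock must target `litellm.completion` where it is defined. A stripped-down sketch of the same situation (`lazy_call` is a hypothetical function using the deferred import):

from unittest.mock import MagicMock, patch


def lazy_call(prompt: str) -> str:
    # Deferred import, as in the commit: `litellm` exists only inside
    # this function, never as an attribute of the enclosing module.
    import litellm

    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content


# Patch the function where it is defined, not where it is (lazily) used.
@patch("litellm.completion")
def test_lazy_call(mock_completion: MagicMock) -> None:
    mock_completion.return_value.choices = [
        MagicMock(message=MagicMock(content="mocked!")),
    ]
    assert lazy_call("hi") == "mocked!"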
