From 6b920fa53c7490b46d08a40977c797cf588d5fb6 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:53:46 +0800 Subject: [PATCH 1/9] Add files via upload --- src/agentscope/models/yi_model.py | 222 ++++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) create mode 100644 src/agentscope/models/yi_model.py diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py new file mode 100644 index 000000000..f6b1409a1 --- /dev/null +++ b/src/agentscope/models/yi_model.py @@ -0,0 +1,222 @@ +from abc import ABC +from openai import OpenAI +from .model import ModelWrapperBase, ModelResponse +from typing import Union, Sequence, Any, Dict, List +import logging +from ..message import Msg +logger = logging.getLogger(__name__) + +_DEFAULT_API_BUDGET = float("inf") + +try: + import openai +except ImportError: + openai = None + +class YiWrapperBase(ModelWrapperBase, ABC): + """The model wrapper for Yi API.""" + + def __init__( + self, + config_name: str, + model_name: str = None, + api_key: str = None, + region: str = "domestic", # "domestic" or "overseas" + client_args: dict = None, + generate_args: dict = None, + budget: float = _DEFAULT_API_BUDGET, + **kwargs: Any, + ) -> None: + """Initialize the Yi client. + + Args: + config_name (`str`): + The name of the model config. + model_name (`str`, default `None`): + The name of the model to use in Yi API. + api_key (`str`, default `None`): + The API key for Yi API. + region (`str`, default "domestic"): + The region for API base URL. Either "domestic" or "overseas". + client_args (`dict`, default `None`): + The extra keyword arguments to initialize the Yi client. + generate_args (`dict`, default `None`): + The extra keyword arguments used in Yi api generation, + e.g. `temperature`, `max_tokens`. + budget (`float`, default `None`): + The total budget using this model. Set to `None` means no + limit. + """ + if model_name is None: + model_name = config_name + logger.warning("model_name is not set, use config_name instead.") + + super().__init__(config_name=config_name) + + if openai is None: + raise ImportError( + "Cannot find openai package in current python environment.", + ) + + self.model_name = model_name + self.generate_args = generate_args or {} + + base_url = "https://api.lingyiwanwu.com/v1" if region == "domestic" else "https://api.01.ai/v1" + if base_url: + self.base_url = base_url + elif region == "overseas": + self.base_url = "https://api.01.ai/v1" + else: + self.base_url = "https://api.lingyiwanwu.com/v1" + + if region == "overseas" and model_name not in ["yi-large"]: + logger.warning( + f"Model {model_name} may not be available for overseas region. 
Only yi-large is confirmed to work.More information can be found here https://platform.01.ai/docs#models-and-pricing") + self.client = OpenAI( + api_key=api_key, + base_url=self.base_url, + **(client_args or {}), + ) + + # Set the max length of Yi model (this might need to be adjusted) + self.max_length = 4096 # Placeholder value, adjust as needed + + # Set monitor accordingly + self._register_budget(model_name, budget) + self._register_default_metrics() + + def _register_default_metrics(self) -> None: + # Set monitor accordingly + self.monitor.register( + self._metric("call_counter"), + metric_unit="times", + ) + self.monitor.register( + self._metric("prompt_tokens"), + metric_unit="token", + ) + self.monitor.register( + self._metric("completion_tokens"), + metric_unit="token", + ) + self.monitor.register( + self._metric("total_tokens"), + metric_unit="token", + ) + + def format( + self, + *args: Union[Msg, Sequence[Msg]], + ) -> Union[List[dict], str]: + raise NotImplementedError( + f"Model Wrapper [{type(self).__name__}] doesn't " + f"implement the format method. Please implement it " + f"in the subclass." + ) + + +class YiChatWrapper(YiWrapperBase): + """The model wrapper for Yi's chat API.""" + + model_type: str = "yi_chat" + + def __call__( + self, + messages: list, + **kwargs: Any, + ) -> ModelResponse: + """Processes a list of messages to construct a payload for the Yi + API call. It then makes a request to the Yi API and returns the + response. This method also updates monitoring metrics based on the + API response. + + Args: + messages (`list`): + A list of messages to process. + **kwargs (`Any`): + The keyword arguments to Yi chat completions API, + e.g. `temperature`, `max_tokens`, etc. + + Returns: + `ModelResponse`: + The response text in text field, and the raw response in + raw field. + """ + # Prepare keyword arguments + kwargs = {**self.generate_args, **kwargs} + + # Checking messages + if not isinstance(messages, list): + raise ValueError( + "Yi `messages` field expected type `list`, " + f"got `{type(messages)}` instead.", + ) + if not all("role" in msg and "content" in msg for msg in messages): + raise ValueError( + "Each message in the 'messages' list must contain a 'role' " + "and 'content' key for Yi API.", + ) + + # Forward to generate response + response = self.client.chat.completions.create( + model=self.model_name, + messages=messages, + **kwargs, + ) + + # Record the api invocation if needed + self._save_model_invocation( + arguments={ + "model": self.model_name, + "messages": messages, + **kwargs, + }, + response=response.model_dump(), + ) + + # Update monitor accordingly + self.update_monitor(call_counter=1, **response.usage.model_dump()) + + # Return response + return ModelResponse( + text=response.choices[0].message.content, + raw=response.model_dump(), + ) + + def format( + self, + *args: Union[Msg, Sequence[Msg]], + ) -> List[dict]: + """Format the input string and dictionary into the format that + Yi Chat API required. + + Args: + args (`Union[Msg, Sequence[Msg]]`): + The input arguments to be formatted, where each argument + should be a `Msg` object, or a list of `Msg` objects. + + Returns: + `List[dict]`: + The formatted messages in the format that Yi Chat API + required. 
+ """ + messages = [] + for arg in args: + if arg is None: + continue + if isinstance(arg, Msg): + messages.append( + { + "role": arg.role, + "content": str(arg.content), + } + ) + elif isinstance(arg, list): + messages.extend(self.format(*arg)) + else: + raise TypeError( + f"The input should be a Msg object or a list " + f"of Msg objects, got {type(arg)}.", + ) + + return messages \ No newline at end of file From 19761e9c0271b623940b09142ff0687c6170ab41 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:54:39 +0800 Subject: [PATCH 2/9] Update __init__.py --- src/agentscope/models/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/agentscope/models/__init__.py b/src/agentscope/models/__init__.py index 929a8334a..ed217c6a5 100644 --- a/src/agentscope/models/__init__.py +++ b/src/agentscope/models/__init__.py @@ -40,7 +40,9 @@ from .litellm_model import ( LiteLLMChatWrapper, ) - +from .yi_model import ( + YiChatWrapper, +) __all__ = [ "ModelWrapperBase", @@ -67,6 +69,7 @@ "load_config_by_name", "read_model_configs", "clear_model_configs", + "YiChatWrapper", ] _MODEL_CONFIGS: dict[str, dict] = {} From 6c2bc1f9f96a7792e050ba573ecf266f9d43ac5c Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Wed, 17 Jul 2024 15:52:21 +0800 Subject: [PATCH 3/9] Update yi_model.py --- src/agentscope/models/yi_model.py | 88 ++++++++----------------------- 1 file changed, 22 insertions(+), 66 deletions(-) diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py index f6b1409a1..2e3b8252d 100644 --- a/src/agentscope/models/yi_model.py +++ b/src/agentscope/models/yi_model.py @@ -1,9 +1,13 @@ +"""Model wrapper for Yi models""" from abc import ABC +import logging +from typing import Union, Sequence, Any, List + from openai import OpenAI + from .model import ModelWrapperBase, ModelResponse -from typing import Union, Sequence, Any, Dict, List -import logging from ..message import Msg + logger = logging.getLogger(__name__) _DEFAULT_API_BUDGET = float("inf") @@ -27,26 +31,7 @@ def __init__( budget: float = _DEFAULT_API_BUDGET, **kwargs: Any, ) -> None: - """Initialize the Yi client. - - Args: - config_name (`str`): - The name of the model config. - model_name (`str`, default `None`): - The name of the model to use in Yi API. - api_key (`str`, default `None`): - The API key for Yi API. - region (`str`, default "domestic"): - The region for API base URL. Either "domestic" or "overseas". - client_args (`dict`, default `None`): - The extra keyword arguments to initialize the Yi client. - generate_args (`dict`, default `None`): - The extra keyword arguments used in Yi api generation, - e.g. `temperature`, `max_tokens`. - budget (`float`, default `None`): - The total budget using this model. Set to `None` means no - limit. - """ + """Initialize the Yi client.""" if model_name is None: model_name = config_name logger.warning("model_name is not set, use config_name instead.") @@ -55,23 +40,22 @@ def __init__( if openai is None: raise ImportError( - "Cannot find openai package in current python environment.", + "Cannot find openai package in current python environment." 
) self.model_name = model_name self.generate_args = generate_args or {} - base_url = "https://api.lingyiwanwu.com/v1" if region == "domestic" else "https://api.01.ai/v1" - if base_url: - self.base_url = base_url - elif region == "overseas": - self.base_url = "https://api.01.ai/v1" - else: - self.base_url = "https://api.lingyiwanwu.com/v1" + base_url = ("https://api.lingyiwanwu.com/v1" if region == "domestic" + else "https://api.01.ai/v1") + self.base_url = base_url if region == "overseas" and model_name not in ["yi-large"]: logger.warning( - f"Model {model_name} may not be available for overseas region. Only yi-large is confirmed to work.More information can be found here https://platform.01.ai/docs#models-and-pricing") + f"Model {model_name} may not be available for overseas region. " + "Only yi-large is confirmed to work. More information can be " + "found here https://platform.01.ai/docs#models-and-pricing" + ) self.client = OpenAI( api_key=api_key, base_url=self.base_url, @@ -125,36 +109,20 @@ def __call__( messages: list, **kwargs: Any, ) -> ModelResponse: - """Processes a list of messages to construct a payload for the Yi - API call. It then makes a request to the Yi API and returns the - response. This method also updates monitoring metrics based on the - API response. - - Args: - messages (`list`): - A list of messages to process. - **kwargs (`Any`): - The keyword arguments to Yi chat completions API, - e.g. `temperature`, `max_tokens`, etc. - - Returns: - `ModelResponse`: - The response text in text field, and the raw response in - raw field. - """ + """Processes a list of messages and makes a request to the Yi API.""" # Prepare keyword arguments kwargs = {**self.generate_args, **kwargs} # Checking messages if not isinstance(messages, list): raise ValueError( - "Yi `messages` field expected type `list`, " - f"got `{type(messages)}` instead.", + f"Yi `messages` field expected type `list`, " + f"got `{type(messages)}` instead." ) if not all("role" in msg and "content" in msg for msg in messages): raise ValueError( "Each message in the 'messages' list must contain a 'role' " - "and 'content' key for Yi API.", + "and 'content' key for Yi API." ) # Forward to generate response @@ -187,19 +155,7 @@ def format( self, *args: Union[Msg, Sequence[Msg]], ) -> List[dict]: - """Format the input string and dictionary into the format that - Yi Chat API required. - - Args: - args (`Union[Msg, Sequence[Msg]]`): - The input arguments to be formatted, where each argument - should be a `Msg` object, or a list of `Msg` objects. - - Returns: - `List[dict]`: - The formatted messages in the format that Yi Chat API - required. - """ + """Format the input messages for the Yi Chat API.""" messages = [] for arg in args: if arg is None: @@ -216,7 +172,7 @@ def format( else: raise TypeError( f"The input should be a Msg object or a list " - f"of Msg objects, got {type(arg)}.", + f"of Msg objects, got {type(arg)}." 
) - return messages \ No newline at end of file + return messages From 614fab24545b3bf68c9cf92bd039f8b7c793f546 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Wed, 17 Jul 2024 16:37:30 +0800 Subject: [PATCH 4/9] Update yi_model.py --- src/agentscope/models/yi_model.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py index 2e3b8252d..29700ce74 100644 --- a/src/agentscope/models/yi_model.py +++ b/src/agentscope/models/yi_model.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Model wrapper for Yi models""" from abc import ABC import logging @@ -52,9 +53,10 @@ def __init__( if region == "overseas" and model_name not in ["yi-large"]: logger.warning( - f"Model {model_name} may not be available for overseas region. " + "Model %s may not be available for overseas region. " "Only yi-large is confirmed to work. More information can be " - "found here https://platform.01.ai/docs#models-and-pricing" + "found here https://platform.01.ai/docs#models-and-pricing", + model_name ) self.client = OpenAI( api_key=api_key, From fbdd76418d72510a75ac04742a7652937357b481 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Thu, 18 Jul 2024 16:30:16 +0800 Subject: [PATCH 5/9] Update yi_model.py --- src/agentscope/models/yi_model.py | 54 +++++++++++++++++-------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py index 29700ce74..ed89ce99b 100644 --- a/src/agentscope/models/yi_model.py +++ b/src/agentscope/models/yi_model.py @@ -18,19 +18,20 @@ except ImportError: openai = None + class YiWrapperBase(ModelWrapperBase, ABC): """The model wrapper for Yi API.""" def __init__( - self, - config_name: str, - model_name: str = None, - api_key: str = None, - region: str = "domestic", # "domestic" or "overseas" - client_args: dict = None, - generate_args: dict = None, - budget: float = _DEFAULT_API_BUDGET, - **kwargs: Any, + self, + config_name: str, + model_name: str = None, + api_key: str = None, + region: str = "domestic", # "domestic" or "overseas" + client_args: dict = None, + generate_args: dict = None, + budget: float = _DEFAULT_API_BUDGET, + **kwargs: Any, ) -> None: """Initialize the Yi client.""" if model_name is None: @@ -41,14 +42,17 @@ def __init__( if openai is None: raise ImportError( - "Cannot find openai package in current python environment." + "Cannot find openai package in current python environment.", ) self.model_name = model_name self.generate_args = generate_args or {} - base_url = ("https://api.lingyiwanwu.com/v1" if region == "domestic" - else "https://api.01.ai/v1") + base_url = ( + "https://api.lingyiwanwu.com/v1" + if region == "domestic" + else "https://api.01.ai/v1" + ) self.base_url = base_url if region == "overseas" and model_name not in ["yi-large"]: @@ -56,7 +60,7 @@ def __init__( "Model %s may not be available for overseas region. " "Only yi-large is confirmed to work. More information can be " "found here https://platform.01.ai/docs#models-and-pricing", - model_name + model_name, ) self.client = OpenAI( api_key=api_key, @@ -91,13 +95,13 @@ def _register_default_metrics(self) -> None: ) def format( - self, - *args: Union[Msg, Sequence[Msg]], + self, + *args: Union[Msg, Sequence[Msg]], ) -> Union[List[dict], str]: raise NotImplementedError( f"Model Wrapper [{type(self).__name__}] doesn't " f"implement the format method. 
Please implement it " - f"in the subclass." + f"in the subclass.", ) @@ -107,9 +111,9 @@ class YiChatWrapper(YiWrapperBase): model_type: str = "yi_chat" def __call__( - self, - messages: list, - **kwargs: Any, + self, + messages: list, + **kwargs: Any, ) -> ModelResponse: """Processes a list of messages and makes a request to the Yi API.""" # Prepare keyword arguments @@ -119,12 +123,12 @@ def __call__( if not isinstance(messages, list): raise ValueError( f"Yi `messages` field expected type `list`, " - f"got `{type(messages)}` instead." + f"got `{type(messages)}` instead.", ) if not all("role" in msg and "content" in msg for msg in messages): raise ValueError( "Each message in the 'messages' list must contain a 'role' " - "and 'content' key for Yi API." + "and 'content' key for Yi API.", ) # Forward to generate response @@ -154,8 +158,8 @@ def __call__( ) def format( - self, - *args: Union[Msg, Sequence[Msg]], + self, + *args: Union[Msg, Sequence[Msg]], ) -> List[dict]: """Format the input messages for the Yi Chat API.""" messages = [] @@ -167,14 +171,14 @@ def format( { "role": arg.role, "content": str(arg.content), - } + }, ) elif isinstance(arg, list): messages.extend(self.format(*arg)) else: raise TypeError( f"The input should be a Msg object or a list " - f"of Msg objects, got {type(arg)}." + f"of Msg objects, got {type(arg)}.", ) return messages From 71af20bb3e37e4d114cf5789cd00ac13a2443354 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Tue, 30 Jul 2024 14:40:02 +0800 Subject: [PATCH 6/9] Update __init__.py --- src/agentscope/models/__init__.py | 376 ++++++++++++++++-------------- 1 file changed, 207 insertions(+), 169 deletions(-) diff --git a/src/agentscope/models/__init__.py b/src/agentscope/models/__init__.py index ed217c6a5..ba2dd4073 100644 --- a/src/agentscope/models/__init__.py +++ b/src/agentscope/models/__init__.py @@ -1,192 +1,230 @@ # -*- coding: utf-8 -*- -""" Import modules in models package.""" -import json -from typing import Union, Type - -from loguru import logger - -from .config import _ModelConfig -from .model import ModelWrapperBase -from .response import ModelResponse -from .post_model import ( - PostAPIModelWrapperBase, - PostAPIChatWrapper, -) -from .openai_model import ( - OpenAIWrapperBase, - OpenAIChatWrapper, - OpenAIDALLEWrapper, - OpenAIEmbeddingWrapper, -) -from .dashscope_model import ( - DashScopeChatWrapper, - DashScopeImageSynthesisWrapper, - DashScopeTextEmbeddingWrapper, - DashScopeMultiModalWrapper, -) -from .ollama_model import ( - OllamaChatWrapper, - OllamaEmbeddingWrapper, - OllamaGenerationWrapper, -) -from .gemini_model import ( - GeminiChatWrapper, - GeminiEmbeddingWrapper, -) -from .zhipu_model import ( - ZhipuAIChatWrapper, - ZhipuAIEmbeddingWrapper, -) -from .litellm_model import ( - LiteLLMChatWrapper, -) -from .yi_model import ( - YiChatWrapper, -) - -__all__ = [ - "ModelWrapperBase", - "ModelResponse", - "PostAPIModelWrapperBase", - "PostAPIChatWrapper", - "OpenAIWrapperBase", - "OpenAIChatWrapper", - "OpenAIDALLEWrapper", - "OpenAIEmbeddingWrapper", - "DashScopeChatWrapper", - "DashScopeImageSynthesisWrapper", - "DashScopeTextEmbeddingWrapper", - "DashScopeMultiModalWrapper", - "OllamaChatWrapper", - "OllamaEmbeddingWrapper", - "OllamaGenerationWrapper", - "GeminiChatWrapper", - "GeminiEmbeddingWrapper", - "ZhipuAIChatWrapper", - "ZhipuAIEmbeddingWrapper", - "LiteLLMChatWrapper", - "load_model_by_config_name", - "load_config_by_name", - "read_model_configs", - "clear_model_configs", 
- "YiChatWrapper", -] - -_MODEL_CONFIGS: dict[str, dict] = {} - - -def _get_model_wrapper(model_type: str) -> Type[ModelWrapperBase]: - """Get the specific type of model wrapper - - Args: - model_type (`str`): The model type name. - - Returns: - `Type[ModelWrapperBase]`: The corresponding model wrapper class. - """ - wrapper = ModelWrapperBase.get_wrapper(model_type=model_type) - if wrapper is None: - logger.warning( - f"Unsupported model_type [{model_type}]," - "use PostApiModelWrapper instead.", - ) - return PostAPIModelWrapperBase - return wrapper - - -def load_config_by_name(config_name: str) -> Union[dict, None]: - """Load the model config by name, and return the config dict.""" - return _MODEL_CONFIGS.get(config_name, None) - +"""Model wrapper for Yi models""" +from abc import ABC +import logging +from typing import Union, Sequence, Any, List + +from openai import OpenAI + +from .model import ModelWrapperBase, ModelResponse +from ..message import Msg + +logger = logging.getLogger(__name__) + +_DEFAULT_API_BUDGET = float("inf") + +try: + import openai +except ImportError: + openai = None + + +class YiWrapperBase(ModelWrapperBase, ABC): + """The model wrapper for Yi API.""" + + def __init__( + self, + config_name: str, + model_name: str = None, + api_key: str = None, + region: str = "China", # "China" or "International" + client_args: dict = None, + generate_args: dict = None, + budget: float = _DEFAULT_API_BUDGET, + **kwargs: Any, + ) -> None: + """Initialize the Yi client.""" + if model_name is None: + model_name = config_name + logger.warning("model_name is not set, use config_name instead.") + + super().__init__(config_name=config_name) + + if openai is None: + raise ImportError( + "Cannot find openai package in current python environment.", + ) -def load_model_by_config_name(config_name: str) -> ModelWrapperBase: - """Load the model by config name, and return the model wrapper.""" - if len(_MODEL_CONFIGS) == 0: - raise ValueError( - "No model configs loaded, please call " - "`read_model_configs` first.", - ) + self.model_name = model_name + self.generate_args = generate_args or {} - # Find model config by name - if config_name not in _MODEL_CONFIGS: - raise ValueError( - f"Cannot find [{config_name}] in loaded configurations.", + base_url = ( + "https://api.lingyiwanwu.com/v1" + if region == "China" + else "https://api.01.ai/v1" ) - config = _MODEL_CONFIGS.get(config_name, None) + self.base_url = base_url - if config is None: - raise ValueError( - f"Cannot find [{config_name}] in loaded configurations.", + if region == "International" and model_name not in ["yi-large"]: + logger.warning( + "Model %s may not be available for overseas region. " + "Only yi-large is confirmed to work. 
More information can be " + "found here https://platform.01.ai/docs#models-and-pricing", + model_name, + ) + self.client = OpenAI( + api_key=api_key, + base_url=self.base_url, + **(client_args or {}), ) - model_type = config.model_type + # Set the max length of Yi model (this might need to be adjusted) + self.max_length = 4096 # Placeholder value, adjust as needed - kwargs = {k: v for k, v in config.items() if k != "model_type"} + # Set monitor accordingly + self._register_budget(model_name, budget) + self._register_default_metrics() - return _get_model_wrapper(model_type=model_type)(**kwargs) + def _register_default_metrics(self) -> None: + # Set monitor accordingly + self.monitor.register( + self._metric("call_counter"), + metric_unit="times", + ) + self.monitor.register( + self._metric("prompt_tokens"), + metric_unit="token", + ) + self.monitor.register( + self._metric("completion_tokens"), + metric_unit="token", + ) + self.monitor.register( + self._metric("total_tokens"), + metric_unit="token", + ) + def format( + self, + *args: Union[Msg, Sequence[Msg]], + ) -> Union[List[dict], str]: + raise NotImplementedError( + f"Model Wrapper [{type(self).__name__}] doesn't " + f"implement the format method. Please implement it " + f"in the subclass.", + ) -def clear_model_configs() -> None: - """Clear the loaded model configs.""" - _MODEL_CONFIGS.clear() +class YiChatWrapper(YiWrapperBase): + """The model wrapper for Yi's chat API.""" -def read_model_configs( - configs: Union[dict, str, list], - clear_existing: bool = False, -) -> None: - """read model configs from a path or a list of dicts. + model_type: str = "yi_chat" - Args: - configs (`Union[str, list, dict]`): - The path of the model configs | a config dict | a list of model - configs. - clear_existing (`bool`, defaults to `False`): - Whether to clear the loaded model configs before reading. + def __call__( + self, + messages: list, + stream: bool = False, + **kwargs: Any, + ) -> ModelResponse: + """Processes a list of messages and makes a request to the Yi API.""" + # Prepare keyword arguments + kwargs = {**self.generate_args, **kwargs} - Returns: - `dict`: - The model configs. 
- """ - if clear_existing: - clear_model_configs() + # Checking messages + if not isinstance(messages, list): + raise ValueError( + f"Yi `messages` field expected type `list`, " + f"got `{type(messages)}` instead.", + ) + if not all("role" in msg and "content" in msg for msg in messages): + raise ValueError( + "Each message in the 'messages' list must contain a 'role' " + "and 'content' key for Yi API.", + ) - cfgs = None + # Forward to generate response + if stream: + response = self.client.chat.completions.create( + model=self.model_name, + messages=messages, + stream=True, + **kwargs, + ) + else: + response = self.client.chat.completions.create( + model=self.model_name, + messages=messages, + **kwargs, + ) - if isinstance(configs, str): - with open(configs, "r", encoding="utf-8") as f: - cfgs = json.load(f) + # Record the api invocation if needed + self._save_model_invocation( + arguments={ + "model": self.model_name, + "messages": messages, + **kwargs, + }, + response=response.model_dump(), + ) - if isinstance(configs, dict): - cfgs = [configs] + # Update monitor accordingly + if not stream: + self.update_monitor(call_counter=1, **response.usage.model_dump()) - if isinstance(configs, list): - if not all(isinstance(_, dict) for _ in configs): - raise ValueError( - "The model config unit should be a dict.", + # Return response + if stream: + # Handle the stream of responses + return ModelResponse( + text="", # Initialize with empty string for streaming + raw=response, # Return the stream object + ) + else: + return ModelResponse( + text=response.choices[0].message.content, + raw=response.model_dump(), ) - cfgs = configs - if cfgs is None: - raise TypeError( - f"Invalid type of model_configs, it could be a dict, a list of " - f"dicts, or a path to a json file (containing a dict or a list " - f"of dicts), but got {type(configs)}", + def format( + self, + *args: Union[Msg, Sequence[Msg]], + ) -> List[dict]: + """Format the input messages for the Yi Chat API.""" + input_msgs = [] + for arg in args: + if arg is None: + continue + if isinstance(arg, Msg): + input_msgs.append(arg) + elif isinstance(arg, list) and all( + isinstance(msg, Msg) for msg in arg + ): + input_msgs.extend(arg) + else: + raise TypeError( + f"The input should be a Msg object or a list " + f"of Msg objects, got {type(arg)}.", + ) + + messages = [] + + # record dialog history as a list of strings + dialogue = [] + for i, unit in enumerate(input_msgs): + if i == 0 and unit.role == "system": + # system prompt + messages.append( + { + "role": unit.role, + "content": str(unit.content), + }, + ) + else: + # Merge all messages into a dialogue history prompt + dialogue.append( + f"{unit.name}: {str(unit.content)}", + ) + + dialogue_history = "\n".join(dialogue) + + user_content_template = "## Dialogue History\n{dialogue_history}" + + messages.append( + { + "role": "user", + "content": user_content_template.format( + dialogue_history=dialogue_history, + ), + }, ) - format_configs = _ModelConfig.format_configs(configs=cfgs) - - # check if name is unique - for cfg in format_configs: - if cfg.config_name in _MODEL_CONFIGS: - logger.warning( - f"config_name [{cfg.config_name}] already exists.", - ) - continue - _MODEL_CONFIGS[cfg.config_name] = cfg - - # print the loaded model configs - logger.info( - "Load configs for model wrapper: {}", - ", ".join(_MODEL_CONFIGS.keys()), - ) + return messages From c344169b6584561031ef89543ad143f380c88643 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: 
Tue, 30 Jul 2024 14:40:53 +0800 Subject: [PATCH 7/9] Update __init__.py --- src/agentscope/models/__init__.py | 376 ++++++++++++++---------------- 1 file changed, 169 insertions(+), 207 deletions(-) diff --git a/src/agentscope/models/__init__.py b/src/agentscope/models/__init__.py index ba2dd4073..ed217c6a5 100644 --- a/src/agentscope/models/__init__.py +++ b/src/agentscope/models/__init__.py @@ -1,230 +1,192 @@ # -*- coding: utf-8 -*- -"""Model wrapper for Yi models""" -from abc import ABC -import logging -from typing import Union, Sequence, Any, List - -from openai import OpenAI - -from .model import ModelWrapperBase, ModelResponse -from ..message import Msg - -logger = logging.getLogger(__name__) - -_DEFAULT_API_BUDGET = float("inf") - -try: - import openai -except ImportError: - openai = None - - -class YiWrapperBase(ModelWrapperBase, ABC): - """The model wrapper for Yi API.""" - - def __init__( - self, - config_name: str, - model_name: str = None, - api_key: str = None, - region: str = "China", # "China" or "International" - client_args: dict = None, - generate_args: dict = None, - budget: float = _DEFAULT_API_BUDGET, - **kwargs: Any, - ) -> None: - """Initialize the Yi client.""" - if model_name is None: - model_name = config_name - logger.warning("model_name is not set, use config_name instead.") - - super().__init__(config_name=config_name) - - if openai is None: - raise ImportError( - "Cannot find openai package in current python environment.", - ) - - self.model_name = model_name - self.generate_args = generate_args or {} - - base_url = ( - "https://api.lingyiwanwu.com/v1" - if region == "China" - else "https://api.01.ai/v1" +""" Import modules in models package.""" +import json +from typing import Union, Type + +from loguru import logger + +from .config import _ModelConfig +from .model import ModelWrapperBase +from .response import ModelResponse +from .post_model import ( + PostAPIModelWrapperBase, + PostAPIChatWrapper, +) +from .openai_model import ( + OpenAIWrapperBase, + OpenAIChatWrapper, + OpenAIDALLEWrapper, + OpenAIEmbeddingWrapper, +) +from .dashscope_model import ( + DashScopeChatWrapper, + DashScopeImageSynthesisWrapper, + DashScopeTextEmbeddingWrapper, + DashScopeMultiModalWrapper, +) +from .ollama_model import ( + OllamaChatWrapper, + OllamaEmbeddingWrapper, + OllamaGenerationWrapper, +) +from .gemini_model import ( + GeminiChatWrapper, + GeminiEmbeddingWrapper, +) +from .zhipu_model import ( + ZhipuAIChatWrapper, + ZhipuAIEmbeddingWrapper, +) +from .litellm_model import ( + LiteLLMChatWrapper, +) +from .yi_model import ( + YiChatWrapper, +) + +__all__ = [ + "ModelWrapperBase", + "ModelResponse", + "PostAPIModelWrapperBase", + "PostAPIChatWrapper", + "OpenAIWrapperBase", + "OpenAIChatWrapper", + "OpenAIDALLEWrapper", + "OpenAIEmbeddingWrapper", + "DashScopeChatWrapper", + "DashScopeImageSynthesisWrapper", + "DashScopeTextEmbeddingWrapper", + "DashScopeMultiModalWrapper", + "OllamaChatWrapper", + "OllamaEmbeddingWrapper", + "OllamaGenerationWrapper", + "GeminiChatWrapper", + "GeminiEmbeddingWrapper", + "ZhipuAIChatWrapper", + "ZhipuAIEmbeddingWrapper", + "LiteLLMChatWrapper", + "load_model_by_config_name", + "load_config_by_name", + "read_model_configs", + "clear_model_configs", + "YiChatWrapper", +] + +_MODEL_CONFIGS: dict[str, dict] = {} + + +def _get_model_wrapper(model_type: str) -> Type[ModelWrapperBase]: + """Get the specific type of model wrapper + + Args: + model_type (`str`): The model type name. 
+ + Returns: + `Type[ModelWrapperBase]`: The corresponding model wrapper class. + """ + wrapper = ModelWrapperBase.get_wrapper(model_type=model_type) + if wrapper is None: + logger.warning( + f"Unsupported model_type [{model_type}]," + "use PostApiModelWrapper instead.", ) - self.base_url = base_url + return PostAPIModelWrapperBase + return wrapper - if region == "International" and model_name not in ["yi-large"]: - logger.warning( - "Model %s may not be available for overseas region. " - "Only yi-large is confirmed to work. More information can be " - "found here https://platform.01.ai/docs#models-and-pricing", - model_name, - ) - self.client = OpenAI( - api_key=api_key, - base_url=self.base_url, - **(client_args or {}), - ) - # Set the max length of Yi model (this might need to be adjusted) - self.max_length = 4096 # Placeholder value, adjust as needed +def load_config_by_name(config_name: str) -> Union[dict, None]: + """Load the model config by name, and return the config dict.""" + return _MODEL_CONFIGS.get(config_name, None) - # Set monitor accordingly - self._register_budget(model_name, budget) - self._register_default_metrics() - def _register_default_metrics(self) -> None: - # Set monitor accordingly - self.monitor.register( - self._metric("call_counter"), - metric_unit="times", - ) - self.monitor.register( - self._metric("prompt_tokens"), - metric_unit="token", +def load_model_by_config_name(config_name: str) -> ModelWrapperBase: + """Load the model by config name, and return the model wrapper.""" + if len(_MODEL_CONFIGS) == 0: + raise ValueError( + "No model configs loaded, please call " + "`read_model_configs` first.", ) - self.monitor.register( - self._metric("completion_tokens"), - metric_unit="token", - ) - self.monitor.register( - self._metric("total_tokens"), - metric_unit="token", + + # Find model config by name + if config_name not in _MODEL_CONFIGS: + raise ValueError( + f"Cannot find [{config_name}] in loaded configurations.", ) + config = _MODEL_CONFIGS.get(config_name, None) - def format( - self, - *args: Union[Msg, Sequence[Msg]], - ) -> Union[List[dict], str]: - raise NotImplementedError( - f"Model Wrapper [{type(self).__name__}] doesn't " - f"implement the format method. 
Please implement it " - f"in the subclass.", + if config is None: + raise ValueError( + f"Cannot find [{config_name}] in loaded configurations.", ) + model_type = config.model_type -class YiChatWrapper(YiWrapperBase): - """The model wrapper for Yi's chat API.""" + kwargs = {k: v for k, v in config.items() if k != "model_type"} - model_type: str = "yi_chat" + return _get_model_wrapper(model_type=model_type)(**kwargs) - def __call__( - self, - messages: list, - stream: bool = False, - **kwargs: Any, - ) -> ModelResponse: - """Processes a list of messages and makes a request to the Yi API.""" - # Prepare keyword arguments - kwargs = {**self.generate_args, **kwargs} - # Checking messages - if not isinstance(messages, list): - raise ValueError( - f"Yi `messages` field expected type `list`, " - f"got `{type(messages)}` instead.", - ) - if not all("role" in msg and "content" in msg for msg in messages): - raise ValueError( - "Each message in the 'messages' list must contain a 'role' " - "and 'content' key for Yi API.", - ) +def clear_model_configs() -> None: + """Clear the loaded model configs.""" + _MODEL_CONFIGS.clear() - # Forward to generate response - if stream: - response = self.client.chat.completions.create( - model=self.model_name, - messages=messages, - stream=True, - **kwargs, - ) - else: - response = self.client.chat.completions.create( - model=self.model_name, - messages=messages, - **kwargs, - ) - # Record the api invocation if needed - self._save_model_invocation( - arguments={ - "model": self.model_name, - "messages": messages, - **kwargs, - }, - response=response.model_dump(), - ) +def read_model_configs( + configs: Union[dict, str, list], + clear_existing: bool = False, +) -> None: + """read model configs from a path or a list of dicts. - # Update monitor accordingly - if not stream: - self.update_monitor(call_counter=1, **response.usage.model_dump()) + Args: + configs (`Union[str, list, dict]`): + The path of the model configs | a config dict | a list of model + configs. + clear_existing (`bool`, defaults to `False`): + Whether to clear the loaded model configs before reading. - # Return response - if stream: - # Handle the stream of responses - return ModelResponse( - text="", # Initialize with empty string for streaming - raw=response, # Return the stream object - ) - else: - return ModelResponse( - text=response.choices[0].message.content, - raw=response.model_dump(), + Returns: + `dict`: + The model configs. 
+ """ + if clear_existing: + clear_model_configs() + + cfgs = None + + if isinstance(configs, str): + with open(configs, "r", encoding="utf-8") as f: + cfgs = json.load(f) + + if isinstance(configs, dict): + cfgs = [configs] + + if isinstance(configs, list): + if not all(isinstance(_, dict) for _ in configs): + raise ValueError( + "The model config unit should be a dict.", ) + cfgs = configs - def format( - self, - *args: Union[Msg, Sequence[Msg]], - ) -> List[dict]: - """Format the input messages for the Yi Chat API.""" - input_msgs = [] - for arg in args: - if arg is None: - continue - if isinstance(arg, Msg): - input_msgs.append(arg) - elif isinstance(arg, list) and all( - isinstance(msg, Msg) for msg in arg - ): - input_msgs.extend(arg) - else: - raise TypeError( - f"The input should be a Msg object or a list " - f"of Msg objects, got {type(arg)}.", - ) - - messages = [] - - # record dialog history as a list of strings - dialogue = [] - for i, unit in enumerate(input_msgs): - if i == 0 and unit.role == "system": - # system prompt - messages.append( - { - "role": unit.role, - "content": str(unit.content), - }, - ) - else: - # Merge all messages into a dialogue history prompt - dialogue.append( - f"{unit.name}: {str(unit.content)}", - ) - - dialogue_history = "\n".join(dialogue) - - user_content_template = "## Dialogue History\n{dialogue_history}" - - messages.append( - { - "role": "user", - "content": user_content_template.format( - dialogue_history=dialogue_history, - ), - }, + if cfgs is None: + raise TypeError( + f"Invalid type of model_configs, it could be a dict, a list of " + f"dicts, or a path to a json file (containing a dict or a list " + f"of dicts), but got {type(configs)}", ) - return messages + format_configs = _ModelConfig.format_configs(configs=cfgs) + + # check if name is unique + for cfg in format_configs: + if cfg.config_name in _MODEL_CONFIGS: + logger.warning( + f"config_name [{cfg.config_name}] already exists.", + ) + continue + _MODEL_CONFIGS[cfg.config_name] = cfg + + # print the loaded model configs + logger.info( + "Load configs for model wrapper: {}", + ", ".join(_MODEL_CONFIGS.keys()), + ) From 3697b889455472f06fc29717f9f940b84d099ac4 Mon Sep 17 00:00:00 2001 From: Haijian Wang <130898843+Haijian06@users.noreply.github.com> Date: Tue, 30 Jul 2024 14:41:16 +0800 Subject: [PATCH 8/9] Update yi_model.py --- src/agentscope/models/yi_model.py | 88 +++++++++++++++++++++++-------- 1 file changed, 67 insertions(+), 21 deletions(-) diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py index ed89ce99b..ba2dd4073 100644 --- a/src/agentscope/models/yi_model.py +++ b/src/agentscope/models/yi_model.py @@ -27,7 +27,7 @@ def __init__( config_name: str, model_name: str = None, api_key: str = None, - region: str = "domestic", # "domestic" or "overseas" + region: str = "China", # "China" or "International" client_args: dict = None, generate_args: dict = None, budget: float = _DEFAULT_API_BUDGET, @@ -50,12 +50,12 @@ def __init__( base_url = ( "https://api.lingyiwanwu.com/v1" - if region == "domestic" + if region == "China" else "https://api.01.ai/v1" ) self.base_url = base_url - if region == "overseas" and model_name not in ["yi-large"]: + if region == "International" and model_name not in ["yi-large"]: logger.warning( "Model %s may not be available for overseas region. " "Only yi-large is confirmed to work. 
More information can be " @@ -113,6 +113,7 @@ class YiChatWrapper(YiWrapperBase): def __call__( self, messages: list, + stream: bool = False, **kwargs: Any, ) -> ModelResponse: """Processes a list of messages and makes a request to the Yi API.""" @@ -132,11 +133,19 @@ def __call__( ) # Forward to generate response - response = self.client.chat.completions.create( - model=self.model_name, - messages=messages, - **kwargs, - ) + if stream: + response = self.client.chat.completions.create( + model=self.model_name, + messages=messages, + stream=True, + **kwargs, + ) + else: + response = self.client.chat.completions.create( + model=self.model_name, + messages=messages, + **kwargs, + ) # Record the api invocation if needed self._save_model_invocation( @@ -149,36 +158,73 @@ def __call__( ) # Update monitor accordingly - self.update_monitor(call_counter=1, **response.usage.model_dump()) + if not stream: + self.update_monitor(call_counter=1, **response.usage.model_dump()) # Return response - return ModelResponse( - text=response.choices[0].message.content, - raw=response.model_dump(), - ) + if stream: + # Handle the stream of responses + return ModelResponse( + text="", # Initialize with empty string for streaming + raw=response, # Return the stream object + ) + else: + return ModelResponse( + text=response.choices[0].message.content, + raw=response.model_dump(), + ) def format( self, *args: Union[Msg, Sequence[Msg]], ) -> List[dict]: """Format the input messages for the Yi Chat API.""" - messages = [] + input_msgs = [] for arg in args: if arg is None: continue if isinstance(arg, Msg): + input_msgs.append(arg) + elif isinstance(arg, list) and all( + isinstance(msg, Msg) for msg in arg + ): + input_msgs.extend(arg) + else: + raise TypeError( + f"The input should be a Msg object or a list " + f"of Msg objects, got {type(arg)}.", + ) + + messages = [] + + # record dialog history as a list of strings + dialogue = [] + for i, unit in enumerate(input_msgs): + if i == 0 and unit.role == "system": + # system prompt messages.append( { - "role": arg.role, - "content": str(arg.content), + "role": unit.role, + "content": str(unit.content), }, ) - elif isinstance(arg, list): - messages.extend(self.format(*arg)) else: - raise TypeError( - f"The input should be a Msg object or a list " - f"of Msg objects, got {type(arg)}.", + # Merge all messages into a dialogue history prompt + dialogue.append( + f"{unit.name}: {str(unit.content)}", ) + dialogue_history = "\n".join(dialogue) + + user_content_template = "## Dialogue History\n{dialogue_history}" + + messages.append( + { + "role": "user", + "content": user_content_template.format( + dialogue_history=dialogue_history, + ), + }, + ) + return messages From 65cd11397b6879cefbcea3c7eed8f715a4d94841 Mon Sep 17 00:00:00 2001 From: DavdGao Date: Wed, 28 Aug 2024 17:15:07 +0800 Subject: [PATCH 9/9] Modify yi chat wrapper to fit current AgentScope --- README.md | 37 +- README_ZH.md | 37 +- .../en/source/tutorial/203-model.md | 9 +- .../zh_CN/source/tutorial/203-model.md | 1 + .../yi_chat_template.json | 11 + src/agentscope/models/openai_model.py | 4 +- src/agentscope/models/yi_model.py | 408 ++++++++++-------- 7 files changed, 292 insertions(+), 215 deletions(-) create mode 100644 examples/model_configs_template/yi_chat_template.json diff --git a/README.md b/README.md index aa3d8c247..b585ff747 100644 --- a/README.md +++ b/README.md @@ -109,24 +109,25 @@ applications in a centralized programming manner for streamlined development. 
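For orientation, the net effect of this series is a new `yi_chat` model type that plugs into AgentScope's standard config-loading flow. The sketch below shows how the wrapper would be configured and called; the config values are illustrative assumptions (the actual contents of the `yi_chat_template.json` created in PATCH 9/9 are not shown in this diff), and `{your_api_key}` is a placeholder.

```python
# Minimal usage sketch for the YiChatWrapper added by this series.
# Assumptions: an AgentScope install containing these patches; the
# config values below are illustrative, not taken from the template file.
import agentscope
from agentscope.message import Msg
from agentscope.models import load_model_by_config_name

agentscope.init(
    model_configs=[
        {
            "config_name": "yi_chat-yi-large",
            "model_type": "yi_chat",      # registered by YiChatWrapper
            "model_name": "yi-large",
            "api_key": "{your_api_key}",  # placeholder, not a real key
            "region": "China",            # "China" or "International"
            "generate_args": {"temperature": 0.3},
        },
    ],
)

model = load_model_by_config_name("yi_chat-yi-large")

# format() converts Msg objects into the `messages` list the Yi API
# expects; __call__ forwards it to chat.completions.create.
messages = model.format(
    Msg(name="system", content="You are a helpful assistant.", role="system"),
    Msg(name="user", content="What is AgentScope?", role="user"),
)
response = model(messages)
print(response.text)
```

Note that with the `format` implementation from PATCH 8/9, a leading system message is passed through as-is, while all remaining messages are merged into a single user message under a "## Dialogue History" header before the API call.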
AgentScope provides a list of `ModelWrapper` to support both local model services and third-party model APIs. -| API | Task | Model Wrapper | Configuration | Some Supported Models | -|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|-----------------------------------------------------------------| -| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) |[guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... | -| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... | -| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 | -| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... | -| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 | -| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... | -| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat | -| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... | -| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... | -| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... | -| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... | -| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... | -| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... | -| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... | -| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... | -| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - | +| API | Task | Model Wrapper | Configuration | Some Supported Models | +|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------| +| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... | +| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... | +| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 | +| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... | +| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 | +| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... | +| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat | +| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... | +| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... | +| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... | +| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... | +| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... | +| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... | +| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... | +| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... | +| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... | +| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - | **Supported Local Model Deployment** diff --git a/README_ZH.md b/README_ZH.md index 20337db41..f73306edc 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -100,24 +100,25 @@ AgentScope是一个创新的多智能体开发平台,旨在赋予开发人员 AgentScope提供了一系列`ModelWrapper`来支持本地模型服务和第三方模型API。 -| API | Task | Model Wrapper | Configuration | Some Supported Models | -|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|-----------------------------------------------| -| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) |[guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
diff --git a/README_ZH.md b/README_ZH.md
index 20337db41..f73306edc 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -100,24 +100,25 @@ AgentScope是一个创新的多智能体开发平台，旨在赋予开发人员
 
 AgentScope提供了一系列`ModelWrapper`来支持本地模型服务和第三方模型API。
 
-| API | Task | Model Wrapper | Configuration | Some Supported Models |
-|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|-----------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) |[guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... |
-| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
-| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
-| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
-| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
-| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
-| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
-| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
-| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
-| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... |
-| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... |
-| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... |
-| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
-| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
-| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... |
-| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
+| API | Task | Model Wrapper | Configuration | Some Supported Models |
+|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... |
+| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
+| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
+| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
+| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
+| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
+| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
+| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
+| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
+| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... |
+| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... |
+| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... |
+| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
+| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
+| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... |
+| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... |
+| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)<br>[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
 
 **支持的本地模型部署**
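The tutorial tables patched below pair each wrapper with its `model_type` string, and that string is the lookup key: AgentScope resolves a config's `"model_type"` field to the wrapper class registered under the same value. The following toy sketch mirrors that dispatch; the registry and `resolve` names are illustrative, not the library's internals.

```python
# Illustrative only: how a "model_type" string selects a wrapper class.
from typing import Dict, Type


class ModelWrapperBase:
    model_type: str = ""


class YiChatWrapper(ModelWrapperBase):
    model_type = "yi_chat"


# Registry keyed by each wrapper's `model_type` class attribute.
REGISTRY: Dict[str, Type[ModelWrapperBase]] = {
    cls.model_type: cls for cls in (YiChatWrapper,)
}


def resolve(config: dict) -> Type[ModelWrapperBase]:
    # A config's "model_type" field picks the wrapper class.
    return REGISTRY[config["model_type"]]


assert resolve({"model_type": "yi_chat"}) is YiChatWrapper
```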
diff --git a/docs/sphinx_doc/en/source/tutorial/203-model.md b/docs/sphinx_doc/en/source/tutorial/203-model.md
index 9ac18e62b..2aad86e1e 100644
--- a/docs/sphinx_doc/en/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/en/source/tutorial/203-model.md
@@ -74,7 +74,7 @@ In the current AgentScope, the supported `model_type` types, the corresponding
 
 | API | Task | Model Wrapper | `model_type` | Some Supported Models |
 |------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------------------|--------------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_chat"` | gpt-4, gpt-3.5-turbo, ... |
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_chat"` | gpt-4, gpt-3.5-turbo, ... |
 | | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_embedding"` | text-embedding-ada-002, ... |
 | | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | `"openai_dall_e"` | dall-e-2, dall-e-3 |
 | DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | `"dashscope_chat"` | qwen-plus, qwen-max, ... |
@@ -83,12 +83,13 @@ In the current AgentScope, the supported `model_type` types, the corresponding
 | | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | `"dashscope_multimodal"` | qwen-vl-plus, qwen-vl-max, qwen-audio-turbo, ... |
 | Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | `"gemini_chat"` | gemini-pro, ... |
 | | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | `"gemini_embedding"` | models/embedding-001, ... |
-| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | `"zhipuai_chat"` | glm4, ... |
-| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | `"zhipuai_embedding"` | embedding-2, ... |
+| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | `"zhipuai_chat"` | glm4, ... |
+| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | `"zhipuai_embedding"` | embedding-2, ... |
 | ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | `"ollama_chat"` | llama2, ... |
 | | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | `"ollama_embedding"` | llama2, ... |
 | | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | `"ollama_generate"` | llama2, ... |
-| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | `"litellm_chat"` | - |
+| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | `"litellm_chat"` | - |
+| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | `"yi_chat"` | yi-large, yi-medium, ... |
 | Post Request based API | - | [`PostAPIModelWrapperBase`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api"` | - |
 | | Chat | [`PostAPIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api_chat"` | meta-llama/Meta-Llama-3-8B-Instruct, ... |
 | | Image Synthesis | [`PostAPIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_dall_e` | - |
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
index 217a4ae14..dda8afe22 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
@@ -109,6 +109,7 @@ API如下：
 | | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | `"ollama_embedding"` | llama2, ... |
 | | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | `"ollama_generate"` | llama2, ... |
 | LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | `"litellm_chat"` | - |
+| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | `"yi_chat"` | yi-large, yi-medium, ... |
 | Post Request based API | - | [`PostAPIModelWrapperBase`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api"` | - |
 | | Chat | [`PostAPIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api_chat"` | meta-llama/Meta-Llama-3-8B-Instruct, ... |
 | | Image Synthesis | [`PostAPIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_dall_e` | - |
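The `yi_chat_template.json` added next carries exactly the fields that the rewritten `YiChatWrapper.__init__` (later in this patch) accepts, plus the `model_type` selector. A hedged sketch of how one such entry expands into constructor arguments; the file path and loading logic are illustrative:

```python
# Sketch: one entry of yi_chat_template.json expanded into __init__ kwargs.
import json

entry = json.loads("""
{
    "config_name": "yi_yi-large",
    "model_type": "yi_chat",
    "model_name": "yi-large",
    "api_key": "{your_api_key}",
    "temperature": 0.3,
    "top_p": 0.9,
    "max_tokens": 1000
}
""")

# "model_type" picks the wrapper class; the remaining fields become keyword
# arguments, i.e. YiChatWrapper(config_name=..., model_name=..., api_key=...,
# temperature=0.3, top_p=0.9, max_tokens=1000).
init_kwargs = {k: v for k, v in entry.items() if k != "model_type"}
print(init_kwargs)
```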
diff --git a/examples/model_configs_template/yi_chat_template.json b/examples/model_configs_template/yi_chat_template.json
new file mode 100644
index 000000000..cda4b4818
--- /dev/null
+++ b/examples/model_configs_template/yi_chat_template.json
@@ -0,0 +1,11 @@
+[
+    {
+        "config_name": "yi_yi-large",
+        "model_type": "yi_chat",
+        "model_name": "yi-large",
+        "api_key": "{your_api_key}",
+        "temperature": 0.3,
+        "top_p": 0.9,
+        "max_tokens": 1000
+    }
+]
\ No newline at end of file
diff --git a/src/agentscope/models/openai_model.py b/src/agentscope/models/openai_model.py
index 772b43c09..e25fc9061 100644
--- a/src/agentscope/models/openai_model.py
+++ b/src/agentscope/models/openai_model.py
@@ -188,7 +188,7 @@ def __init__(
 
     def __call__(
         self,
-        messages: list,
+        messages: list[dict],
         stream: Optional[bool] = None,
         **kwargs: Any,
     ) -> ModelResponse:
@@ -331,7 +331,7 @@ def _save_model_invocation_and_update_monitor(
             response=response,
         )
 
-        usage = response.get("usage")
+        usage = response.get("usage", None)
         if usage is not None:
             self.monitor.update_text_and_embedding_tokens(
                 model_name=self.model_name,
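The rewritten `yi_model.py` that follows drops the OpenAI SDK client in favor of plain `requests` calls against Yi's OpenAI-compatible endpoint, and its streaming branch parses server-sent events by hand. As a standalone illustration of that framing (the helper name and demo payloads below are illustrative, not part of the patch):

```python
# Standalone sketch of the SSE framing parsed by the streaming branch below.
# Each event line looks like: data: {"choices":[{"delta":{"content":"..."}}]}
# and the stream terminates with: data: [DONE]
import json


def parse_sse_lines(lines):
    """Yield the accumulated text from OpenAI-style delta chunks."""
    text = ""
    for raw in lines:
        line = raw.strip()
        if not line:
            continue
        payload = line.removeprefix("data: ")
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        delta = chunk["choices"][0]["delta"]
        text += delta.get("content", "")
        yield text


demo = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]
assert list(parse_sse_lines(demo))[-1] == "Hello"
```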
diff --git a/src/agentscope/models/yi_model.py b/src/agentscope/models/yi_model.py
index ba2dd4073..9d02dd17c 100644
--- a/src/agentscope/models/yi_model.py
+++ b/src/agentscope/models/yi_model.py
@@ -1,124 +1,112 @@
 # -*- coding: utf-8 -*-
 """Model wrapper for Yi models"""
-from abc import ABC
-import logging
-from typing import Union, Sequence, Any, List
-
-from openai import OpenAI
-
+import json
+from typing import (
+    List,
+    Union,
+    Sequence,
+    Optional,
+    Generator,
+)
+
+import requests
+
+from ._model_utils import (
+    _verify_text_content_in_openai_message_response,
+    _verify_text_content_in_openai_delta_response,
+)
 from .model import ModelWrapperBase, ModelResponse
 from ..message import Msg
 
-logger = logging.getLogger(__name__)
 
-_DEFAULT_API_BUDGET = float("inf")
+class YiChatWrapper(ModelWrapperBase):
+    """The model wrapper for Yi Chat API.
 
-try:
-    import openai
-except ImportError:
-    openai = None
+    Response:
+        - From https://platform.lingyiwanwu.com/docs
+
+        ```json
+        {
+            "id": "cmpl-ea89ae83",
+            "object": "chat.completion",
+            "created": 5785971,
+            "model": "yi-large-rag",
+            "usage": {
+                "completion_tokens": 113,
+                "prompt_tokens": 896,
+                "total_tokens": 1009
+            },
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": "Today in Los Angeles, the weather ...",
+                    },
+                    "finish_reason": "stop"
+                }
+            ]
+        }
+        ```
+    """
 
-class YiWrapperBase(ModelWrapperBase, ABC):
-    """The model wrapper for Yi API."""
+    model_type: str = "yi_chat"
 
     def __init__(
         self,
         config_name: str,
-        model_name: str = None,
-        api_key: str = None,
-        region: str = "China",  # "China" or "International"
-        client_args: dict = None,
-        generate_args: dict = None,
-        budget: float = _DEFAULT_API_BUDGET,
-        **kwargs: Any,
+        model_name: str,
+        api_key: str,
+        max_tokens: Optional[int] = None,
+        top_p: float = 0.9,
+        temperature: float = 0.3,
+        stream: bool = False,
     ) -> None:
-        """Initialize the Yi client."""
-        if model_name is None:
-            model_name = config_name
-            logger.warning("model_name is not set, use config_name instead.")
-
-        super().__init__(config_name=config_name)
-
-        if openai is None:
-            raise ImportError(
-                "Cannot find openai package in current python environment.",
+        """Initialize the Yi chat model wrapper.
+
+        Args:
+            config_name (`str`):
+                The name of the configuration to use.
+            model_name (`str`):
+                The name of the model to use, e.g. yi-large, yi-medium, etc.
+            api_key (`str`):
+                The API key for the Yi API.
+            max_tokens (`Optional[int]`, defaults to `None`):
+                The maximum number of tokens to generate, defaults to `None`.
+            top_p (`float`, defaults to `0.9`):
+                The randomness parameter in the range [0, 1].
+            temperature (`float`, defaults to `0.3`):
+                The temperature parameter in the range [0, 2].
+            stream (`bool`, defaults to `False`):
+                Whether to stream the response or not.
+        """
+
+        super().__init__(config_name, model_name)
+
+        if top_p > 1 or top_p < 0:
+            raise ValueError(
+                f"The `top_p` parameter must be in the range [0, 1], but got "
+                f"{top_p} instead.",
             )
 
-        self.model_name = model_name
-        self.generate_args = generate_args or {}
-
-        base_url = (
-            "https://api.lingyiwanwu.com/v1"
-            if region == "China"
-            else "https://api.01.ai/v1"
-        )
-        self.base_url = base_url
-
-        if region == "International" and model_name not in ["yi-large"]:
-            logger.warning(
-                "Model %s may not be available for overseas region. "
-                "Only yi-large is confirmed to work. More information can be "
-                "found here https://platform.01.ai/docs#models-and-pricing",
-                model_name,
+        if temperature < 0 or temperature > 2:
+            raise ValueError(
+                f"The `temperature` parameter must be in the range [0, 2], "
+                f"but got {temperature} instead.",
             )
 
-        self.client = OpenAI(
-            api_key=api_key,
-            base_url=self.base_url,
-            **(client_args or {}),
-        )
-
-        # Set the max length of Yi model (this might need to be adjusted)
-        self.max_length = 4096  # Placeholder value, adjust as needed
-
-        # Set monitor accordingly
-        self._register_budget(model_name, budget)
-        self._register_default_metrics()
-
-    def _register_default_metrics(self) -> None:
-        # Set monitor accordingly
-        self.monitor.register(
-            self._metric("call_counter"),
-            metric_unit="times",
-        )
-        self.monitor.register(
-            self._metric("prompt_tokens"),
-            metric_unit="token",
-        )
-        self.monitor.register(
-            self._metric("completion_tokens"),
-            metric_unit="token",
-        )
-        self.monitor.register(
-            self._metric("total_tokens"),
-            metric_unit="token",
-        )
-
-    def format(
-        self,
-        *args: Union[Msg, Sequence[Msg]],
-    ) -> Union[List[dict], str]:
-        raise NotImplementedError(
-            f"Model Wrapper [{type(self).__name__}] doesn't "
-            f"implement the format method. Please implement it "
-            f"in the subclass.",
-        )
-
-
-class YiChatWrapper(YiWrapperBase):
-    """The model wrapper for Yi's chat API."""
-
-    model_type: str = "yi_chat"
+        self.api_key = api_key
+        self.max_tokens = max_tokens
+        self.top_p = top_p
+        self.temperature = temperature
+        self.stream = stream
 
     def __call__(
         self,
-        messages: list,
-        stream: bool = False,
-        **kwargs: Any,
+        messages: list[dict],
+        stream: Optional[bool] = None,
     ) -> ModelResponse:
-        """Processes a list of messages and makes a request to the Yi API."""
-        # Prepare keyword arguments
-        kwargs = {**self.generate_args, **kwargs}
+        """Invoke the Yi Chat API by sending a list of messages."""
 
         # Checking messages
         if not isinstance(messages, list):
@@ -126,105 +114,179 @@ def __call__(
                 f"Yi `messages` field expected type `list`, "
                 f"got `{type(messages)}` instead.",
             )
+
         if not all("role" in msg and "content" in msg for msg in messages):
             raise ValueError(
                 "Each message in the 'messages' list must contain a 'role' "
                 "and 'content' key for Yi API.",
             )
 
-        # Forward to generate response
-        if stream:
-            response = self.client.chat.completions.create(
-                model=self.model_name,
-                messages=messages,
-                stream=True,
-                **kwargs,
-            )
-        else:
-            response = self.client.chat.completions.create(
-                model=self.model_name,
-                messages=messages,
-                **kwargs,
-            )
+        if stream is None:
+            stream = self.stream
 
-        # Record the api invocation if needed
-        self._save_model_invocation(
-            arguments={
+        # Forward to generate response
+        kwargs = {
+            "url": "https://api.lingyiwanwu.com/v1/chat/completions",
+            "json": {
                 "model": self.model_name,
                 "messages": messages,
-                **kwargs,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                "top_p": self.top_p,
+                "stream": stream,
             },
-            response=response.model_dump(),
-        )
+            "headers": {
+                "Authorization": f"Bearer {self.api_key}",
+                "Content-Type": "application/json",
+            },
+        }
 
-        # Update monitor accordingly
-        if not stream:
-            self.update_monitor(call_counter=1, **response.usage.model_dump())
+        response = requests.post(**kwargs)
+        response.raise_for_status()
 
-        # Return response
         if stream:
-            # Handle the stream of responses
+
+            def generator() -> Generator[str, None, None]:
+                text = ""
+                last_chunk = {}
+                for line in response.iter_lines():
+                    if line:
+                        line_str = line.decode("utf-8").strip()
+
+                        # Remove the prefix "data: " if it exists
+                        json_str = line_str.removeprefix("data: ")
+
+                        # The last response is "data: [DONE]"
+                        if json_str == "[DONE]":
+                            continue
+
+                        try:
+                            chunk = json.loads(json_str)
+                            if _verify_text_content_in_openai_delta_response(
+                                chunk,
+                            ):
+                                text += chunk["choices"][0]["delta"]["content"]
+                                yield text
+                                last_chunk = chunk
+
+                        except json.decoder.JSONDecodeError as e:
+                            raise json.decoder.JSONDecodeError(
+                                f"Invalid JSON: {json_str}",
+                                e.doc,
+                                e.pos,
+                            ) from e
+
+                # In Yi Chat API, the last valid chunk will save all the text
+                # in this message
+                self._save_model_invocation_and_update_monitor(
+                    kwargs,
+                    last_chunk,
+                )
+
             return ModelResponse(
-                text="",  # Initialize with empty string for streaming
-                raw=response,  # Return the stream object
+                stream=generator(),
             )
         else:
-            return ModelResponse(
-                text=response.choices[0].message.content,
-                raw=response.model_dump(),
+            response = response.json()
+            self._save_model_invocation_and_update_monitor(
+                kwargs,
+                response,
             )
 
+            # Re-use the openai response checking function
+            if _verify_text_content_in_openai_message_response(response):
+                return ModelResponse(
+                    text=response["choices"][0]["message"]["content"],
+                    raw=response,
+                )
+            else:
+                raise RuntimeError(
+                    f"Invalid response from Yi Chat API: {response}",
+                )
+
     def format(
         self,
         *args: Union[Msg, Sequence[Msg]],
     ) -> List[dict]:
-        """Format the input messages for the Yi Chat API."""
-        input_msgs = []
-        for arg in args:
-            if arg is None:
-                continue
-            if isinstance(arg, Msg):
-                input_msgs.append(arg)
-            elif isinstance(arg, list) and all(
-                isinstance(msg, Msg) for msg in arg
-            ):
-                input_msgs.extend(arg)
-            else:
-                raise TypeError(
-                    f"The input should be a Msg object or a list "
-                    f"of Msg objects, got {type(arg)}.",
-                )
+        """Format the messages into the required format of Yi Chat API.
 
-        messages = []
-
-        # record dialog history as a list of strings
-        dialogue = []
-        for i, unit in enumerate(input_msgs):
-            if i == 0 and unit.role == "system":
-                # system prompt
-                messages.append(
-                    {
-                        "role": unit.role,
-                        "content": str(unit.content),
-                    },
-                )
-            else:
-                # Merge all messages into a dialogue history prompt
-                dialogue.append(
-                    f"{unit.name}: {str(unit.content)}",
-                )
+        Note this strategy may not be suitable for all scenarios,
+        and developers are encouraged to implement their own prompt
+        engineering strategies.
 
-        dialogue_history = "\n".join(dialogue)
+        The following is an example:
 
-        user_content_template = "## Dialogue History\n{dialogue_history}"
+        .. code-block:: python
 
-        messages.append(
-            {
-                "role": "user",
-                "content": user_content_template.format(
-                    dialogue_history=dialogue_history,
-                ),
-            },
+            prompt1 = model.format(
+                Msg("system", "You're a helpful assistant", role="system"),
+                Msg("Bob", "Hi, how can I help you?", role="assistant"),
+                Msg("user", "What's the date today?", role="user")
+            )
+
+        The prompt will be as follows:
+
+        .. code-block:: python
+
+            # prompt1
+            [
+                {
+                    "role": "user",
+                    "content": (
+                        "You're a helpful assistant\\n"
+                        "\\n"
+                        "## Conversation History\\n"
+                        "Bob: Hi, how can I help you?\\n"
+                        "user: What's the date today?"
+                    )
+                }
+            ]
+
+        Args:
+            args (`Union[Msg, Sequence[Msg]]`):
+                The input arguments to be formatted, where each argument
+                should be a `Msg` object, or a list of `Msg` objects.
+                In distributed mode, placeholders are also allowed.
+
+        Returns:
+            `List[dict]`:
+                The formatted messages.
+        """
+
+        # TODO: Support Vision model
+        if self.model_name == "yi-vision":
+            raise NotImplementedError(
+                "Yi Vision model is not supported in the current version, "
+                "please format the messages manually.",
+            )
+
+        return ModelWrapperBase.format_for_common_chat_models(*args)
+
+    def _save_model_invocation_and_update_monitor(
+        self,
+        kwargs: dict,
+        response: dict,
+    ) -> None:
+        """Save model invocation and update the monitor accordingly.
+
+        Args:
+            kwargs (`dict`):
+                The keyword arguments used in model invocation
+            response (`dict`):
+                The response from model API
+        """
+        self._save_model_invocation(
+            arguments=kwargs,
+            response=response,
         )
 
-        return messages
+        usage = response.get("usage", None)
+        if usage is not None:
+            prompt_tokens = usage.get("prompt_tokens", 0)
+            completion_tokens = usage.get("completion_tokens", 0)
+
+            self.monitor.update_text_and_embedding_tokens(
+                model_name=self.model_name,
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+            )
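To close, an end-to-end sketch using the rewritten wrapper above: `format` merges `Msg` objects into the OpenAI-style messages list, and calling the wrapper returns a `ModelResponse`. The API key is a placeholder and network access to the Yi endpoint is assumed.

```python
# Hedged usage sketch for the YiChatWrapper defined in this patch.
from agentscope.message import Msg
from agentscope.models.yi_model import YiChatWrapper

model = YiChatWrapper(
    config_name="yi_yi-large",
    model_name="yi-large",
    api_key="{your_api_key}",  # placeholder, supply a real key
    stream=False,
)

# Merge Msg objects into the messages list expected by the Yi Chat API.
messages = model.format(
    Msg("system", "You're a helpful assistant", role="system"),
    Msg("user", "What's the date today?", role="user"),
)

# Invoke the API; with stream=False the reply text is in `response.text`.
response = model(messages)
print(response.text)
```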