From 812b7f9666b589243789591681ce655db98d52f2 Mon Sep 17 00:00:00 2001
From: levscaut <57213911+levscaut@users.noreply.github.com>
Date: Fri, 12 Apr 2024 10:53:29 -0400
Subject: [PATCH] add support for Anthropic Claude function call (#2311)

* add support for function call

* clear pip install output

* add convert function from `tools` to `functions`

* fix empty user input error (temporary)

---
 .../non-openai-models/cloud-anthropic.ipynb   | 232 ++++++++++++++----
 1 file changed, 185 insertions(+), 47 deletions(-)

diff --git a/website/docs/topics/non-openai-models/cloud-anthropic.ipynb b/website/docs/topics/non-openai-models/cloud-anthropic.ipynb
index 4d15a0e06d9..ec8793be4bc 100644
--- a/website/docs/topics/non-openai-models/cloud-anthropic.ipynb
+++ b/website/docs/topics/non-openai-models/cloud-anthropic.ipynb
@@ -1,7 +1,6 @@
 {
  "cells": [
   {
-   "attachments": {},
    "cell_type": "markdown",
    "metadata": {
     "slideshow": {
@@ -14,7 +13,9 @@
     "In this notebook, we demonstrate how to use the Anthropic Claude model for AgentChat.\n",
     "\n",
     "## Requirements\n",
-    "To use Anthropic Claude with AutoGen, first you need to install the `pyautogen` and `anthropic` package.\n"
+    "To use Anthropic Claude with AutoGen, first you need to install the `pyautogen` and `anthropic` packages.\n",
+    "\n",
+    "To try out the function call feature of the Claude model, you need to install `anthropic>=0.23.1`.\n"
    ]
   },
   {
@@ -23,24 +24,34 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install pyautogen anthropic"
+    "# !pip install pyautogen\n",
+    "!pip install \"anthropic>=0.23.1\""
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
    "source": [
     "import inspect\n",
+    "import json\n",
     "from typing import Any, Dict, List, Union\n",
     "\n",
     "from anthropic import Anthropic\n",
+    "from anthropic import __version__ as anthropic_version\n",
     "from anthropic.types import Completion, Message\n",
+    "from openai.types.chat.chat_completion import ChatCompletionMessage\n",
+    "from typing_extensions import Annotated\n",
     "\n",
     "import autogen\n",
     "from autogen import AssistantAgent, UserProxyAgent\n",
-    "from autogen.oai.openai_utils import OAI_PRICE1K"
+    "\n",
+    "TOOL_ENABLED = tuple(int(x) for x in anthropic_version.split(\".\")[:3]) >= (0, 23, 1)  # numeric, not lexicographic, comparison\n",
+    "if TOOL_ENABLED:\n",
+    "    from anthropic.types.beta.tools import ToolsBetaMessage\n",
+    "else:\n",
+    "    ToolsBetaMessage = object"
    ]
   },
   {
@@ -120,7 +131,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 11,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -132,39 +143,79 @@
     "        filter_dict = {k: v for k, v in config.items() if k in anthropic_kwargs}\n",
     "        self._client = Anthropic(**filter_dict)\n",
     "\n",
-    "    def message_retrieval(self, response: Message) -> Union[List[str], List]:\n",
+    "        self._last_tooluse_status = {}\n",
+    "\n",
+    "    def message_retrieval(\n",
+    "        self, response: Union[Message, ToolsBetaMessage]\n",
+    "    ) -> Union[List[str], List[ChatCompletionMessage]]:\n",
     "        \"\"\"Retrieve the messages from the response.\"\"\"\n",
-    "        choices = response.content\n",
-    "        if isinstance(response, Message):\n",
-    "            return [choice.text for choice in choices]  # type: ignore [union-attr]\n",
+    "        messages = response.content\n",
+    "        if len(messages) == 0:\n",
+    "            return [None]\n",
+    "        res = []\n",
+    "        if TOOL_ENABLED:\n",
+    "            for choice in messages:\n",
+    "                if choice.type == \"tool_use\":\n",
+    "                    res.insert(0, self.response_to_openai_message(choice))\n",
+    "                    self._last_tooluse_status[\"tool_use\"] = choice.model_dump()\n",
+    "                else:\n",
+    "                    res.append(choice.text)\n",
+    "                    self._last_tooluse_status[\"think\"] = choice.text\n",
+    "\n",
+    "        return res\n",
     "\n",
-    "        # claude python SDK and API not yet support function calls\n",
+    "        else:\n",
+    "            # Pre-tool-use SDK versions return plain text content blocks only.\n",
+    "            return [  # type: ignore [return-value]\n",
+    "                choice.text for choice in messages\n",
+    "            ]\n",
     "\n",
     "    def create(self, params: Dict[str, Any]) -> Completion:\n",
-    "        \"\"\"Create a completion for a given config using openai's client.\n",
+    "        \"\"\"Create a completion for a given config.\n",
     "\n",
     "        Args:\n",
-    "            client: The openai client.\n",
     "            params: The params for the completion.\n",
     "\n",
     "        Returns:\n",
     "            The completion.\n",
     "        \"\"\"\n",
-    "        if \"messages\" in params:\n",
-    "            raw_contents = params[\"messages\"]\n",
-    "            if raw_contents[0][\"role\"] == \"system\":\n",
-    "                system_message = raw_contents[0][\"content\"]\n",
-    "                raw_contents = raw_contents[1:]\n",
-    "                params[\"messages\"] = raw_contents\n",
-    "            params[\"system\"] = system_message\n",
-    "            completions: Completion = self._client.messages  # type: ignore [attr-defined]\n",
+    "        if \"tools\" in params:\n",
+    "            converted_functions = self.convert_tools_to_functions(params[\"tools\"])\n",
+    "            params[\"functions\"] = params.get(\"functions\", []) + converted_functions\n",
+    "\n",
+    "        raw_contents = params[\"messages\"]\n",
+    "        processed_messages = []\n",
+    "        for message in raw_contents:\n",
+    "\n",
+    "            if message[\"role\"] == \"system\":\n",
+    "                params[\"system\"] = message[\"content\"]\n",
+    "            elif message[\"role\"] == \"function\":\n",
+    "                processed_messages.append(self.return_function_call_result(message[\"content\"]))\n",
+    "            elif \"function_call\" in message:\n",
+    "                processed_messages.append(self.restore_last_tooluse_status())\n",
+    "            elif message[\"content\"] == \"\":\n",
+    "                # Temporary workaround: the Anthropic API rejects empty message content.\n",
+    "                message[\"content\"] = \"I'm done. Please send TERMINATE\"\n",
+    "                processed_messages.append(message)\n",
+    "            else:\n",
+    "                processed_messages.append(message)\n",
+    "\n",
+    "        params[\"messages\"] = processed_messages\n",
+    "\n",
+    "        if TOOL_ENABLED and \"functions\" in params:\n",
+    "            completions: Completion = self._client.beta.tools.messages\n",
     "        else:\n",
-    "            completions: Completion = self._client.completions\n",
+    "            completions: Completion = self._client.messages  # type: ignore [attr-defined]\n",
     "\n",
     "        # Streaming is not yet supported\n",
     "        params = params.copy()\n",
     "        params[\"stream\"] = False\n",
     "        params.pop(\"model_client_cls\")\n",
+    "        params[\"max_tokens\"] = params.get(\"max_tokens\", 4096)\n",
+    "        if \"functions\" in params:\n",
+    "            tools_configs = params.pop(\"functions\")\n",
+    "            tools_configs = [self.openai_func_to_anthropic(tool) for tool in tools_configs]\n",
+    "            params[\"tools\"] = tools_configs\n",
     "        response = completions.create(**params)\n",
     "\n",
     "        return response\n",
@@ -185,6 +236,40 @@
     "\n",
     "        return total\n",
     "\n",
+    "    def response_to_openai_message(self, response) -> ChatCompletionMessage:\n",
+    "        dict_response = response.model_dump()\n",
+    "        return ChatCompletionMessage(\n",
+    "            content=None,\n",
+    "            role=\"assistant\",\n",
+    "            function_call={\"name\": dict_response[\"name\"], \"arguments\": json.dumps(dict_response[\"input\"])},\n",
+    "        )\n",
+    "\n",
+    "    def restore_last_tooluse_status(self) -> Dict:\n",
+    "        cached_content = []\n",
+    "        if \"think\" in self._last_tooluse_status:\n",
+    "            cached_content.append({\"type\": \"text\", \"text\": self._last_tooluse_status[\"think\"]})\n",
+    "        cached_content.append(self._last_tooluse_status[\"tool_use\"])\n",
+    "        res = {\"role\": \"assistant\", \"content\": cached_content}\n",
+    "        return res\n",
+    "\n",
+    "    def return_function_call_result(self, result: str) -> Dict:\n",
+    "        return {\n",
+    "            \"role\": \"user\",\n",
+    "            \"content\": [\n",
+    "                {\n",
+    "                    \"type\": \"tool_result\",\n",
+    "                    \"tool_use_id\": self._last_tooluse_status[\"tool_use\"][\"id\"],\n",
+    "                    \"content\": result,\n",
+    "                }\n",
+    "            ],\n",
+    "        }\n",
+    "\n",
+    "    @staticmethod\n",
+    "    def openai_func_to_anthropic(openai_func: dict) -> dict:\n",
+    "        res = openai_func.copy()\n",
+    "        res[\"input_schema\"] = res.pop(\"parameters\")\n",
+    "        return res\n",
+    "\n",
     "    @staticmethod\n",
     "    def get_usage(response: Completion) -> Dict:\n",
     "        return {\n",
@@ -195,7 +280,16 @@
     "            ),\n",
     "            \"cost\": response.cost if hasattr(response, \"cost\") else 0,\n",
     "            \"model\": response.model,\n",
-    "        }"
+    "        }\n",
+    "\n",
+    "    @staticmethod\n",
+    "    def convert_tools_to_functions(tools: List) -> List:\n",
+    "        functions = []\n",
+    "        for tool in tools:\n",
+    "            if tool.get(\"type\") == \"function\" and \"function\" in tool:\n",
+    "                functions.append(tool[\"function\"])\n",
+    "\n",
+    "        return functions"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -211,7 +305,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -220,7 +314,7 @@
     "import os\n",
     "\n",
     "config_list_claude = [\n",
     "    {\n",
     "        # Choose your model name.\n",
-    "        \"model\": \"claude-3-opus-20240229\",\n",
+    "        \"model\": \"claude-3-sonnet-20240229\",\n",
     "        # You need to provide your API key here.\n",
     "        \"api_key\": os.getenv(\"ANTHROPIC_API_KEY\"),\n",
     "        \"base_url\": \"https://api.anthropic.com\",\n",
@@ -237,22 +331,19 @@
    "source": [
     "## Construct Agents\n",
     "\n",
-    "Construct a simple conversation between a User proxy and an ConversableAgent based on Claude-3 model.\n",
-    "\n",
-    "\n",
-    "`max_tokens` argument is mandatory in the `llm_config`."
+ "Construct a simple conversation between a User proxy and an ConversableAgent based on Claude-3 model.\n" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[autogen.oai.client: 04-04 18:06:52] {418} INFO - Detected custom model client in config: AnthropicClient, model client can not be used until register_model_client is called.\n" + "[autogen.oai.client: 04-08 22:15:59] {419} INFO - Detected custom model client in config: AnthropicClient, model client can not be used until register_model_client is called.\n" ] } ], @@ -261,19 +352,49 @@ " \"assistant\",\n", " llm_config={\n", " \"config_list\": config_list_claude,\n", - " \"max_tokens\": 100,\n", " },\n", - " system_message=\"\"\"\n", - " You are an AI cat based on the AI model you used.\n", - " Anyone ask you who you are, just introduce yourself.\n", - " \"\"\",\n", ")\n", + "\n", "user_proxy = UserProxyAgent(\n", " \"user_proxy\",\n", - " code_execution_config=False,\n", + " human_input_mode=\"NEVER\",\n", + " code_execution_config={\n", + " \"work_dir\": \"coding\",\n", + " \"use_docker\": False,\n", + " },\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " max_consecutive_auto_reply=1,\n", ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Function Call in Latest Anthropic API \n", + "Anthropic just announced that tool use is now in public beta in the Anthropic API. To use this feature, please install `anthropic>=0.23.1`." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[autogen.oai.client: 04-08 22:15:59] {419} INFO - Detected custom model client in config: AnthropicClient, model client can not be used until register_model_client is called.\n" + ] + } + ], + "source": [ + "@user_proxy.register_for_execution()\n", + "@assistant.register_for_llm(name=\"get_weather\", description=\"Get the current weather in a given location.\")\n", + "def preprocess(location: Annotated[str, \"The city and state, e.g. 
+    "def preprocess(location: Annotated[str, \"The city and state, e.g. Toronto, ON.\"]) -> str:\n",
+    "    return \"Absolutely cloudy and rainy\""
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
@@ -283,7 +404,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -292,21 +413,38 @@
     "assistant.register_model_client(model_client_cls=AnthropicClient)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 16,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
+      "user_proxy (to assistant):\n",
+      "\n",
+      "What's the weather in Toronto?\n",
+      "\n",
+      "--------------------------------------------------------------------------------\n",
+      "assistant (to user_proxy):\n",
+      "\n",
+      "***** Suggested function call: get_weather *****\n",
+      "Arguments: \n",
+      "{\"location\": \"Toronto, ON\"}\n",
+      "************************************************\n",
+      "\n",
+      "--------------------------------------------------------------------------------\n",
+      "\n",
+      ">>>>>>>> EXECUTING FUNCTION get_weather...\n",
+      "user_proxy (to assistant):\n",
       "\n",
-      "Who are you?\n",
+      "***** Response from calling function (get_weather) *****\n",
+      "Absolutely cloudy and rainy\n",
+      "********************************************************\n",
       "\n",
       "--------------------------------------------------------------------------------\n",
-      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
+      "assistant (to user_proxy):\n",
       "\n",
-      "*meows* Hello there! I'm Claude, an AI assistant created by Anthropic. I'm not a real cat, but rather an artificial intelligence that has been trained to engage in conversation and help with various tasks. It's a pleasure to meet you! Let me know if there is anything I can assist you with.\n",
+      "The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.\n",
       "\n",
       "--------------------------------------------------------------------------------\n"
      ]
     },
     {
@@ -314,10 +452,10 @@
      "data": {
       "text/plain": [
-       "ChatResult(chat_id=None, chat_history=[{'content': 'Who are you?', 'role': 'assistant'}, {'content': \"*meows* Hello there! I'm Claude, an AI assistant created by Anthropic. I'm not a real cat, but rather an artificial intelligence that has been trained to engage in conversation and help with various tasks. It's a pleasure to meet you! Let me know if there is anything I can assist you with.\", 'role': 'user'}], summary=\"*meows* Hello there! I'm Claude, an AI assistant created by Anthropic. I'm not a real cat, but rather an artificial intelligence that has been trained to engage in conversation and help with various tasks. It's a pleasure to meet you! Let me know if there is anything I can assist you with.\", cost=({'total_cost': 0.0058200000000000005, 'claude-3-opus-20240229': {'cost': 0.0058200000000000005, 'prompt_tokens': 38, 'completion_tokens': 70, 'total_tokens': 108}}, {'total_cost': 0.0058200000000000005, 'claude-3-opus-20240229': {'cost': 0.0058200000000000005, 'prompt_tokens': 38, 'completion_tokens': 70, 'total_tokens': 108}}), human_input=['exit'])"
+       "ChatResult(chat_id=None, chat_history=[{'content': \"What's the weather in Toronto?\", 'role': 'assistant'}, {'function_call': {'arguments': '{\"location\": \"Toronto, ON\"}', 'name': 'get_weather'}, 'content': None, 'role': 'assistant'}, {'content': 'Absolutely cloudy and rainy', 'name': 'get_weather', 'role': 'function'}, {'content': 'The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.', 'role': 'user'}], summary='The tool returned that the current weather in Toronto, ON is absolutely cloudy and rainy.', cost=({'total_cost': 0.030494999999999998, 'claude-3-sonnet-20240229': {'cost': 0.030494999999999998, 'prompt_tokens': 1533, 'completion_tokens': 100, 'total_tokens': 1633}}, {'total_cost': 0}), human_input=[])"
       ]
      },
-     "execution_count": 7,
+     "execution_count": 16,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
     "user_proxy.initiate_chat(\n",
     "    assistant,\n",
-    "    message=\"Who are you?\",\n",
+    "    message=\"What's the weather in Toronto?\",\n",
     ")"
    ]
   }
  ],
@@ -352,7 +490,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.17"
+   "version": "3.9.7"
  },
  "vscode": {
   "interpreter": {