From be032827742a91c6bd5ec71d83bec4704086eda9 Mon Sep 17 00:00:00 2001
From: Charles Packer
Date: Mon, 4 Dec 2023 12:49:08 -0800
Subject: [PATCH] Update autogen.md to include Azure config example + patch for
 `pyautogen>=0.2.0` (#555)

* Update autogen.md

* in groupchat example add an azure elif

* fixed missing azure mappings + corrected the gpt-4-turbo one

* Updated MemGPT AutoGen agent to take credentials and store them in the config (allows users to use memgpt+autogen without running memgpt configure), also patched api_base kwarg for autogen >=v0.2

* add note about 0.2 testing

* added overview to autogen integration page

* default examples to openai, sync config header between the two main examples, change speaker mode to round-robin in 2-way chat to suppress warning

* sync config header on last example (not used in docs)

* refactor to make sure we use existing config when writing out extra credentials

* fixed bug in local LLM where we need to comment out api_type (for pyautogen>=0.2.0)
---
 docs/autogen.md                            | 105 +++++++++++++++++++--
 memgpt/autogen/examples/agent_autoreply.py |  87 +++++++++++++----
 memgpt/autogen/examples/agent_docs.py      |  75 ++++++++++++---
 memgpt/autogen/examples/agent_groupchat.py |  68 +++++++++++--
 memgpt/autogen/memgpt_agent.py             |  32 +++++--
 memgpt/openai_tools.py                     |   9 ++
 6 files changed, 325 insertions(+), 51 deletions(-)

diff --git a/docs/autogen.md b/docs/autogen.md
index 60e5105b71..1d4d24cfd0 100644
--- a/docs/autogen.md
+++ b/docs/autogen.md
@@ -4,11 +4,61 @@
 You can also check the [GitHub discussion page](https://github.com/cpacker/MemGPT/discussions/65), but the Discord server is the official support channel and is monitored more actively.
 
-[examples/agent_groupchat.py](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/agent_groupchat.py) contains an example of a groupchat where one of the agents is powered by MemGPT.
+!!! warning "Tested with `pyautogen` v0.2.0"
 
-If you are using OpenAI, you can also run it using the [example notebook](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/memgpt_coder_autogen.ipynb).
+    The MemGPT+AutoGen integration was last tested using AutoGen version v0.2.0.
+
+    If you are having issues, please first try installing the specific version of AutoGen using `pip install pyautogen==0.2.0`.
+
+## Overview
 
-In the next section, we detail how to set up MemGPT and AutoGen to run with local LLMs.
+MemGPT includes an AutoGen agent class ([MemGPTAgent](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/memgpt_agent.py)) that mimics the interface of AutoGen's [ConversableAgent](https://microsoft.github.io/autogen/docs/reference/agentchat/conversable_agent#conversableagent-objects), allowing you to plug MemGPT into the AutoGen framework.
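+
+Because the class mirrors `ConversableAgent`, a MemGPT agent can be dropped in anywhere a regular AutoGen agent fits. As a quick illustrative sketch (not part of this patch; it assumes `memgpt_autogen_agent` has been created as shown below, and uses AutoGen's standard `initiate_chat` entry point):
+```python
+import autogen
+
+# a plain AutoGen user proxy; "NEVER" means no human input is requested
+user_proxy = autogen.UserProxyAgent(
+    name="User_proxy",
+    human_input_mode="NEVER",
+    code_execution_config=False,
+)
+
+# the MemGPT-backed agent behaves like any other ConversableAgent here
+user_proxy.initiate_chat(memgpt_autogen_agent, message="Hello, who are you?")
+```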
+
+To create a MemGPT AutoGen agent for use in an AutoGen script, you can use the `create_memgpt_autogen_agent_from_config` constructor:
+```python
+from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config
+
+# create a config for the MemGPT AutoGen agent
+config_list_memgpt = [
+    {
+        "model": "gpt-4",
+        "context_window": 8192,
+        "preset": "memgpt_chat",
+        # OpenAI specific
+        "model_endpoint_type": "openai",
+        "openai_key": YOUR_OPENAI_KEY,
+    },
+]
+llm_config_memgpt = {"config_list": config_list_memgpt, "seed": 42}
+
+# there are some additional options that control how the interface looks (more info below)
+interface_kwargs = {
+    "debug": False,
+    "show_inner_thoughts": True,
+    "show_function_outputs": False,
+}
+
+# then pass the config to the constructor
+memgpt_autogen_agent = create_memgpt_autogen_agent_from_config(
+    "MemGPT_agent",
+    llm_config=llm_config_memgpt,
+    system_message="Your desired MemGPT persona",
+    interface_kwargs=interface_kwargs,
+    default_auto_reply="...",
+)
+```
+
+Now this `memgpt_autogen_agent` can be used in standard AutoGen scripts:
+```python
+import autogen
+
+# ... assuming we have some other AutoGen agents other_agent_1 and other_agent_2
+groupchat = autogen.GroupChat(agents=[memgpt_autogen_agent, other_agent_1, other_agent_2], messages=[], max_round=12)
+```
+
+[examples/agent_groupchat.py](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/agent_groupchat.py) contains an example of a groupchat where one of the agents is powered by MemGPT. If you are using OpenAI, you can also run the example using the [notebook](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/memgpt_coder_autogen.ipynb).
+
+In the next section, we'll go through the example in depth to demonstrate how to set up MemGPT and AutoGen to run with a local LLM backend.
 
 ## Example: connecting AutoGen + MemGPT to non-OpenAI LLMs
 
@@ -58,8 +108,9 @@ Going back to the example we first mentioned, [examples/agent_groupchat.py](http
 
 In order to run this example on a local LLM, go to lines 46-66 in [examples/agent_groupchat.py](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/agent_groupchat.py) and fill in the config files with your local LLM's deployment details.
 
-`config_list` is used by non-MemGPT AutoGen agents, which expect an OpenAI-compatible API. `config_list_memgpt` is used by MemGPT AutoGen agents, and requires additional settings specific to MemGPT (such as the `model_wrapper` and `context_window`.
+`config_list` is used by non-MemGPT AutoGen agents, which expect an OpenAI-compatible API. `config_list_memgpt` is used by MemGPT AutoGen agents, and requires additional settings specific to MemGPT (such as `model_wrapper` and `context_window`). Depending on what LLM backend you want to use, you'll have to set up your `config_list` and `config_list_memgpt` differently:
 
+#### web UI example
 For example, if you are using web UI, it will look something like this:
 ```python
 # Non-MemGPT agents will still use local LLMs, but they will use the ChatCompletions endpoint
@@ -85,6 +136,7 @@ config_list_memgpt = [
 ]
 ```
 
+#### LM Studio example
 If you are using LM Studio, then you'll need to change the `api_base` in `config_list`, and `model_endpoint_type` + `model_endpoint` in `config_list_memgpt`:
 ```python
 # Non-MemGPT agents will still use local LLMs, but they will use the ChatCompletions endpoint
@@ -110,12 +162,13 @@ config_list_memgpt = [
 ]
 ```
 
+#### OpenAI example
 If you are using the OpenAI API (e.g. using `gpt-4-turbo` via your own OpenAI API account), then the `config_list` for the AutoGen agent and `config_list_memgpt` for the MemGPT AutoGen agent will look different (a lot simpler):
 ```python
 # This config is for autogen agents that are not powered by MemGPT
 config_list = [
     {
-        "model": "gpt-4-1106-preview", # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
+        "model": "gpt-4",
         "api_key": os.getenv("OPENAI_API_KEY"),
     }
 ]
@@ -123,13 +176,48 @@ config_list = [
 # This config is for autogen agents that are powered by MemGPT
 config_list_memgpt = [
     {
-        "model": "gpt-4-1106-preview", # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
         "preset": DEFAULT_PRESET,
-        "model": None,
+        "model": "gpt-4",
         "model_wrapper": None,
         "model_endpoint_type": None,
         "model_endpoint": None,
-        "context_window": 128000, # gpt-4-turbo
+        "context_window": 8192, # gpt-4 context window
+    },
+]
+```
+
+#### Azure OpenAI example
+Azure OpenAI API setup is similar to the OpenAI API setup, but requires additional config variables. First, make sure that you've set all the related Azure variables referenced in [our MemGPT Azure setup page](https://memgpt.readthedocs.io/en/latest/endpoints) (`AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_VERSION`, `AZURE_OPENAI_ENDPOINT`, etc). If you have all the variables set correctly, you should be able to create configs by pulling from the env variables:
+```python
+# This config is for autogen agents that are not powered by MemGPT
+config_list = [
+    {
+        "model": "gpt-4", # make sure you choose a model that you have access to deploy on your Azure account
+        "api_type": "azure",
+        "api_key": os.getenv("AZURE_OPENAI_API_KEY"),
+        "api_version": os.getenv("AZURE_OPENAI_VERSION"),
+        # NOTE: on versions of pyautogen < 0.2.0, use "api_base"
+        # "api_base": os.getenv("AZURE_OPENAI_ENDPOINT"),
+        "base_url": os.getenv("AZURE_OPENAI_ENDPOINT"),
+    }
+]
+
+# This config is for autogen agents that are powered by MemGPT
+config_list_memgpt = [
+    {
+        "preset": DEFAULT_PRESET,
+        "model": "gpt-4", # make sure you choose a model that you have access to deploy on your Azure account
+        "model_wrapper": None,
+        "context_window": 8192, # gpt-4 context window
+        # required setup for Azure
+        "model_endpoint_type": "azure",
+        "model_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
+        "azure_key": os.getenv("AZURE_OPENAI_API_KEY"),
+        "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
+        "azure_version": os.getenv("AZURE_OPENAI_VERSION"),
+        # if you are using Azure for embeddings too, include the following line:
+        "embedding_endpoint_type": "azure",
     },
 ]
 ```
@@ -220,7 +308,6 @@ User_proxy (to chat_manager):
 
 ### Part 4: Attaching documents to MemGPT AutoGen agents
-
 [examples/agent_docs.py](https://github.com/cpacker/MemGPT/blob/main/memgpt/autogen/examples/agent_docs.py) contains an example of a groupchat where the MemGPT AutoGen agent has access to documents.
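+
+In that example, the attachment itself boils down to a single call on the agent object. As a minimal sketch (not part of this patch; it assumes `config_list_memgpt` is defined as in the config examples above, and that a data source named `memgpt_research_paper` has already been created by following the data-loading instructions below):
+```python
+from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config
+
+llm_config_memgpt = {"config_list": config_list_memgpt, "seed": 42}
+
+memgpt_agent = create_memgpt_autogen_agent_from_config(
+    "MemGPT_agent",
+    llm_config=llm_config_memgpt,
+    system_message="You are an AI research assistant",
+)
+
+# attach the previously-loaded "memgpt_research_paper" data source (stored as a directory index)
+memgpt_agent.load_and_attach("memgpt_research_paper", "directory")
+```
+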
First, follow the instructions in [Example - chat with your data - Creating an external data source](../example_data/#creating-an-external-data-source):
 
diff --git a/memgpt/autogen/examples/agent_autoreply.py b/memgpt/autogen/examples/agent_autoreply.py
index e8a39a213c..3ee12a2a6a 100644
--- a/memgpt/autogen/examples/agent_autoreply.py
+++ b/memgpt/autogen/examples/agent_autoreply.py
@@ -13,32 +13,82 @@ import autogen
 from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config
 from memgpt.presets.presets import DEFAULT_PRESET
+from memgpt.constants import LLM_MAX_TOKENS
 
-# USE_OPENAI = True
-USE_OPENAI = False
-if USE_OPENAI:
-    # This config is for autogen agents that are not powered by MemGPT
+LLM_BACKEND = "openai"
+# LLM_BACKEND = "azure"
+# LLM_BACKEND = "local"
+
+if LLM_BACKEND == "openai":
+    # For demo purposes let's use gpt-4
+    model = "gpt-4"
+
+    openai_api_key = os.getenv("OPENAI_API_KEY")
+    assert openai_api_key, "You must set OPENAI_API_KEY to run this example"
+
+    # This config is for AutoGen agents that are not powered by MemGPT
     config_list = [
         {
-            "model": "gpt-4-1106-preview", # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
+            "model": model,
             "api_key": os.getenv("OPENAI_API_KEY"),
         }
     ]
 
-    # This config is for autogen agents that powered by MemGPT
+    # This config is for AutoGen agents that are powered by MemGPT
     config_list_memgpt = [
         {
-            "model": "gpt-4-1106-preview", # gpt-4-turbo (https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo)
-            "preset": "memgpt_docs",
-            "model": None,
+            "model": model,
+            "context_window": LLM_MAX_TOKENS[model],
+            "preset": DEFAULT_PRESET,
             "model_wrapper": None,
-            "model_endpoint_type": None,
-            "model_endpoint": None,
-            "context_window": 128000, # gpt-4-turbo
+            # OpenAI specific
+            "model_endpoint_type": "openai",
+            "model_endpoint": "https://api.openai.com/v1",
+            "openai_key": openai_api_key,
         },
     ]
 
-else:
+elif LLM_BACKEND == "azure":
+    # Make sure that you have access to this deployment/model on your Azure account!
+    # If you don't have access to the model, the code will fail
+    model = "gpt-4"
+
+    azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY")
+    azure_openai_version = os.getenv("AZURE_OPENAI_VERSION")
+    azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
+    assert (
+        azure_openai_api_key is not None and azure_openai_version is not None and azure_openai_endpoint is not None
+    ), "Set all the required Azure OpenAI variables (see: https://memgpt.readthedocs.io/en/latest/endpoints/#azure)"
+
+    # This config is for AutoGen agents that are not powered by MemGPT
+    config_list = [
+        {
+            "model": model,
+            "api_type": "azure",
+            "api_key": azure_openai_api_key,
+            "api_version": azure_openai_version,
+            # NOTE: on versions of pyautogen < 0.2.0, use "api_base"
+            # "api_base": azure_openai_endpoint,
+            "base_url": azure_openai_endpoint,
+        }
+    ]
+
+    # This config is for AutoGen agents that are powered by MemGPT
+    config_list_memgpt = [
+        {
+            "model": model,
+            "context_window": LLM_MAX_TOKENS[model],
+            "preset": DEFAULT_PRESET,
+            "model_wrapper": None,
+            # Azure specific
+            "model_endpoint_type": "azure",
+            "azure_key": azure_openai_api_key,
+            "azure_endpoint": azure_openai_endpoint,
+            "azure_version": azure_openai_version,
+        },
+    ]
+
+elif LLM_BACKEND == "local":
     # Example using LM Studio on a local machine
     # You will have to change the parameters based on your setup
 
@@ -46,9 +96,11 @@ config_list = [
         {
             "model": "NULL", # not needed
-            "api_base": "http://localhost:1234/v1", # ex. 
"http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio + # NOTE: on versions of pyautogen < 0.2.0 use "api_base", and also uncomment "api_type" + # "api_base": "http://localhost:1234/v1", + # "api_type": "open_ai", + "base_url": "http://localhost:1234/v1", # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio "api_key": "NULL", # not needed - "api_type": "open_ai", }, ] @@ -57,13 +109,16 @@ { "preset": DEFAULT_PRESET, "model": None, # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/ + "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192) "model_wrapper": "airoboros-l2-70b-2.1", # airoboros is the default wrapper and should work for most models "model_endpoint_type": "lmstudio", # can use webui, ollama, llamacpp, etc. "model_endpoint": "http://localhost:1234", # the IP address of your LLM backend - "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192) }, ] +else: + raise ValueError(LLM_BACKEND) + # If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo # (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) diff --git a/memgpt/autogen/examples/agent_docs.py b/memgpt/autogen/examples/agent_docs.py index 4da9132723..fc2530228c 100644 --- a/memgpt/autogen/examples/agent_docs.py +++ b/memgpt/autogen/examples/agent_docs.py @@ -15,14 +15,20 @@ import os import autogen from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config +from memgpt.presets.presets import DEFAULT_PRESET from memgpt.constants import LLM_MAX_TOKENS -USE_OPENAI = True -# USE_OPENAI = False -if USE_OPENAI: +LLM_BACKEND = "openai" +# LLM_BACKEND = "azure" +# LLM_BACKEND = "local" + +if LLM_BACKEND == "openai": # For demo purposes let's use gpt-4 model = "gpt-4" + openai_api_key = os.getenv("OPENAI_API_KEY") + assert openai_api_key, "You must set OPENAI_API_KEY to run this example" + # This config is for AutoGen agents that are not powered by MemGPT config_list = [ { @@ -35,15 +41,57 @@ config_list_memgpt = [ { "model": model, - "preset": "memgpt_docs", + "context_window": LLM_MAX_TOKENS[model], + "preset": DEFAULT_PRESET, "model_wrapper": None, + # OpenAI specific "model_endpoint_type": "openai", "model_endpoint": "https://api.openai.com/v1", + "openai_key": openai_api_key, + }, + ] + +elif LLM_BACKEND == "azure": + # Make sure that you have access to this deployment/model on your Azure account! 
+    # If you don't have access to the model, the code will fail
+    model = "gpt-4"
+
+    azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY")
+    azure_openai_version = os.getenv("AZURE_OPENAI_VERSION")
+    azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
+    assert (
+        azure_openai_api_key is not None and azure_openai_version is not None and azure_openai_endpoint is not None
+    ), "Set all the required Azure OpenAI variables (see: https://memgpt.readthedocs.io/en/latest/endpoints/#azure)"
+
+    # This config is for AutoGen agents that are not powered by MemGPT
+    config_list = [
+        {
+            "model": model,
+            "api_type": "azure",
+            "api_key": azure_openai_api_key,
+            "api_version": azure_openai_version,
+            # NOTE: on versions of pyautogen < 0.2.0, use "api_base"
+            # "api_base": azure_openai_endpoint,
+            "base_url": azure_openai_endpoint,
+        }
+    ]
+
+    # This config is for AutoGen agents that are powered by MemGPT
+    config_list_memgpt = [
+        {
+            "model": model,
+            "context_window": LLM_MAX_TOKENS[model],
+            "preset": DEFAULT_PRESET,
+            "model_wrapper": None,
+            # Azure specific
+            "model_endpoint_type": "azure",
+            "azure_key": azure_openai_api_key,
+            "azure_endpoint": azure_openai_endpoint,
+            "azure_version": azure_openai_version,
         },
     ]
 
-else:
+elif LLM_BACKEND == "local":
     # Example using LM Studio on a local machine
     # You will have to change the parameters based on your setup
 
@@ -51,31 +99,36 @@ config_list = [
         {
             "model": "NULL", # not needed
-            "api_base": "http://localhost:1234/v1", # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
+            # NOTE: on versions of pyautogen < 0.2.0 use "api_base", and also uncomment "api_type"
+            # "api_base": "http://localhost:1234/v1",
+            # "api_type": "open_ai",
+            "base_url": "http://localhost:1234/v1", # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio
             "api_key": "NULL", # not needed
         },
     ]
 
     # MemGPT-powered agents will also use local LLMs, but they need additional setup (also they use the Completions endpoint)
    config_list_memgpt = [
        {
-            "preset": "memgpt_docs",
+            "preset": DEFAULT_PRESET,
             "model": None, # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/
+            "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192)
             "model_wrapper": "airoboros-l2-70b-2.1", # airoboros is the default wrapper and should work for most models
             "model_endpoint_type": "lmstudio", # can use webui, ollama, llamacpp, etc.
             "model_endpoint": "http://localhost:1234", # the IP address of your LLM backend
-            "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192)
         },
     ]
 
+else:
+    raise ValueError(LLM_BACKEND)
+
 # Set to True if you want to print MemGPT's inner workings.
 DEBUG = False
 
 interface_kwargs = {
     "debug": DEBUG,
     "show_inner_thoughts": True,
-    "show_function_outputs": False,
+    "show_function_outputs": True, # let's set this to True so that we can see the search function in action
 }
 
 llm_config = {"config_list": config_list, "seed": 42}
@@ -103,7 +156,7 @@ memgpt_agent.load_and_attach("memgpt_research_paper", "directory")
 
 # Initialize the group chat between the agents
-groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=12)
+groupchat = autogen.GroupChat(agents=[user_proxy, memgpt_agent], messages=[], max_round=12, speaker_selection_method="round_robin")
 manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
 
 # Begin the group chat with a message from the user
diff --git a/memgpt/autogen/examples/agent_groupchat.py b/memgpt/autogen/examples/agent_groupchat.py
index 9ddc81a97d..d1c4c005ac 100644
--- a/memgpt/autogen/examples/agent_groupchat.py
+++ b/memgpt/autogen/examples/agent_groupchat.py
@@ -16,12 +16,17 @@ from memgpt.presets.presets import DEFAULT_PRESET
 from memgpt.constants import LLM_MAX_TOKENS
 
-USE_OPENAI = True
-# USE_OPENAI = False
-if USE_OPENAI:
+LLM_BACKEND = "openai"
+# LLM_BACKEND = "azure"
+# LLM_BACKEND = "local"
+
+if LLM_BACKEND == "openai":
     # For demo purposes let's use gpt-4
     model = "gpt-4"
 
+    openai_api_key = os.getenv("OPENAI_API_KEY")
+    assert openai_api_key, "You must set OPENAI_API_KEY to run this example"
+
     # This config is for AutoGen agents that are not powered by MemGPT
     config_list = [
         {
@@ -34,15 +39,57 @@ config_list_memgpt = [
         {
             "model": model,
+            "context_window": LLM_MAX_TOKENS[model],
             "preset": DEFAULT_PRESET,
             "model_wrapper": None,
+            # OpenAI specific
             "model_endpoint_type": "openai",
             "model_endpoint": "https://api.openai.com/v1",
+            "openai_key": openai_api_key,
+        },
+    ]
+
+elif LLM_BACKEND == "azure":
+    # Make sure that you have access to this deployment/model on your Azure account!
+    # If you don't have access to the model, the code will fail
+    model = "gpt-4"
+
+    azure_openai_api_key = os.getenv("AZURE_OPENAI_KEY")
+    azure_openai_version = os.getenv("AZURE_OPENAI_VERSION")
+    azure_openai_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
+    assert (
+        azure_openai_api_key is not None and azure_openai_version is not None and azure_openai_endpoint is not None
+    ), "Set all the required Azure OpenAI variables (see: https://memgpt.readthedocs.io/en/latest/endpoints/#azure)"
+
+    # This config is for AutoGen agents that are not powered by MemGPT
+    config_list = [
+        {
+            "model": model,
+            "api_type": "azure",
+            "api_key": azure_openai_api_key,
+            "api_version": azure_openai_version,
+            # NOTE: on versions of pyautogen < 0.2.0, use "api_base"
+            # "api_base": azure_openai_endpoint,
+            "base_url": azure_openai_endpoint,
+        }
+    ]
+
+    # This config is for AutoGen agents that are powered by MemGPT
+    config_list_memgpt = [
+        {
+            "model": model,
+            "context_window": LLM_MAX_TOKENS[model],
+            "preset": DEFAULT_PRESET,
+            "model_wrapper": None,
+            # Azure specific
+            "model_endpoint_type": "azure",
+            "azure_key": azure_openai_api_key,
+            "azure_endpoint": azure_openai_endpoint,
+            "azure_version": azure_openai_version,
         },
     ]
 
-else:
+elif LLM_BACKEND == "local":
     # Example using LM Studio on a local machine
     # You will have to change the parameters based on your setup
 
@@ -50,9 +97,11 @@ config_list = [
         {
             "model": "NULL", # not needed
-            "api_base": "http://localhost:1234/v1", # ex. 
"http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio + # NOTE: on versions of pyautogen < 0.2.0 use "api_base", and also uncomment "api_type" + # "api_base": "http://localhost:1234/v1", + # "api_type": "open_ai", + "base_url": "http://localhost:1234/v1", # ex. "http://127.0.0.1:5001/v1" if you are using webui, "http://localhost:1234/v1/" if you are using LM Studio "api_key": "NULL", # not needed - "api_type": "open_ai", }, ] @@ -61,13 +110,16 @@ { "preset": DEFAULT_PRESET, "model": None, # only required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama/ + "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192) "model_wrapper": "airoboros-l2-70b-2.1", # airoboros is the default wrapper and should work for most models "model_endpoint_type": "lmstudio", # can use webui, ollama, llamacpp, etc. "model_endpoint": "http://localhost:1234", # the IP address of your LLM backend - "context_window": 8192, # the context window of your model (for Mistral 7B-based models, it's likely 8192) }, ] +else: + raise ValueError(LLM_BACKEND) + # If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo # (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb) # If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent @@ -78,7 +130,7 @@ interface_kwargs = { "debug": DEBUG, - "show_inner_thoughts": DEBUG, + "show_inner_thoughts": True, "show_function_outputs": DEBUG, } diff --git a/memgpt/autogen/memgpt_agent.py b/memgpt/autogen/memgpt_agent.py index f9e2e19205..924e7a53c8 100644 --- a/memgpt/autogen/memgpt_agent.py +++ b/memgpt/autogen/memgpt_agent.py @@ -9,7 +9,7 @@ import memgpt.constants as constants import memgpt.utils as utils import memgpt.presets.presets as presets -from memgpt.config import AgentConfig +from memgpt.config import AgentConfig, MemGPTConfig from memgpt.cli.cli import attach from memgpt.cli.cli_load import load_directory, load_webpage, load_index, load_database, load_vector_database from memgpt.connectors.storage import StorageConnector @@ -48,17 +48,35 @@ def create_memgpt_autogen_agent_from_config( else: user_desc = "Work by yourself, the user won't reply. Elaborate as much as possible." 
+    # If using Azure or OpenAI, save the credentials to the config
+    if llm_config["model_endpoint_type"] in ["azure", "openai"]:
+        # we load here to make sure we don't overwrite existing values
+        # all we want to do is add extra credentials
+        config = MemGPTConfig.load()
+
+        if llm_config["model_endpoint_type"] == "azure":
+            config.azure_key = llm_config["azure_key"]
+            config.azure_endpoint = llm_config["azure_endpoint"]
+            config.azure_version = llm_config["azure_version"]
+            llm_config.pop("azure_key")
+            llm_config.pop("azure_endpoint")
+            llm_config.pop("azure_version")
+
+        elif llm_config["model_endpoint_type"] == "openai":
+            config.openai_key = llm_config["openai_key"]
+            llm_config.pop("openai_key")
+
+        config.save()
+
     # Create an AgentConfig object from the inputs
+    llm_config.pop("name", None)
+    llm_config.pop("persona", None)
+    llm_config.pop("human", None)
     agent_config = AgentConfig(
         name=name,
         persona=persona_desc,
         human=user_desc,
-        preset=llm_config["preset"],
-        model=llm_config["model"],
-        model_wrapper=llm_config["model_wrapper"],
-        model_endpoint_type=llm_config["model_endpoint_type"],
-        model_endpoint=llm_config["model_endpoint"],
-        context_window=llm_config["context_window"],
+        **llm_config,
     )
 
     if function_map is not None or code_execution_config is not None:
diff --git a/memgpt/openai_tools.py b/memgpt/openai_tools.py
index adc2b23fd6..9b2811b782 100644
--- a/memgpt/openai_tools.py
+++ b/memgpt/openai_tools.py
@@ -11,6 +11,15 @@ from memgpt.local_llm.chat_completion_proxy import get_chat_completion
 from memgpt.constants import CLI_WARNING_PREFIX
 
+MODEL_TO_AZURE_ENGINE = {
+    "gpt-4-1106-preview": "gpt-4",
+    "gpt-4": "gpt-4",
+    "gpt-4-32k": "gpt-4-32k",
+    "gpt-3.5": "gpt-35-turbo",
+    "gpt-3.5-turbo": "gpt-35-turbo",
+    "gpt-3.5-turbo-16k": "gpt-35-turbo-16k",
+}
+
 
 def is_context_overflow_error(exception):
     from memgpt.utils import printd
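
For context on the `MODEL_TO_AZURE_ENGINE` table added above: Azure OpenAI addresses deployments by an engine/deployment name rather than the raw OpenAI model name (dots are dropped in the defaults, e.g. `gpt-3.5-turbo` becomes `gpt-35-turbo`). A minimal sketch of how such a lookup can be used (the `azure_engine_for` helper below is hypothetical and for illustration only; this excerpt of the patch only defines the mapping):
```python
from memgpt.openai_tools import MODEL_TO_AZURE_ENGINE  # added by this patch

def azure_engine_for(model: str) -> str:
    """Hypothetical helper: map an OpenAI model name to the default
    Azure OpenAI engine/deployment name."""
    try:
        return MODEL_TO_AZURE_ENGINE[model]
    except KeyError:
        raise ValueError(f"no known Azure engine mapping for model: {model}")

print(azure_engine_for("gpt-4-1106-preview"))  # -> "gpt-4"
```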