From feed806489a4c099dda3e95c40fe7606db304420 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Mon, 5 Feb 2024 18:45:18 -0800 Subject: [PATCH] Autogenstudio Updates [CSV support, Workflow Export, Skill Editing, Windows Testing] (#1475) * support groupchat, other QOL fixes * remove gallery success toast * Fix #1328. Add CSVLoader component and related support for rendering CSV files. Add download link in the modal for appropriate file types including CSV, Code, and PDF. * add name and description field to session datamodel * Update website/blog/2023-12-01-AutoGenStudio/index.mdx Co-authored-by: Chi Wang * sanitize llmconfig, remove additional fields * improve models UX, only modify models from model tab. * readme updates * improve db defaults * improve ui hover behavior and add note on models * general QOL updates * add support for returning summary_method * use ant design tables * icon and layout updates * css and layout updates * readme updates and QOL updates * fix bug where empty string is used as apikey #1415 * add speaker selection to UI #1373 * Fixed a bug where localAgent updates were not synchronized between GroupChatFlowSpecView and AgentFlowSpecView. * Fixed a bug in the Agent Specification Modal that caused localAgent updates to remain in state when closing the modal through any action other than onOk. * Fixed a bug where Agent Specification Modal updates were not saved when the FlowConfigViewer Modal content was changed afterwards (the updatedFlowConfig was created using localFlowConfig). * add version to package * remove sample key * early support for versions table and testing models * Add support for testing model when created #1404 * remove unused imports, QOL updates * fix bug on workflowmanager * make file_name optional in skills datamodel * update instructions on models * fix errors from merge conflict with main * sanitize workflow before download * add support for editing skills in a full-fledged editor (monaco) #1442 * fix merge artifacts * Fix build command for Windows Replaced && with & to continue execution when the 'ui' folder doesn't exist, and suppressed the error "The system cannot find the file specified." * Fix setup instructions The config file starts with a dot (according to gatsby-config.ts). * Throw error if env file doesn't exist Otherwise the app will not work (an issue that is very hard to trace) * version bump * formatting updates * formatting updates * Show warning instead of error if env config file doesn't exist Fix: https://github.com/microsoft/autogen/pull/1475#issuecomment-1918114520 * add rel noopener to a tags * formatting updates * remove double section in readme.
* update dev readme * format update * add default autoreply to agent config datamodel * add check for empty messages list * improve groupchat behavior, add sender to list of agents * update system_message defaults to fit autogen default system message #1474 * simplify response from test_model to only return content, fix serialization issue in #1404 * readme and other formatting updates * add support for showing temp and default auto reply #1521 * formatting updates * formatting and other updates --------- Co-authored-by: Paul Retherford Co-authored-by: Chi Wang Co-authored-by: junkei_okinawa Co-authored-by: Christopher Pereira --- samples/apps/autogen-studio/README.md | 24 +- .../autogen-studio/autogenstudio/__init__.py | 1 + .../autogenstudio/chatmanager.py | 3 +- .../autogen-studio/autogenstudio/datamodel.py | 6 +- .../autogenstudio/utils/dbdefaults.json | 106 +---- .../autogenstudio/utils/dbutils.py | 20 + .../autogenstudio/utils/utils.py | 45 +- .../autogen-studio/autogenstudio/version.py | 3 +- .../autogen-studio/autogenstudio/web/app.py | 31 +- .../autogenstudio/workflowmanager.py | 67 ++- .../apps/autogen-studio/frontend/.env.default | 4 - .../apps/autogen-studio/frontend/README.md | 5 +- .../autogen-studio/frontend/gatsby-config.ts | 11 +- .../apps/autogen-studio/frontend/package.json | 5 +- .../frontend/src/components/atoms.tsx | 367 ++++++++++++--- .../frontend/src/components/footer.tsx | 4 +- .../frontend/src/components/icons.tsx | 36 ++ .../frontend/src/components/types.ts | 5 +- .../frontend/src/components/utils.ts | 71 ++- .../src/components/views/builder/agents.tsx | 25 +- .../src/components/views/builder/build.tsx | 2 +- .../src/components/views/builder/models.tsx | 280 ++++++++---- .../src/components/views/builder/skills.tsx | 173 ++++++-- .../src/components/views/builder/workflow.tsx | 75 +++- .../components/views/playground/chatbox.tsx | 50 +-- .../components/views/playground/metadata.tsx | 51 ++- .../components/views/playground/sessions.tsx | 106 +++-- .../frontend/src/styles/global.css | 6 + .../frontend/tailwind.config.js | 1 + .../autogen-studio/notebooks/agent_spec.json | 9 +- .../notebooks/groupchat_spec.json | 2 +- .../autogen-studio/notebooks/tutorial.ipynb | 419 +++++------------- .../blog/2023-12-01-AutoGenStudio/index.mdx | 10 +- 33 files changed, 1257 insertions(+), 766 deletions(-) diff --git a/samples/apps/autogen-studio/README.md b/samples/apps/autogen-studio/README.md index 0f007731d1fa..48b8883bc1f7 100644 --- a/samples/apps/autogen-studio/README.md +++ b/samples/apps/autogen-studio/README.md @@ -1,4 +1,5 @@ # AutoGen Studio + [![PyPI version](https://badge.fury.io/py/autogenstudio.svg)](https://badge.fury.io/py/autogenstudio) [![Downloads](https://static.pepy.tech/badge/autogenstudio/week)](https://pepy.tech/project/autogenstudio) @@ -10,6 +11,9 @@ Code for AutoGen Studio is on GitHub at [microsoft/autogen](https://github.com/m > **Note**: AutoGen Studio is meant to help you rapidly prototype multi-agent workflows and demonstrate an example of end user interfaces built with AutoGen. It is not meant to be a production-ready app. +> [!WARNING] > AutoGen Studio is under active development and we are iterating quickly. Expect breaking changes in upcoming releases, and note that this `README` may occasionally be outdated. We'll update the `README` as soon as the API stabilizes.
+ ### Capabilities / Roadmap Some of the capabilities supported by the app frontend include the following: @@ -30,6 +34,7 @@ Project Structure: There are two ways to install AutoGen Studio - from PyPi or from source. We **recommend installing from PyPi** unless you plan to modify the source code. + 1. **Install from PyPi** We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Studio: @@ -64,7 +70,7 @@ For Windows users, to build the frontend, you may need alternative commands to b ```bash - gatsby clean && rmdir /s /q ..\\autogenstudio\\web\\ui && (set \"PREFIX_PATH_VALUE=\" || ver>nul) && gatsby build --prefix-paths && xcopy /E /I /Y public ..\\autogenstudio\\web\\ui + gatsby clean && rmdir /s /q ..\\autogenstudio\\web\\ui 2>nul & (set \"PREFIX_PATH_VALUE=\" || ver>nul) && gatsby build --prefix-paths && xcopy /E /I /Y public ..\\autogenstudio\\web\\ui ``` @@ -77,6 +83,7 @@ autogenstudio ui --port 8081 ``` This will start the application on the specified port. Open your web browser and go to `http://localhost:8081/` to begin using AutoGen Studio. +AutoGen Studio also takes a `--host <host>` argument to specify the host address. By default, it is set to `localhost`. You can also use the `--appdir <appdir>` argument to specify the directory where app files (e.g., the database and generated user files) are stored. By default, it is set to the directory where the autogen pip package is installed. Now that you have AutoGen Studio installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills. @@ -124,19 +131,30 @@ We welcome contributions to AutoGen Studio. We recommend the following general s ## FAQ +**Q: How do I specify the directory where files (e.g., the database) are stored?** + +A: You can specify the directory where files are stored by setting the `--appdir` argument when running the application. For example, `autogenstudio ui --appdir /path/to/folder`. This will store the database and other files in the specified directory, e.g., `/path/to/folder/database.sqlite`. + **Q: Where can I adjust the default skills, agent and workflow configurations?** A: You can modify agent configurations directly from the UI or by editing the [dbdefaults.json](autogenstudio/utils/dbdefaults.json) file which is used to initialize the database. **Q: If I want to reset the entire conversation with an agent, how do I go about it?** -A: To reset your conversation history, you can delete the `database.sqlite` file. If you need to clear user-specific data, remove the relevant `autogenstudio/web/files/user/` folder. +A: To reset your conversation history, delete the `database.sqlite` file in the `--appdir` directory. To delete user files, delete the `files` directory in the same `--appdir` directory. **Q: Is it possible to view the output and messages generated by the agents during interactions?** A: Yes, you can view the generated messages in the debug console of the web UI, providing insights into the agent interactions. Alternatively, you can inspect the `database.sqlite` file for a comprehensive record of messages. **Q: Can I use other models with AutoGen Studio?** -Yes.
AutoGen standardizes on the openai model api format, and you can use any api server that offers an openai compliant endpoint. In the AutoGen Studio UI, each agent has an `llm_config` field where you can input your model endpoint details including `model name`, `api key`, `base url`, `model type` and `api version`. For Azure OpenAI models, you can find these details in the Azure portal. Note that for Azure OpenAI, the `model name` is the deployment id or engine, and the `model type` is "azure". +Yes. AutoGen standardizes on the OpenAI model API format, and you can use any API server that offers an OpenAI-compliant endpoint. In the AutoGen Studio UI, each agent has an `llm_config` field where you can enter your model endpoint details, including `model`, `api key`, `base url`, `model type`, and `api version`. For Azure OpenAI models, you can find these details in the Azure portal. Note that for Azure OpenAI, the `model` is the deployment ID or engine, and the `model type` is "azure". For other OSS models, we recommend using a server such as vLLM to expose an OpenAI-compliant endpoint (see the configuration sketch below). +**Q: The server starts, but I can't access the UI** +A: If you are running the server on a remote machine (or a local machine that fails to resolve localhost correctly), you may need to specify the host address. By default, the host address is set to `localhost`. You can specify the host address using the `--host <host>` argument. For example, to start the server on port 8081 with a host address that makes it accessible from other machines on the network, run the following command: + +```bash +autogenstudio ui --port 8081 --host 0.0.0.0 +``` + ## Acknowledgements AutoGen Studio is based on the [AutoGen](https://microsoft.github.io/autogen) project. It was adapted from a research prototype built in October 2023 (original credits: Gagan Bansal, Adam Fourney, Victor Dibia, Piali Choudhury, Saleema Amershi, Ahmed Awadallah, Chi Wang).
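To make the model-configuration FAQ above concrete, here is a minimal sketch of `llm_config`-style model entries plus a connectivity check. The keys shown are exactly the ones this patch's `sanitize_model` keeps (`model`, `base_url`, `api_key`, `api_type`, `api_version`); the endpoint values, key strings, and the Azure `api_version` are placeholders rather than real deployments, and the round-trip call mirrors the `test_model` utility added in this patch.

```python
from autogen.oai.client import OpenAIWrapper

# Sketch: one entry per endpoint type; all values below are placeholders.
config_list = [
    # OpenAI-hosted model
    {"model": "gpt-4-1106-preview", "api_key": "sk-..."},
    # Azure OpenAI: "model" is the deployment id/engine and api_type is "azure"
    {
        "model": "my-gpt4-deployment",
        "api_key": "your-azure-key",
        "base_url": "https://your-resource.openai.azure.com",
        "api_type": "azure",
        "api_version": "2023-08-01-preview",
    },
    # Local OSS model served through a vLLM OpenAI-compliant endpoint
    {"model": "TheBloke/zephyr-7B-alpha-AWQ", "api_key": "EMPTY", "base_url": "http://localhost:8000/v1"},
]

# Quick sanity check against the first endpoint (the same call test_model uses).
client = OpenAIWrapper(config_list=config_list[:1])
response = client.create(messages=[{"role": "user", "content": "2+2="}], cache_seed=None)
print(response.choices[0].message.content)
```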
diff --git a/samples/apps/autogen-studio/autogenstudio/__init__.py b/samples/apps/autogen-studio/autogenstudio/__init__.py index 784328a73bd5..611b1c5203ce 100644 --- a/samples/apps/autogen-studio/autogenstudio/__init__.py +++ b/samples/apps/autogen-studio/autogenstudio/__init__.py @@ -1,3 +1,4 @@ from .chatmanager import * from .workflowmanager import * from .datamodel import * +from .version import __version__ diff --git a/samples/apps/autogen-studio/autogenstudio/chatmanager.py b/samples/apps/autogen-studio/autogenstudio/chatmanager.py index 034a139e9542..ee6d51d9d68b 100644 --- a/samples/apps/autogen-studio/autogenstudio/chatmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/chatmanager.py @@ -34,7 +34,7 @@ def chat(self, message: Message, history: List, flow_config: AgentWorkFlowConfig if flow_config.summary_method == "last": successful_code_blocks = extract_successful_code_blocks(flow.agent_history) - last_message = flow.agent_history[-1]["message"]["content"] + last_message = flow.agent_history[-1]["message"]["content"] if flow.agent_history else "" successful_code_blocks = "\n\n".join(successful_code_blocks) output = (last_message + "\n" + successful_code_blocks) if successful_code_blocks else last_message elif flow_config.summary_method == "llm": @@ -43,6 +43,7 @@ def chat(self, message: Message, history: List, flow_config: AgentWorkFlowConfig output = "" metadata["code"] = "" + metadata["summary_method"] = flow_config.summary_method end_time = time.time() metadata["time"] = end_time - start_time modified_files = get_modified_files(start_time, end_time, scratch_dir, dest_dir=work_dir) diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 283cd2610d59..723576cd09cc 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -33,8 +33,8 @@ def dict(self): @dataclass class Skill(object): title: str - file_name: str content: str + file_name: Optional[str] = None id: Optional[str] = None description: Optional[str] = None timestamp: Optional[str] = None @@ -110,6 +110,7 @@ class AgentConfig: system_message: Optional[str] = None is_termination_msg: Optional[Union[bool, str, Callable]] = None code_execution_config: Optional[Union[bool, str, Dict[str, Any]]] = None + default_auto_reply: Optional[str] = "" def dict(self): result = asdict(self) @@ -172,6 +173,7 @@ class GroupChatFlowSpec: timestamp: Optional[str] = None user_id: Optional[str] = None description: Optional[str] = None + skills: Optional[Union[None, List[Skill]]] = None def __post_init__(self): if self.timestamp is None: @@ -237,6 +239,8 @@ class Session(object): id: Optional[str] = None timestamp: Optional[str] = None flow_config: AgentWorkFlowConfig = None + name: Optional[str] = None + description: Optional[str] = None def __post_init__(self): if self.timestamp is None: diff --git a/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json b/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json index 4f7c3ff81504..9359a05afd07 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json +++ b/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json @@ -15,8 +15,8 @@ { "model": "TheBloke/zephyr-7B-alpha-AWQ", "api_key": "EMPTY", - "base_url": "Your Model Endpoint", - "description": "Zephyr (local model configuration)" + "base_url": "http://localhost:8000/v1", + "description": "Local model example with vLLM server endpoint" } ], 
"agents": [ @@ -27,7 +27,8 @@ "name": "userproxy", "human_input_mode": "NEVER", "max_consecutive_auto_reply": 5, - "system_message": "", + "system_message": "You are a helpful assistant.", + "default_auto_reply": "TERMINATE", "llm_config": false, "code_execution_config": { "work_dir": null, @@ -43,13 +44,12 @@ "title": "find_papers_arxiv", "description": "This skill finds relevant papers on arXiv given a query.", "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n", - "file_name": "find_papers_arxiv.py" + "file_name": "find_papers_arxiv" }, { "title": "generate_images", "description": "This skill generates images from a given query using OpenAI's DALL-E model and saves them to disk.", - "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. 
Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. (default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", - "file_name": "generate_images.py" + "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" } ], "config": { @@ -58,13 +58,6 @@ "config_list": [ { "model": "gpt-4-1106-preview" - }, - { - "model": "gpt-3.5-turbo-16k" - }, - { - "model": "TheBloke/zephyr-7B-alpha-AWQ", - "base_url": "http://localhost:8000/v1" } ], "temperature": 0.1, @@ -73,78 +66,23 @@ }, "human_input_mode": "NEVER", "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + "system_message": "You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. 
Don't use a code block if it's not intended to be executed by the user. If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. Reply 'TERMINATE' in the end when everything is done." } } ], "skills": [ - { - "title": "find_papers_arxiv", - "description": "This skill finds relevant papers on arXiv given a query.", - "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n", - "file_name": "find_papers_arxiv.py" - }, { "title": "fetch_profile", "description": "This skill fetches the text content from a 
personal website.", - "content": "from typing import Optional\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef fetch_user_profile(url: str) -> Optional[str]:\n \"\"\"\n Fetches the text content from a personal website.\n\n Given a URL of a person's personal website, this function scrapes\n the content of the page and returns the text found within the .\n\n Args:\n url (str): The URL of the person's personal website.\n\n Returns:\n Optional[str]: The text content of the website's body, or None if any error occurs.\n \"\"\"\n try:\n # Send a GET request to the URL\n response = requests.get(url)\n # Check for successful access to the webpage\n if response.status_code == 200:\n # Parse the HTML content of the page using BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n # Extract the content of the tag\n body_content = soup.find(\"body\")\n # Return all the text in the body tag, stripping leading/trailing whitespaces\n return \" \".join(body_content.stripped_strings) if body_content else None\n else:\n # Return None if the status code isn't 200 (success)\n return None\n except requests.RequestException:\n # Return None if any request-related exception is caught\n return None\n", - "file_name": "fetch_profile.py" + "content": "from typing import Optional\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef fetch_user_profile(url: str) -> Optional[str]:\n \"\"\"\n Fetches the text content from a personal website.\n\n Given a URL of a person's personal website, this function scrapes\n the content of the page and returns the text found within the .\n\n Args:\n url (str): The URL of the person's personal website.\n\n Returns:\n Optional[str]: The text content of the website's body, or None if any error occurs.\n \"\"\"\n try:\n # Send a GET request to the URL\n response = requests.get(url)\n # Check for successful access to the webpage\n if response.status_code == 200:\n # Parse the HTML content of the page using BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n # Extract the content of the tag\n body_content = soup.find(\"body\")\n # Return all the text in the body tag, stripping leading/trailing whitespaces\n return \" \".join(body_content.stripped_strings) if body_content else None\n else:\n # Return None if the status code isn't 200 (success)\n return None\n except requests.RequestException:\n # Return None if any request-related exception is caught\n return None\n" }, { "title": "generate_images", "description": "This skill generates images from a given query using OpenAI's DALL-E model and saves them to disk.", - "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", - "file_name": "generate_images.py" + "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" } ], "workflows": [ - { - "name": "Visualization Agent Workflow", - "description": "This workflow is used for visualization tasks.", - "sender": { - "type": "userproxy", - "description": "User proxy agent to execute code", - "config": { - "name": "userproxy", - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, - "code_execution_config": { - "work_dir": null, - "use_docker": false - } - } - }, - "receiver": { - "type": "assistant", - "description": "Visualization assistant to create plans and write code to generate visualizations", - "config": { - "name": "visualization_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - }, - { - "model": "gpt-3.5-turbo-16k" - }, - { - "model": "TheBloke/zephyr-7B-alpha-AWQ", - "base_url": "http://localhost:8000/v1" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": null - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 4, - "system_message": "Your task is to ensure you generate a high quality visualization for the user. Your visualizations must follow best practices and you must articulate your reasoning for your choices. The visualization must not have grid or outline box. The visualization should have an APPROPRIATE ASPECT RATIO e..g rectangular for time series data. The title must be bold. Importantly, if THE CHART IS A LINE CHART, you MUST ADD A LINE OF BEST FIT and ADD TEXT ON THE SLOPE OF EACH LINE. Note that today's date is 12/10/2023. At each point, do your best to determine if the user's request has been addressed and if so, respond with a summary. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request '. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." 
- } - }, - "type": "twoagents" - }, { "name": "Travel Agent Group Chat Workflow", "description": "A group chat workflow", @@ -155,8 +93,7 @@ "name": "userproxy", "human_input_mode": "NEVER", "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, + "system_message": "You are a helpful assistant.", "code_execution_config": { "work_dir": null, "use_docker": false @@ -184,12 +121,11 @@ "admin_name": "Admin", "max_round": 10, "speaker_selection_method": "auto", - "agents": [ { "type": "assistant", "config": { - "name": "primary_assistant", + "name": "travel_planner", "llm_config": { "config_list": [ { @@ -202,7 +138,7 @@ }, "human_input_mode": "NEVER", "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can suggest a travel itinerary for a user. You are the primary coordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN that ends with the word TERMINATE. " + "system_message": "You are a helpful assistant that can suggest a travel plan for a user. You are the primary coordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the final plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN that ends with the word TERMINATE. " } }, { @@ -257,7 +193,8 @@ "name": "userproxy", "human_input_mode": "NEVER", "max_consecutive_auto_reply": 10, - "system_message": "", + "system_message": "You are a helpful assistant.", + "default_auto_reply": "TERMINATE", "llm_config": false, "code_execution_config": { "work_dir": null, @@ -272,14 +209,12 @@ { "title": "find_papers_arxiv", "description": "This skill finds relevant papers on arXiv given a query.", - "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries.
Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n", - "file_name": "find_papers_arxiv.py" + "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries. 
Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n" }, { "title": "generate_images", "description": "This skill generates images from a given query using OpenAI's DALL-E model and saves them to disk.", - "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", - "file_name": "generate_images.py" + "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. (default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" } ], "config": { @@ -288,13 +223,6 @@ "config_list": [ { "model": "gpt-4-1106-preview" - }, - { - "model": "gpt-3.5-turbo-16k" - }, - { - "model": "TheBloke/zephyr-7B-alpha-AWQ", - "base_url": "http://localhost:8000/v1" } ], "temperature": 0.1, @@ -303,7 +231,7 @@ }, "human_input_mode": "NEVER", "max_consecutive_auto_reply": 15, - "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. 
IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + "system_message": "You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. Reply 'TERMINATE' in the end when everything is done." 
} }, "type": "twoagents" diff --git a/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py b/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py index d1891081b3c5..92893c01c3dc 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py @@ -5,8 +5,17 @@ import os from typing import Any, List, Dict, Optional, Tuple from ..datamodel import AgentFlowSpec, AgentWorkFlowConfig, Gallery, Message, Model, Session, Skill +from ..version import __version__ as __db_version__ +VERSION_TABLE_SQL = """ + CREATE TABLE IF NOT EXISTS version ( + + version TEXT NOT NULL, + UNIQUE (version) + ) + """ + MODELS_TABLE_SQL = """ CREATE TABLE IF NOT EXISTS models ( id TEXT NOT NULL, @@ -140,6 +149,13 @@ def reset_db(self): os.remove(self.path) self.init_db(path=self.path) + def run_migrations(self): + """ + Run migrations to update the database schema. + """ + + pass + def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: """ Initializes the database by creating necessary tables. @@ -152,6 +168,10 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: self.conn = sqlite3.connect(path, check_same_thread=False, **kwargs) self.cursor = self.conn.cursor() + # Create the version table + self.cursor.execute(VERSION_TABLE_SQL) + self.cursor.execute("INSERT INTO version (version) VALUES (?)", (__db_version__,)) + # Create the models table self.cursor.execute(MODELS_TABLE_SQL) diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index ce2778f09f8e..b604fb30d4aa 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -1,4 +1,3 @@ -import ast import base64 import hashlib from typing import List, Dict, Tuple, Union @@ -7,7 +6,8 @@ from pathlib import Path import re import autogen -from ..datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, LLMConfig, Skill +from autogen.oai.client import OpenAIWrapper +from ..datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, LLMConfig, Model, Skill def md5_hash(text: str) -> str: @@ -27,6 +27,9 @@ def clear_folder(folder_path: str) -> None: :param folder_path: The path to the folder to clear. 
""" # exit if the folder does not exist + if not os.path.exists(folder_path): + return + # exit if the folder does not exist if not os.path.exists(folder_path): return for file in os.listdir(folder_path): @@ -86,6 +89,9 @@ def get_file_type(file_path: str) -> str: ".config", } + # Supported spreadsheet extensions + CSV_EXTENSIONS = {".csv", ".xlsx"} + # Supported image extensions IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".svg", ".webp"} # Supported (web) video extensions @@ -100,6 +106,8 @@ def get_file_type(file_path: str) -> str: # Determine the file type based on the extension if file_extension in CODE_EXTENSIONS: file_type = "code" + elif file_extension in CSV_EXTENSIONS: + file_type = "csv" elif file_extension in IMAGE_EXTENSIONS: file_type = "image" elif file_extension == PDF_EXTENSION: @@ -211,19 +219,18 @@ def init_webserver_folders(root_file_path: str) -> Dict[str, str]: :param root_file_path: The root directory where webserver folders will be created :return: A dictionary with the path of each created folder """ + + if not os.path.exists(root_file_path): + os.makedirs(root_file_path, exist_ok=True) files_static_root = os.path.join(root_file_path, "files/") static_folder_root = os.path.join(root_file_path, "ui") - workdir_root = os.path.join(root_file_path, "workdir") os.makedirs(files_static_root, exist_ok=True) os.makedirs(os.path.join(files_static_root, "user"), exist_ok=True) os.makedirs(static_folder_root, exist_ok=True) - os.makedirs(workdir_root, exist_ok=True) - folders = { "files_static_root": files_static_root, "static_folder_root": static_folder_root, - "workdir_root": workdir_root, } return folders @@ -261,6 +268,9 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: os.makedirs(work_dir) # overwrite skills.py in work_dir + with open(os.path.join(work_dir, "skills.py"), "w", encoding="utf-8") as f: + f.write(prompt) + # overwrite skills.py in work_dir with open(os.path.join(work_dir, "skills.py"), "w", encoding="utf-8") as f: f.write(prompt) @@ -375,3 +385,26 @@ def extract_successful_code_blocks(messages: List[Dict[str, str]]) -> List[str]: successful_code_blocks.extend(code_blocks) return successful_code_blocks + + +def sanitize_model(model: Model): + """ + Sanitize model dictionary to remove None values and empty strings and only keep valid keys. + """ + if isinstance(model, Model): + model = model.dict() + valid_keys = ["model", "base_url", "api_key", "api_type", "api_version"] + # only add key if value is not None + sanitized_model = {k: v for k, v in model.items() if (v is not None and v != "") and k in valid_keys} + return sanitized_model + + +def test_model(model: Model): + """ + Test the model endpoint by sending a simple message to the model and returning the response. 
+ """ + + sanitized_model = sanitize_model(model) + client = OpenAIWrapper(config_list=[sanitized_model]) + response = client.create(messages=[{"role": "user", "content": "2+2="}], cache_seed=None) + return response.choices[0].message.content diff --git a/samples/apps/autogen-studio/autogenstudio/version.py b/samples/apps/autogen-studio/autogenstudio/version.py index 51feb3765672..0b52beb9e0a5 100644 --- a/samples/apps/autogen-studio/autogenstudio/version.py +++ b/samples/apps/autogen-studio/autogenstudio/version.py @@ -1,4 +1,3 @@ -VERSION = "0.0.33a" +VERSION = "0.0.43a" __version__ = VERSION -__db_version__ = "0.0.1" APP_NAME = "autogenstudio" diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index 6934c7e8aaf0..c069623af3df 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -5,6 +5,7 @@ from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles from fastapi import HTTPException +from openai import OpenAIError from ..version import VERSION from ..datamodel import ( @@ -14,7 +15,7 @@ Message, Session, ) -from ..utils import md5_hash, init_webserver_folders, DBManager, dbutils +from ..utils import md5_hash, init_webserver_folders, DBManager, dbutils, test_model from ..chatmanager import AutoGenChatManager @@ -250,7 +251,6 @@ async def get_user_skills(user_id: str): async def create_user_skills(req: DBWebRequestModel): try: skills = dbutils.upsert_skill(skill=req.skill, dbmanager=dbmanager) - return { "status": True, "message": "Skills retrieved successfully", @@ -385,6 +385,32 @@ async def create_user_models(req: DBWebRequestModel): } +@api.post("/models/test") +async def test_user_models(req: DBWebRequestModel): + """Test a model to verify it works""" + + try: + response = test_model(model=req.model) + return { + "status": True, + "message": "Model tested successfully", + "data": response, + } + + except OpenAIError as oai_error: + print(traceback.format_exc()) + return { + "status": False, + "message": "Error occurred while testing model: " + str(oai_error), + } + except Exception as ex_error: + print(traceback.format_exc()) + return { + "status": False, + "message": "Error occurred while testing model: " + str(ex_error), + } + + @api.delete("/models/delete") async def delete_user_model(req: DBWebRequestModel): """Delete a model for a user""" @@ -427,7 +453,6 @@ async def get_user_workflows(user_id: str): @api.post("/workflows") async def create_user_workflow(req: DBWebRequestModel): """Create a new workflow for a user""" - try: workflow = dbutils.upsert_workflow(workflow=req.workflow, dbmanager=dbmanager) return { diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index 62c3b977c1d6..37a539f1375a 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -1,8 +1,8 @@ +import os from typing import List, Optional -from dataclasses import asdict import autogen -from .datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, GroupChatConfig, Message -from .utils import get_skills_from_prompt, clear_folder +from .datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, Message +from .utils import get_skills_from_prompt, clear_folder, sanitize_model from datetime import datetime @@ -35,6 +35,11 @@ def __init__( self.sender = 
diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py
index 62c3b977c1d6..37a539f1375a 100644
--- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py
+++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py
@@ -1,8 +1,8 @@
+import os
 from typing import List, Optional
-from dataclasses import asdict
 import autogen
-from .datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, GroupChatConfig, Message
-from .utils import get_skills_from_prompt, clear_folder
+from .datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, Message
+from .utils import get_skills_from_prompt, clear_folder, sanitize_model
 from datetime import datetime
 
 
@@ -35,6 +35,11 @@ def __init__(
         self.sender = self.load(config.sender)
         # given the config, return an AutoGen agent object
         self.receiver = self.load(config.receiver)
+
+        if config.receiver.type == "groupchat":
+            # append self.sender to the list of agents
+            self.receiver._groupchat.agents.append(self.sender)
+            print(self.receiver)
 
         self.agent_history = []
         if history:
@@ -113,24 +118,40 @@ def sanitize_agent_spec(self, agent_spec: AgentFlowSpec) -> AgentFlowSpec:
         agent_spec.config.is_termination_msg = agent_spec.config.is_termination_msg or (
             lambda x: "TERMINATE" in x.get("content", "").rstrip()[-20:]
         )
-        skills_prompt = ""
-        if agent_spec.skills:
-            # get skill prompt, also write skills to a file named skills.py
-            skills_prompt = get_skills_from_prompt(agent_spec.skills, self.work_dir)
 
-        if agent_spec.type == "userproxy":
+        def get_default_system_message(agent_type: str) -> str:
+            if agent_type == "assistant":
+                return autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE
+            else:
+                return "You are a helpful AI Assistant."
+
+        # sanitize llm_config if present
+        if agent_spec.config.llm_config is not False:
+            config_list = []
+            for llm in agent_spec.config.llm_config.config_list:
+                # check if api_key is present either in llm or env variable
+                if "api_key" not in llm and "OPENAI_API_KEY" not in os.environ:
+                    error_message = f"api_key is not present in llm_config or OPENAI_API_KEY env variable for agent ** {agent_spec.config.name}**. Update your workflow to provide an api_key to use the LLM."
+                    raise ValueError(error_message)
+
+                # only add key if value is not None
+                sanitized_llm = sanitize_model(llm)
+                config_list.append(sanitized_llm)
+            agent_spec.config.llm_config.config_list = config_list
+
+        if agent_spec.config.code_execution_config is not False:
             code_execution_config = agent_spec.config.code_execution_config or {}
             code_execution_config["work_dir"] = self.work_dir
+            # tbd check if docker is installed
+            code_execution_config["use_docker"] = False
             agent_spec.config.code_execution_config = code_execution_config
-
-        if agent_spec.type == "assistant":
-            agent_spec.config.system_message = (
-                autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE
-                + "\n\n"
-                + agent_spec.config.system_message
-                + "\n\n"
-                + skills_prompt
-            )
+        if agent_spec.skills:
+            # get skill prompt, also write skills to a file named skills.py
+            skills_prompt = ""
+            skills_prompt = get_skills_from_prompt(agent_spec.skills, self.work_dir)
+            if agent_spec.config.system_message:
+                agent_spec.config.system_message = agent_spec.config.system_message + "\n\n" + skills_prompt
+            else:
+                agent_spec.config.system_message = get_default_system_message(agent_spec.type) + "\n\n" + skills_prompt
 
         return agent_spec
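One behavioral consequence of the new sanitization worth noting: a workflow whose `config_list` entry lacks an `api_key` now fails fast with a `ValueError` unless `OPENAI_API_KEY` is set, instead of sending an empty key to the LLM. The guard reduces to roughly this check (sketch; `llm` stands for one `config_list` entry):

    import os

    llm = {"model": "gpt-4"}  # an entry with no api_key supplied
    if "api_key" not in llm and "OPENAI_API_KEY" not in os.environ:
        raise ValueError("api_key is not present in llm_config or OPENAI_API_KEY env variable")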
""" - + agent_spec = self.sanitize_agent_spec(agent_spec) if agent_spec.type == "groupchat": agents = [ self.load(self.sanitize_agent_spec(agent_config)) for agent_config in agent_spec.groupchat_config.agents @@ -152,11 +173,11 @@ def load(self, agent_spec: AgentFlowSpec) -> autogen.Agent: group_chat_config = agent_spec.groupchat_config.dict() group_chat_config["agents"] = agents groupchat = autogen.GroupChat(**group_chat_config) - manager = autogen.GroupChatManager(groupchat=groupchat, **agent_spec.config.dict()) - return manager + agent = autogen.GroupChatManager(groupchat=groupchat, **agent_spec.config.dict()) + agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) + return agent else: - agent_spec = self.sanitize_agent_spec(agent_spec) agent = self.load_agent_config(agent_spec.config, agent_spec.type) return agent @@ -173,12 +194,11 @@ def load_agent_config(self, agent_config: AgentConfig, agent_type: str) -> autog """ if agent_type == "assistant": agent = autogen.AssistantAgent(**agent_config.dict()) - agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) elif agent_type == "userproxy": agent = autogen.UserProxyAgent(**agent_config.dict()) - agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) else: raise ValueError(f"Unknown agent type: {agent_type}") + agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) return agent def run(self, message: str, clear_history: bool = False) -> None: @@ -195,3 +215,4 @@ def run(self, message: str, clear_history: bool = False) -> None: message=message, clear_history=clear_history, ) + # pass diff --git a/samples/apps/autogen-studio/frontend/.env.default b/samples/apps/autogen-studio/frontend/.env.default index da3ebffaa289..7f0839b275d2 100644 --- a/samples/apps/autogen-studio/frontend/.env.default +++ b/samples/apps/autogen-studio/frontend/.env.default @@ -1,5 +1 @@ - # use this for .env.development assuming your backend is running on port 8081 GATSBY_API_URL=http://127.0.0.1:8081/api - -# use this .env.production assuming your backend is running on same port as frontend. Remember toremove these comments. -GATSBY_API_URL=/api diff --git a/samples/apps/autogen-studio/frontend/README.md b/samples/apps/autogen-studio/frontend/README.md index 7af58ee311ec..b707495cf42a 100644 --- a/samples/apps/autogen-studio/frontend/README.md +++ b/samples/apps/autogen-studio/frontend/README.md @@ -25,6 +25,7 @@ the front end makes request to the backend api and expects it at /api on localho ## setting env variables for the UI -- please look at env.default -- make a copy of this file and name it `env.development` +- please look at `.env.default` +- make a copy of this file and name it `.env.development` - set the values for the variables in this file + - The main variable here is `GATSBY_API_URL` which should be set to `http://localhost:8081/api` for local development. This tells the UI where to make requests to the backend. 
diff --git a/samples/apps/autogen-studio/frontend/.env.default b/samples/apps/autogen-studio/frontend/.env.default
index da3ebffaa289..7f0839b275d2 100644
--- a/samples/apps/autogen-studio/frontend/.env.default
+++ b/samples/apps/autogen-studio/frontend/.env.default
@@ -1,5 +1 @@
-
 # use this for .env.development assuming your backend is running on port 8081
 GATSBY_API_URL=http://127.0.0.1:8081/api
-
-# use this .env.production assuming your backend is running on same port as frontend. Remember toremove these comments.
-GATSBY_API_URL=/api
diff --git a/samples/apps/autogen-studio/frontend/README.md b/samples/apps/autogen-studio/frontend/README.md
index 7af58ee311ec..b707495cf42a 100644
--- a/samples/apps/autogen-studio/frontend/README.md
+++ b/samples/apps/autogen-studio/frontend/README.md
@@ -25,6 +25,7 @@ the front end makes request to the backend api and expects it at /api on localho
 
 ## setting env variables for the UI
 
-- please look at env.default
-- make a copy of this file and name it `env.development`
+- please look at `.env.default`
+- make a copy of this file and name it `.env.development`
 - set the values for the variables in this file
+  - The main variable here is `GATSBY_API_URL` which should be set to `http://localhost:8081/api` for local development. This tells the UI where to make requests to the backend.
diff --git a/samples/apps/autogen-studio/frontend/gatsby-config.ts b/samples/apps/autogen-studio/frontend/gatsby-config.ts
index 923e531f5155..9644cfc03898 100644
--- a/samples/apps/autogen-studio/frontend/gatsby-config.ts
+++ b/samples/apps/autogen-studio/frontend/gatsby-config.ts
@@ -1,7 +1,16 @@
 import type { GatsbyConfig } from "gatsby";
+import fs from 'fs';
+
+const envFile = `.env.${process.env.NODE_ENV}`;
+
+fs.access(envFile, fs.constants.F_OK, (err) => {
+  if (err) {
+    console.warn(`File '${envFile}' is missing. Using default values.`);
+  }
+});
 
 require("dotenv").config({
-  path: `.env.${process.env.NODE_ENV}`,
+  path: envFile,
 });
 
 const config: GatsbyConfig = {
diff --git a/samples/apps/autogen-studio/frontend/package.json b/samples/apps/autogen-studio/frontend/package.json
index 2996613fb1f2..80975bab0aba 100644
--- a/samples/apps/autogen-studio/frontend/package.json
+++ b/samples/apps/autogen-studio/frontend/package.json
@@ -22,6 +22,7 @@
     "@heroicons/react": "^2.0.18",
     "@mdx-js/mdx": "^1.6.22",
     "@mdx-js/react": "^1.6.22",
+    "@monaco-editor/react": "^4.6.0",
     "@tailwindcss/line-clamp": "^0.4.0",
     "@tailwindcss/typography": "^0.5.9",
     "@types/lodash.debounce": "^4.0.9",
@@ -40,6 +41,7 @@
     "gatsby-transformer-sharp": "^4.14.0",
     "jszip": "^3.10.1",
     "lodash.debounce": "^4.0.8",
+    "papaparse": "^5.4.1",
     "postcss": "^8.4.13",
     "react": "^18.2.0",
     "react-contenteditable": "^3.3.6",
@@ -56,7 +58,8 @@
   },
   "devDependencies": {
     "@types/node": "^18.7.13",
-    "@types/react": "^18.2.37",
+    "@types/papaparse": "^5.3.14",
+    "@types/react": "^18.2.48",
     "@types/react-dom": "^18.2.15",
     "@types/react-inner-image-zoom": "^3.0.0",
     "@types/react-resizable": "^3.0.2",
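In gatsby-config.ts above, a missing env file now produces a warning rather than a hard failure, so the build proceeds with default values. The guard reduces to a pattern like the following, sketched in Python to match the repo's backend language (the file name is illustrative):

    import os
    import warnings

    env_file = ".env.development"
    if not os.path.exists(env_file):
        warnings.warn(f"File '{env_file}' is missing. Using default values.")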
diff --git a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx
index dfdc3b4bc63d..59d5c3e4b1a2 100644
--- a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx
+++ b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx
@@ -5,44 +5,48 @@
   XMarkIcon,
   ClipboardIcon,
   PlusIcon,
-  ArrowPathIcon,
-  ArrowDownRightIcon,
-  PencilIcon,
   UserGroupIcon,
   UsersIcon,
+  ExclamationTriangleIcon,
+  InformationCircleIcon,
 } from "@heroicons/react/24/outline";
 import React, { ReactNode, useEffect, useRef, useState } from "react";
 import Icon from "./icons";
 import {
   Button,
+  Divider,
   Dropdown,
   Input,
   MenuProps,
   Modal,
   Select,
   Slider,
+  Table,
+  Space,
   Tooltip,
   message,
+  theme,
 } from "antd";
+import Editor from "@monaco-editor/react";
+import Papa from "papaparse";
 import remarkGfm from "remark-gfm";
 import ReactMarkdown from "react-markdown";
 import { atomDark } from "react-syntax-highlighter/dist/esm/styles/prism";
 import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
-import { fetchJSON, getServerUrl, truncateText } from "./utils";
+import { fetchJSON, getServerUrl, obscureString, truncateText } from "./utils";
 import {
   IAgentFlowSpec,
   IFlowConfig,
   IGroupChatFlowSpec,
+  ILLMConfig,
   IModelConfig,
   ISkill,
   IStatus,
 } from "./types";
-import { ResizableBox } from "react-resizable";
-import debounce from "lodash.debounce";
 import TextArea from "antd/es/input/TextArea";
 import { appContext } from "../hooks/provider";
-import Item from "antd/es/list/Item";
 
+const { useToken } = theme;
 interface CodeProps {
   node?: any;
   inline?: any;
@@ -207,7 +211,7 @@ export const CollapseBox = ({
       {isOpen && (
-
+
           {children}
       )}
@@ -298,7 +302,7 @@ export const GroupView = ({
   return (
-
+
       {title}
       {children}
@@ -371,7 +375,7 @@ export const LoadingOverlay = ({ children, loading }: IProps) => {
       {loading && (
         <>
           {/* Overlay background */}
-
+
@@ -593,12 +597,13 @@ export const ControlRowView = ({
       {title}
       {truncateText(value + "", 20)}
-
+      {" "}
+
+
+
-
       {description}
       {control}
-
-
+
     );
 };
@@ -627,14 +632,14 @@ export const ModelSelector = ({
   const { user } = React.useContext(appContext);
   const listModelsUrl = `${serverUrl}/models?user_id=${user?.email}`;
 
-  const sanitizeModelConfig = (config: IModelConfig) => {
-    const sanitizedConfig: IModelConfig = { model: config.model };
-    if (config.api_key) sanitizedConfig.api_key = config.api_key;
-    if (config.base_url) sanitizedConfig.base_url = config.base_url;
-    if (config.api_type) sanitizedConfig.api_type = config.api_type;
-    if (config.api_version) sanitizedConfig.api_version = config.api_version;
-    return sanitizedConfig;
-  };
+  // const sanitizeModelConfig = (config: IModelConfig) => {
+  //   const sanitizedConfig: IModelConfig = { model: config.model };
+  //   if (config.api_key) sanitizedConfig.api_key = config.api_key;
+  //   if (config.base_url) sanitizedConfig.base_url = config.base_url;
+  //   if (config.api_type) sanitizedConfig.api_type = config.api_type;
+  //   if (config.api_version) sanitizedConfig.api_version = config.api_version;
+  //   return sanitizedConfig;
+  // };
 
   const handleRemoveConfig = (index: number) => {
     const updatedConfigs = configs.filter((_, i) => i !== index);
@@ -684,13 +689,20 @@ export const ModelSelector = ({
     models.length > 0
       ? models.map((model: IModelConfig, index: number) => ({
           key: index,
-          label: model.model,
+          label: (
+            <>
+              {model.model}
+
+              {truncateText(model.description || "", 20)}
+
+          ),
           value: index,
         }))
       : [
          {
            key: -1,
-            label: "No models found",
+            label: <>No models found,
            value: 0,
          },
        ];
@@ -698,17 +710,49 @@ export const ModelSelector = ({
   const modelOnClick: MenuProps["onClick"] = ({ key }) => {
     const selectedIndex = parseInt(key.toString());
     let selectedModel = models[selectedIndex];
-    selectedModel = sanitizeModelConfig(selectedModel);
     const updatedConfigs = [...configs, selectedModel];
     setConfigs(updatedConfigs);
   };
 
+  const menuStyle: React.CSSProperties = {
+    boxShadow: "none",
+  };
+
+  const { token } = useToken();
+  const contentStyle: React.CSSProperties = {
+    backgroundColor: token.colorBgElevated,
+    borderRadius: token.borderRadiusLG,
+    boxShadow: token.boxShadowSecondary,
+  };
+
+  const addModelsMessage = (
+
+      {" "}
+      Please
+      create models in the Model tab
+
+  );
+
   const AddModelsDropDown = () => {
     return (
+        (
+
+          {React.cloneElement(menu as React.ReactElement, {
+            style: menuStyle,
+          })}
+          {models.length === 0 && (
+            <>
+
+
+              {addModelsMessage}
+
+          )}
+
+        )}
       >
         {
           if (newModelConfig?.model.trim()) {
-            const sanitizedConfig = sanitizeModelConfig(newModelConfig);
+            const sanitizedConfig = newModelConfig;
 
             if (editIndex !== null) {
               // Edit existing model
@@ -756,15 +800,22 @@ export const ModelSelector = ({
   };
 
   const modelButtons = configs.map((config, i) => {
-    const tooltipText = `${config.model} \n ${config.base_url || ""} \n ${
-      config.api_type || ""
-    }`;
+    const tooltipText = (
+      <>
+        {config.model}
+        {config.base_url && {config.base_url}}
+        {config.api_key && {obscureString(config.api_key, 3)}}
+
+          {truncateText(config.description || "", 90)}
+
+    );
     return (
 
-        showModal(config, i)}
+        // onClick={() => showModal(config, i)}
       >
{" "} @@ -889,6 +940,73 @@ export const ImageLoader = ({ ); }; +type DataRow = { [key: string]: any }; +export const CsvLoader = ({ + csvUrl, + className, +}: { + csvUrl: string; + className?: string; +}) => { + const [data, setData] = useState([]); + const [columns, setColumns] = useState([]); + const [isLoading, setIsLoading] = useState(true); + + useEffect(() => { + const fetchData = async () => { + try { + const response = await fetch(csvUrl); + const csvString = await response.text(); + const parsedData = Papa.parse(csvString, { + header: true, + dynamicTyping: true, + skipEmptyLines: true, + }); + setData(parsedData.data as DataRow[]); + + // Use the keys of the first object for column headers + const firstRow = parsedData.data[0] as DataRow; // Type assertion + const columnHeaders: any[] = Object.keys(firstRow).map((key) => { + const val = { + title: key.charAt(0).toUpperCase() + key.slice(1), // Capitalize the key for the title + dataIndex: key, + key: key, + }; + if (typeof firstRow[key] === "number") { + return { + ...val, + sorter: (a: DataRow, b: DataRow) => a[key] - b[key], + }; + } + return val; + }); + setColumns(columnHeaders); + setIsLoading(false); + } catch (error) { + console.error("Error fetching CSV data:", error); + setIsLoading(false); + } + }; + + fetchData(); + }, [csvUrl]); + + // calculate x scroll, based on number of columns + const scrollX = columns.length * 150; + + return ( +
 
 export const CodeLoader = ({
   url,
   className,
@@ -963,6 +1081,11 @@
   const [localFlowSpec, setLocalFlowSpec] =
     React.useState<IAgentFlowSpec>(flowSpec);
 
+  // Required to monitor localAgent updates that occur in GroupChatFlowSpecView and reflect updates.
+  useEffect(() => {
+    setLocalFlowSpec(flowSpec);
+  }, [flowSpec]);
+
   // Event handlers for updating local state and propagating changes
 
   const onControlChange = (value: any, key: string) => {
@@ -975,17 +1098,23 @@
       ...localFlowSpec,
       config: { ...localFlowSpec.config, [key]: value },
     };
-    console.log(updatedFlowSpec.config.llm_config);
+    setLocalFlowSpec(updatedFlowSpec);
     setFlowSpec(updatedFlowSpec);
   };
 
-  const llm_config = localFlowSpec.config.llm_config || { config_list: [] };
+  const llm_config: ILLMConfig = localFlowSpec.config.llm_config || {
+    config_list: [],
+    temperature: 0.1,
+  };
 
   return (
     <>
       {title}
-
+      {flowSpec.config.name}
+
         className="mb-4 bg-primary "
       >
+      {
+            onControlChange(e.target.value, "default_auto_reply");
+          }}
+        />
+      }
+      />
+
-      {llm_config && (
+      {llm_config && llm_config.config_list.length > 0 && (
           {
             const llm_config = {
-              ...flowSpec.config.llm_config,
+              ...(flowSpec.config.llm_config || { temperature: 0.1 }),
               config_list,
             };
             onControlChange(llm_config, "llm_config");
@@ -1107,6 +1253,30 @@
         />
       )}
+      {llm_config && llm_config.config_list.length > 0 && (
+        {
+              const llm_config = {
+                ...flowSpec.config.llm_config,
+                temperature: value,
+              };
+              onControlChange(llm_config, "llm_config");
+            }}
+          />
+        }
+        />
+      )}
+
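The two new agent controls above (`default_auto_reply` and the temperature slider, #1521) map directly onto autogen parameters: temperature travels inside `llm_config`, while `default_auto_reply` is a constructor argument on `ConversableAgent` subclasses. A sketch of the resulting agent construction, with illustrative values:

    import autogen

    llm_config = {"config_list": [{"model": "gpt-4"}], "temperature": 0.1}
    agent = autogen.AssistantAgent(
        name="assistant",
        llm_config=llm_config,
        default_auto_reply="...",  # illustrative; any fallback string works
    )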
       {
         {
           setSelectedAgent(index);
-          // setShowAgentModal(true);
         }}
       >
@@ -1499,12 +1668,45 @@ const GroupChatFlowSpecView = ({
         }}
       />
     )}
-
+    Group Chat Agents
     >
       {agentsView}
+
+    {
+        if (flowSpec?.groupchat_config) {
+          setFlowSpec({
+            ...flowSpec,
+            groupchat_config: {
+              ...flowSpec?.groupchat_config,
+              speaker_selection_method: value,
+            },
+          });
+        }
+      }}
+      options={
+        [
+          { label: "Auto", value: "auto" },
+          { label: "Round Robin", value: "round_robin" },
+          { label: "Random", value: "random" },
+        ] as any
+      }
+    />
+    }
+    />
   );
 };
@@ -1534,6 +1736,11 @@ const AgentModal = ({
     fetchAgents();
   }, []);
 
+  // Required to synchronize localAgent changes between GroupChatFlowSpecView and AgentFlowSpecView
+  useEffect(() => {
+    setLocalAgent(localAgent);
+  }, [localAgent]);
+
   const fetchAgents = () => {
     const onSuccess = (data: any) => {
       if (data && data.status) {
@@ -1576,6 +1783,10 @@ const AgentModal = ({
       onCancel={() => {
         setShowAgentModal(false);
       }}
+      afterClose={() => {
+        // If the modal is closed other than onOk, the agent is reset to before the update; if it is closed onOk, the agent is updated again with the localAgent passed to the handler.
+        setLocalAgent(agent);
+      }}
     >
       {agent && (
         <>
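The speaker selection options surfaced in `GroupChatFlowSpecView` above (#1373) pass straight through to `autogen.GroupChat`, whose `speaker_selection_method` accepts "auto", "round_robin", and "random" among others. A sketch, with the agent list elided:

    import autogen

    groupchat = autogen.GroupChat(
        agents=[],  # filled in from the workflow's agent specs
        messages=[],
        speaker_selection_method="round_robin",  # or "auto" / "random"
    )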
@@ -1585,8 +1796,6 @@ const AgentModal = ({
       {localAgent && localAgent.type === "groupchat" && (
-          {" "}
-          Group Chat
 
   {
-    const updatedFlowConfig = { ...localFlowConfig, name: newName };
+  const updateFlowConfig = (key: string, value: string) => {
+    // When updatedFlowConfig was built from localFlowConfig, Agent Specification Modal updates were lost if the FlowConfigViewer Modal content changed afterwards; fixed by building the update from flowConfig instead.
+    const updatedFlowConfig = { ...flowConfig, [key]: value };
+    console.log("updatedFlowConfig: ", updatedFlowConfig);
     setLocalFlowConfig(updatedFlowConfig);
     setFlowConfig(updatedFlowConfig);
   };
 
-  // React.useEffect(() => {
-  //   setLocalFlowConfig(flowConfig);
-  // }, [flowConfig]);
-
   return (
     <>
       {/*
        {flowConfig.name}
       */}
@@ -1738,7 +1945,7 @@ export const FlowConfigViewer = ({
-            onChange={(e) => updateFlowConfigName(e.target.value)}
+            onChange={(e) => updateFlowConfig("name", e.target.value)}
           />
         }
       />
@@ -1752,14 +1959,7 @@ export const FlowConfigViewer = ({
           {
-              const updatedConfig = {
-                ...localFlowConfig,
-                description: e.target.value,
-              };
-              setLocalFlowConfig(updatedConfig);
-              setFlowConfig(updatedConfig);
-            }}
+            onChange={(e) => updateFlowConfig("description", e.target.value)}
           />
         }
       />
@@ -1772,14 +1972,7 @@ export const FlowConfigViewer = ({
         {
-            setLocalModel({ ...localModel, model: e.target.value });
-          }}
-        />
-        {
-          if (localModel) {
-            setLocalModel({ ...localModel, api_key: e.target.value });
-          }
-        }}
-        />
-        {
-          if (localModel) {
-            setLocalModel({ ...localModel, base_url: e.target.value });
-          }
-        }}
-        />
-        {
-          if (localModel) {
-            setLocalModel({ ...localModel, api_type: e.target.value });
-          }
-        }}
-        />
-        {
-          if (localModel) {
-            setLocalModel({ ...localModel, api_version: e.target.value });
-          }
-        }}
-        />
-