From ae33653843330cdbc56676cfd8cba441e2c7b91d Mon Sep 17 00:00:00 2001 From: zerolee <464806884@qq.com> Date: Wed, 13 Dec 2023 14:58:25 +0800 Subject: [PATCH] set enus as default dir --- .vitepress/config.mts | 27 +++--- src/{ => enus}/guide/api.md | 1 + .../guide/contribute/contribute_guide.md | 1 + .../guide/contribute/rfc_template.md | 0 src/{ => enus}/guide/faq.md | 4 +- .../guide/get_started/installation.md | 34 +++++-- .../guide/get_started/introduction.md | 0 .../guide/get_started/quickstart.md | 28 ++++-- src/{ => enus}/guide/get_started/setup.md | 21 ++++- src/{ => enus}/guide/getting-started.md | 0 .../in_depth_guides/agent_communication.md | 1 + src/enus/guide/in_depth_guides/memory.md | 1 + src/enus/guide/in_depth_guides/use_own_llm.md | 1 + src/{ => enus}/guide/tutorials/agent_101.md | 4 +- .../guide/tutorials/agent_think_act.md | 2 +- .../guide/tutorials/breakpoint_recovery.md | 0 src/{ => enus}/guide/tutorials/concepts.md | 9 ++ .../guide/tutorials/human_engagement.md | 19 ++-- .../tutorials/integration_with_open_llm.md | 87 ++++++++++++------ .../guide/tutorials/multi_agent_101.md | 34 +++++-- .../guide/tutorials/use_memories.md | 11 ++- .../guide/use_cases/agent/creative.md | 9 +- .../guide/use_cases/agent/ml_engineer.md | 1 + .../guide/use_cases/agent/photographer.md | 1 + .../use_cases/agent/receipt_assistant.md | 90 +++++++++---------- .../guide/use_cases/agent/researcher.md | 11 +-- .../use_cases/agent/tutorial_assistant.md | 51 +++++------ .../guide/use_cases/multi_agent/creative.md | 0 .../guide/use_cases/multi_agent/debate.md | 20 ++++- .../guide/use_cases/multi_agent/minecraft.md | 1 + .../use_cases/multi_agent/software_company.md | 2 +- .../use_cases/multi_agent/software_gallery.md | 1 + .../use_cases/multi_agent/virtual_town.md | 1 + .../use_cases/multi_agent/werewolf_game.md | 1 + src/{ => enus}/index.md | 2 +- src/guide/in_depth_guides/memory.md | 1 - src/guide/in_depth_guides/use_own_llm.md | 1 - .../use_cases/multi_agent/software_gallery.md | 1 - 38 files changed, 309 insertions(+), 170 deletions(-) rename src/{ => enus}/guide/api.md (97%) rename src/{ => enus}/guide/contribute/contribute_guide.md (98%) rename src/{ => enus}/guide/contribute/rfc_template.md (100%) rename src/{ => enus}/guide/faq.md (98%) rename src/{ => enus}/guide/get_started/installation.md (95%) rename src/{ => enus}/guide/get_started/introduction.md (100%) rename src/{ => enus}/guide/get_started/quickstart.md (77%) rename src/{ => enus}/guide/get_started/setup.md (92%) rename src/{ => enus}/guide/getting-started.md (100%) rename src/{ => enus}/guide/in_depth_guides/agent_communication.md (98%) create mode 100644 src/enus/guide/in_depth_guides/memory.md create mode 100644 src/enus/guide/in_depth_guides/use_own_llm.md rename src/{ => enus}/guide/tutorials/agent_101.md (99%) rename src/{ => enus}/guide/tutorials/agent_think_act.md (56%) rename src/{ => enus}/guide/tutorials/breakpoint_recovery.md (100%) rename src/{ => enus}/guide/tutorials/concepts.md (99%) rename src/{ => enus}/guide/tutorials/human_engagement.md (72%) rename src/{ => enus}/guide/tutorials/integration_with_open_llm.md (90%) rename src/{ => enus}/guide/tutorials/multi_agent_101.md (97%) rename src/{ => enus}/guide/tutorials/use_memories.md (92%) rename src/{ => enus}/guide/use_cases/agent/creative.md (84%) rename src/{ => enus}/guide/use_cases/agent/ml_engineer.md (99%) rename src/{ => enus}/guide/use_cases/agent/photographer.md (98%) rename src/{ => enus}/guide/use_cases/agent/receipt_assistant.md (98%) rename src/{ => 
enus}/guide/use_cases/agent/researcher.md (99%) rename src/{ => enus}/guide/use_cases/agent/tutorial_assistant.md (98%) rename src/{ => enus}/guide/use_cases/multi_agent/creative.md (100%) rename src/{ => enus}/guide/use_cases/multi_agent/debate.md (98%) rename src/{ => enus}/guide/use_cases/multi_agent/minecraft.md (98%) rename src/{ => enus}/guide/use_cases/multi_agent/software_company.md (65%) create mode 100644 src/enus/guide/use_cases/multi_agent/software_gallery.md rename src/{ => enus}/guide/use_cases/multi_agent/virtual_town.md (98%) rename src/{ => enus}/guide/use_cases/multi_agent/werewolf_game.md (98%) rename src/{ => enus}/index.md (94%) delete mode 100644 src/guide/in_depth_guides/memory.md delete mode 100644 src/guide/in_depth_guides/use_own_llm.md delete mode 100644 src/guide/use_cases/multi_agent/software_gallery.md diff --git a/.vitepress/config.mts b/.vitepress/config.mts index 19095a23..32591d77 100644 --- a/.vitepress/config.mts +++ b/.vitepress/config.mts @@ -16,7 +16,7 @@ const Logo = ` `; const sources = ['blog', 'rfcs']; -const dests = ['zhcn']; +const dests = ['zhcn', 'enus']; // route based on fs, so copy files when deploy if (process.env.NODE_ENV === 'production') { @@ -41,27 +41,28 @@ export default defineConfig({ root: { label: 'English', lang: 'en', + link: '/enus/', themeConfig: { nav: [ { text: 'Docs', - link: '/guide/get_started/introduction', - activeMatch: '/guide/', + link: '/enus/guide/get_started/introduction', + activeMatch: '/enus/guide/', }, { text: 'Blog', - link: '/blog/agents', - activeMatch: '/blog/', + link: '/enus/blog/agents', + activeMatch: '/enus/blog/', }, { text: 'RFCs', - link: '/rfcs/RFC-116-MetaGPT优化方案', - activeMatch: '/rfcs/', + link: '/enus/rfcs/RFC-116-MetaGPT优化方案', + activeMatch: '/enus/rfcs/', }, ], sidebar: { - '/guide/': { - base: '/guide/', + '/enus/guide/': { + base: '/enus/guide/', items: [ { text: 'Get Started', @@ -176,8 +177,8 @@ export default defineConfig({ }, ], }, - '/blog/': { - base: '/blog/', + '/enus/blog/': { + base: '/enus/blog/', items: [ { text: 'Agents', @@ -185,8 +186,8 @@ export default defineConfig({ }, ], }, - '/rfcs/': { - base: '/rfcs/', + '/enus/rfcs/': { + base: '/enus/rfcs/', items: [ { text: 'RFC-116-MetaGPT优化方案', diff --git a/src/guide/api.md b/src/enus/guide/api.md similarity index 97% rename from src/guide/api.md rename to src/enus/guide/api.md index 6f4c91b0..97f6f3b5 100644 --- a/src/guide/api.md +++ b/src/enus/guide/api.md @@ -1,2 +1,3 @@ # API + Will be updated soon, stay tuned. diff --git a/src/guide/contribute/contribute_guide.md b/src/enus/guide/contribute/contribute_guide.md similarity index 98% rename from src/guide/contribute/contribute_guide.md rename to src/enus/guide/contribute/contribute_guide.md index d9ace5e5..aad31605 100644 --- a/src/guide/contribute/contribute_guide.md +++ b/src/enus/guide/contribute/contribute_guide.md @@ -1,2 +1,3 @@ # Contribute Guide + Will be updated soon, stay tuned. diff --git a/src/guide/contribute/rfc_template.md b/src/enus/guide/contribute/rfc_template.md similarity index 100% rename from src/guide/contribute/rfc_template.md rename to src/enus/guide/contribute/rfc_template.md diff --git a/src/guide/faq.md b/src/enus/guide/faq.md similarity index 98% rename from src/guide/faq.md rename to src/enus/guide/faq.md index 8db3cba5..36488287 100644 --- a/src/guide/faq.md +++ b/src/enus/guide/faq.md @@ -81,11 +81,11 @@ MetaGPT Community - The position of Chief Evangelist rotates on a monthly basis. 
The official OPENAI_API_BASE address is `https://api.openai.com/v1` - If the official OPENAI_API_BASE address is inaccessible in your environment (this can be verified with curl), it's recommended to configure using the reverse proxy OPENAI_API_BASE provided by libraries such as openai-forward. For instance, ` OPENAI_API_BASE: "``https://api.openai-forward.com/v1``" ` + If the official OPENAI_API_BASE address is inaccessible in your environment (this can be verified with curl), it's recommended to configure using the reverse proxy OPENAI_API_BASE provided by libraries such as openai-forward. For instance, `OPENAI_API_BASE: "``https://api.openai-forward.com/v1``"` If the official OPENAI_API_BASE address is inaccessible in your environment (again, verifiable via curl), another option is to configure the OPENAI_PROXY parameter. This way, you can access the official OPENAI_API_BASE via a local proxy. If you don't need to access via a proxy, please do not enable this configuration; if accessing through a proxy is required, modify it to the correct proxy address. Note that when OPENAI_PROXY is enabled, don't set OPENAI_API_BASE. - Note: OpenAI's default API design ends with a v1. An example of the correct configuration is: ` OPENAI_API_BASE: "``https://api.openai.com/v1``" ` + Note: OpenAI's default API design ends with a v1. An example of the correct configuration is: `OPENAI_API_BASE: "``https://api.openai.com/v1``"` - **Absolutely! How can I assist you today?** diff --git a/src/guide/get_started/installation.md b/src/enus/guide/get_started/installation.md similarity index 95% rename from src/guide/get_started/installation.md rename to src/enus/guide/get_started/installation.md index 83ca049d..52289ff8 100644 --- a/src/guide/get_started/installation.md +++ b/src/enus/guide/get_started/installation.md @@ -1,30 +1,41 @@ # Installation + We provide several ways for installing MetaGPT, please take the one most convenient to your case. + ## Support system and version -| System Version | Python Version | Supported | -| ---- | ---- | ----- | -| macOS 13.x | python 3.9 | Yes | -| Windows 11 | python 3.9 | Yes | -| Ubuntu 22.04 | python 3.9 | Yes | + +| System Version | Python Version | Supported | +| -------------- | -------------- | --------- | +| macOS 13.x | python 3.9 | Yes | +| Windows 11 | python 3.9 | Yes | +| Ubuntu 22.04 | python 3.9 | Yes | Ensure that Python 3.9+ is installed on your system. You can check this by using: + ``` python3 --version ``` + ## Install stable version + Recommended for most users. You can import MetaGPT like any python package, employ teams of diverse built-in roles, and build your own agents to serve all kinds of application. + ``` pip install metagpt ``` ## Install latest development version + Best for experiencing newest features. + ``` pip install git+https://github.com/geekan/MetaGPT ``` ## Install in development mode + Recommended for developers and researchers looking to customize the framework for their unique requirements, experiment on new ideas, or create sophisticated functionalities like a novel memory mechanism using the framework. + ``` git clone https://github.com/geekan/MetaGPT.git cd /your/path/to/MetaGPT @@ -32,6 +43,7 @@ pip install -e . ``` ## Install with Docker + ### Use default MetaGPT image ```bash @@ -75,35 +87,45 @@ git clone https://github.com/geekan/MetaGPT.git cd MetaGPT && docker build -t metagpt:custom . 
``` - ## Installation for full features + This is relevant if you want to generate diagrams such as quadrant chart, system designs, sequence flow, etc. They are provided as intermediate results if you run the [software startup example](https://github.com/geekan/MetaGPT/blob/main/startup.py). + ### Mermaid + Mermaid is a language that uses text to generate flowcharts, pie charts, Gantt charts, and other diagrams. MetaGPT uses Mermaid to create flowcharts, sequence diagrams, Gantt charts, etc. Mermaid is implemented in Node.js, and direct installation can be somewhat costly. MetaGPT provides the following Mermaid Engines to convert Mermaid text into diagrams: **nodejs** Install mermaid-cli directly through nodejs. MetaGPT calls the command line to turn Mermaid text into diagrams. You need to install nodejs first, then use npm to install: + ``` npm install -g @mermaid-js/mermaid-cli ``` + The official method provided by mermaid, installed via nodejs, thus it basically supports all platforms and also supports output in png/svg/pdf formats. However, it requires the installation of nodejs and mermaid-cli, which comes with certain costs for installation and use, and also requires a browser environment at runtime. **pyppeteer** Mermaid can also be called via JavaScript, and pyppeteer is a web automation testing tool implemented in Python that can execute JavaScript scripts. Therefore, using pyppeteer + mermaidjs can convert Mermaid text into diagrams. You can install pyppeteer with pip: + ``` pip install pyppeteer ``` + This method is relatively simple to install, has no platform restrictions, and supports output in png/svg/pdf formats. However, it requires a dependency on a browser, so you need to install a browser first and set the browser path when running: + ``` export PUPPETEER_EXECUTABLE_PATH=/path/to/your/chromium # or edge or chrome ``` + (Note: pyppeteer is no longer maintained) **playwright** As pyppeteer is no longer maintained, it recommends using playwright-python as a replacement. The principle of running mermaid with playwright is the same as with pyppeteer. However, playwright-python requires the installation of its own provided browser and cannot use an already installed browser. The official only supports the following platforms: + - Windows 10+, Windows Server 2016+ or Windows Subsystem for Linux (WSL). - MacOS 12 Monterey or MacOS 13 Ventura. - Debian 11, Debian 12, Ubuntu 20.04 or Ubuntu 22.04. + ``` pip install playwright playwright install --with-deps chromium diff --git a/src/guide/get_started/introduction.md b/src/enus/guide/get_started/introduction.md similarity index 100% rename from src/guide/get_started/introduction.md rename to src/enus/guide/get_started/introduction.md diff --git a/src/guide/get_started/quickstart.md b/src/enus/guide/get_started/quickstart.md similarity index 77% rename from src/guide/get_started/quickstart.md rename to src/enus/guide/get_started/quickstart.md index 26349c33..e4f696ff 100644 --- a/src/guide/get_started/quickstart.md +++ b/src/enus/guide/get_started/quickstart.md @@ -1,31 +1,39 @@ - # Quickstart + ## Installation + ``` pip install metagpt ``` - Available installation methods can be found in the [Installation](./installation) section + +Available installation methods can be found in the [Installation](./installation) section ## Setup + ``` import os os.environ["OPENAI_API_KEY"] = "sk-..." os.environ["OPENAI_API_MODEL"] = "gpt-4" ``` + Variations for setting up LLM API (OpenAI, Azure, Anthropic, etc.) 
and other components can be found in the [Setup](./setup) section. We use environment variables for a quick demo. For formal usage of MetaGPT, we recommend using a config or key file. See [Setup](./setup). ## Develop software with a one-line requirement ->Note: + +> Note: > ->Below is a breakdown of the [software startup example](https://github.com/geekan/MetaGPT/blob/main/startup.py). If you install MetaGPT with the git clone approach, simply run ->``` ->python startup.py --idea "write a cli blackjack game" ->``` -Now, let's get started! We will create a team of agents to write software based on one line of our instruction. +> Below is a breakdown of the [software startup example](https://github.com/geekan/MetaGPT/blob/main/startup.py). If you install MetaGPT with the git clone approach, simply run +> +> ``` +> python startup.py --idea "write a cli blackjack game" +> ``` +> +> Now, let's get started! We will create a team of agents to write software based on one line of our instruction. First, import off-the-shelf roles + ```python import asyncio from metagpt.roles import ( @@ -36,7 +44,9 @@ from metagpt.roles import ( ) from metagpt.team import Team ``` + Next, initiate the team, equip it with agents, set their budget, and provide our requirement of writing a small game + ```python async def startup(idea: str): company = Team() @@ -53,7 +63,9 @@ async def startup(idea: str): await company.run(n_round=5) ``` + Finally, run it and get the code! + ```python await startup(idea="write a cli blackjack game") ``` diff --git a/src/guide/get_started/setup.md b/src/enus/guide/get_started/setup.md similarity index 92% rename from src/guide/get_started/setup.md rename to src/enus/guide/get_started/setup.md index 82a2d5ce..20aa05bb 100644 --- a/src/guide/get_started/setup.md +++ b/src/enus/guide/get_started/setup.md @@ -1,40 +1,55 @@ # Setup + Using MetaGPT involves connecting with model providers. We will walk through the setup in this page. + ## Setup for LLM API + We will take OpenAI API as an example. You can finish the setup in either way: + 1. Use environment variables. This can be used temporarily for a quick start or trying out a demo. 2. Use a config or key file. This is the recommended way, best for continuous and full-feature usage and development. + ### 1. Use environment variables + Run in command line: + ```shell export OPENAI_API_KEY="sk-..." # YOUR_API_KEY export OPENAI_API_MODEL="intended model" # gpt-4, gpt-3.5-turbo, etc. ``` + Or in python: + ```python import os os.environ["OPENAI_API_KEY"] = "sk-..." # YOUR_API_KEY os.environ["OPENAI_API_MODEL"] = "intended model" # gpt-4, gpt-3.5-turbo, etc. ``` + ### 2. Use a config or key file + 1. In your current working directory, create a folder `config` and add a new file named `config.yaml` or `key.yaml` under it. 2. Copy the content from the example [config.yaml](https://github.com/geekan/MetaGPT/blob/main/config/config.yaml) file into your new files 3. Fill in your own values to the file: + ```yaml -OPENAI_API_KEY: "sk-..." # YOUR_API_KEY -OPENAI_API_MODEL: "intended model" # gpt-4, gpt-3.5-turbo, etc. +OPENAI_API_KEY: 'sk-...' # YOUR_API_KEY +OPENAI_API_MODEL: 'intended model' # gpt-4, gpt-3.5-turbo, etc. ``` + Remember: If you follow the `git clone` approach in [Installation](./installation), `config/config.yaml` will already be there. Just edit it or make a copy named `config/key.yaml` for editting. This way you don't accidentally commit and share your API key using git. 
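If you want to confirm which source your setup will actually pick up, the small script below mirrors the priority order described in the note that follows (`config/key.yaml` > `config/config.yaml` > environment variable). It is only an illustrative sketch for sanity-checking your local files — not MetaGPT's internal loader — and it assumes PyYAML is installed (`pip install pyyaml`).

```python
# Illustrative sanity check only -- NOT MetaGPT's internal config loader.
# Resolves OPENAI_API_KEY in the documented order: key.yaml > config.yaml > environment.
import os
from pathlib import Path

import yaml  # assumes PyYAML is installed


def resolve_openai_key():
    for candidate in (Path("config/key.yaml"), Path("config/config.yaml")):
        if candidate.exists():
            data = yaml.safe_load(candidate.read_text()) or {}
            if data.get("OPENAI_API_KEY"):
                return data["OPENAI_API_KEY"], str(candidate)
    return os.environ.get("OPENAI_API_KEY"), "environment variable"


if __name__ == "__main__":
    key, source = resolve_openai_key()
    if key:
        print(f"OPENAI_API_KEY will be read from: {source}")
    else:
        print("OPENAI_API_KEY is not configured yet")
```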
+ > Note: > MetaGPT will read your setup in this priority order: `config/key.yaml > config/config.yaml > environment variable` Here you are good to go! See [Quickstart](./quickstart) or our [Tutorials](/guide/tutorials/agent_101) for your first run! ## Setup for different model providers + ### OpenAI ### Azure ### Anthropic -## Setup for other APIs \ No newline at end of file +## Setup for other APIs diff --git a/src/guide/getting-started.md b/src/enus/guide/getting-started.md similarity index 100% rename from src/guide/getting-started.md rename to src/enus/guide/getting-started.md diff --git a/src/guide/in_depth_guides/agent_communication.md b/src/enus/guide/in_depth_guides/agent_communication.md similarity index 98% rename from src/guide/in_depth_guides/agent_communication.md rename to src/enus/guide/in_depth_guides/agent_communication.md index b2c0bf72..d9af1a19 100644 --- a/src/guide/in_depth_guides/agent_communication.md +++ b/src/enus/guide/in_depth_guides/agent_communication.md @@ -1,2 +1,3 @@ # Agent Communication + Will be updated soon, stay tuned. diff --git a/src/enus/guide/in_depth_guides/memory.md b/src/enus/guide/in_depth_guides/memory.md new file mode 100644 index 00000000..9f1fa92e --- /dev/null +++ b/src/enus/guide/in_depth_guides/memory.md @@ -0,0 +1 @@ +# Memory diff --git a/src/enus/guide/in_depth_guides/use_own_llm.md b/src/enus/guide/in_depth_guides/use_own_llm.md new file mode 100644 index 00000000..6072e731 --- /dev/null +++ b/src/enus/guide/in_depth_guides/use_own_llm.md @@ -0,0 +1 @@ +# Use Your Own LLM diff --git a/src/guide/tutorials/agent_101.md b/src/enus/guide/tutorials/agent_101.md similarity index 99% rename from src/guide/tutorials/agent_101.md rename to src/enus/guide/tutorials/agent_101.md index 029dd169..8718019b 100644 --- a/src/guide/tutorials/agent_101.md +++ b/src/enus/guide/tutorials/agent_101.md @@ -6,6 +6,7 @@ After this tutorial, you will be able to: 2. Develop your first agent capable of one or more actions ## Use off-the-shelf agents + Import any role, initialize it, run it with a starting message, done! ```python @@ -25,6 +26,7 @@ if __name__ == '__main__': ``` ## Develop your first agent + Consider agent from a practical usage viewpoint, what are the bare essentials for an agent to be of any utility to us? From MetaGPT's standpoint, if an agent can execute certain actions (whether powered by LLM or otherwise), it holds some degree of usefulness. Put it simply, we define what actions our agent is expected to possess, equip the agent with these capabilities, and we have a basic useful agent! MetaGPT provides high flexibility to define your own action and your own agent. We will walk you through this in the rest of this section. 
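As a taste of what "define actions, then equip the agent" means in code, here is a compressed, hypothetical skeleton using the same `Action`/`Role` APIs that the rest of this tutorial relies on (`_init_actions`, `_act`, `_aask`, `get_memories`). Treat it as a sketch only — the sections below build a complete, runnable agent step by step.

```python
import asyncio

from metagpt.actions import Action
from metagpt.roles import Role
from metagpt.schema import Message


class SimpleSummarize(Action):
    """A single-step action: ask the LLM to summarize whatever text it receives."""

    async def run(self, text: str) -> str:
        return await self._aask(f"Summarize the following in one sentence:\n{text}")


class Summarizer(Role):
    def __init__(self, name: str = "Sam", profile: str = "Summarizer", **kwargs):
        super().__init__(name, profile, **kwargs)
        self._init_actions([SimpleSummarize])  # equip the role with its action

    async def _act(self) -> Message:
        todo = self._rc.todo  # the action scheduled for this step
        text = self.get_memories(k=1)[0].content  # the latest observed message
        summary = await todo.run(text)
        return Message(content=summary, role=self.profile, cause_by=type(todo))


if __name__ == "__main__":
    # Drive it the same way as the off-the-shelf roles above.
    print(asyncio.run(Summarizer().run("MetaGPT builds agents out of Actions and Roles ...")))
```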
### Flowchart of one agent run cycle @@ -217,4 +219,4 @@ python examples/build_customized_agent.py --msg "write a function that calculate Or try it on Colab -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1SF3bJiDjKw6Xwnz2Rf0j8Hc0U4KsSB2L?usp=sharing) \ No newline at end of file +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1SF3bJiDjKw6Xwnz2Rf0j8Hc0U4KsSB2L?usp=sharing) diff --git a/src/guide/tutorials/agent_think_act.md b/src/enus/guide/tutorials/agent_think_act.md similarity index 56% rename from src/guide/tutorials/agent_think_act.md rename to src/enus/guide/tutorials/agent_think_act.md index e1148cfe..d973e3d2 100644 --- a/src/guide/tutorials/agent_think_act.md +++ b/src/enus/guide/tutorials/agent_think_act.md @@ -2,4 +2,4 @@ ## Three react modes -## Customize thinking process \ No newline at end of file +## Customize thinking process diff --git a/src/guide/tutorials/breakpoint_recovery.md b/src/enus/guide/tutorials/breakpoint_recovery.md similarity index 100% rename from src/guide/tutorials/breakpoint_recovery.md rename to src/enus/guide/tutorials/breakpoint_recovery.md diff --git a/src/guide/tutorials/concepts.md b/src/enus/guide/tutorials/concepts.md similarity index 99% rename from src/guide/tutorials/concepts.md rename to src/enus/guide/tutorials/concepts.md index f43823f1..5f177251 100644 --- a/src/guide/tutorials/concepts.md +++ b/src/enus/guide/tutorials/concepts.md @@ -1,5 +1,7 @@ # Concepts + After this tutorial, you will be able to: + - Understand MetaGPT's concept of agent and environment - How agents interact with each other and what a multi-agent collaboration may look like @@ -10,6 +12,7 @@ You may also jump to [Agent101](agent_101) if you want hands-on coding first. Check out our [paper](https://arxiv.org/abs/2308.00352) if you want more rigorous explanation. ## Agent + Academia and industry have proposed various definitions for the term "agent". Roughly speaking, an agent should be able to think or plan like human, possesses memories or even emotions, is equipped with a certain skill set to interact with the environment, other agents, and human. An agent, in a comprehensive examination, is a sophisticated system by itself. In our view, we imagine an agent as a digital organism within an environment, where @@ -23,11 +26,15 @@ This formula encapsulates the essence of what makes an agent function. To unders 3. Thought: Thought processes involve analyzing observations, drawing from memory, and considering possible actions. It's the agent's internal decision-making process, which may be powered by LLM. 4. Action: These are the agent's visible responses to its thoughts and observations. They can range from generating code with LLM to manually predefined operations such as reading a local file. Agents can also execute tool-using actions, including searching the web for weather, using a calculator to do maths, and more. 5. Memory: An agent's memory stores past experiences. It is crucial for learning, as it allows the agent to reference previous outcomes and adjust future actions accordingly. + ## MultiAgent + A MultiAgent System can be thought of as a society of agents, where + > MultiAgent = Agents + Environment + Standard Operating Procedure (SOP) + Communication + Economy Each of these components plays a vital role: + 1. 
Agents: Defined individually above, agents within a multi-agent system work in concert, each with their unique LLM, observations, thoughts, actions, and memories. 2. Environment: The environment is the shared space where agents exist and interact. Agents observe important information from the environment and publish action output for others to make use of. 3. Standard Operating Procedure (SOP): These are the established procedures that govern agent behaviors and interactions, ensuring orderly and efficient operations within the system. For example, in a car manufacturing SOP, one agent welds the car parts while another installs the wiring, maintaining an orderly assembly line. @@ -35,9 +42,11 @@ Each of these components plays a vital role: 5. Economy: This refers to the system of value exchange within the multi-agent environment, dictating how resources are allocated and tasks are prioritized. ## An Illustration + ![img](/image/guide/tutorials/concepts_example.png) This is a simple example showing how agents work + - In the environment, three agents Alice, Bob, Charlie interact with each other. - They can publish messages or their action output to the environment, which, in turn, are observed by others. - We expose the inner process of one agent, Charlie. The same process also applies to Alice and Bob. diff --git a/src/guide/tutorials/human_engagement.md b/src/enus/guide/tutorials/human_engagement.md similarity index 72% rename from src/guide/tutorials/human_engagement.md rename to src/enus/guide/tutorials/human_engagement.md index fdd003e6..1a7ef1de 100644 --- a/src/guide/tutorials/human_engagement.md +++ b/src/enus/guide/tutorials/human_engagement.md @@ -3,36 +3,42 @@ When we talk about agents, we usually mean it to be LLM-driven. However, in some real scenarios, we do want humans to step in, whether it's for quality assurance in a project, guidance offering in critical decision-making, or role playing in a game. In this tutorial, we talk about how to take humans into the loop. After this tutorial, you will be able to: -- Introduce interaction between LLM-based agents and human + +- Introduce interaction between LLM-based agents and human ## Switch between LLM and human + We will reuse the exact example in [MultiAgent 101](multi_agent_101). Originally, an LLM assumes the `SimpleReviewer`. Suppose we want more control over the review process, we may take up the `Role` ourselves. This is enabled by just one switch: set `is_human=True` upon initialization. The code becomes: + ```python team.hire( [ SimpleCoder(), SimpleTester(), # SimpleReviewer(), # the original line - SimpleReviewer(is_human=True), # change to this line + SimpleReviewer(is_human=True), # change to this line ] ) ``` + We are a human `SimpleReviewer` interacting with the two LLM-based agents `SimpleCoder` and `SimpleTester` now. We can comment on the unit tests from `SimpleTester`, requesting more coverage or edge cases. The feedback is then sent back to the tester for writing a new version. The switch is complete agnostic to the original SOP and `Role` definition, meaning applicability to general scenarios. Each time when it's our turn to respond, the running process will pause to wait for our input. Just type in what we want, we are sending our messages to the agents! ->Constraint: ->For develops who customize `Role`'s `_act` function, the `Action`s called in `_act` must be among the `self._actions` initialized with `_init_actions` for the human engagement to take effect. 
+> Constraint: +> For develops who customize `Role`'s `_act` function, the `Action`s called in `_act` must be among the `self._actions` initialized with `_init_actions` for the human engagement to take effect. ->LIMITATION: ->Currently the interaction is through terminal input, which is inconvenient for multi-line or structured writeup. Meanwhile, users must adhere to the prompt in content or format like what we require of an LLM, in order for the logic after human input works as usual. We will provide solutions to these issues in following update. +> LIMITATION: +> Currently the interaction is through terminal input, which is inconvenient for multi-line or structured writeup. Meanwhile, users must adhere to the prompt in content or format like what we require of an LLM, in order for the logic after human input works as usual. We will provide solutions to these issues in following update. ## Complete script of this tutorial + https://github.com/geekan/MetaGPT/blob/main/examples/build_customized_multi_agents.py Run it with + ```sh python examples/build_customized_multi_agents.py --add_human True ``` @@ -42,4 +48,3 @@ A sample interaction - diff --git a/src/guide/tutorials/integration_with_open_llm.md b/src/enus/guide/tutorials/integration_with_open_llm.md similarity index 90% rename from src/guide/tutorials/integration_with_open_llm.md rename to src/enus/guide/tutorials/integration_with_open_llm.md index 819c5cc9..690435db 100644 --- a/src/guide/tutorials/integration_with_open_llm.md +++ b/src/enus/guide/tutorials/integration_with_open_llm.md @@ -1,29 +1,33 @@ # Integration with open LLM -Currently, if you want to get more stable code generation results, you need to use OpenAI's GPT-3.5 or GPT-4. But there are currently many other excellent open source models available for experiments, and relatively satisfactory results can be obtained. Therefore, in this tutorial, we will explore how to integrate with open source LLM and get project output based on your input idea. + +Currently, if you want to get more stable code generation results, you need to use OpenAI's GPT-3.5 or GPT-4. But there are currently many other excellent open source models available for experiments, and relatively satisfactory results can be obtained. Therefore, in this tutorial, we will explore how to integrate with open source LLM and get project output based on your input idea. **Attention** Due to the limitations of the open source model itself, the content described in this tutorial cannot guarantee stable code generation. If you follow this tutorial to experiment, it means you already know this point. At the same time, we are also exploring how to obtain more stable and better-quality output under the open source model. If you are also interested in this, you can contact us in discord or WeChat community group. -I believe that with the update of the open source model, this goal will be reached soon. +I believe that with the update of the open source model, this goal will be reached soon. -We will conduct an overall introduction to the tutorial according to the following process: +We will conduct an overall introduction to the tutorial according to the following process: -- Model Deployment. Use inference repo such as LLaMA-Factory, FastChat, ollama, etc. to deploy the corresponding LLM model. +- Model Deployment. Use inference repo such as LLaMA-Factory, FastChat, ollama, etc. to deploy the corresponding LLM model. - LLM configuration. - Optionally, repair the LLM output. - Run. 
-The methods of integrating open source LLM and integrating some non-openai closed source models (such as Baidu Wenxinyiyan, iFLYTEK Spark, Zhipu ChatGLM, etc.) are similar, the main difference is the configuration. For details on the configuration of other closed-source LLMs, please refer to other LLM configuration documents under the online document site. The other process steps after the configuration are consistent with the above. +The methods of integrating open source LLM and integrating some non-openai closed source models (such as Baidu Wenxinyiyan, iFLYTEK Spark, Zhipu ChatGLM, etc.) are similar, the main difference is the configuration. For details on the configuration of other closed-source LLMs, please refer to other LLM configuration documents under the online document site. The other process steps after the configuration are consistent with the above. ## Model Deployment -Note that it is recommended to use the OpenAI compatible interface for model deployment. In this way, both request and response can be processed directly using openai sdk, which will simplify the overall integration process. At the same time, the following inference repos also support publishing as OpenAI-compatible interfaces (except ollama), and the workload required to change is very small. -Note that by default you have graphics card resources for deployment, otherwise CPU inference will be a bit slow. +Note that it is recommended to use the OpenAI compatible interface for model deployment. In this way, both request and response can be processed directly using openai sdk, which will simplify the overall integration process. At the same time, the following inference repos also support publishing as OpenAI-compatible interfaces (except ollama), and the workload required to change is very small. + +Note that by default you have graphics card resources for deployment, otherwise CPU inference will be a bit slow. ### LLaMA-Factory + Repo: https://github.com/hiyouga/LLaMA-Factory ##### Installation + ```shell git clone https://github.com/hiyouga/LLaMA-Factory.git conda create -n llama_factory python=3.10 @@ -35,10 +39,13 @@ pip install -r requirements.txt For details, please see [Installation](https://github.com/hiyouga/LLaMA-Factory#dependence-installation-optional) ##### Supported Models + The common LLaMA, Llama2 and most open source models in China are supported. For details, please see [Model List](https://github.com/hiyouga/LLaMA-Factory#supported-models) ##### Deployment + Source model launching + ```shell python3 src/api_demo.py \ --model_name_or_path meta-llama/Llama-2-13b-chat-hf \ @@ -46,6 +53,7 @@ python3 src/api_demo.py \ ``` Loading and merging lora output launching + ```shell python3 src/api_demo.py \ --model_name_or_path path_to_llama2_model \ @@ -56,11 +64,12 @@ python3 src/api_demo.py \ By default, the interface access address is: `http://0.0.0.0:8000/`. If you need to modify the port, enter `src/api_demo.py` to modify it. If you need to start with multiple cards, add `CUDA_VISIBLE_DEVICES=0,1,2` before the startup command and replace it with your card number. -Different models support different `template` value, which can be found from `src/llmtuner/data/template.py`. +Different models support different `template` value, which can be found from `src/llmtuner/data/template.py`. 
For details, please see [API Deployment](https://github.com/hiyouga/LLaMA-Factory#api-demo) ##### Request example + ```shell curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:application/json" -d '{ "messages":[{"role":"user","content":"who are you"}], @@ -69,12 +78,15 @@ curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:applicatio "max_tokens": 256 }' ``` -By default, the requested `model` parameter value is `gpt-3.5-turbo`, if necessary, modify it. Enter the `list_models` method of `src/llmtuner/api/app.py` and modify it to your custom value. + +By default, the requested `model` parameter value is `gpt-3.5-turbo`, if necessary, modify it. Enter the `list_models` method of `src/llmtuner/api/app.py` and modify it to your custom value. ### FastChat + Repo: https://github.com/lm-sys/FastChat ##### Installation + ```shell pip3 install "fschat[model_worker,webui]" ``` @@ -82,9 +94,11 @@ pip3 install "fschat[model_worker,webui]" For details, please see [Installation](https://github.com/lm-sys/FastChat#install) ##### Supported Models + The common LLaMA, Llama2 and most open source models in China are supported. For details, see [Model List](https://github.com/lm-sys/FastChat#supported-models) ##### Deployment + steps - launch controller,`python3 -m fastchat.serve.controller` @@ -96,6 +110,7 @@ If you need to start the lora fine-tuned model, you need to do [model merge](htt For details, please see [API Deployment](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md) ##### Request example + ```shell curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:application/json" -d '{ "messages":[{"role":"user","content":"who are you"}], @@ -104,12 +119,15 @@ curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:applicatio "max_tokens": 256 }' ``` -By default, the requested `model` parameter value is `vicuna`, which corresponds to the `model-names` when starting `model_worker`. + +By default, the requested `model` parameter value is `vicuna`, which corresponds to the `model-names` when starting `model_worker`. #### vllm + Repo: https://github.com/vllm-project/vllm ##### Installation + ```shell pip3 install vllm ``` @@ -117,9 +135,11 @@ pip3 install vllm For details, please see [Installation](https://docs.vllm.ai/en/latest/getting_started/installation.html) ##### Supported Models + The common LLaMA, Llama2 and most open source models in China are supported. For details, please see [Model List](https://docs.vllm.ai/en/latest/models/supported_models.html) ##### Deployment + ```shell python3 -m vllm.entrypoints.openai.api_server \ --model meta-llama/Llama-2-13b-hf \ @@ -129,6 +149,7 @@ python3 -m vllm.entrypoints.openai.api_server \ For details, please see [API Deployment](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server) ##### Request example + ```shell curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:application/json" -d '{ "messages":[{"role":"user","content":"who are you"}], @@ -137,14 +158,17 @@ curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "content-type:applicatio "max_tokens": 256 }' ``` -By default, the requested `model` parameter value is `llama2-13b`, which corresponds to the `served-model-name` at startup. + +By default, the requested `model` parameter value is `llama2-13b`, which corresponds to the `served-model-name` at startup. ### ollama + Repo: https://github.com/jmorganca/ollama This repo is not compatible with the openai api interface. 
MetaGPT will support the interface provided by itself in the future. ##### Installation + ```shell curl https://ollama.ai/install.sh | sh ``` @@ -152,9 +176,11 @@ curl https://ollama.ai/install.sh | sh For details, please see [Installation](https://github.com/jmorganca/ollama/blob/main/docs/linux.md) ##### Supported Models + Mainly supports Llama2 and its derivative series, please see [Model List](https://github.com/jmorganca/ollama#model-library) for details ##### Deployment + ```shell ollama run llama2 ``` @@ -164,6 +190,7 @@ llama2[Usage documentation](https://ollama.ai/library/llama2) For details, please see [API deployment](https://github.com/jmorganca/ollama/blob/main/docs/api.md) ##### Request example + ```shell curl -X POST http://localhost:11434/api/generate -d '{ "model": "llama2", @@ -172,15 +199,18 @@ curl -X POST http://localhost:11434/api/generate -d '{ ``` ## LLM Configuration -Since the above deployment is an API interface, it takes effect by modifying the configuration file `config/key.yaml`. + +Since the above deployment is an API interface, it takes effect by modifying the configuration file `config/key.yaml`. #### openai compatible interface -Such as LLaMA-Factory, FastChat, vllm openai compatible interface + +Such as LLaMA-Factory, FastChat, vllm openai compatible interface **config/key.yaml** + ```yaml -OPEN_LLM_API_BASE: "http://106.75.10.65:8001/v1" -OPEN_LLM_API_MODEL: "llama2-13b" +OPEN_LLM_API_BASE: 'http://106.75.10.65:8001/v1' +OPEN_LLM_API_MODEL: 'llama2-13b' ``` The complete routing of the openapi interface `http://0.0.0.0:8000/v1/chat/completions`, `OPEN_LLM_API_BASE` only needs to be configured to `http://0.0.0.0:8000/v1`, and the remaining parts will be filled by openai sdk itself. `OPEN_LLM_API_MODEL` is the actual value of the request interface parameter `model`. @@ -192,29 +222,34 @@ The complete routing of the openapi interface `http://0.0.0.0:8000/v1/chat/compl ## Optional, repair LLM output ### Background -This tutorial mainly guides how to integrate open source models (and non-openai closed source models) in MetaGPT. Since the output results of LLM have a great relationship with the prompt instruction format, open source models (also some non-openai closed source models) are often very complicated. It is difficult to follow MetaGPT's existing roles' instructions for output. On the one hand, we will optimize the role instructions so that they have better command result output compatibility under most open and closed source models. On the other hand, based on the current situation, we will repair the output content of the open source LLM to improve the overall execution success rate. + +This tutorial mainly guides how to integrate open source models (and non-openai closed source models) in MetaGPT. Since the output results of LLM have a great relationship with the prompt instruction format, open source models (also some non-openai closed source models) are often very complicated. It is difficult to follow MetaGPT's existing roles' instructions for output. On the one hand, we will optimize the role instructions so that they have better command result output compatibility under most open and closed source models. On the other hand, based on the current situation, we will repair the output content of the open source LLM to improve the overall execution success rate. ### Main issues with open source model command output -Including some issues with non-openai closed source models. -MetaGPT's prompt has strong structural requirements for output. 
It is often difficult to follow the complete output according to the instructions when an open source model works, resulting in missing, omitted, and errors in the output content. The main manifestations are as follows: -- The target key cannot output according to the upper and lower case agreed by prompt. -- The output json plain text contains missing or extra special characters. For example, `{"a":b"}}`, `{"a":b"]}`, `{"a":b"` and so on. +Including some issues with non-openai closed source models. +MetaGPT's prompt has strong structural requirements for output. It is often difficult to follow the complete output according to the instructions when an open source model works, resulting in missing, omitted, and errors in the output content. The main manifestations are as follows: + +- The target key cannot output according to the upper and lower case agreed by prompt. +- The output json plain text contains missing or extra special characters. For example, `{"a":b"}}`, `{"a":b"]}`, `{"a":b"` and so on. + +In response to the above situation, we have added the feature of repairing open source LLM output, specifically +**config/key.yaml** -In response to the above situation, we have added the feature of repairing open source LLM output, specifically -**config/key.yaml** ```yaml REPAIR_LLM_OUTPUT: true ``` -After turning on this function, an attempt will be made to repair the above situation during execution. This switch currently does not guarantee a complete repair. There will still be some situations that we have not covered yet (different open source models have different situations), and the execution process will be interrupted and exited. If you are interested with this, please submit a PR and attach the corresponding model description, test log and unittest cases. +After turning on this function, an attempt will be made to repair the above situation during execution. This switch currently does not guarantee a complete repair. There will still be some situations that we have not covered yet (different open source models have different situations), and the execution process will be interrupted and exited. If you are interested with this, please submit a PR and attach the corresponding model description, test log and unittest cases. -If you enable this function, it means that the LLM output (ProductManager and Architect roles in the software company in MetaGPT) will be repaired. The keyword `repair_` will appear in the log. You can pay attention to it. +If you enable this function, it means that the LLM output (ProductManager and Architect roles in the software company in MetaGPT) will be repaired. The keyword `repair_` will appear in the log. You can pay attention to it. ## Run -After following the above process, you can start using it officially. + +After following the above process, you can start using it officially. `python3 startup.py "write a snake game"` ### Extension + MetaGPT itself is a multi-agent framework and is not limited to software project generation. You can also combine the integrated open source model to build the corresponding agent for use in your own application scenarios. 
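Before pointing `config/key.yaml` at the deployment and running `startup.py`, it can save debugging time to confirm from Python that the endpoint behind `OPEN_LLM_API_BASE` really answers a chat completion — essentially the curl request examples above rewritten as a script. This is an optional sketch; it assumes the `requests` package is installed and that your endpoint returns the standard OpenAI-style response shape.

```python
# Optional sanity check of an OpenAI-compatible endpoint before wiring it into MetaGPT.
# Values mirror the curl request examples above; adjust them to your own deployment.
import requests

OPEN_LLM_API_BASE = "http://0.0.0.0:8000/v1"  # same value as in config/key.yaml
OPEN_LLM_API_MODEL = "llama2-13b"             # must match the model name served

resp = requests.post(
    f"{OPEN_LLM_API_BASE}/chat/completions",
    json={
        "model": OPEN_LLM_API_MODEL,
        "messages": [{"role": "user", "content": "who are you"}],
        "temperature": 0.7,
        "max_tokens": 256,
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```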
-Start your journey of intelligent agents~ +Start your journey of intelligent agents~ diff --git a/src/guide/tutorials/multi_agent_101.md b/src/enus/guide/tutorials/multi_agent_101.md similarity index 97% rename from src/guide/tutorials/multi_agent_101.md rename to src/enus/guide/tutorials/multi_agent_101.md index 9d95a298..b321fba8 100644 --- a/src/guide/tutorials/multi_agent_101.md +++ b/src/enus/guide/tutorials/multi_agent_101.md @@ -1,19 +1,24 @@ # MultiAgent 101 + We briefly discussed the creation of a single agent in last chapter. While a single agent may suffice for many situations, more complex tasks often demand collaboration and teamwork. This is where multiple agents become necessary. The core advantage of MetaGPT also lies in the easy and flexible development of a team of agents. Under MetaGPT framework, users can enable interactions between agents with a minimal amount of codes. After this tutorial, you will be able to: + 1. Understand how agents interact with each other 2. Develop your first team of agents ## Run the software startup example + ```shell python startup.py --idea "write a cli flappy bird game" ``` ## Develop your first team of agents + Hope you find the software startup example enlightenning. Perhaps now you're inspired to develop a team of agents tailored to your unique needs. In this section, we continue with the simple coding example in [Agent101](agent_101) but add more roles to introduce a very basic collaboration. Together with the coder, let's also hire a tester and a reviewer. This starts to look like a development team, doesn't it? In general, we need three steps to set up the team and make it function: + 1. Define each role capable of intended actions 2. Think about the Standard Operating Procedure (SOP), and ensure each role adhere to it. This is made possible by making each role observe the corresponding output from upstream, and publish its own for the downstream. 3. Initialize all roles, create a team with an environment to put them in, and enable them to interact with each other @@ -21,7 +26,9 @@ Together with the coder, let's also hire a tester and a reviewer. This starts to Complete code is available at the end of this tutorial ### Define Action and Role + Following the same process as [Agent101](agent_101), we can define three `Role`s with their respective `Action`s: + - A `SimpleCoder` with a `SimpleWriteCode` action, taking instruction from the user and writing the main code - A `SimpleTester` with a `SimpleWriteTest` action, taking the main code from `SimpleWriteCode` output and providing a test suite for it - A `SimpleReviewer` with a `SimpleWriteReview` action, reviewing the test cases from `SimpleWriteTest` output and check their coverage and quality @@ -29,9 +36,10 @@ Following the same process as [Agent101](agent_101), we can define three `Role`s By giving the outline above, we actually make our SOP clear. We will talk about how to set up the `Role` according to it shortly. #### Define Action + We list the three `Action`s. 
-```python +````python class SimpleWriteCode(Action): PROMPT_TEMPLATE = """ @@ -52,9 +60,9 @@ class SimpleWriteCode(Action): code_text = parse_code(rsp) return code_text -``` +```` -```python +````python class SimpleWriteTest(Action): PROMPT_TEMPLATE = """ @@ -76,7 +84,8 @@ class SimpleWriteTest(Action): code_text = parse_code(rsp) return code_text -``` +```` + ```python class SimpleWriteReview(Action): @@ -96,8 +105,11 @@ class SimpleWriteReview(Action): return rsp ``` + #### Define Role + In many multi-agent scenarios, defining a `Role` can be as simple as 10 lines of codes. For `SimpleCoder`, we do two things: + 1. Equip the `Role` with the appropriate `Action`s with `_init_actions`, this is identical to setting up a single agent 2. A multi-agent operation: we make the `Role` `_watch` important upstream messages from users or other agents. Recall our SOP, `SimpleCoder` takes user instruction, which is a `Message` caused by `BossRequirement` in MetaGPT. Therefore, we add `self._watch([BossRequirement])`. @@ -117,10 +129,12 @@ class SimpleCoder(Role): ``` --- + Similar to above, for `SimpleTester`, we: + 1. Equip the `SimpleTester` with `SimpleWriteTest` action using `_init_actions` 2. Make the `Role` `_watch` important upstream messages from other agents. Recall our SOP, `SimpleTester` takes main code from `SimpleCoder`, which is a `Message` caused by `SimpleWriteCode`. Therefore, we add `self._watch([SimpleWriteCode])`. ->An extended question: Think about what it means if we use `self._watch([SimpleWriteCode, SimpleWriteReview])` instead, feel free to try this too + > An extended question: Think about what it means if we use `self._watch([SimpleWriteCode, SimpleWriteReview])` instead, feel free to try this too Additionally, we want to show that you can define your own acting logic for the agent. This applies to situation where the `Action` takes more than one input, you want to modify the input, to use particular memories, or to make any other changes to reflect specific logic. Hence, we: @@ -152,8 +166,11 @@ class SimpleTester(Role): return msg ``` + --- + Define `SimpleReviewer` following the same procedure: + ```python class SimpleReviewer(Role): def __init__( @@ -168,9 +185,11 @@ class SimpleReviewer(Role): ``` ### Create a team and add roles -Now that we have defined our three `Role`s, it's time to put them together. We initialize all of them, set up a `Team`, and `hire` them. + +Now that we have defined our three `Role`s, it's time to put them together. We initialize all of them, set up a `Team`, and `hire` them. Run the `Team`, we should see the collaboration between them! + ```python async def main( idea: str = "write a function that calculates the product of a list", @@ -195,11 +214,13 @@ async def main( if __name__ == '__main__': fire.Fire(main) ``` + ## Complete script of this tutorial https://github.com/geekan/MetaGPT/blob/main/examples/build_customized_multi_agents.py Run it with + ```sh python examples/build_customized_multi_agents.py --idea "write a function that calculates the product of a list" ``` @@ -209,6 +230,7 @@ Or try it on Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-BqQ7PezLtv5QTIAvolI1d11_hTMED5q?usp=sharing) ## Mechanism Explained + While users can write a few lines of code to set up a running `Role`, it's beneficial to describe the inner mechanism so that users understands the implication of the setup code and have a whole picture of the framework. 
![img](/image/guide/tutorials/multi_agents_flowchart.png) diff --git a/src/guide/tutorials/use_memories.md b/src/enus/guide/tutorials/use_memories.md similarity index 92% rename from src/guide/tutorials/use_memories.md rename to src/enus/guide/tutorials/use_memories.md index 8788bc67..fe1d0cda 100644 --- a/src/guide/tutorials/use_memories.md +++ b/src/enus/guide/tutorials/use_memories.md @@ -1,15 +1,20 @@ # Use Memories + As discussed in [Concepts](concepts), Memory is one of the core components of an agent. Agent needs memory to have an essential context for making decision or perform actions. It also need memory to learn skills or accumulate experience. In this tutorial, we cover basic memory usage. After this tutorial, you will be able to: + 1. Understand what memories are like in MetaGPT 2. How to add or retrieve memories ## What are memories like + Class `Memory` is the abstraction for an agent's memory in MetaGPT. When initialized, `Role` acquire its `Memory` as `self._rc.memory`, which will store every `Message` it later `_observe` in a list for future retrieval. The initialization and storage are handled by the framework. In short, memories of a `Role` are a list of `Message`s. ## Retrieve memory + When recorded memories are needed, such as serving as context for a LLM call, you can use `self.get_memories`. The function definition is as follows: + ```python def get_memories(self, k=0) -> list[Message]: """A wrapper to return the most recent k memories of this role, return all when k=0""" @@ -34,9 +39,11 @@ async def _act(self) -> Message: ``` ## Add memory -For adding memories, one can use ```self._rc.memory.add(msg)``` where `msg` must be an instance of `Message`. Check the snippet above for an example usage. + +For adding memories, one can use `self._rc.memory.add(msg)` where `msg` must be an instance of `Message`. Check the snippet above for an example usage. It is recommended to add `Message`s of action output to the `Role`'s memory when defining the `_act` logic. `Role` normally needs to remember what it said or did previously in order to take a next step. ## Next step -Memory is a huge topic in agents. To be precise, the memory this tutorial talks about corresponds to the concept of "short-term memory". The retrieval is also based on simple recency. However, there are multiple branches of memories as well as a wide range of memory generation and retrieval techniques. Please consult [Memory](/guide/in_depth_guides/memories) for using memory to really boost your agent's performance. \ No newline at end of file + +Memory is a huge topic in agents. To be precise, the memory this tutorial talks about corresponds to the concept of "short-term memory". The retrieval is also based on simple recency. However, there are multiple branches of memories as well as a wide range of memory generation and retrieval techniques. Please consult [Memory](/guide/in_depth_guides/memories) for using memory to really boost your agent's performance. diff --git a/src/guide/use_cases/agent/creative.md b/src/enus/guide/use_cases/agent/creative.md similarity index 84% rename from src/guide/use_cases/agent/creative.md rename to src/enus/guide/use_cases/agent/creative.md index 521058cc..63549a15 100644 --- a/src/guide/use_cases/agent/creative.md +++ b/src/enus/guide/use_cases/agent/creative.md @@ -3,10 +3,17 @@ In this use case, we explore constructing any `Role` for fun. 
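Whichever persona you pick, the general recipe is the same one used by the other agent use cases: choose a `name`, `profile`, `goal`, and `constraints` that capture the character, then equip whatever `Action`s the persona should perform. A minimal, hypothetical sketch:

```python
from metagpt.roles import Role


class AlbertEinstein(Role):
    """A persona Role built from the standard constructor parameters."""

    def __init__(
        self,
        name: str = "Albert Einstein",
        profile: str = "Theoretical Physicist",
        goal: str = "Explain ideas through vivid thought experiments",
        constraints: str = "Stay in character and keep every reply concise",
    ):
        super().__init__(name, profile, goal, constraints)
        # self._init_actions([...])  # equip persona-specific Actions here
```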
## Albert Einstein + ## Warren Buffett + ## Superman + ## Wonder Woman + ## 李白 + ## 曹操 + ## 原神 - 温迪 -## 原神 - 神里绫华 \ No newline at end of file + +## 原神 - 神里绫华 diff --git a/src/guide/use_cases/agent/ml_engineer.md b/src/enus/guide/use_cases/agent/ml_engineer.md similarity index 99% rename from src/guide/use_cases/agent/ml_engineer.md rename to src/enus/guide/use_cases/agent/ml_engineer.md index eb019348..a4693eec 100644 --- a/src/guide/use_cases/agent/ml_engineer.md +++ b/src/enus/guide/use_cases/agent/ml_engineer.md @@ -1,2 +1,3 @@ # Machine Learning Engineer: Analyze, Visualize, and Model Datasets + Will be updated soon, stay tuned. diff --git a/src/guide/use_cases/agent/photographer.md b/src/enus/guide/use_cases/agent/photographer.md similarity index 98% rename from src/guide/use_cases/agent/photographer.md rename to src/enus/guide/use_cases/agent/photographer.md index ede14c20..98fba5ea 100644 --- a/src/guide/use_cases/agent/photographer.md +++ b/src/enus/guide/use_cases/agent/photographer.md @@ -1,2 +1,3 @@ # Photographer: Generate Images with Prompts + Will be updated soon, stay tuned. diff --git a/src/guide/use_cases/agent/receipt_assistant.md b/src/enus/guide/use_cases/agent/receipt_assistant.md similarity index 98% rename from src/guide/use_cases/agent/receipt_assistant.md rename to src/enus/guide/use_cases/agent/receipt_assistant.md index 0cd4cf2f..5d3fd661 100644 --- a/src/guide/use_cases/agent/receipt_assistant.md +++ b/src/enus/guide/use_cases/agent/receipt_assistant.md @@ -15,8 +15,6 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form [GitHub Source Code](https://github.com/geekan/MetaGPT/blob/main/metagpt/roles/invoice_ocr_assistant.py) - - ## Role Definition 1. Define the role class, inherit from the `Role` base class, and override the `__init__` initialization method. The `__init__` method must include the `name`, `profile`, `goal`, and `constraints` parameters. The first line of code uses `super().__init__(name, profile, goal, constraints)` to call the constructor of the parent class, implementing the initialization of the `Role`. Use `self._init_actions([InvoiceOCR])` to add initial actions and states. Here, the initial action is to add an action for OCR recognition of invoices. Custom parameters can also be added; here, the `language` parameter is added to support custom languages. Variables such as `filename`, `origin_query`, and `orc_data` are used to temporarily store the invoice file name, the original query, and the OCR recognition result, respectively. Use `self._set_react_mode(react_mode="by_order")` to set the execution order of actions to be sequential in the `_init_actions`. @@ -26,7 +24,7 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form """Invoice OCR assistant, support OCR text recognition of invoice PDF, png, jpg, and zip files, generate a table for the payee, city, total amount, and invoicing date of the invoice, and ask questions for a single file based on the OCR recognition results of the invoice. - + Args: name: The name of the role. profile: The role profile description. @@ -34,7 +32,7 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form constraints: Constraints or requirements for the role. language: The language in which the invoice table will be generated. 
""" - + def __init__( self, name: str = "Stitch", @@ -57,7 +55,7 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form ```python async def _act(self) -> Message: """Perform an action as determined by the role. - + Returns: A message containing the result of the action. """ @@ -69,7 +67,7 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form self.filename = file_path.name if not file_path: raise Exception("Invoice file not uploaded") - + resp = await todo.run(file_path) if len(resp) == 1: # Single file support for questioning based on OCR recognition results @@ -77,20 +75,18 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form self.orc_data = resp[0] else: self._init_actions([GenerateTable]) - + self._rc.todo = None content = INVOICE_OCR_SUCCESS elif isinstance(todo, GenerateTable): ocr_results = msg.instruct_content resp = await todo.run(ocr_results, self.filename) - + # Convert list to Markdown format string df = pd.DataFrame(resp) markdown_table ``` - - ## Action Definition 1. Define an `action`, where each `action` corresponds to a `class` object. Inherit from the `Action` base class and override the `__init__` initialization method. The `__init__` method includes the `name` parameter. The first line of code uses `super().__init__(name, *args, **kwargs)` to call the parent class constructor, implementing the initialization of the `action`. Here, use `args` and `kwargs` to pass other parameters to the parent class constructor, such as `context` and `llm`. @@ -98,13 +94,13 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form ```python class InvoiceOCR(Action): """Action class for performing OCR on invoice files, including zip, PDF, png, and jpg files. - + Args: name: The name of the action. Defaults to an empty string. language: The language for OCR output. Defaults to "ch" (Chinese). - + """ - + def __init__(self, name: str = "", *args, **kwargs): super().__init__(name, *args, **kwargs) ``` @@ -114,15 +110,15 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form ```python async def run(self, file_path: Path, *args, **kwargs) -> list: """Execute the action to identify invoice files through OCR. - + Args: file_path: The path to the input file. - + Returns: A list of OCR results. """ file_ext = await self._check_file_type(file_path) - + if file_ext == ".zip": # OCR recognizes zip batch files unzip_path = await self._unzip(file_path) @@ -135,38 +131,38 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form ocr_result = await self._ocr(str(invoice_file_path)) ocr_list.append(ocr_result) return ocr_list - + else: # OCR identifies single file ocr_result = await self._ocr(file_path) return [ocr_result] - + @staticmethod async def _check_file_type(file_path: Path) -> str: """Check the file type of the given filename. - + Args: file_path: The path of the file. - + Returns: The file type based on FileExtensionType enum. - + Raises: Exception: If the file format is not zip, pdf, png, or jpg. """ ext = file_path.suffix if ext not in [".zip", ".pdf", ".png", ".jpg"]: raise Exception("The invoice format is not zip, pdf, png, or jpg") - + return ext - + @staticmethod async def _unzip(file_path: Path) -> Path: """Unzip a file and return the path to the unzipped directory. - + Args: file_path: The path to the zip file. - + Returns: The path to the unzipped directory. 
""" @@ -178,10 +174,10 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form if relative_name.suffix: full_filename = file_directory / relative_name await File.write(full_filename.parent, relative_name.name, zip_ref.read(zip_info.filename)) - + logger.info(f"unzip_path: {file_directory}") return file_directory - + @staticmethod async def _ocr(invoice_file_path: Path): ocr = PaddleOCR(use_angle_cls=True, lang="ch", page_num=1) @@ -194,32 +190,32 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form ```python class GenerateTable(Action): """Action class for generating tables from OCR results. - + Args: name: The name of the action. Defaults to an empty string. language: The language used for the generated table. Defaults to "ch" (Chinese). - + """ - + def __init__(self, name: str = "", language: str = "ch", *args, **kwargs): super().__init__(name, *args, **kwargs) self.language = language - + async def run(self, ocr_results: list, filename: str, *args, **kwargs) -> dict[str, str]: """Processes OCR results, extracts invoice information, generates a table, and saves it as an Excel file. - + Args: ocr_results: A list of OCR results obtained from invoice processing. filename: The name of the output Excel file. - + Returns: A dictionary containing the invoice information. - + """ table_data = [] pathname = INVOICE_OCR_TABLE_PATH pathname.mkdir(parents=True, exist_ok=True) - + for ocr_result in ocr_results: # Extract invoice OCR main information prompt = EXTRACT_OCR_MAIN_INFO_PROMPT.format(ocr_result=ocr_result, language=self.language) @@ -227,35 +223,35 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form invoice_data = OutputParser.extract_struct(ocr_info, dict) if invoice_data: table_data.append(invoice_data) - + # Generate Excel file filename = f"{filename.split('.')[0]}.xlsx" full_filename = f"{pathname}/{filename}" df = pd.DataFrame(table_data) df.to_excel(full_filename, index=False) return table_data - - + + class ReplyQuestion(Action): """Action class for generating replies to questions based on OCR results. - + Args: name: The name of the action. Defaults to an empty string. language: The language used for generating the reply. Defaults to "ch" (Chinese). - + """ - + def __init__(self, name: str = "", language: str = "ch", *args, **kwargs): super().__init__(name, *args, **kwargs) self.language = language - + async def run(self, query: str, ocr_result: list, *args, **kwargs) -> str: """Reply to questions based on ocr results. - + Args: query: The question for which a reply is generated. ocr_result: A list of OCR results. - + Returns: A reply result of string type. """ @@ -264,8 +260,6 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form return resp ``` - - ## Role Execution Results ### Input Examples @@ -283,7 +277,7 @@ Supports OCR recognition of invoice files in `pdf`, `png`, `jpg`, and `zip` form await role.run(Message(content="Invoicing date", instruct_content={"file_path": path})) ``` -#### Example 2 +#### Example 2 - Invoice Image @@ -306,8 +300,6 @@ The generated invoice information is in the xlsx file in the `/data/invoice_ocr` ![image](/image/guide/use_cases/invoice_ocr_assistant/output.png) - - ## Note It is recommended to use a large text limit `llm` model `api`, such as `gpt-3.5-turbo-16k`, for this role. This helps to avoid limitations when interacting with the `llm` large model due to excessively large OCR recognition results. 
diff --git a/src/guide/use_cases/agent/researcher.md b/src/enus/guide/use_cases/agent/researcher.md similarity index 99% rename from src/guide/use_cases/agent/researcher.md rename to src/enus/guide/use_cases/agent/researcher.md index f40cdb9c..916d96c0 100644 --- a/src/guide/use_cases/agent/researcher.md +++ b/src/enus/guide/use_cases/agent/researcher.md @@ -16,7 +16,6 @@ Through this document, you can learn how to use the MetaGPT researcher role to s - [Researcher Actions](https://github.com/geekan/MetaGPT/blob/main/metagpt/actions/research.py) - [Researcher Example](https://github.com/geekan/MetaGPT/blob/main/examples/research.py) - ## Design Overview ### Design Philosophy @@ -39,10 +38,8 @@ The following is a flowchart illustrating the Researcher role architecture: ![Researcher Role Architecture Diagram](/image/guide/use_cases/researcher/researcher-role-architecture-diagram-en.png) - Based on this process, we can abstract three Actions and define a Role as follows: - | Name | Class | Description | | --------------------- | ------ | -------------------------------------------------------------- | | CollectLinks | Action | Collect links from a search engine | @@ -56,7 +53,6 @@ Based on this process, we can abstract three Actions and define a Role as follow The CollectLinks Action is used to search the internet for relevant questions and retrieve a list of URL addresses. Since user-input questions may not be directly suitable for search engine queries, the CollectLinks Action first breaks down the user's question into multiple sub-questions suitable for search. It then uses a search engine for this purpose. The implementation utilizes the SearchEngine in the tools module, supporting searches through serpapi/google/serper/ddg. The implementation details can be found in [metagpt/actions/research.py](https://github.com/geekan/MetaGPT/blob/main/metagpt/actions/research.py), and the following provides a basic explanation of the CollectLinks.run method: - ```python class CollectLinks(Action): @@ -87,7 +83,7 @@ class CollectLinks(Action): except Exception as e: logger.exception(f"fail to get keywords related to the research topic \"{topic}\" for {e}") keywords = [topic] - + # Search the sub-problems using the search engine results = await asyncio.gather(*(self.search_engine.run(i, as_string=False) for i in keywords)) @@ -122,7 +118,6 @@ class CollectLinks(Action): The WebBrowseAndSummarize Action is responsible for browsing web pages and summarizing their content. MetaGPT provides the `WebBrowserEngine` in the `tools` module, which supports web browsing through playwright/selenium. The WebBrowseAndSummarize Action uses the `WebBrowserEngine` for web browsing. The implementation details can be found in [metagpt/actions/research.py](https://github.com/geekan/MetaGPT/blob/main/metagpt/actions/research.py), and the following provides a basic explanation of the `WebBrowseAndSummarize.run` method: - ```python class WebBrowseAndSummarize(Action): async def run( @@ -180,7 +175,6 @@ class WebBrowseAndSummarize(Action): The ConductResearch Action is responsible for writing a research report. It is implemented by using the summarized data from the WebBrowseAndSummarize Action as context and then generating the research report. 
The implementation details can be found in [metagpt/actions/research.py](https://github.com/geekan/MetaGPT/blob/main/metagpt/actions/research.py), and the following provides a basic explanation of the `ConductResearch.run` method: - ```python class ConductResearch(Action): async def run( @@ -222,7 +216,7 @@ class Researcher(Role): **kwargs, ): super().__init__(name, profile, goal, constraints, **kwargs) - + # Add the `CollectLinks`, `WebBrowseAndSummarize`, and `ConductResearch` actions self._init_actions([CollectLinks(name), WebBrowseAndSummarize(name), ConductResearch(name)]) @@ -333,5 +327,6 @@ The `metagpt.roles.researcher` module provides a command-line interface for exec ```bash python -m metagpt.roles.researcher "dataiku vs. datarobot" ``` + Log output: [log.txt](https://github.com/geekan/MetaGPT/files/12302886/log.txt) Report output: [dataiku vs. datarobot.md](https://github.com/geekan/MetaGPT/files/12302882/dataiku.vs.datarobot.md) diff --git a/src/guide/use_cases/agent/tutorial_assistant.md b/src/enus/guide/use_cases/agent/tutorial_assistant.md similarity index 98% rename from src/guide/use_cases/agent/tutorial_assistant.md rename to src/enus/guide/use_cases/agent/tutorial_assistant.md index 1dfd6e18..b9041c9f 100644 --- a/src/guide/use_cases/agent/tutorial_assistant.md +++ b/src/enus/guide/use_cases/agent/tutorial_assistant.md @@ -14,8 +14,6 @@ The design approach involves using the `LLM` (Large Language Model) to initially [GitHub Source Code](https://github.com/geekan/MetaGPT/blob/main/metagpt/roles/tutorial_assistant.py) - - ### Role Definition 1. Define a role class, inherit from the `Role` base class, and override the `__init__` initialization method. The `__init__` method must include `name`, `profile`, `goal`, `constraints` parameters. The first line of code uses `super().__init__(name, profile, goal, constraints)` to call the constructor of the parent class, initializing the `Role`. Use `self._init_actions([WriteDirectory(language=language)])` to add initial `action` and `states`, here adding the action to write the directory. Custom parameters can also be added; here, the `language` parameter is added to support custom languages. @@ -23,7 +21,7 @@ The design approach involves using the `LLM` (Large Language Model) to initially ```python class TutorialAssistant(Role): """Tutorial assistant, input one sentence to generate a tutorial document in markup format. - + Args: name: The name of the role. profile: The role profile description. @@ -31,7 +29,7 @@ The design approach involves using the `LLM` (Large Language Model) to initially constraints: Constraints or requirements for the role. language: The language in which the tutorial documents will be generated. """ - + def __init__( self, name: str = "Stitch", @@ -53,7 +51,7 @@ The design approach involves using the `LLM` (Large Language Model) to initially ```python async def _react(self) -> Message: """Execute the assistant's think and actions. - + Returns: A message containing the final result of the assistant's actions. """ @@ -75,7 +73,7 @@ The design approach involves using the `LLM` (Large Language Model) to initially if self._rc.todo is None: self._set_state(0) return - + if self._rc.state + 1 < len(self._states): self._set_state(self._rc.state + 1) else: @@ -87,7 +85,7 @@ The design approach involves using the `LLM` (Large Language Model) to initially ```python async def _act(self) -> Message: """Perform an action as determined by the role. - + Returns: A message containing the result of the action. 
""" @@ -104,14 +102,14 @@ The design approach involves using the `LLM` (Large Language Model) to initially self.total_content += "\n\n\n" self.total_content += resp return Message(content=resp, role=self.profile) - + async def _handle_directory(self, titles: Dict) -> Message: """Handle the directories for the tutorial document. - + Args: titles: A dictionary containing the titles and directory structure, such as {"title": "xxx", "directory": [{"dir 1": ["sub dir 1", "sub dir 2"]}]} - + Returns: A message containing information about the directory. """ @@ -130,8 +128,6 @@ The design approach involves using the `LLM` (Large Language Model) to initially return Message(content=directory) ``` - - ### Action Definition 1. Define an `action`, where each `action` corresponds to a `class` object. Inherit from the `Action` base class and override the `__init__` initialization method. The `__init__` method includes the `name` parameter. The first line of code uses `super().__init__(name, *args, **kwargs)` to call the constructor of the parent class, initializing the `action`. Here, use `args` and `kwargs` to pass other parameters to the parent class constructor, such as `context` and `llm`. @@ -145,22 +141,22 @@ The design approach involves using the `LLM` (Large Language Model) to initially @File : tutorial_assistant.py @Describe : Actions of the tutorial assistant, including writing directories and document content. """ - + from typing import Dict - + from metagpt.actions import Action from metagpt.prompts.tutorial_assistant import DIRECTORY_PROMPT, CONTENT_PROMPT from metagpt.utils.common import OutputParser - - + + class WriteDirectory(Action): """Action class for writing tutorial directories. - + Args: name: The name of the action. language: The language to output, default is "Chinese". """ - + def __init__(self, name: str = "", language: str = "Chinese", *args, **kwargs): super().__init__(name, *args, **kwargs) self.language = language @@ -171,10 +167,10 @@ The design approach involves using the `LLM` (Large Language Model) to initially ```python async def run(self, topic: str, *args, **kwargs) -> Dict: """Execute the action to generate a tutorial directory according to the topic. - + Args: topic: The tutorial topic. - + Returns: The tutorial directory information, including {"title": "xxx", "directory": [{"dir 1": ["sub dir 1", "sub dir 2"]}]}. """ @@ -188,24 +184,24 @@ The design approach involves using the `LLM` (Large Language Model) to initially ```python class WriteContent(Action): """Action class for writing tutorial content. - + Args: name: The name of the action. directory: The content to write. language: The language to output, default is "Chinese". """ - + def __init__(self, name: str = "", directory: str = "", language: str = "Chinese", *args, **kwargs): super().__init__(name, *args, **kwargs) self.language = language self.directory = directory - + async def run(self, topic: str, *args, **kwargs) -> str: """Execute the action to write document content according to the directory and topic. - + Args: topic: The tutorial topic. - + Returns: The written tutorial content. 
""" @@ -213,8 +209,6 @@ The design approach involves using the `LLM` (Large Language Model) to initially return await self._aask(prompt=prompt) ``` - - ### Role Execution Results #### Input Examples @@ -235,9 +229,6 @@ The generated tutorial documents are located in the project's `/data/tutorial_do ![image](/image/guide/use_cases/tutorial_assistant/output_en_2.png) - - ### Note This role currently does not support internet search capabilities. Content generation relies on data trained by the `LLM` large model. - diff --git a/src/guide/use_cases/multi_agent/creative.md b/src/enus/guide/use_cases/multi_agent/creative.md similarity index 100% rename from src/guide/use_cases/multi_agent/creative.md rename to src/enus/guide/use_cases/multi_agent/creative.md diff --git a/src/guide/use_cases/multi_agent/debate.md b/src/enus/guide/use_cases/multi_agent/debate.md similarity index 98% rename from src/guide/use_cases/multi_agent/debate.md rename to src/enus/guide/use_cases/multi_agent/debate.md index fa2162c6..124cfa99 100644 --- a/src/guide/use_cases/multi_agent/debate.md +++ b/src/enus/guide/use_cases/multi_agent/debate.md @@ -5,6 +5,7 @@ In this use case, we will illustrate the development process of a playful exampl Imagine, just for a moment, if we were to simulate agents representing Biden and Trump working together. It's a fun experiment, isn't it? Given their known disagreements, such a combination could lead to some lively exchanges. This serves as an ideal example to showcase how to design multiple agents and facilitate interactions between them. We will deb our experiment the "Biden-Trump Debate". In general, we need two steps to set up a debate between them: + 1. Define a role Debator capable of a speaking action, which we suggest taking reference from Agent101 2. Take care of the communication between Debator, that is, have Biden listen to Trump and Trump to Biden 3. Initialize two Debator instances, Biden and Trump, create a team with an environment to put them in, and enable them to interact with each other @@ -12,7 +13,9 @@ In general, we need two steps to set up a debate between them: Complete code is available at the end of this section ### Define Action + First, we need to define our `Action`. It's a debate setting, so let's name it as `SpeakAloud` + ```python class SpeakAloud(Action): """Action: Speak out aloud in a debate (quarrel)""" @@ -39,10 +42,13 @@ class SpeakAloud(Action): return rsp ``` + ### Define Role -We will define a common `Role` called `Debator`. + +We will define a common `Role` called `Debator`. Here `_init_actions` make our `Role` possess the `SpeakAloud` action we just define. We also `_watch` both `SpeakAloud` and `BossRequirement`, because we want each debator to pay attention to messages of `SpeakAloud` from his opponent, as well as `BossRequirement` (human instruction) from users. + ```python class Debator(Role): def __init__( @@ -58,7 +64,9 @@ class Debator(Role): self.name = name self.opponent_name = opponent_name ``` + Next, we make each debator listen to his opponent's argument. This is done by overwriting the `_observe` function. This is an important point because there will be "SpeakAloud messages" (`Message` triggered by `SpeakAloud`) from both Trump and Biden in the environment. We don't want Trump to process his own "SpeakAloud message" from the last round, but instead those from Biden, and vice versa. (We will take care of this process with a general message routing mechanism in updates shortly to come. 
You won't need this step after the updates.) + ```python async def _observe(self) -> int: await super()._observe() # accept messages sent (from opponent) to self, disregard own messages from the last round self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name] return len(self._rc.news) ``` + Finally, we enable each debator to send counter arguments back to his opponent. Here we construct a context from message history, make the `Debator` run the `SpeakAloud` action he possesses, and craft a new `Message` with the counter-argument content. Notice we define that each `Debator` will send the `Message` to his opponent. + ```python async def _act(self) -> Message: logger.info(f"{self._setting}: ready to {self._rc.todo}") @@ -133,10 +143,13 @@ class Debator(Role): return msg ``` + ### Create a team and add roles + Now that we have defined our `Debator`s, let's put them together to see what will come up. We set up a `Team` and "hire" Biden and Trump. In this example, we will send our instruction (as a `BossRequirement` under the hood) to Biden to have him start first. If you want Trump to speak first, set `send_to` to "Trump". Run the `Team`, and we should see a friendly conversation between them! + ```python async def debate(idea: str, investment: float = 3.0, n_round: int = 5): """Run a team of presidents and watch them quarrel. :) """ @@ -150,7 +163,7 @@ async def debate(idea: str, investment: float = 3.0, n_round: int = 5): def main(idea: str, investment: float = 3.0, n_round: int = 10): """ - :param idea: Debate topic, such as "Topic: The U.S. should commit more in climate change fighting" + :param idea: Debate topic, such as "Topic: The U.S. should commit more in climate change fighting" or "Trump: Climate change is a hoax" :param investment: contribute a certain dollar amount to watch the debate :param n_round: maximum rounds of the debate @@ -163,14 +176,17 @@ def main(idea: str, investment: float = 3.0, n_round: int = 10): if __name__ == '__main__': fire.Fire(main) ``` + ## Complete script of this section https://github.com/geekan/MetaGPT/blob/main/examples/debate.py Run it with + ```sh python examples/debate.py --idea "Talk about how the U.S. should respond to climate change" ``` + A sample run: ![img](/image/guide/use_cases/debate_log.png) diff --git a/src/guide/use_cases/multi_agent/minecraft.md b/src/enus/guide/use_cases/multi_agent/minecraft.md similarity index 98% rename from src/guide/use_cases/multi_agent/minecraft.md rename to src/enus/guide/use_cases/multi_agent/minecraft.md index 5a038961..1c903992 100644 --- a/src/guide/use_cases/multi_agent/minecraft.md +++ b/src/enus/guide/use_cases/multi_agent/minecraft.md @@ -1,2 +1,3 @@ # Minecraft: Agents Collectively Explore the World + Will be updated soon, stay tuned. diff --git a/src/guide/use_cases/multi_agent/software_company.md b/src/enus/guide/use_cases/multi_agent/software_company.md similarity index 65% rename from src/guide/use_cases/multi_agent/software_company.md rename to src/enus/guide/use_cases/multi_agent/software_company.md index 2fe672a2..7d10d0ae 100644 --- a/src/guide/use_cases/multi_agent/software_company.md +++ b/src/enus/guide/use_cases/multi_agent/software_company.md @@ -1,3 +1,3 @@ # Software Company: Write Software With One Line of Requirement -Will be updated soon, stay tuned. \ No newline at end of file +Will be updated soon, stay tuned.
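Until this page is filled in, the rough shape of the one-line-requirement flow can be sketched as follows (an assumption-heavy sketch: the `Team` calls `hire`, `invest`, `start_project`, and `run` follow MetaGPT's shipped examples and may differ in your installed version, so check the quickstart page for the current API):

```python
import asyncio

# Assumed imports: these mirror MetaGPT's example scripts and may vary by version.
from metagpt.roles import Architect, Engineer, ProductManager, ProjectManager
from metagpt.team import Team


async def startup(idea: str):
    company = Team()
    # Hire the default software-company roles, fund the run, and hand over the one-line requirement
    company.hire([ProductManager(), Architect(), ProjectManager(), Engineer()])
    company.invest(investment=3.0)
    company.start_project(idea)
    await company.run(n_round=5)


if __name__ == "__main__":
    asyncio.run(startup(idea="write a cli blackjack game"))
```

The repository also ships a command-line entry point for the same flow; its name has changed across releases, so refer to the installation and quickstart pages for the current invocation.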
diff --git a/src/enus/guide/use_cases/multi_agent/software_gallery.md b/src/enus/guide/use_cases/multi_agent/software_gallery.md new file mode 100644 index 00000000..b0217654 --- /dev/null +++ b/src/enus/guide/use_cases/multi_agent/software_gallery.md @@ -0,0 +1 @@ +# Gallery diff --git a/src/guide/use_cases/multi_agent/virtual_town.md b/src/enus/guide/use_cases/multi_agent/virtual_town.md similarity index 98% rename from src/guide/use_cases/multi_agent/virtual_town.md rename to src/enus/guide/use_cases/multi_agent/virtual_town.md index c8674361..8a4c4d0c 100644 --- a/src/guide/use_cases/multi_agent/virtual_town.md +++ b/src/enus/guide/use_cases/multi_agent/virtual_town.md @@ -1,2 +1,3 @@ # Virtual Town: Lively Daily Life of an Agent Community + Will be updated soon, stay tuned. diff --git a/src/guide/use_cases/multi_agent/werewolf_game.md b/src/enus/guide/use_cases/multi_agent/werewolf_game.md similarity index 98% rename from src/guide/use_cases/multi_agent/werewolf_game.md rename to src/enus/guide/use_cases/multi_agent/werewolf_game.md index 56c27aba..fa44267a 100644 --- a/src/guide/use_cases/multi_agent/werewolf_game.md +++ b/src/enus/guide/use_cases/multi_agent/werewolf_game.md @@ -1,2 +1,3 @@ # Werewolf: Agents Playing Strategy Games + Will be updated soon, stay tuned. diff --git a/src/index.md b/src/enus/index.md similarity index 94% rename from src/index.md rename to src/enus/index.md index 0391f744..0eb4749e 100644 --- a/src/index.md +++ b/src/enus/index.md @@ -13,7 +13,7 @@ hero: actions: - theme: brand text: Get Started - link: /guide/get_started/introduction + link: /enus/guide/get_started/introduction - theme: alt text: View on Github link: https://github.com/geekan/MetaGPT diff --git a/src/guide/in_depth_guides/memory.md b/src/guide/in_depth_guides/memory.md deleted file mode 100644 index 107fe785..00000000 --- a/src/guide/in_depth_guides/memory.md +++ /dev/null @@ -1 +0,0 @@ -# Memory \ No newline at end of file diff --git a/src/guide/in_depth_guides/use_own_llm.md b/src/guide/in_depth_guides/use_own_llm.md deleted file mode 100644 index d481ce73..00000000 --- a/src/guide/in_depth_guides/use_own_llm.md +++ /dev/null @@ -1 +0,0 @@ -# Use Your Own LLM \ No newline at end of file diff --git a/src/guide/use_cases/multi_agent/software_gallery.md b/src/guide/use_cases/multi_agent/software_gallery.md deleted file mode 100644 index 07a30c26..00000000 --- a/src/guide/use_cases/multi_agent/software_gallery.md +++ /dev/null @@ -1 +0,0 @@ -# Gallery \ No newline at end of file