diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 94d349f12..89f6285d1 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -24,9 +24,9 @@ jobs: - name: Install Minimal Dependencies run: | pip install -q -e . - - name: Run import tests + - name: Run minimal import tests run: | - python -c "import agentscope; print(agentscope.__version__)" + python tests/minimal.py - name: Install Full Dependencies run: | pip install -q -e .[full] diff --git a/README.md b/README.md index f648146bd..44e8abdef 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,6 @@ Start building LLM-empowered multi-agent applications in an easier way. |----------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------| | | | - ---- ## News @@ -187,7 +186,6 @@ the following libraries. - new[Conversation with CodeAct Agent](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_codeact_agent/) - new[Conversation with Router Agent](https://github.com/modelscope/agentscope/blob/main/examples/conversation_with_router_agent/) - - Game - [Gomoku](https://github.com/modelscope/agentscope/blob/main/examples/game_gomoku) - [Werewolf](https://github.com/modelscope/agentscope/blob/main/examples/game_werewolf) @@ -236,7 +234,6 @@ optional dependencies. 
Full list of optional dependencies refers to Taking distribution mode as an example, you can install its dependencies as follows: - #### On Windows ```bash @@ -247,6 +244,7 @@ pip install agentscope[distribute] ``` #### On Mac & Linux + ```bash # From source pip install -e .\[distribute\] @@ -254,7 +252,6 @@ pip install -e .\[distribute\] pip install agentscope\[distribute\] ``` - ## Quick Start ### Configuration @@ -391,35 +388,70 @@ pre-commit install Please refer to our [Contribution Guide](https://modelscope.github.io/agentscope/en/tutorial/302-contribute.html) for more details. -## References - -If you find our work helpful for your research or application, please -cite [our paper](https://arxiv.org/abs/2402.14034): - -``` -@article{agentscope, - author = {Dawei Gao and - Zitao Li and - Xuchen Pan and - Weirui Kuang and - Zhijian Ma and - Bingchen Qian and - Fei Wei and - Wenhao Zhang and - Yuexiang Xie and - Daoyuan Chen and - Liuyi Yao and - Hongyi Peng and - Ze Yu Zhang and - Lin Zhu and - Chen Cheng and - Hongzhu Shi and - Yaliang Li and - Bolin Ding and - Jingren Zhou}, - title = {AgentScope: A Flexible yet Robust Multi-Agent Platform}, - journal = {CoRR}, - volume = {abs/2402.14034}, - year = {2024}, -} -``` +## Publications + +If you find our work helpful for your research or application, please cite our papers. + +1. [AgentScope: A Flexible yet Robust Multi-Agent Platform](https://arxiv.org/abs/2402.14034) + + ``` + @article{agentscope, + author = {Dawei Gao and + Zitao Li and + Xuchen Pan and + Weirui Kuang and + Zhijian Ma and + Bingchen Qian and + Fei Wei and + Wenhao Zhang and + Yuexiang Xie and + Daoyuan Chen and + Liuyi Yao and + Hongyi Peng and + Ze Yu Zhang and + Lin Zhu and + Chen Cheng and + Hongzhu Shi and + Yaliang Li and + Bolin Ding and + Jingren Zhou}, + title = {AgentScope: A Flexible yet Robust Multi-Agent Platform}, + journal = {CoRR}, + volume = {abs/2402.14034}, + year = {2024}, + } + ``` + +2. 
[On the Design and Analysis of LLM-Based Algorithms](https://arxiv.org/abs/2407.14788) + + ``` + @article{llm_based_algorithms, + author = {Yanxi Chen and + Yaliang Li and + Bolin Ding and + Jingren Zhou}, + title = {On the Design and Analysis of LLM-Based Algorithms}, + journal = {CoRR}, + volume = {abs/2407.14788}, + year = {2024}, + } + ``` + +3. [Very Large-Scale Multi-Agent Simulation in AgentScope](https://arxiv.org/abs/2407.17789) + + ``` + @article{agentscope_simulation, + author = {Xuchen Pan and + Dawei Gao and + Yuexiang Xie and + Zhewei Wei and + Yaliang Li and + Bolin Ding and + Ji{-}Rong Wen and + Jingren Zhou}, + title = {Very Large-Scale Multi-Agent Simulation in AgentScope}, + journal = {CoRR}, + volume = {abs/2407.17789}, + year = {2024}, + } + ``` diff --git a/README_ZH.md b/README_ZH.md index 8c3be6469..d65fca5e1 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -35,8 +35,6 @@ |---------|----------| | | | - - ---- ## 新闻 @@ -56,7 +54,6 @@ agentscope-logo - - new**[2024-07-15]** AgentScope 中添加了 Mixture of Agents 算法。使用样例请参考 [MoA 示例](https://github.com/modelscope/agentscope/blob/main/examples/conversation_mixture_of_agents)。 - **[2024-06-14]** 新的提示调优(Prompt tuning)模块已经上线 AgentScope,用以帮助开发者生成和优化智能体的 system prompt。更多的细节和使用样例请参考 AgentScope [教程](https://modelscope.github.io/agentscope/en/tutorial/209-prompt_opt.html)! 
@@ -232,6 +229,7 @@ pip install agentscope[distribute] ``` #### On Mac & Linux + ```bash # From source pip install -e .\[distribute\] @@ -362,34 +360,70 @@ pre-commit install 请参阅我们的[贡献指南](https://modelscope.github.io/agentscope/zh_CN/tutorial/302-contribute.html)了解更多细节。 -## 引用 - -如果您觉得我们的工作对您的研究或应用有帮助,请引用[我们的论文](https://arxiv.org/abs/2402.14034)。 - -``` -@article{agentscope, - author = {Dawei Gao and - Zitao Li and - Xuchen Pan and - Weirui Kuang and - Zhijian Ma and - Bingchen Qian and - Fei Wei and - Wenhao Zhang and - Yuexiang Xie and - Daoyuan Chen and - Liuyi Yao and - Hongyi Peng and - Zeyu Zhang and - Lin Zhu and - Chen Cheng and - Hongzhu Shi and - Yaliang Li and - Bolin Ding and - Jingren Zhou}, - title = {AgentScope: A Flexible yet Robust Multi-Agent Platform}, - journal = {CoRR}, - volume = {abs/2402.14034}, - year = {2024}, -} -``` +## 发表 +
+如果您觉得我们的工作对您的研究或应用有帮助,请引用如下论文 + +1. [AgentScope: A Flexible yet Robust Multi-Agent Platform](https://arxiv.org/abs/2402.14034) + + ``` + @article{agentscope, + author = {Dawei Gao and + Zitao Li and + Xuchen Pan and + Weirui Kuang and + Zhijian Ma and + Bingchen Qian and + Fei Wei and + Wenhao Zhang and + Yuexiang Xie and + Daoyuan Chen and + Liuyi Yao and + Hongyi Peng and + Ze Yu Zhang and + Lin Zhu and + Chen Cheng and + Hongzhu Shi and + Yaliang Li and + Bolin Ding and + Jingren Zhou}, + title = {AgentScope: A Flexible yet Robust Multi-Agent Platform}, + journal = {CoRR}, + volume = {abs/2402.14034}, + year = {2024}, + } + ``` + +2. [On the Design and Analysis of LLM-Based Algorithms](https://arxiv.org/abs/2407.14788) + + ``` + @article{llm_based_algorithms, + author = {Yanxi Chen and + Yaliang Li and + Bolin Ding and + Jingren Zhou}, + title = {On the Design and Analysis of LLM-Based Algorithms}, + journal = {CoRR}, + volume = {abs/2407.14788}, + year = {2024}, + } + ``` + +3. 
[Very Large-Scale Multi-Agent Simulation in AgentScope](https://arxiv.org/abs/2407.17789) + + ``` + @article{agentscope_simulation, + author = {Xuchen Pan and + Dawei Gao and + Yuexiang Xie and + Zhewei Wei and + Yaliang Li and + Bolin Ding and + Ji{-}Rong Wen and + Jingren Zhou}, + title = {Very Large-Scale Multi-Agent Simulation in AgentScope}, + journal = {CoRR}, + volume = {abs/2407.17789}, + year = {2024}, + } + ``` diff --git a/docs/sphinx_doc/en/source/tutorial/104-usecase.md b/docs/sphinx_doc/en/source/tutorial/104-usecase.md index 7b0fddae1..60a0ed02e 100644 --- a/docs/sphinx_doc/en/source/tutorial/104-usecase.md +++ b/docs/sphinx_doc/en/source/tutorial/104-usecase.md @@ -291,7 +291,7 @@ With the game logic and agents set up, you're ready to run the Werewolf game. By ```bash cd examples/game_werewolf -python main.py # Assuming the pipeline is implemented in main.py +python werewolf.py # Assuming the pipeline is implemented in werewolf.py ``` It is recommended that you start the game in [AgentScope Studio](https://modelscope.github.io/agentscope/en/tutorial/209-gui.html), where you diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md index 25608c845..f2b56c3f3 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/104-usecase.md @@ -170,7 +170,7 @@ for i in range(1, MAX_GAME_ROUND + 1): # Night phase: werewolves discuss hint = HostMsg(content=Prompts.to_wolves.format(n2s(wolves))) with msghub(wolves, announcement=hint) as hub: - set_parsers(wolves, Prompts.wolves_discuss_parser) + set_parsers(wolves, Prompts.wolves_discuss_parser) for _ in range(MAX_WEREWOLF_DISCUSSION_ROUND): x = sequentialpipeline(wolves) if x.metadata.get("finish_discussion", False): @@ -295,7 +295,7 @@ for i in range(1, MAX_GAME_ROUND + 1): ```bash cd examples/game_werewolf -python main.py # Assuming the pipeline is implemented in main.py +python werewolf.py # 
Assuming the pipeline is implemented in werewolf.py ``` 建议您在在 [AgentScope Studio](https://modelscope.github.io/agentscope/zh_CN/tutorial/209-gui.html) 中启动游戏,在对应的链接中您将看到下面的内容输出。 diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/209-gui.md b/docs/sphinx_doc/zh_CN/source/tutorial/209-gui.md index 7feeccd29..20c945429 100644 --- a/docs/sphinx_doc/zh_CN/source/tutorial/209-gui.md +++ b/docs/sphinx_doc/zh_CN/source/tutorial/209-gui.md @@ -70,7 +70,7 @@ agentscope.init( # ... project="xxx", name="xxx", - studio_url="http://127.0.0.1:5000" # AgentScope Studio 的 URL + studio_url="http://127.0.0.1:5000" # AgentScope Studio 的 URL ) ``` diff --git a/examples/paper_llm_based_algorithm/README.md b/examples/paper_llm_based_algorithm/README.md index 11199f4f1..9b290b22a 100644 --- a/examples/paper_llm_based_algorithm/README.md +++ b/examples/paper_llm_based_algorithm/README.md @@ -1,6 +1,5 @@ # LLM-based algorithms - This folder contains the source code for reproducing the experiment results in our arXiv preprint "On the Design and Analysis of LLM-Based Algorithms". Our work initiates a formal investigation into the design and analysis of LLM-based algorithms, @@ -11,7 +10,6 @@ Within this folder, you can find our implementation for the key abstractions, the LLM-based algorithms in four concrete examples, and the experiments for validating our analysis in the manuscript. - ## Tested Models The following models have been tested, which are also listed in `model_configs.json`: @@ -20,26 +18,25 @@ GPT-3.5 Turbo, Llama3-8B (with ollama), Llama3-70B (with vLLM). - ## Prerequisites - 1. Install AgentScope from source with `pip`, according to the [official instruction](../../README.md). 2. Install matplotlib: `pip install matplotlib`. -3. Change directory: `cd examples/llm_based_algorithm`. +3. Change directory: `cd examples/paper_llm_based_algorithm`. 4. Set up LLM model configs in `model_configs.json`. 
- - ## Usage ### Run experiments To run experiments for a certain task: + ```bash bash ./scripts/exp_{task}.sh ``` + or copy a piece of scripts therein, modify the parameters, and run it in the terminal, for example: + ```bash python3 run_exp_single_variable.py \ --task counting \ @@ -52,6 +49,7 @@ python3 run_exp_single_variable.py \ ``` Parameters: + - `task`: name of the task, {"counting", "sorting", "retrieval", "retrieval_no_needle", "rag"}. - `llm_model`: name of the LLM model, i.e. `config_name` in `model_configs.json`. - `variable_name`: "n" for problem size, or "m" for sub-task size. @@ -60,30 +58,37 @@ Parameters: - `save_results`: if `True`, experiment results will be saved to `./out`; otherwise, results will be plotted and shown at the end of the experiment, and won't be saved. - `ntrials`: number of independent trials for each experiment config, i.e. each entry of `lst_variable`. - ### Plot results To plot experiment results that have been saved: + ```bash bash ./scripts/plot_{task}.sh ``` + or copy a piece of scripts therein and run it in the terminal, for example: + ```bash python3 plot_exp_results.py \ --folder ./out/counting/exp_counting_vary_n_model_ollama_llama3_8b-2024-06-19-11-11-13-kkwrhc ``` + The path to the experiment results need to be replaced with the actual one generated during your own experiment. The generated figures will be saved to the same folder. 
- ## Reference For more details, please refer to our arXiv preprint: + ``` -@article{chen2024llmbasedalgorithms, - title={On the Design and Analysis of LLM-Based Algorithms}, - author={Yanxi Chen and Yaliang Li and Bolin Ding and Jingren Zhou}, - year={2024}, +@article{llm_based_algorithms, + author = {Yanxi Chen and + Yaliang Li and + Bolin Ding and + Jingren Zhou}, + title = {On the Design and Analysis of LLM-Based Algorithms}, + journal = {CoRR}, + volume = {abs/2407.14788}, + year = {2024}, } ``` - diff --git a/src/agentscope/models/ollama_model.py b/src/agentscope/models/ollama_model.py index 55cc5083c..0dec98e34 100644 --- a/src/agentscope/models/ollama_model.py +++ b/src/agentscope/models/ollama_model.py @@ -359,7 +359,7 @@ def format( system_content = "\n".join(system_content_template) system_message = { - "role": "system", + "role": "user", "content": system_content, } diff --git a/src/agentscope/models/post_model.py b/src/agentscope/models/post_model.py index 7cb1fc25c..fbd09bd0e 100644 --- a/src/agentscope/models/post_model.py +++ b/src/agentscope/models/post_model.py @@ -8,7 +8,6 @@ import requests from loguru import logger -from .gemini_model import GeminiChatWrapper from .openai_model import OpenAIChatWrapper from .model import ModelWrapperBase, ModelResponse from ..constants import _DEFAULT_MAX_RETRIES @@ -221,6 +220,8 @@ def format( # Gemini elif model_name and model_name.startswith("gemini"): + from .gemini_model import GeminiChatWrapper + return GeminiChatWrapper.format(*args) # Include DashScope, ZhipuAI, Ollama, the other models supported by diff --git a/tests/format_test.py b/tests/format_test.py index 00582d2ff..de0ccb510 100644 --- a/tests/format_test.py +++ b/tests/format_test.py @@ -283,7 +283,7 @@ def test_ollama_chat(self) -> None: # correct format ground_truth = [ { - "role": "system", + "role": "user", "content": ( "You are a helpful assistant\n" "\n" diff --git a/tests/minimal.py b/tests/minimal.py new file mode 100644 index 
000000000..affad7cb7 --- /dev/null +++ b/tests/minimal.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +""" +Minimal case for agentscope +""" +import agentscope +from agentscope.agents import DialogAgent + +print(agentscope.__version__) +agentscope.init( + project="minimal", + model_configs=[ + { + "model_type": "dashscope_chat", + "config_name": "qwen", + "model_name": "qwen-max", + "api_key": "xxx", + }, + { + "model_type": "openai_chat", + "config_name": "gpt-4", + "model_name": "gpt-4", + "api_key": "xxx", + "organization": "xxx", + "generate_args": {"temperature": 0.5}, + }, + { + "model_type": "post_api_chat", + "config_name": "my_post_api", + "api_url": "https://xxx", + "headers": {}, + "json_args": {}, + }, + ], +) +a = DialogAgent( + name="A", + sys_prompt="You are a helpful assistant.", + model_config_name="my_post_api", +) + +b = DialogAgent( + name="B", + sys_prompt="You are a helpful assistant.", + model_config_name="qwen", +) + +c = DialogAgent( + name="C", + sys_prompt="You are a helpful assistant.", + model_config_name="gpt-4", +)