Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Remove wrong timeout #611

Closed
wants to merge 28 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit. Hold shift + click to select a range.
37f2c82
Update README.md
eltociear Dec 21, 2023
4b0cb00
add ollama support
better629 Dec 21, 2023
40d3cc5
format general_api_requestor params type
better629 Dec 22, 2023
2a0922b
add non-software role/action BaseModel
better629 Dec 22, 2023
19c16bf
fix
better629 Dec 22, 2023
322ac4a
upgrade langchain and simplify faiss load/save
seehi Dec 22, 2023
3b066b3
upgrade langchain and simplify faiss load/save
seehi Dec 22, 2023
7816488
update examples
better629 Dec 22, 2023
a6346c7
update
better629 Dec 22, 2023
bf4ef46
typing of store
seehi Dec 22, 2023
058252c
fix bugs and make it perform better
geekan Dec 22, 2023
b6eb776
update sk AzureChatCompletion from base_url to endpoint
better629 Dec 22, 2023
6cd083a
tuning performance
geekan Dec 22, 2023
6bae6f7
Merge pull request #605 from seehi/feature-upgrade-langchain
geekan Dec 22, 2023
da1e0b8
fix invoice_ocr_assistant
better629 Dec 22, 2023
25eeb6c
Merge pull request #602 from eltociear/patch-1
geekan Dec 22, 2023
f1d4624
Merge pull request #603 from better629/feat_ollama
geekan Dec 22, 2023
a1e1eb8
fix invoice_ocr
better629 Dec 22, 2023
3e74b58
fix invoice_ocr
better629 Dec 22, 2023
67a325b
fix invoice_ocr
better629 Dec 22, 2023
5710630
fix
better629 Dec 22, 2023
19d3311
Merge branch 'main' into feat_basemodel
better629 Dec 22, 2023
41a1743
Merge pull request #606 from better629/feat_basemodel
geekan Dec 22, 2023
3feee73
refine debate example
geekan Dec 22, 2023
336350e
refine code
geekan Dec 22, 2023
6624819
add test case for action node
geekan Dec 23, 2023
c7f47e8
add test
geekan Dec 23, 2023
20c70fa
remove wrong timeout
seehi Dec 23, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
<p align="center">Software Company Multi-Role Schematic (Gradually Implementing)</p>

## News
- Dec 15: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) is released! We introduce **incremental development**, facilitating agents to build up larger projects on top of their previous efforts or exisiting codebase. We also launch a whole collection of important features, including **multilingual support** (experimental), multiple **programming languages support** (experimental), **incremental development** (experimental), CLI support, pip support, enhanced code review, documentation mechanism, and optimized messaging mechanism!
- Dec 15: [v0.5.0](https://github.com/geekan/MetaGPT/releases/tag/v0.5.0) is released! We introduce **incremental development**, facilitating agents to build up larger projects on top of their previous efforts or existing codebase. We also launch a whole collection of important features, including **multilingual support** (experimental), multiple **programming languages support** (experimental), **incremental development** (experimental), CLI support, pip support, enhanced code review, documentation mechanism, and optimized messaging mechanism!

## Install

Expand Down
4 changes: 4 additions & 0 deletions config/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@ RPM: 10
#FIREWORKS_API_BASE: "https://api.fireworks.ai/inference/v1"
#FIREWORKS_API_MODEL: "YOUR_LLM_MODEL" # example, accounts/fireworks/models/llama-v2-13b-chat

#### if use self-host open llm model by ollama
# OLLAMA_API_BASE: http://127.0.0.1:11434/api
# OLLAMA_API_MODEL: llama2

#### for Search

## Supported values: serpapi/google/serper/ddg
Expand Down
21 changes: 7 additions & 14 deletions examples/agent_creator.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,19 +55,16 @@ def parse_code(rsp):


class AgentCreator(Role):
def __init__(
self,
name: str = "Matrix",
profile: str = "AgentCreator",
agent_template: str = MULTI_ACTION_AGENT_CODE_EXAMPLE,
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Matrix"
profile: str = "AgentCreator"
agent_template: str = MULTI_ACTION_AGENT_CODE_EXAMPLE

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._init_actions([CreateAgent])
self.agent_template = agent_template

async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
logger.info(f"{self._setting}: to do {self._rc.todo}({self._rc.todo.name})")
todo = self._rc.todo
msg = self._rc.memory.get()[-1]

Expand All @@ -86,10 +83,6 @@ async def main():

creator = AgentCreator(agent_template=agent_template)

# msg = """Write an agent called SimpleTester that will take any code snippet (str)
# and return a testing code (str) for testing
# the given code snippet. Use pytest as the testing framework."""

msg = """
Write an agent called SimpleTester that will take any code snippet (str) and do the following:
1. write a testing code (str) for testing the given code snippet, save the testing code as a .py file in the current working directory;
Expand Down
39 changes: 16 additions & 23 deletions examples/build_customized_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,8 @@
import fire

from metagpt.actions import Action
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.roles.role import Role, RoleReactMode
from metagpt.schema import Message


Expand All @@ -23,8 +22,7 @@ class SimpleWriteCode(Action):
your code:
"""

def __init__(self, name: str = "SimpleWriteCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
name: str = "SimpleWriteCode"

async def run(self, instruction: str):
prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
Expand All @@ -44,8 +42,7 @@ def parse_code(rsp):


class SimpleRunCode(Action):
def __init__(self, name: str = "SimpleRunCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
name: str = "SimpleRunCode"

async def run(self, code_text: str):
result = subprocess.run(["python3", "-c", code_text], capture_output=True, text=True)
Expand All @@ -55,17 +52,15 @@ async def run(self, code_text: str):


class SimpleCoder(Role):
def __init__(
self,
name: str = "Alice",
profile: str = "SimpleCoder",
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Alice"
profile: str = "SimpleCoder"

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._init_actions([SimpleWriteCode])

async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
logger.info(f"{self._setting}: to do {self._rc.todo}({self._rc.todo.name})")
todo = self._rc.todo # todo will be SimpleWriteCode()

msg = self.get_memories(k=1)[0] # find the most recent messages
Expand All @@ -76,18 +71,16 @@ async def _act(self) -> Message:


class RunnableCoder(Role):
def __init__(
self,
name: str = "Alice",
profile: str = "RunnableCoder",
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Alice"
profile: str = "RunnableCoder"

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._init_actions([SimpleWriteCode, SimpleRunCode])
self._set_react_mode(react_mode="by_order")
self._set_react_mode(react_mode=RoleReactMode.BY_ORDER.value)

async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
logger.info(f"{self._setting}: to do {self._rc.todo}({self._rc.todo.name})")
# By choosing the Action by order under the hood
# todo will be first SimpleWriteCode() then SimpleRunCode()
todo = self._rc.todo
Expand Down
51 changes: 20 additions & 31 deletions examples/build_customized_multi_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
import fire

from metagpt.actions import Action, UserRequirement
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
Expand All @@ -28,9 +27,7 @@ class SimpleWriteCode(Action):
Return ```python your_code_here ``` with NO other texts,
your code:
"""

def __init__(self, name: str = "SimpleWriteCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
name: str = "SimpleWriteCode"

async def run(self, instruction: str):
prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
Expand All @@ -43,13 +40,11 @@ async def run(self, instruction: str):


class SimpleCoder(Role):
def __init__(
self,
name: str = "Alice",
profile: str = "SimpleCoder",
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Alice"
profile: str = "SimpleCoder"

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._watch([UserRequirement])
self._init_actions([SimpleWriteCode])

Expand All @@ -62,8 +57,7 @@ class SimpleWriteTest(Action):
your code:
"""

def __init__(self, name: str = "SimpleWriteTest", context=None, llm: LLM = None):
super().__init__(name, context, llm)
name: str = "SimpleWriteTest"

async def run(self, context: str, k: int = 3):
prompt = self.PROMPT_TEMPLATE.format(context=context, k=k)
Expand All @@ -76,19 +70,17 @@ async def run(self, context: str, k: int = 3):


class SimpleTester(Role):
def __init__(
self,
name: str = "Bob",
profile: str = "SimpleTester",
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Bob"
profile: str = "SimpleTester"

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._init_actions([SimpleWriteTest])
# self._watch([SimpleWriteCode])
self._watch([SimpleWriteCode, SimpleWriteReview]) # feel free to try this too

async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
logger.info(f"{self._setting}: to do {self._rc.todo}({self._rc.todo.name})")
todo = self._rc.todo

# context = self.get_memories(k=1)[0].content # use the most recent memory as context
Expand All @@ -106,8 +98,7 @@ class SimpleWriteReview(Action):
Review the test cases and provide one critical comments:
"""

def __init__(self, name: str = "SimpleWriteReview", context=None, llm: LLM = None):
super().__init__(name, context, llm)
name: str = "SimpleWriteReview"

async def run(self, context: str):
prompt = self.PROMPT_TEMPLATE.format(context=context)
Expand All @@ -118,13 +109,11 @@ async def run(self, context: str):


class SimpleReviewer(Role):
def __init__(
self,
name: str = "Charlie",
profile: str = "SimpleReviewer",
**kwargs,
):
super().__init__(name, profile, **kwargs)
name: str = "Charlie"
profile: str = "SimpleReviewer"

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._init_actions([SimpleWriteReview])
self._watch([SimpleWriteTest])

Expand All @@ -147,7 +136,7 @@ async def main(
)

team.invest(investment=investment)
team.start_project(idea)
team.run_project(idea)
await team.run(n_round=n_round)


Expand Down
2 changes: 1 addition & 1 deletion examples/debate.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ async def _observe(self) -> int:
return len(self._rc.news)

async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
logger.info(f"{self._setting}: to do {self._rc.todo}({self._rc.todo.name})")
todo = self._rc.todo # An instance of SpeakAloud

memories = self.get_memories()
Expand Down
16 changes: 9 additions & 7 deletions examples/debate_simple.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,16 @@
"""
import asyncio

from metagpt.actions import Action, UserRequirement
from metagpt.actions import Action
from metagpt.environment import Environment
from metagpt.roles import Role
from metagpt.team import Team

action1 = Action(name="BidenSay", instruction="Use diverse words to attack your opponent, strong and emotional.")
action2 = Action(name="TrumpSay", instruction="Use diverse words to attack your opponent, strong and emotional.")
biden = Role(name="Biden", profile="democrat", goal="win election", actions=[action1], watch=[action2, UserRequirement])
trump = Role(name="Trump", profile="republican", goal="win election", actions=[action2], watch=[action1])
team = Team(investment=10.0, env_desc="US election live broadcast", roles=[biden, trump])
action1 = Action(name="BidenSay", instruction="Express opinions and argue vigorously, and strive to gain votes")
action2 = Action(name="TrumpSay", instruction="Express opinions and argue vigorously, and strive to gain votes")
biden = Role(name="Biden", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2])
trump = Role(name="Trump", profile="Republican candidate", goal="Win the election", actions=[action2], watch=[action1])
env = Environment(desc="US election live broadcast")
team = Team(investment=10.0, env=env, roles=[biden, trump])

asyncio.run(team.run(idea="Topic: climate change", n_round=5))
asyncio.run(team.run(idea="Topic: climate change. Under 80 words per message.", send_to="Biden", n_round=5))
4 changes: 2 additions & 2 deletions examples/invoice_ocr.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import asyncio
from pathlib import Path

from metagpt.roles.invoice_ocr_assistant import InvoiceOCRAssistant
from metagpt.roles.invoice_ocr_assistant import InvoiceOCRAssistant, InvoicePath
from metagpt.schema import Message


Expand All @@ -26,7 +26,7 @@ async def main():

for path in absolute_file_paths:
role = InvoiceOCRAssistant()
await role.run(Message(content="Invoicing date", instruct_content={"file_path": path}))
await role.run(Message(content="Invoicing date", instruct_content=InvoicePath(file_path=path)))


if __name__ == "__main__":
Expand Down
20 changes: 11 additions & 9 deletions examples/search_kb.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,13 @@
"""
import asyncio

from metagpt.actions import Action
from langchain.embeddings import OpenAIEmbeddings

from metagpt.config import CONFIG
from metagpt.const import DATA_PATH
from metagpt.document_store import FaissStore
from metagpt.logs import logger
from metagpt.roles import Sales
from metagpt.schema import Message

""" example.json, e.g.
[
Expand All @@ -26,14 +27,15 @@
"""


def get_store():
embedding = OpenAIEmbeddings(openai_api_key=CONFIG.openai_api_key, openai_api_base=CONFIG.openai_base_url)
return FaissStore(DATA_PATH / "example.json", embedding=embedding)


async def search():
store = FaissStore(DATA_PATH / "example.json")
role = Sales(profile="Sales", store=store)
role._watch({Action})
queries = [
Message(content="Which facial cleanser is good for oily skin?", cause_by=Action),
Message(content="Is L'Oreal good to use?", cause_by=Action),
]
role = Sales(profile="Sales", store=get_store())
queries = ["Which facial cleanser is good for oily skin?", "Is L'Oreal good to use?"]

for query in queries:
logger.info(f"User: {query}")
result = await role.run(query)
Expand Down
4 changes: 2 additions & 2 deletions examples/search_with_specific_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@ async def main():
# Serper API
# await Searcher(engine=SearchEngineType.SERPER_GOOGLE).run(question)
# SerpAPI
# await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run(question)
await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run(question)
# Google API
await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run(question)
# await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run(question)


if __name__ == "__main__":
Expand Down
7 changes: 4 additions & 3 deletions metagpt/actions/action.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class Config:

def __init_with_instruction(self, instruction: str):
"""Initialize action with instruction"""
self.node = ActionNode(key=self.name, expected_type=str, instruction=instruction, example="")
self.node = ActionNode(key=self.name, expected_type=str, instruction=instruction, example="", schema="raw")
return self

def __init__(self, **kwargs: Any):
Expand All @@ -59,7 +59,7 @@ def __init_subclass__(cls, **kwargs: Any) -> None:
action_subclass_registry[cls.__name__] = cls

def dict(self, *args, **kwargs) -> "DictStrAny":
obj_dict = super(Action, self).dict(*args, **kwargs)
obj_dict = super().dict(*args, **kwargs)
if "llm" in obj_dict:
obj_dict.pop("llm")
return obj_dict
Expand All @@ -85,7 +85,8 @@ async def _aask(self, prompt: str, system_msgs: Optional[list[str]] = None) -> s
async def _run_action_node(self, *args, **kwargs):
"""Run action node"""
msgs = args[0]
context = "\n".join([f"Msg {idx}: {i}" for idx, i in enumerate(reversed(msgs))])
context = "## History Messages\n"
context += "\n".join([f"{idx}: {i}" for idx, i in enumerate(reversed(msgs))])
return await self.node.fill(context=context, llm=self.llm)

async def run(self, *args, **kwargs):
Expand Down
Loading
Loading