File ~/anaconda3/lib/python3.10/site-packages/nest_asyncio.py:35, in _patch_asyncio.<locals>.run(main, debug)
33 task = asyncio.ensure_future(main)
34 try:
---> 35 return loop.run_until_complete(task)
36 finally:
37 if not task.done():
File ~/anaconda3/lib/python3.10/site-packages/nest_asyncio.py:90, in _patch_loop.<locals>.run_until_complete(self, future)
87 if not f.done():
88 raise RuntimeError(
89 'Event loop stopped before Future completed.')
---> 90 return f.result()
File ~/anaconda3/lib/python3.10/asyncio/futures.py:201, in Future.result(self)
199 self.__log_traceback = False
200 if self._exception is not None:
--> 201 raise self._exception.with_traceback(self._exception_tb)
202 return self._result
File ~/anaconda3/lib/python3.10/asyncio/tasks.py:232, in Task.__step(failed resolving arguments)
228 try:
229 if exc is None:
230 # We use the send method directly, because coroutines
231 # don't have __iter__ and __next__ methods.
--> 232 result = coro.send(None)
233 else:
234 result = coro.throw(exc)
File ~/anaconda3/lib/python3.10/site-packages/metagpt/context_mixin.py:95, in ContextMixin.llm(self)
93 # print(f"class:{self.__class__.__name__}({self.name}), llm: {self._llm}, llm_config: {self._llm_config}")
94 if not self.private_llm:
---> 95 self.private_llm = self.context.llm_with_cost_manager_from_llm_config(self.config.llm)
96 return self.private_llm
File ~/anaconda3/lib/python3.10/site-packages/metagpt/context.py:107, in Context.llm_with_cost_manager_from_llm_config(self, llm_config)
105 """Return a LLM instance, fixme: support cache"""
106 # if self._llm is None:
--> 107 llm = create_llm_instance(llm_config)
108 if llm.cost_manager is None:
109 llm.cost_manager = self._select_costmanager(llm_config)
File ~/anaconda3/lib/python3.10/site-packages/metagpt/provider/llm_provider_registry.py:40, in create_llm_instance(config)
38 def create_llm_instance(config: LLMConfig) -> BaseLLM:
39 """get the default llm provider"""
---> 40 return LLM_REGISTRY.get_provider(config.api_type)(config)
File ~/anaconda3/lib/python3.10/site-packages/metagpt/provider/llm_provider_registry.py:21, in LLMProviderRegistry.get_provider(self, enum)
19 def get_provider(self, enum: LLMType):
20 """get provider instance according to the enum"""
---> 21 return self.providers[enum]
KeyError: <LLMType.OPENROUTER: 'openrouter'>
Looking at the GitHub repo, it seems that "openrouter" has no corresponding ".py" module under the path metagpt/provider/.
Other working APIs, such as OpenAI and Anthropic, all seem to have one.
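One way to confirm this (a quick diagnostic sketch; it assumes metagpt/provider/__init__.py imports each provider module so its registration decorator runs on import) is to print the keys the registry actually holds:

import metagpt.provider  # noqa: F401 - importing should run each provider's registration decorator
from metagpt.provider.llm_provider_registry import LLM_REGISTRY

# If OPENROUTER is absent from this list, no provider is registered for it,
# which matches the KeyError above.
print(sorted(LLM_REGISTRY.providers, key=lambda t: t.value))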
I tried the configuration from the documentation:
llm:
api_type: 'openrouter'
base_url: 'https://openrouter.ai/api/v1'
api_key: 'sk...'
model: meta-llama/llama-3-70b-instruct:nitro
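As a side check (a sketch, assuming MetaGPT's Config.default() reads this block from ~/.metagpt/config2.yaml), the YAML itself does parse; the traceback below shows api_type resolving to LLMType.OPENROUTER:

from metagpt.config2 import Config

cfg = Config.default()
# If the block above is picked up, api_type should print as LLMType.OPENROUTER.
print(cfg.llm.api_type, cfg.llm.model)

So the failure below seems to be in the provider registry rather than in the config file.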
Then I got this issue:
KeyError Traceback (most recent call last)
File :1
File ~/anaconda3/lib/python3.10/site-packages/nest_asyncio.py:35, in _patch_asyncio.<locals>.run(main, debug)
33 task = asyncio.ensure_future(main)
34 try:
---> 35 return loop.run_until_complete(task)
36 finally:
37 if not task.done():
File ~/anaconda3/lib/python3.10/site-packages/nest_asyncio.py:90, in _patch_loop.<locals>.run_until_complete(self, future)
87 if not f.done():
88 raise RuntimeError(
89 'Event loop stopped before Future completed.')
---> 90 return f.result()
File ~/anaconda3/lib/python3.10/asyncio/futures.py:201, in Future.result(self)
199 self.__log_traceback = False
200 if self._exception is not None:
--> 201 raise self._exception.with_traceback(self._exception_tb)
202 return self._result
File ~/anaconda3/lib/python3.10/asyncio/tasks.py:232, in Task.__step(failed resolving arguments)
228 try:
229 if exc is None:
230 # We use the send method directly, because coroutines
231 # don't have __iter__ and __next__ methods.
--> 232 result = coro.send(None)
233 else:
234 result = coro.throw(exc)
File ~/Desktop/AI_eval_redo/src.py:766, in main(questions, investment, n_round, add_human, output_dir)
761 logger.info(f"Processing idea: {idea}")
763 team = Team()
764 team.hire(
765 [
--> 766 RAGer(),
767 ProductManager(),
768 SimpleReviewer(),
769 Researcher(),
770 ]
771 )
773 team.invest(investment=investment)
774 team.run_project(idea)
File ~/Desktop/AI_eval_redo/src.py:488, in RAGer.__init__(self, **kwargs)
487 def __init__(self, **kwargs):
--> 488 super().__init__(**kwargs)
489 self._watch([ProductManage])
490 self.set_actions([RAG_TT])
File ~/anaconda3/lib/python3.10/site-packages/metagpt/roles/role.py:161, in Role.validate_role_extra(self)
159 @model_validator(mode="after")
160 def validate_role_extra(self):
--> 161 self._process_role_extra()
162 return self
File ~/anaconda3/lib/python3.10/site-packages/metagpt/roles/role.py:171, in Role._process_role_extra(self)
168 self.llm = HumanProvider(None)
170 self._check_actions()
--> 171 self.llm.system_prompt = self._get_prefix()
172 self.llm.cost_manager = self.context.cost_manager
173 self._watch(kwargs.pop("watch", [UserRequirement]))
File ~/anaconda3/lib/python3.10/site-packages/metagpt/context_mixin.py:95, in ContextMixin.llm(self)
93 # print(f"class:{self.__class__.__name__}({self.name}), llm: {self._llm}, llm_config: {self._llm_config}")
94 if not self.private_llm:
---> 95 self.private_llm = self.context.llm_with_cost_manager_from_llm_config(self.config.llm)
96 return self.private_llm
File ~/anaconda3/lib/python3.10/site-packages/metagpt/context.py:107, in Context.llm_with_cost_manager_from_llm_config(self, llm_config)
105 """Return a LLM instance, fixme: support cache"""
106 # if self._llm is None:
--> 107 llm = create_llm_instance(llm_config)
108 if llm.cost_manager is None:
109 llm.cost_manager = self._select_costmanager(llm_config)
File ~/anaconda3/lib/python3.10/site-packages/metagpt/provider/llm_provider_registry.py:40, in create_llm_instance(config)
38 def create_llm_instance(config: LLMConfig) -> BaseLLM:
39 """get the default llm provider"""
---> 40 return LLM_REGISTRY.get_provider(config.api_type)(config)
File ~/anaconda3/lib/python3.10/site-packages/metagpt/provider/llm_provider_registry.py:21, in LLMProviderRegistry.get_provider(self, enum)
19 def get_provider(self, enum: LLMType):
20 """get provider instance according to the enum"""
---> 21 return self.providers[enum]
KeyError: <LLMType.OPENROUTER: 'openrouter'>
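Since OpenRouter exposes an OpenAI-compatible API, a possible stopgap until a dedicated provider ships is to register the existing OpenAI provider for the OPENROUTER enum before building the team. This is an untested sketch; it assumes LLM_REGISTRY.register(key, provider_cls) and OpenAILLM behave as the modules in the traceback suggest:

from metagpt.configs.llm_config import LLMType
from metagpt.provider.llm_provider_registry import LLM_REGISTRY
from metagpt.provider.openai_api import OpenAILLM

# The KeyError shows LLMType.OPENROUTER already exists; only the registry
# entry is missing. The base_url in config2.yaml steers OpenAILLM to OpenRouter.
LLM_REGISTRY.register(LLMType.OPENROUTER, OpenAILLM)

Upgrading to a newer MetaGPT release may also fix this, if that release registers OPENROUTER out of the box.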