diff --git a/superagi/agent/super_agi.py b/superagi/agent/super_agi.py
index cfa2421ec..7c033299a 100644
--- a/superagi/agent/super_agi.py
+++ b/superagi/agent/super_agi.py
@@ -136,7 +136,10 @@ def execute(self, workflow_step: AgentWorkflowStep):
 
         total_tokens = current_tokens + TokenCounter.count_message_tokens(response, self.llm.get_model())
         self.update_agent_execution_tokens(current_calls, total_tokens, session)
-
+
+        if 'error' in response and response['error'] == "RATE_LIMIT_EXCEEDED":
+            return {"result": "RATE_LIMIT_EXCEEDED", "retry": True}
+
         if 'content' not in response or response['content'] is None:
             raise RuntimeError(f"Failed to get response from llm")
         assistant_reply = response['content']
diff --git a/superagi/jobs/agent_executor.py b/superagi/jobs/agent_executor.py
index b321361bf..751944b33 100644
--- a/superagi/jobs/agent_executor.py
+++ b/superagi/jobs/agent_executor.py
@@ -257,7 +257,11 @@ def execute_next_action(self, agent_execution_id):
             return
 
         if "retry" in response and response["retry"]:
-            superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=15)
+            if "result" in response and response["result"] == "RATE_LIMIT_EXCEEDED":
+                superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=60)
+            else:
+                superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=15)
+
         session.close()
         return
 
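For context, a minimal sketch of where the `RATE_LIMIT_EXCEEDED` marker that `execute()` checks for could originate. It assumes an openai 0.x-style client whose wrapper catches the provider's `RateLimitError` and returns an error dict instead of raising; the class and method names below are illustrative, not the actual SuperAGI LLM implementation.

```python
# Sketch only: assumes openai 0.x (openai.ChatCompletion / openai.error).
import openai


class RateLimitAwareLlm:
    def __init__(self, api_key: str, model: str = "gpt-3.5-turbo"):
        self.model = model
        openai.api_key = api_key

    def get_model(self) -> str:
        return self.model

    def chat_completion(self, messages: list, max_tokens: int = 1024) -> dict:
        try:
            completion = openai.ChatCompletion.create(
                model=self.model,
                messages=messages,
                max_tokens=max_tokens,
            )
            return {"content": completion["choices"][0]["message"]["content"]}
        except openai.error.RateLimitError:
            # Surfaced to the agent loop, which returns
            # {"result": "RATE_LIMIT_EXCEEDED", "retry": True}; the Celery worker
            # then re-queues the execution with countdown=60 instead of 15.
            return {"error": "RATE_LIMIT_EXCEEDED"}
```

With a wrapper shaped like this, the new branch in `execute_next_action` gives the provider a longer cool-down (60 s) before retrying a rate-limited execution, while other retryable responses keep the original 15-second delay.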