
Commit

Add in missing triple quote and execution time to resume agent functionality. (#1025)

* Add in missing triple quote and execution time to resume agent functionality

* Fixing broken kwargs and other issues causing our tests to fail
bhancockio authored Jul 29, 2024
1 parent 25c314b commit fa4393d
Showing 4 changed files with 22 additions and 43 deletions.
17 changes: 17 additions & 0 deletions src/crewai/agent.py
@@ -55,6 +55,9 @@ class Agent(BaseAgent):
         tools: Tools at agents disposal
         step_callback: Callback to be executed after each step of the agent execution.
         callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
+    """
+
+    _times_executed: int = PrivateAttr(default=0)
     max_execution_time: Optional[int] = Field(
         default=None,
         description="Maximum execution time for an agent to execute a task",
@@ -186,6 +189,20 @@ def execute_task(
         else:
             task_prompt = self._use_trained_data(task_prompt=task_prompt)
 
+        try:
+            result = self.agent_executor.invoke(
+                {
+                    "input": task_prompt,
+                    "tool_names": self.agent_executor.tools_names,
+                    "tools": self.agent_executor.tools_description,
+                }
+            )["output"]
+        except Exception as e:
+            self._times_executed += 1
+            if self._times_executed > self.max_retry_limit:
+                raise e
+            result = self.execute_task(task, context, tools)
+
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
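
For context, a minimal, self-contained sketch of the retry pattern this hunk introduces: re-invoke on failure, count attempts in a private field, and re-raise once max_retry_limit is exceeded. FlakyExecutor and RetryingAgent are hypothetical stand-ins for illustration, not crewAI classes.

    class FlakyExecutor:
        """Hypothetical executor that fails a fixed number of times, then succeeds."""

        def __init__(self, failures: int):
            self.failures = failures

        def invoke(self, payload: dict) -> dict:
            if self.failures > 0:
                self.failures -= 1
                raise RuntimeError("transient failure")
            return {"output": f"done: {payload['input']}"}


    class RetryingAgent:
        def __init__(self, executor: FlakyExecutor, max_retry_limit: int = 2):
            self.executor = executor
            self.max_retry_limit = max_retry_limit
            self._times_executed = 0  # mirrors the PrivateAttr added above

        def execute_task(self, task_prompt: str) -> str:
            try:
                return self.executor.invoke({"input": task_prompt})["output"]
            except Exception:
                self._times_executed += 1
                if self._times_executed > self.max_retry_limit:
                    raise
                return self.execute_task(task_prompt)  # retry by recursing


    agent = RetryingAgent(FlakyExecutor(failures=2))
    print(agent.execute_task("say hi"))  # succeeds on the third attempt
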
2 changes: 1 addition & 1 deletion src/crewai/memory/short_term/short_term_memory.py
@@ -19,7 +19,7 @@ def __init__(self, crew=None, embedder_config=None):
         super().__init__(storage)
 
     def save(self, item: ShortTermMemoryItem) -> None:
-        super().save(item.data, item.metadata, item.agent)
+        super().save(value=item.data, metadata=item.metadata, agent=item.agent)
 
     def search(self, query: str, score_threshold: float = 0.35):
         return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters
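
The switch from positional to keyword arguments guards against parameter-order mismatches between ShortTermMemory.save and its parent. A minimal sketch of the failure mode, assuming a hypothetical parent signature in which agent precedes metadata (the real crewAI signature may differ):

    class Memory:
        # Hypothetical parent signature; note agent comes before metadata.
        def save(self, value, agent=None, metadata=None):
            print(f"value={value!r} agent={agent!r} metadata={metadata!r}")


    class ShortTermMemory(Memory):
        def save_positional(self, data, metadata, agent):
            # Positional call: metadata silently lands in the agent slot.
            super().save(data, metadata, agent)

        def save_keyword(self, data, metadata, agent):
            # Keyword call binds each value to the intended parameter.
            super().save(value=data, metadata=metadata, agent=agent)


    stm = ShortTermMemory()
    stm.save_positional("hi", {"k": 1}, "researcher")
    # value='hi' agent={'k': 1} metadata='researcher'  <- wrong binding
    stm.save_keyword("hi", {"k": 1}, "researcher")
    # value='hi' agent='researcher' metadata={'k': 1}  <- correct
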
4 changes: 4 additions & 0 deletions src/crewai/task.py
@@ -355,6 +355,9 @@ def _get_output_format(self) -> OutputFormat:
         return OutputFormat.RAW
 
     def _save_file(self, result: Any) -> None:
+        if self.output_file is None:
+            raise ValueError("output_file is not set.")
+
         directory = os.path.dirname(self.output_file)  # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"
 
         if directory and not os.path.exists(directory):
@@ -363,6 +366,7 @@ def _save_file(self, result: Any) -> None:
         with open(self.output_file, "w", encoding="utf-8") as file:
             if isinstance(result, dict):
                 import json
+
                 json.dump(result, file, ensure_ascii=False, indent=2)
             else:
                 file.write(str(result))
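
The new guard turns a confusing downstream failure (os.path.dirname(None) raises TypeError) into an explicit, early error. A standalone sketch of the patched behavior, with the method lifted out as a plain function for illustration:

    import json
    import os
    from typing import Any, Optional


    def save_file(output_file: Optional[str], result: Any) -> None:
        # Fail fast with a clear message instead of letting os.path.dirname
        # choke on None further down.
        if output_file is None:
            raise ValueError("output_file is not set.")

        directory = os.path.dirname(output_file)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)

        with open(output_file, "w", encoding="utf-8") as file:
            if isinstance(result, dict):
                json.dump(result, file, ensure_ascii=False, indent=2)
            else:
                file.write(str(result))


    save_file("out/result.txt", "Hello!")  # writes the file as before
    try:
        save_file(None, "Hello!")
    except ValueError as exc:
        print(exc)  # "output_file is not set." rather than a TypeError
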
42 changes: 0 additions & 42 deletions tests/crew_test.py
@@ -8,7 +8,6 @@
 
 import pydantic_core
 import pytest
-
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew
@@ -572,47 +571,6 @@ def get_final_answer(anything) -> float:
     moveon.assert_called()
 
 
-# This test is not consistent, some issue is happening on the CI when it comes to Prompt tokens
-# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 0, 'successful_requests': 2, 'total_tokens': 34}} CI OUTPUT
-# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 314, 'successful_requests': 2, 'total_tokens': 348}}
-# The issue might be related to the calculate_usage_metrics function
-# @pytest.mark.vcr(filter_headers=["authorization"])
-# def test_crew_full_output():
-#     agent = Agent(
-#         role="test role",
-#         goal="test goal",
-#         backstory="test backstory",
-#         allow_delegation=False,
-#         verbose=True,
-#     )
-
-#     task1 = Task(
-#         description="just say hi!",
-#         expected_output="your greeting",
-#         agent=agent,
-#     )
-#     task2 = Task(
-#         description="just say hello!",
-#         expected_output="your greeting",
-#         agent=agent,
-#     )
-
-#     crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)
-
-#     result = crew.kickoff()
-
-#     assert result == {
-#         "final_output": "Hello!",
-#         "tasks_outputs": [task1.output, task2.output],
-#         "usage_metrics": {
-#             "total_tokens": 348,
-#             "prompt_tokens": 314,
-#             "completion_tokens": 34,
-#             "successful_requests": 2,
-#         },
-#     }
-
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_kickoff_usage_metrics():
     inputs = [
