Merge pull request #1064 from crewAIInc/thiago/pipeline-fix
Fix flaky test due to suppressed error on `on_llm_start` callback
thiagomoretto authored Aug 5, 2024 · 2 parents 4a7ae8d + f3b3d32 · commit c0c59dc
Showing 1 changed file with 6 additions and 6 deletions.

src/crewai/utilities/token_counter_callback.py
@@ -10,24 +10,24 @@
 class TokenCalcHandler(BaseCallbackHandler):
     model_name: str = ""
     token_cost_process: TokenProcess
+    encoding: tiktoken.Encoding

     def __init__(self, model_name, token_cost_process):
         self.model_name = model_name
         self.token_cost_process = token_cost_process
+        try:
+            self.encoding = tiktoken.encoding_for_model(self.model_name)
+        except KeyError:
+            self.encoding = tiktoken.get_encoding("cl100k_base")

     def on_llm_start(
         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
     ) -> None:
-        try:
-            encoding = tiktoken.encoding_for_model(self.model_name)
-        except KeyError:
-            encoding = tiktoken.get_encoding("cl100k_base")
-
         if self.token_cost_process is None:
             return

         for prompt in prompts:
-            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))
+            self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))

     async def on_llm_new_token(self, token: str, **kwargs) -> None:
         self.token_cost_process.sum_completion_tokens(1)
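The change caches the tiktoken encoding on the handler: the `encoding_for_model` lookup, with its `KeyError` fallback to `cl100k_base`, now runs once in `__init__`, where a failure surfaces immediately, instead of on every `on_llm_start` invocation, where the callback machinery can suppress the error and leave the test flaky. A minimal, self-contained sketch of the same cache-at-init pattern (the class name, model name, and prompt below are illustrative assumptions, not part of the commit):

```python
import tiktoken

class CachedEncodingCounter:
    """Sketch of the pattern applied in this commit: resolve the tiktoken
    encoding once at construction time, falling back to cl100k_base for
    model names tiktoken does not recognize."""

    def __init__(self, model_name: str):
        try:
            # Raises KeyError for unknown model names.
            self.encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            self.encoding = tiktoken.get_encoding("cl100k_base")

    def count(self, prompt: str) -> int:
        # No lookup (and no suppressible exception) happens per call anymore.
        return len(self.encoding.encode(prompt))

# Illustrative usage: a made-up model name that exercises the fallback.
counter = CachedEncodingCounter("some-unknown-model")
print(counter.count("How many tokens is this prompt?"))
```

Resolving the encoding eagerly also avoids repeating the lookup for every prompt batch, though the fix here is primarily about where the exception can escape.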
