Fix: Token counter expecting response.raw as dict, got ChatCompletionChunk (#14937)
joelbarmettlerUZH authored Jul 24, 2024
1 parent 3a35a6e commit 3be95a2
Showing 1 changed file with 6 additions and 3 deletions.
llama-index-core/llama_index/core/callbacks/token_counting.py (6 additions & 3 deletions)
@@ -53,11 +53,14 @@ def get_llm_token_counts(
             response_tokens = 0

             if response is not None and response.raw is not None:
-                usage = response.raw.get("usage", None)
+                if isinstance(response.raw, dict):
+                    raw_dict = response.raw
+                else:
+                    raw_dict = response.raw.model_dump()
+
+                usage = raw_dict.get("usage", None)

                 if usage is not None:
                     if not isinstance(usage, dict):
                         usage = dict(usage)
                     messages_tokens = usage.get("prompt_tokens", 0)
                     response_tokens = usage.get("completion_tokens", 0)
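For context, a minimal sketch of the failure mode this commit addresses and of the normalization it introduces. Usage and FakeChunk below are hypothetical stand-ins; in practice response.raw holds something like openai's ChatCompletionChunk, a pydantic v2 model that exposes model_dump() but has no dict-style .get():

# Hypothetical stand-ins for the real OpenAI types; both are pydantic v2
# models, like ChatCompletionChunk, so calling .get() on them would raise
# AttributeError.
from typing import Optional, Union

from pydantic import BaseModel


class Usage(BaseModel):
    prompt_tokens: int = 0
    completion_tokens: int = 0


class FakeChunk(BaseModel):
    usage: Optional[Usage] = None


def extract_usage(raw: Union[dict, BaseModel]) -> dict:
    # Old behavior: raw.get("usage", None) raised AttributeError whenever
    # raw was a pydantic model rather than a plain dict.
    # New behavior: normalize to a dict first, then read "usage" from it.
    raw_dict = raw if isinstance(raw, dict) else raw.model_dump()
    return raw_dict.get("usage") or {}


print(extract_usage({"usage": {"prompt_tokens": 12, "completion_tokens": 34}}))
print(extract_usage(FakeChunk(usage=Usage(prompt_tokens=12, completion_tokens=34))))
# Both print: {'prompt_tokens': 12, 'completion_tokens': 34}

Note that the committed fix also keeps the existing dict(usage) fallback, since usage itself may still arrive as a model-like object rather than a plain dict.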

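This code path is exercised through llama_index's TokenCountingHandler. A usage sketch, assuming the documented tiktoken-based setup (model name and tokenizer choice here are illustrative assumptions):

# Sketch of the callback setup that routes responses into
# get_llm_token_counts, per the llama_index docs.
import tiktoken
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler

token_counter = TokenCountingHandler(
    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])

# After attaching callback_manager to an LLM (or via Settings) and running a
# streaming chat, counts are read from the handler. With streamed OpenAI
# responses, response.raw is a ChatCompletionChunk, the case this commit fixes.
print(token_counter.total_llm_token_count)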
