From 8118b7b7d681442ef400a51ca7f29b69b06155d9 Mon Sep 17 00:00:00 2001
From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
Date: Thu, 1 Aug 2024 13:15:50 -0700
Subject: [PATCH] Feat/sliding context window (#1042)

* patching for non-GPT models

* removed json_object tool name assignment

* fixed an issue with smaller models caused by the instructions prompt

* fixes for Ollama llama3 models

* WIP: generate a summary from split documents; a MemGPT-style approach is another option

* WIP: user-selected summarization strategy implemented (tests still needed) - handles context window exceeded errors

* removed extra line

* removed type ignores

* added tests

* handle "n" answer to the summarize prompt

* code cleanup; use click for the CLI prompt

* removed unused class

* better refactor

* reverted poetry lock

* reverted poetry.lock

* improved context window exceeding exception class

---
 src/crewai/agents/executor.py                 | 127 +++++++++++-
 src/crewai/tools/tool_usage.py                |   2 +-
 src/crewai/utilities/__init__.py              |   4 +
 .../context_window_exceeding_exception.py     |  26 +++
 tests/agent_test.py                           |  73 +++++++
 ...t_handle_context_length_exceeds_limit.yaml | 181 ++++++++++++++++++
 ...e_context_length_exceeds_limit_cli_no.yaml | 162 ++++++++++++++++
 7 files changed, 572 insertions(+), 3 deletions(-)
 create mode 100644 src/crewai/utilities/exceptions/context_window_exceeding_exception.py
 create mode 100644 tests/cassettes/test_handle_context_length_exceeds_limit.yaml
 create mode 100644 tests/cassettes/test_handle_context_length_exceeds_limit_cli_no.yaml

diff --git a/src/crewai/agents/executor.py b/src/crewai/agents/executor.py
index c98e87eb4f..fa8a3068ae 100644
--- a/src/crewai/agents/executor.py
+++ b/src/crewai/agents/executor.py
@@ -1,6 +1,8 @@
 import threading
 import time
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
+from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
+import click
+
 from langchain.agents import AgentExecutor
 from langchain.agents.agent import ExceptionTool
@@ -11,12 +13,21 @@
 from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.chains.summarize import load_summarize_chain
+
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.tools_handler import ToolsHandler
+
+
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N
 from crewai.utilities.constants import TRAINING_DATA_FILE
+from crewai.utilities.exceptions.context_window_exceeding_exception import (
+    LLMContextLengthExceededException,
+)
 from crewai.utilities.training_handler import CrewTrainingHandler
+from crewai.utilities.logger import Logger


 class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
@@ -40,6 +51,8 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
     system_template: Optional[str] = None
     prompt_template: Optional[str] = None
     response_template: Optional[str] = None
+    _logger: Logger = Logger(verbose_level=2)
+    _fit_context_window_strategy: Optional[Literal["summarize"]] = "summarize"

     def _call(
         self,
@@ -131,7 +144,7 @@ def _iter_next_step(
             intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)

             # Call the LLM to see what to do.
-            output = self.agent.plan(  # type: ignore # Incompatible types in assignment (expression has type "AgentAction | AgentFinish | list[AgentAction]", variable has type "AgentAction")
+            output = self.agent.plan(
                 intermediate_steps,
                 callbacks=run_manager.get_child() if run_manager else None,
                 **inputs,
             )
@@ -185,6 +198,27 @@ def _iter_next_step(
                 yield AgentStep(action=output, observation=observation)
                 return

+        except Exception as e:
+            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
+                str(e)
+            ):
+                output = self._handle_context_length_error(
+                    intermediate_steps, run_manager, inputs
+                )
+
+                if isinstance(output, AgentFinish):
+                    yield output
+                elif isinstance(output, list):
+                    for step in output:
+                        yield step
+                return
+
+            yield AgentStep(
+                action=AgentAction("_Exception", str(e), str(e)),
+                observation=str(e),
+            )
+            return
+
         # If the tool chosen is the finishing tool, then we end and return.
         if isinstance(output, AgentFinish):
             if self.should_ask_for_human_input:
@@ -235,6 +269,7 @@ def _iter_next_step(
                 agent=self.crew_agent,
                 action=agent_action,
             )
+
             tool_calling = tool_usage.parse(agent_action.log)

             if isinstance(tool_calling, ToolUsageErrorException):
@@ -280,3 +315,91 @@ def _handle_crew_training_output(
             CrewTrainingHandler(TRAINING_DATA_FILE).append(
                 self.crew._train_iteration, agent_id, training_data
             )
+
+    def _handle_context_length(
+        self, intermediate_steps: List[Tuple[AgentAction, str]]
+    ) -> List[Tuple[AgentAction, str]]:
+        text = intermediate_steps[0][1]
+        original_action = intermediate_steps[0][0]
+
+        text_splitter = RecursiveCharacterTextSplitter(
+            separators=["\n\n", "\n"],
+            chunk_size=8000,
+            chunk_overlap=500,
+        )
+
+        if self._fit_context_window_strategy == "summarize":
+            docs = text_splitter.create_documents([text])
+            self._logger.log(
+                "debug",
+                "Summarizing content; consider using a RAG tool instead.",
+                color="bold_blue",
+            )
+            summarize_chain = load_summarize_chain(
+                self.llm, chain_type="map_reduce", verbose=True
+            )
+            summarized_docs = []
+            for doc in docs:
+                summary = summarize_chain.invoke(
+                    {"input_documents": [doc]}, return_only_outputs=True
+                )
+
+                summarized_docs.append(summary["output_text"])
+
+            formatted_results = "\n\n".join(summarized_docs)
+            summary_step = AgentStep(
+                action=AgentAction(
+                    tool=original_action.tool,
+                    tool_input=original_action.tool_input,
+                    log=original_action.log,
+                ),
+                observation=formatted_results,
+            )
+            summary_tuple = (summary_step.action, summary_step.observation)
+            return [summary_tuple]
+
+        return intermediate_steps
+
+    def _handle_context_length_error(
+        self,
+        intermediate_steps: List[Tuple[AgentAction, str]],
+        run_manager: Optional[CallbackManagerForChainRun],
+        inputs: Dict[str, str],
+    ) -> Union[AgentFinish, List[AgentStep]]:
+        self._logger.log(
+            "debug",
+            "Context length exceeded. Asking the user whether to summarize the context to fit the model's window.",
+            color="yellow",
+        )
+        user_choice = click.confirm(
+            "Context length exceeded. Do you want to summarize the text to fit the model's context window?"
+        )
+        if user_choice:
+            self._logger.log(
+                "debug",
+                "Context length exceeded. Summarizing the context to fit the model's window.",
+                color="bold_blue",
+            )
+            intermediate_steps = self._handle_context_length(intermediate_steps)
+
+            output = self.agent.plan(
+                intermediate_steps,
+                callbacks=run_manager.get_child() if run_manager else None,
+                **inputs,
+            )
+
+            if isinstance(output, AgentFinish):
+                return output
+            elif isinstance(output, AgentAction):
+                return [AgentStep(action=output, observation=None)]
+            else:
+                return [AgentStep(action=action, observation=None) for action in output]
+        else:
+            self._logger.log(
+                "debug",
+                "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
+                color="red",
+            )
+            raise SystemExit(
+                "Context length exceeded and the user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
+            )
diff --git a/src/crewai/tools/tool_usage.py b/src/crewai/tools/tool_usage.py
index 2426e6b6bf..f205dd0987 100644
--- a/src/crewai/tools/tool_usage.py
+++ b/src/crewai/tools/tool_usage.py
@@ -16,7 +16,7 @@
 except ImportError:
     agentops = None

-OPENAI_BIGGER_MODELS = ["gpt-4"]
+OPENAI_BIGGER_MODELS = ["gpt-4o"]


 class ToolUsageErrorException(Exception):
diff --git a/src/crewai/utilities/__init__.py b/src/crewai/utilities/__init__.py
index efe620c2e5..f13fce3909 100644
--- a/src/crewai/utilities/__init__.py
+++ b/src/crewai/utilities/__init__.py
@@ -7,6 +7,9 @@
 from .printer import Printer
 from .prompts import Prompts
 from .rpm_controller import RPMController
+from .exceptions.context_window_exceeding_exception import (
+    LLMContextLengthExceededException,
+)

 __all__ = [
     "Converter",
@@ -19,4 +22,5 @@
     "Prompts",
     "RPMController",
     "YamlParser",
+    "LLMContextLengthExceededException",
 ]
diff --git a/src/crewai/utilities/exceptions/context_window_exceeding_exception.py b/src/crewai/utilities/exceptions/context_window_exceeding_exception.py
new file mode 100644
index 0000000000..c02c40cc71
--- /dev/null
+++ b/src/crewai/utilities/exceptions/context_window_exceeding_exception.py
@@ -0,0 +1,26 @@
+class LLMContextLengthExceededException(Exception):
+    CONTEXT_LIMIT_ERRORS = [
+        "maximum context length",
+        "context length exceeded",
+        "context_length_exceeded",
+        "context window full",
+        "too many tokens",
+        "input is too long",
+        "exceeds token limit",
+    ]
+
+    def __init__(self, error_message: str):
+        self.original_error_message = error_message
+        super().__init__(self._get_error_message(error_message))
+
+    def _is_context_limit_error(self, error_message: str) -> bool:
+        return any(
+            phrase.lower() in error_message.lower()
+            for phrase in self.CONTEXT_LIMIT_ERRORS
+        )
+
+    def _get_error_message(self, error_message: str) -> str:
+        return (
+            f"LLM context length exceeded. Original error: {error_message}\n"
+            "Consider using a smaller input or implementing a text splitting strategy."
+        )
diff --git a/tests/agent_test.py b/tests/agent_test.py
index b0a779cea2..05ef15ba8a 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -7,6 +7,7 @@
 from langchain.tools import tool
 from langchain_core.exceptions import OutputParserException
 from langchain_openai import ChatOpenAI
+from langchain.schema import AgentAction

 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
@@ -1014,3 +1015,75 @@ def test_agent_max_retry_limit():
             ),
         ]
     )
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_handle_context_length_exceeds_limit():
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+    )
+    original_action = AgentAction(
+        tool="test_tool", tool_input="test_input", log="test_log"
+    )
+
+    with patch.object(
+        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
+    ) as private_mock:
+        task = Task(
+            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+            expected_output="The final answer",
+        )
+        agent.execute_task(
+            task=task,
+        )
+        private_mock.assert_called_once()
+    with patch("crewai.agents.executor.click") as mock_prompt:
+        mock_prompt.confirm.return_value = True
+        with patch.object(
+            CrewAgentExecutor, "_handle_context_length"
+        ) as mock_handle_context:
+            mock_handle_context.side_effect = ValueError(
+                "Context length limit exceeded"
+            )
+
+            long_input = "This is a very long input. " * 10000
+
+            # Attempt to handle context length, expecting the mocked error
+            with pytest.raises(ValueError) as excinfo:
+                agent.agent_executor._handle_context_length(
+                    [(original_action, long_input)]
+                )
+
+            assert "Context length limit exceeded" in str(excinfo.value)
+            mock_handle_context.assert_called_once()
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_handle_context_length_exceeds_limit_cli_no():
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+    )
+    task = Task(description="test task", agent=agent, expected_output="test output")
+
+    with patch.object(
+        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
+    ) as private_mock:
+        task = Task(
+            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+            expected_output="The final answer",
+        )
+        agent.execute_task(
+            task=task,
+        )
+        private_mock.assert_called_once()
+    with patch("crewai.agents.executor.click") as mock_prompt:
+        mock_prompt.confirm.return_value = False
+        with patch.object(
+            CrewAgentExecutor, "_handle_context_length"
+        ) as mock_handle_context:
+            with pytest.raises(SystemExit):
+                agent.agent_executor._handle_context_length_error(
+                    intermediate_steps=[], run_manager=None, inputs={}
+                )
+            mock_handle_context.assert_not_called()
diff --git a/tests/cassettes/test_handle_context_length_exceeds_limit.yaml b/tests/cassettes/test_handle_context_length_exceeds_limit.yaml
new file mode 100644
index 0000000000..bb2fb70647
--- /dev/null
+++ b/tests/cassettes/test_handle_context_length_exceeds_limit.yaml
@@ -0,0 +1,181 @@
+interactions:
+- request:
+    body: '{"messages": [{"content": "You are test role. test backstory\nYour personal
+      goal is: test goalTo give my best complete final answer to the task use the
+      exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
+      my best complete final answer to the task.\nYour final answer must be the great
+      and the most complete as possible, it must be outcome described.\n\nI MUST use
+      these formats, my job depends on it!\nCurrent Task: The final answer is 42.
+ But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis + is the expect criteria for your final answer: The final answer \n you MUST return + the actual complete content as the final answer, not a summary.\n\nBegin! This + is VERY important to you, use the tools available and give your best Final Answer, + your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", + "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, br + connection: + - keep-alive + content-length: + - '938' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.35.10 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.35.10 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.5 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + I"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + now"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + can"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + give"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + a"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + great"},"logprobs":null,"finish_reason":null}]} + + + data: 
{"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + answer"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + Answer"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + The"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + final"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + answer"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + is"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + + data: [DONE] + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 
8ac1a40879b87d1f-LAX + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Thu, 01 Aug 2024 00:16:40 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=MHUl15YVi607cmuLtQ84ESiH30IyJiIW1a40fopQ81w-1722471400-1.0.1.1-OGpq5Ezj6iE0ToM1diQllGb70.J3O_K2De9NbwZPWmW2qN07U20adJ_0yd6PKUNqMdL.xEnLcNAOWVmsfrLUrQ; + path=/; expires=Thu, 01-Aug-24 00:46:40 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=G2ZVNvfNfFk4DeKyZ7jMYetG7wOasINAGHstrOnuAY8-1722471400129-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '131' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15552000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '30000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '29999786' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_b68b417b3fe1c67244279551e411b37a + status: + code: 200 + message: OK +version: 1 diff --git a/tests/cassettes/test_handle_context_length_exceeds_limit_cli_no.yaml b/tests/cassettes/test_handle_context_length_exceeds_limit_cli_no.yaml new file mode 100644 index 0000000000..e3463310ab --- /dev/null +++ b/tests/cassettes/test_handle_context_length_exceeds_limit_cli_no.yaml @@ -0,0 +1,162 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are test role. test backstory\nYour personal + goal is: test goalTo give my best complete final answer to the task use the + exact following format:\n\nThought: I now can give a great answer\nFinal Answer: + my best complete final answer to the task.\nYour final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!\nCurrent Task: The final answer is 42. + But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis + is the expect criteria for your final answer: The final answer \n you MUST return + the actual complete content as the final answer, not a summary.\n\nBegin! 
This + is VERY important to you, use the tools available and give your best Final Answer, + your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", + "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, br + connection: + - keep-alive + content-length: + - '938' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.35.10 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.35.10 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.11.5 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + I"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + now"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + can"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + give"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + a"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + great"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + answer"},"logprobs":null,"finish_reason":null}]} + + + data: 
{"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + Answer"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + + data: [DONE] + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8ac331b9eaee2b7f-LAX + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Thu, 01 Aug 2024 04:48:09 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=OXht5zC71vWYFW_z_933m3sZfFS2xBez0DHv93FvT5s-1722487689-1.0.1.1-wE8JTR7MnwUgiiTDppYg8A7zLEiidth.MB0zrwONeAtNWRjKC1tuGf8LZYDlYIHUhqG73syYExpZ.5pZhzJkcg; + path=/; expires=Thu, 01-Aug-24 05:18:09 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=PAR7y4xRe4VzRT.7GK34Tq5r8vevY6xq0E.i.R40xnU-1722487689562-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '84' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15552000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '30000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '29999786' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_105dcfc53c9672dea0437249c12c3319 + status: + code: 200 + message: OK +version: 1