From 12e285e720636843df108731e1a4a7ade693df09 Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Mon, 13 Nov 2023 13:20:34 -0800
Subject: [PATCH 1/8] respond to more run status states

1) According to the spec, there are other states we can handle in the
   wait_for_run function, so I added those.
2) Added a termination msg param.
3) register_reply now registers _invoke_assistant and
   check_termination_and_human_reply in order, so we can check for an
   exit/human reply when human_input_mode != "NEVER". Removed the
   hardcoded human_input_mode.
4) Return an empty array if the state-machine while loop terminates for
   some reason without returning messages.
---
 .../agentchat/contrib/gpt_assistant_agent.py  | 76 ++++++++++++++++---
 1 file changed, 67 insertions(+), 9 deletions(-)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index b4fc5e8096e..6d2fea50d63 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -8,7 +8,7 @@
 from autogen.agentchat.agent import Agent
 from autogen.agentchat.assistant_agent import ConversableAgent
 from autogen.agentchat.assistant_agent import AssistantAgent
-from typing import Dict, Optional, Union, List, Tuple, Any
+from typing import Dict, Optional, Union, List, Tuple, Any, Callable

 logger = logging.getLogger(__name__)

@@ -22,9 +22,11 @@ class GPTAssistantAgent(ConversableAgent):
     def __init__(
         self,
         name="GPT Assistant",
+        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         instructions: Optional[str] = None,
         llm_config: Optional[Union[Dict, bool]] = None,
         overwrite_instructions: bool = False,
+        **kwargs,
     ):
         """
         Args:
@@ -86,18 +88,24 @@ def __init__(
             logger.warning(
                 "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
             )
-
+        _is_termination_msg = (
+            is_termination_msg if is_termination_msg is not None else (lambda x: "TERMINATE" in x.get("content", ""))
+        )
         super().__init__(
             name=name,
             system_message=instructions,
-            human_input_mode="NEVER",
             llm_config=llm_config,
+            is_termination_msg=_is_termination_msg,
+            **kwargs
         )

         # lazily create thread
         self._openai_threads = {}
         self._unread_index = defaultdict(int)
-        self.register_reply(Agent, GPTAssistantAgent._invoke_assistant)
+        self._reply_func_list = []
+        self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant)
+        self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
+
     def _invoke_assistant(
         self,
@@ -146,7 +154,7 @@ def _invoke_assistant(

         run_response_messages = self._get_run_response(assistant_thread, run)
         assert len(run_response_messages) > 0, "No response from the assistant."
-
+
         response = {
             "role": run_response_messages[-1]["role"],
             "content": "",
         }
@@ -157,7 +165,6 @@ def _invoke_assistant(
             if len(response["content"]) > 0:
                 response["content"] += "\n\n"
             response["content"] += message["content"]
-
         self._unread_index[sender] = len(self._oai_messages[sender]) + 1
         return True, response
@@ -173,10 +180,60 @@ def _get_run_response(self, thread, run):
         """
         while True:
             run = self._wait_for_run(run.id, thread.id)
-            if run.status == "completed":
+            if run.status == "failed":
+                new_messages = []
+                print(f'Run: {run.id} Thread: {thread.id}: failed...')
+                if run.last_error:
+                    new_messages.append(
+                        {
+                            "role": msg.role,
+                            "content": f'Last error: {run.last_error}',
+                        }
+                    )
+                new_messages.append(
+                    {
+                        "role": msg.role,
+                        "content": 'Failed',
+                    }
+                )
+                return new_messages
+            elif run.status == "cancelling":
+                print(f'Run: {run.id} Thread: {thread.id}: cancelling...')
+            elif run.status == "expired":
+                print(f'Run: {run.id} Thread: {thread.id}: expired...')
+                new_messages = []
+                new_messages.append(
+                    {
+                        "role": msg.role,
+                        "content": 'Expired',
+                    }
+                )
+                return new_messages
+            elif run.status == "cancelled":
+                print(f'Run: {run.id} Thread: {thread.id}: cancelled...')
+                new_messages = []
+                new_messages.append(
+                    {
+                        "role": msg.role,
+                        "content": 'Cancelled',
+                    }
+                )
+                return new_messages
+            elif run.status == "in_progress":
+                print(f'Run: {run.id} Thread: {thread.id}: in progress...')
+            elif run.status == "queued":
+                print(f'Run: {run.id} Thread: {thread.id}: queued...')
+            elif run.status == "completed":
+                print(f'Run: {run.id} Thread: {thread.id}: completed...')
                 response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc")
-
                 new_messages = []
+                if run.last_error:
+                    new_messages.append(
+                        {
+                            "role": msg.role,
+                            "content": f'Last error: {run.last_error}',
+                        }
+                    )
                 for msg in response_messages:
                     if msg.run_id == run.id:
                         for content in msg.content:
@@ -193,6 +250,7 @@ def _get_run_response(self, thread, run):
                         )
                 return new_messages
             elif run.status == "requires_action":
+                print(f'Run: {run.id} Thread: {thread.id}: required action...')
                 actions = []
                 for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                     function = tool_call.function
@@ -224,7 +282,7 @@ def _get_run_response(self, thread, run):
             else:
                 run_info = json.dumps(run.dict(), indent=2)
                 raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})")
-
+        return []
     def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
         """
         Waits for a run to complete or reach a final state.

From 4da0876fe9c58f8fe21127448c562895d7751cff Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Mon, 13 Nov 2023 13:24:56 -0800
Subject: [PATCH 2/8] use user role when creating new messages from send() msgs

I received:

```
openai.BadRequestError: Error code: 400 - {'error': {'message': "1 validation error for Request\nbody -> role\n value is not a valid enumeration member; permitted: 'user' (type=type_error.enum; enum_values=[])", 'type': 'invalid_request_error', 'param': None, 'code': None}}
```

when using message["role"], which is "assistant" for sent messages, but the
API accepts only the user role for new messages added to a thread. Not sure
how it works for you without this change?
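For reference, a minimal sketch of the constraint with a bare `openai` v1
client (the client setup below is illustrative, not part of this patch):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
thread = client.beta.threads.create()

# Rejected with the 400 above: "user" is the only permitted role for
# new thread messages at the time of writing.
# client.beta.threads.messages.create(thread_id=thread.id, role="assistant", content="hi")

# Accepted:
client.beta.threads.messages.create(thread_id=thread.id, role="user", content="hi")
```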
--- autogen/agentchat/contrib/gpt_assistant_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index 6d2fea50d63..dbe83b2b84b 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -141,7 +141,7 @@ def _invoke_assistant( self._openai_client.beta.threads.messages.create( thread_id=assistant_thread.id, content=message["content"], - role=message["role"], + role="user", ) # Create a new run to get responses from the assistant From 9c8cac547f9f8c70ebc321c37134cddd4543fa47 Mon Sep 17 00:00:00 2001 From: jagdeep sidhu Date: Mon, 13 Nov 2023 20:21:51 -0800 Subject: [PATCH 3/8] print -> logger, remove return [] (unreachable) and use "assistant" role for end state msgs --- .../agentchat/contrib/gpt_assistant_agent.py | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index dbe83b2b84b..dbc98789a8f 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -182,55 +182,55 @@ def _get_run_response(self, thread, run): run = self._wait_for_run(run.id, thread.id) if run.status == "failed": new_messages = [] - print(f'Run: {run.id} Thread: {thread.id}: failed...') + logger.info(f'Run: {run.id} Thread: {thread.id}: failed...') if run.last_error: new_messages.append( { - "role": msg.role, + "role": "assistant", "content": f'Last error: {run.last_error}', } ) new_messages.append( { - "role": msg.role, + "role": "assistant", "content": 'Failed', } ) return new_messages elif run.status == "cancelling": - print(f'Run: {run.id} Thread: {thread.id}: cancelling...') + logger.info(f'Run: {run.id} Thread: {thread.id}: cancelling...') elif run.status == "expired": - print(f'Run: {run.id} Thread: {thread.id}: expired...') + logger.info(f'Run: {run.id} Thread: {thread.id}: expired...') new_messages = [] new_messages.append( { - "role": msg.role, + "role": "assistant", "content": 'Expired', } ) return new_messages elif run.status == "cancelled": - print(f'Run: {run.id} Thread: {thread.id}: cancelled...') + logger.info(f'Run: {run.id} Thread: {thread.id}: cancelled...') new_messages = [] new_messages.append( { - "role": msg.role, + "role": "assistant", "content": 'Cancelled', } ) return new_messages elif run.status == "in_progress": - print(f'Run: {run.id} Thread: {thread.id}: in progress...') + logger.info(f'Run: {run.id} Thread: {thread.id}: in progress...') elif run.status == "queued": - print(f'Run: {run.id} Thread: {thread.id}: queued...') + logger.info(f'Run: {run.id} Thread: {thread.id}: queued...') elif run.status == "completed": - print(f'Run: {run.id} Thread: {thread.id}: completed...') + logger.info(f'Run: {run.id} Thread: {thread.id}: completed...') response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc") new_messages = [] if run.last_error: new_messages.append( { - "role": msg.role, + "role": "assistant", "content": f'Last error: {run.last_error}', } ) @@ -250,7 +250,7 @@ def _get_run_response(self, thread, run): ) return new_messages elif run.status == "requires_action": - print(f'Run: {run.id} Thread: {thread.id}: required action...') + logger.info(f'Run: {run.id} Thread: {thread.id}: required action...') actions = [] for tool_call in run.required_action.submit_tool_outputs.tool_calls: 
                     function = tool_call.function
@@ -262,7 +262,7 @@ def _get_run_response(self, thread, run):
                     }

                     logger.info(
-                        "Intermediate executing(%s, Sucess: %s) : %s",
+                        "Intermediate executing(%s, Success: %s) : %s",
                         tool_response["name"],
                         is_exec_success,
                         tool_response["content"],
@@ -282,7 +282,7 @@ def _get_run_response(self, thread, run):
             else:
                 run_info = json.dumps(run.dict(), indent=2)
                 raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})")
-        return []
+
     def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
         """
         Waits for a run to complete or reach a final state.

From ad0216b77885dbf4b8ac72c9b829ce68d31f3ae0 Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Mon, 13 Nov 2023 20:30:13 -0800
Subject: [PATCH 4/8] add api reference to hardcoded user role
---
 autogen/agentchat/contrib/gpt_assistant_agent.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index dbc98789a8f..573462ccb33 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -141,6 +141,7 @@ def _invoke_assistant(
             self._openai_client.beta.threads.messages.create(
                 thread_id=assistant_thread.id,
                 content=message["content"],
+                # https://platform.openai.com/docs/api-reference/messages/createMessage - only user role accepted to create message in thread
                 role="user",
             )

From 975bf0e8c02b4908897a75c0d4754a2765845027 Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Tue, 14 Nov 2023 08:11:56 -0800
Subject: [PATCH 5/8] nits

Adjust the logging level based on errors/cancellations. Remove the extra
message on failure. Remove the last-error message on success. Only show
the last error, cast to a string, in the content.
---
 .../agentchat/contrib/gpt_assistant_agent.py  | 30 ++++++++-----------
 1 file changed, 12 insertions(+), 18 deletions(-)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index 573462ccb33..f7f7f566084 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -183,25 +183,26 @@ def _get_run_response(self, thread, run):
             run = self._wait_for_run(run.id, thread.id)
             if run.status == "failed":
                 new_messages = []
-                logger.info(f'Run: {run.id} Thread: {thread.id}: failed...')
+                logger.error(f'Run: {run.id} Thread: {thread.id}: failed...')
                 if run.last_error:
                     new_messages.append(
                         {
                             "role": "assistant",
-                            "content": f'Last error: {run.last_error}',
+                            "content": str(run.last_error),
+                        }
+                    )
+                else:
+                    new_messages.append(
+                        {
+                            "role": "assistant",
+                            "content": 'Failed',
                         }
                     )
-                new_messages.append(
-                    {
-                        "role": "assistant",
-                        "content": 'Failed',
-                    }
-                )
                 return new_messages
             elif run.status == "cancelling":
-                logger.info(f'Run: {run.id} Thread: {thread.id}: cancelling...')
+                logger.warn(f'Run: {run.id} Thread: {thread.id}: cancelling...')
             elif run.status == "expired":
-                logger.info(f'Run: {run.id} Thread: {thread.id}: expired...')
+                logger.warn(f'Run: {run.id} Thread: {thread.id}: expired...')
                 new_messages = []
                 new_messages.append(
                     {
@@ -211,7 +212,7 @@ def _get_run_response(self, thread, run):
                 )
                 return new_messages
             elif run.status == "cancelled":
-                logger.info(f'Run: {run.id} Thread: {thread.id}: cancelled...')
+                logger.warn(f'Run: {run.id} Thread: {thread.id}: cancelled...')
                 new_messages = []
                 new_messages.append(
                     {
@@ -228,13 +229,6 @@ def _get_run_response(self, thread, run):
                 logger.info(f'Run: {run.id} Thread: {thread.id}: completed...')
                 response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc")
                 new_messages = []
-                if run.last_error:
-                    new_messages.append(
-                        {
-                            "role": "assistant",
-                            "content": f'Last error: {run.last_error}',
-                        }
-                    )
                 for msg in response_messages:
                     if msg.run_id == run.id:
                         for content in msg.content:

From b89bbe28365c38f72f58dba6861ec4317fb9e097 Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Tue, 14 Nov 2023 21:05:49 -0800
Subject: [PATCH 6/8] revert role change (locally tested, seems to work for now)
---
 autogen/agentchat/contrib/gpt_assistant_agent.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index f7f7f566084..a23da9a5603 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -141,8 +141,7 @@ def _invoke_assistant(
             self._openai_client.beta.threads.messages.create(
                 thread_id=assistant_thread.id,
                 content=message["content"],
-                # https://platform.openai.com/docs/api-reference/messages/createMessage - only user role accepted to create message in thread
-                role="user",
+                role=message["role"],
             )

         # Create a new run to get responses from the assistant

From 88a63432017b2c3a170a2f19fc48c7f42b5f87fd Mon Sep 17 00:00:00 2001
From: jagdeep sidhu
Date: Thu, 16 Nov 2023 20:02:02 -0800
Subject: [PATCH 7/8] clean up run loop

1) remove is_termination_msg
2) add external run cancellation
3) remove _wait_for_run and internalize through _get_run_response
4) process responses through _process_messages
---
 .../agentchat/contrib/gpt_assistant_agent.py  | 214 +++++++++---------
 1 file changed, 103 insertions(+), 111 deletions(-)

diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py
index a23da9a5603..61ae6fe31cc 100644
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -3,16 +3,16 @@
 import json
 import time
 import logging
+import threading
 from autogen import OpenAIWrapper
 from autogen.agentchat.agent import Agent
 from autogen.agentchat.assistant_agent import ConversableAgent
 from autogen.agentchat.assistant_agent import AssistantAgent
-from typing import Dict, Optional, Union, List, Tuple, Any, Callable
+from typing import Dict, Optional, Union, List, Tuple, Any

 logger = logging.getLogger(__name__)

-
 class GPTAssistantAgent(ConversableAgent):
     """
     An experimental AutoGen agent class that leverages the OpenAI Assistant API for conversational capabilities.
@@ -22,7 +22,6 @@ class GPTAssistantAgent(ConversableAgent):
     def __init__(
         self,
         name="GPT Assistant",
-        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         instructions: Optional[str] = None,
         llm_config: Optional[Union[Dict, bool]] = None,
         overwrite_instructions: bool = False,
@@ -66,6 +65,7 @@ def __init__(
                 instructions=instructions,
                 tools=llm_config.get("tools", []),
                 model=llm_config.get("model", "gpt-4-1106-preview"),
+                file_ids=llm_config.get("file_ids", []),
             )
         else:
             # retrieve an existing assistant
@@ -88,24 +88,23 @@ def __init__(
             logger.warning(
                 "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
             )
-        _is_termination_msg = (
-            is_termination_msg if is_termination_msg is not None else (lambda x: "TERMINATE" in x.get("content", ""))
-        )
         super().__init__(
             name=name,
             system_message=instructions,
             llm_config=llm_config,
-            is_termination_msg=_is_termination_msg,
             **kwargs
         )
-
+        self.cancellation_requested = False
         # lazily create thread
         self._openai_threads = {}
         self._unread_index = defaultdict(int)
-        self._reply_func_list = []
-        self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant)
-        self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
+        self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant, position=1)
+
+    def check_for_cancellation(self):
+        """
+        Checks for cancellation, used during _get_run_response
+        """
+        return self.cancellation_requested

     def _invoke_assistant(
         self,
@@ -124,7 +123,6 @@ def _invoke_assistant(
         Returns:
             A tuple containing a boolean indicating success and the assistant's reply.
         """
-
        if messages is None:
            messages = self._oai_messages[sender]
        unread_index = self._unread_index[sender] or 0
@@ -151,100 +149,96 @@ def _invoke_assistant(
             # pass the latest system message as instructions
             instructions=self.system_message,
         )
-
-        run_response_messages = self._get_run_response(assistant_thread, run)
-        assert len(run_response_messages) > 0, "No response from the assistant."
-
-        response = {
-            "role": run_response_messages[-1]["role"],
-            "content": "",
-        }
-        for message in run_response_messages:
-            # just logging or do something with the intermediate messages?
-            # if current response is not empty and there is more, append new lines
-            if len(response["content"]) > 0:
-                response["content"] += "\n\n"
-            response["content"] += message["content"]
+        self.cancellation_requested = False
+        response = self._get_run_response(assistant_thread, run)
         self._unread_index[sender] = len(self._oai_messages[sender]) + 1
-        return True, response
+        if response["content"]:
+            return True, response
+        else:
+            return False, "No response from the assistant."

-    def _get_run_response(self, thread, run):
+    def _process_messages(self, assistant_thread, run):
+        """
+        Processes and provides a response based on the run status.
+
+        Args:
+            assistant_thread: The thread object for the assistant.
+            run: The run object initiated with the OpenAI assistant.
+        """
+        if run.status == "failed":
+            logger.error(f'Run: {run.id} Thread: {assistant_thread.id}: failed...')
+            if run.last_error:
+                response = {
+                    "role": "assistant",
+                    "content": str(run.last_error),
+                }
+            else:
+                response = {
+                    "role": "assistant",
+                    "content": 'Failed',
+                }
+            return response
+        elif run.status == "expired":
+            logger.warn(f'Run: {run.id} Thread: {assistant_thread.id}: expired...')
+            response = {
+                "role": "assistant",
+                "content": 'Expired',
+            }
+            return response
+        elif run.status == "cancelled":
+            logger.warn(f'Run: {run.id} Thread: {assistant_thread.id}: cancelled...')
+            response = {
+                "role": "assistant",
+                "content": 'Cancelled',
+            }
+            return response
+        elif run.status == "completed":
+            logger.info(f'Run: {run.id} Thread: {assistant_thread.id}: completed...')
+            response_messages = self._openai_client.beta.threads.messages.list(assistant_thread.id, order="asc")
+            new_messages = []
+            for msg in response_messages:
+                if msg.run_id == run.id:
+                    for content in msg.content:
+                        if content.type == "text":
+                            new_messages.append(
+                                {"role": msg.role, "content": self._format_assistant_message(content.text)}
+                            )
+                        elif content.type == "image_file":
+                            new_messages.append(
+                                {
+                                    "role": msg.role,
+                                    "content": f"Received file id={content.image_file.file_id}",
+                                }
+                            )
+            response = {
+                "role": new_messages[-1]["role"],
+                "content": "",
+            }
+            for message in new_messages:
+                # just logging or do something with the intermediate messages?
+                # if current response is not empty and there is more, append new lines
+                if len(response["content"]) > 0:
+                    response["content"] += "\n\n"
+                response["content"] += message["content"]
+            return response
+
+    def _get_run_response(self, assistant_thread, run):
         """
         Waits for and processes the response of a run from the OpenAI assistant.
-
         Args:
+            assistant_thread: The thread object for the assistant.
             run: The run object initiated with the OpenAI assistant.
-
-        Returns:
-            Updated run object, status of the run, and response messages.
""" while True: - run = self._wait_for_run(run.id, thread.id) - if run.status == "failed": - new_messages = [] - logger.error(f'Run: {run.id} Thread: {thread.id}: failed...') - if run.last_error: - new_messages.append( - { - "role": "assistant", - "content": str(run.last_error), - } - ) - else: - new_messages.append( - { - "role": "assistant", - "content": 'Failed', - } - ) - return new_messages + run = self._openai_client.beta.threads.runs.retrieve(run.id, thread_id=assistant_thread.id) + if run.status == "in_progress" or run.status == "queued": + time.sleep(self.llm_config.get("check_every_ms", 1000) / 1000) + run = self._openai_client.beta.threads.runs.retrieve(run.id, thread_id=assistant_thread.id) + elif run.status == "completed" or run.status == "cancelled" or run.status == "expired" or run.status == "failed": + return self._process_messages(assistant_thread, run) elif run.status == "cancelling": - logger.warn(f'Run: {run.id} Thread: {thread.id}: cancelling...') - elif run.status == "expired": - logger.warn(f'Run: {run.id} Thread: {thread.id}: expired...') - new_messages = [] - new_messages.append( - { - "role": "assistant", - "content": 'Expired', - } - ) - return new_messages - elif run.status == "cancelled": - logger.warn(f'Run: {run.id} Thread: {thread.id}: cancelled...') - new_messages = [] - new_messages.append( - { - "role": "assistant", - "content": 'Cancelled', - } - ) - return new_messages - elif run.status == "in_progress": - logger.info(f'Run: {run.id} Thread: {thread.id}: in progress...') - elif run.status == "queued": - logger.info(f'Run: {run.id} Thread: {thread.id}: queued...') - elif run.status == "completed": - logger.info(f'Run: {run.id} Thread: {thread.id}: completed...') - response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc") - new_messages = [] - for msg in response_messages: - if msg.run_id == run.id: - for content in msg.content: - if content.type == "text": - new_messages.append( - {"role": msg.role, "content": self._format_assistant_message(content.text)} - ) - elif content.type == "image_file": - new_messages.append( - { - "role": msg.role, - "content": f"Recieved file id={content.image_file.file_id}", - } - ) - return new_messages + logger.warn(f'Run: {run.id} Thread: {assistant_thread.id}: cancelling...') elif run.status == "requires_action": - logger.info(f'Run: {run.id} Thread: {thread.id}: required action...') + logger.info(f'Run: {run.id} Thread: {assistant_thread.id}: required action...') actions = [] for tool_call in run.required_action.submit_tool_outputs.tool_calls: function = tool_call.function @@ -252,7 +246,7 @@ def _get_run_response(self, thread, run): tool_response["metadata"] = { "tool_call_id": tool_call.id, "run_id": run.id, - "thread_id": thread.id, + "thread_id": assistant_thread.id, } logger.info( @@ -269,38 +263,36 @@ def _get_run_response(self, thread, run): for action in actions ], "run_id": run.id, - "thread_id": thread.id, + "thread_id": assistant_thread.id, } run = self._openai_client.beta.threads.runs.submit_tool_outputs(**submit_tool_outputs) + if self.check_for_cancellation(): + self._cancel_run() else: run_info = json.dumps(run.dict(), indent=2) raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})") - def _wait_for_run(self, run_id: str, thread_id: str) -> Any: + + def _cancel_run(self, run_id: str, thread_id: str): """ - Waits for a run to complete or reach a final state. + Cancels a run. Args: run_id: The ID of the run. 
thread_id: The ID of the thread associated with the run. - - Returns: - The updated run object after completion or reaching a final state. """ - in_progress = True - while in_progress: - run = self._openai_client.beta.threads.runs.retrieve(run_id, thread_id=thread_id) - in_progress = run.status in ("in_progress", "queued") - if in_progress: - time.sleep(self.llm_config.get("check_every_ms", 1000) / 1000) - return run + try: + self._openai_client.beta.threads.runs.cancel(run_id=run_id, thread_id=thread_id) + logger.info(f'Run: {run_id} Thread: {thread_id}: successfully sent cancellation signal.') + except Exception as e: + logger.error(f'Run: {run_id} Thread: {thread_id}: failed to send cancellation signal: {e}') + def _format_assistant_message(self, message_content): """ Formats the assistant's message to include annotations and citations. """ - annotations = message_content.annotations citations = [] From 7ae462df737e9448f85df336dc20d87d652a88ab Mon Sep 17 00:00:00 2001 From: jagdeep sidhu Date: Thu, 16 Nov 2023 20:02:32 -0800 Subject: [PATCH 8/8] remove unused imports --- autogen/agentchat/contrib/gpt_assistant_agent.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index 61ae6fe31cc..c788d24803d 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -1,9 +1,7 @@ from collections import defaultdict -import openai import json import time import logging -import threading from autogen import OpenAIWrapper from autogen.agentchat.agent import Agent
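A minimal usage sketch of the external cancellation added in [PATCH 7/8].
The driver below is illustrative only — the agent names, model, and the
30-second timer are assumptions, not part of the patches:

```python
import threading

from autogen import UserProxyAgent
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent

# "tools", "model", "file_ids", and "check_every_ms" are all read from
# llm_config by GPTAssistantAgent.
assistant = GPTAssistantAgent(
    name="GPT Assistant",
    instructions="You are a helpful assistant.",
    llm_config={
        "config_list": [{"model": "gpt-4-1106-preview"}],  # assumes auth via OPENAI_API_KEY
        "check_every_ms": 1000,  # run-status poll interval used by _get_run_response
    },
)
user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)

# Flip the flag from another thread; the run loop calls check_for_cancellation()
# after submitting tool outputs and then sends a cancel signal via _cancel_run().
threading.Timer(30.0, lambda: setattr(assistant, "cancellation_requested", True)).start()

user.initiate_chat(assistant, message="Work on the task until done or cancelled.")
```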
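Similarly, a sketch of the `file_ids` pass-through added in [PATCH 7/8]; the
file name and the v1 `retrieval` tool type are assumptions based on the
Assistants API of that era, not something these patches prescribe:

```python
from openai import OpenAI

from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Upload a file for the assistant to use; "report.pdf" is illustrative.
uploaded = client.files.create(file=open("report.pdf", "rb"), purpose="assistants")

# llm_config["file_ids"] is forwarded to assistants.create() only when a new
# assistant is created (i.e., no existing assistant is being retrieved).
assistant = GPTAssistantAgent(
    name="GPT Assistant",
    instructions="Answer questions using the attached file.",
    llm_config={
        "tools": [{"type": "retrieval"}],  # retrieval tool of the v1 Assistants API
        "file_ids": [uploaded.id],
    },
)
```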