
Commit

add a verbose flag to print more information of runtime (microsoft#717)
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
IANTHEREAL and qingyun-wu committed Dec 4, 2023
1 parent 03f9264 commit 079ba1a
Showing 3 changed files with 53 additions and 22 deletions.
12 changes: 7 additions & 5 deletions autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -26,6 +26,7 @@ def __init__(
instructions: Optional[str] = None,
llm_config: Optional[Union[Dict, bool]] = None,
overwrite_instructions: bool = False,
**kwargs,
):
"""
Args:
@@ -45,6 +46,9 @@ def __init__(
or build your own tools using Function calling. ref https://platform.openai.com/docs/assistants/tools
- file_ids: files used by retrieval in run
overwrite_instructions (bool): whether to overwrite the instructions of an existing assistant.
kwargs (dict): Additional configuration options for the agent.
- verbose (bool): If set to True, enables more detailed output from the assistant thread.
- Other kwargs: all keyword arguments other than verbose are passed directly to ConversableAgent.
"""
# Use AutoGen OpenAIWrapper to create a client
oai_wrapper = OpenAIWrapper(**llm_config)
@@ -100,11 +104,9 @@ def __init__(
"overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
)

self._verbose = kwargs.pop("verbose", False)
super().__init__(
name=name,
system_message=instructions,
human_input_mode="NEVER",
llm_config=llm_config,
name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs
)

# lazily create threads
@@ -209,7 +211,7 @@ def _get_run_response(self, thread, run):
actions = []
for tool_call in run.required_action.submit_tool_outputs.tool_calls:
function = tool_call.function
is_exec_success, tool_response = self.execute_function(function.dict())
is_exec_success, tool_response = self.execute_function(function.dict(), self._verbose)
tool_response["metadata"] = {
"tool_call_id": tool_call.id,
"run_id": run.id,
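
For reference, a minimal usage sketch of the new flag (the agent name, instructions, and config values below are placeholders, not taken from this commit): passing verbose=True to GPTAssistantAgent routes the flag through **kwargs, where __init__ pops it into self._verbose and later forwards it to execute_function during a run.

    from autogen import config_list_from_json
    from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent

    # Placeholder config; any valid OpenAI config list works here.
    config_list = config_list_from_json("OAI_CONFIG_LIST")
    llm_config = {"config_list": config_list}

    # verbose=True is popped from kwargs into self._verbose, so each tool call
    # executed by the assistant thread prints its input arguments and output.
    analyst = GPTAssistantAgent(
        name="oss_analyst",
        instructions="Answer open source analysis questions.",
        llm_config=llm_config,
        verbose=True,
    )
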
8 changes: 7 additions & 1 deletion autogen/agentchat/conversable_agent.py
@@ -1144,7 +1144,7 @@ def _format_json_str(jstr):
result.append(char)
return "".join(result)

def execute_function(self, func_call) -> Tuple[bool, Dict[str, str]]:
def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, str]]:
"""Execute a function call and return the result.
Override this function to modify the way to execute a function call.
@@ -1184,6 +1184,12 @@ def execute_function(self, func_call) -> Tuple[bool, Dict[str, str]]:
else:
content = f"Error: Function {func_name} not found."

if verbose:
print(
colored(f"\nInput arguments: {arguments}\nOutput:\n{content}", "magenta"),
flush=True,
)

return is_exec_success, {
"name": func_name,
"role": "function",
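
The verbose parameter can also be exercised directly on a ConversableAgent; a small sketch, assuming a hypothetical get_weather tool registered via function_map (none of these names come from the commit):

    from autogen import ConversableAgent

    def get_weather(city: str) -> str:
        # Hypothetical tool used only for this illustration.
        return f"Sunny in {city}"

    agent = ConversableAgent(name="worker", llm_config=False)
    agent.register_function(function_map={"get_weather": get_weather})

    # func_call follows the OpenAI function-call shape: a name plus JSON-encoded arguments.
    # With verbose=True, the block added above prints the arguments and output in magenta.
    success, result = agent.execute_function(
        {"name": "get_weather", "arguments": '{"city": "Seattle"}'},
        verbose=True,
    )
    print(success, result["content"])
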
55 changes: 39 additions & 16 deletions notebook/agentchat_oai_assistant_function_call.ipynb
@@ -43,7 +43,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -104,7 +104,7 @@
"\n",
" if answer.get('error', None) is not None:\n",
" report_components.append(f\"Error: {answer['error']}\")\n",
" return \"\\n\\n\".join(report_components)"
" return \"\\n\\n\".join(report_components) + \"\\n\\n\""
]
},
{
@@ -118,7 +118,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -147,6 +147,7 @@
" \"Please carefully read the context of the conversation to identify the current analysis question or problem that needs addressing.\"\n",
" ),\n",
" llm_config=llm_config,\n",
" verbose=True,\n",
")\n",
"oss_analyst.register_function(\n",
" function_map={\n",
@@ -166,7 +167,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -180,20 +181,42 @@
"--------------------------------------------------------------------------------\n",
"\u001b[35m\n",
">>>>>>>> EXECUTING FUNCTION ossinsight_data_api...\u001b[0m\n",
"\u001b[35m\n",
"Input arguments: {'question': 'Who are the top 10 developers with the most followers on GitHub?'}\n",
"Output:\n",
"Question: Who are the top 10 developers with the most followers on GitHub?\n",
"\n",
"querySQL: SELECT `login` AS `user_login`, `followers` AS `followers` FROM `github_users` ORDER BY `followers` DESC LIMIT 10\n",
"\n",
"Result:\n",
" {'followers': 166730, 'user_login': 'torvalds'}\n",
" {'followers': 86239, 'user_login': 'yyx990803'}\n",
" {'followers': 77611, 'user_login': 'gaearon'}\n",
" {'followers': 72668, 'user_login': 'ruanyf'}\n",
" {'followers': 65415, 'user_login': 'JakeWharton'}\n",
" {'followers': 60972, 'user_login': 'peng-zhihui'}\n",
" {'followers': 58172, 'user_login': 'bradtraversy'}\n",
" {'followers': 52143, 'user_login': 'gustavoguanabara'}\n",
" {'followers': 51542, 'user_login': 'sindresorhus'}\n",
" {'followers': 49621, 'user_login': 'tj'}\n",
"\n",
"\u001b[0m\n",
"\u001b[33mOSS Analyst\u001b[0m (to user_proxy):\n",
"\n",
"The top 10 developers on GitHub with the most followers are:\n",
"The top 10 developers with the most followers on GitHub are as follows:\n",
"\n",
"1. `torvalds` with 166,730 followers\n",
"2. `yyx990803` with 86,239 followers\n",
"3. `gaearon` with 77,611 followers\n",
"4. `ruanyf` with 72,668 followers\n",
"5. `JakeWharton` with 65,415 followers\n",
"6. `peng-zhihui` with 60,972 followers\n",
"7. `bradtraversy` with 58,172 followers\n",
"8. `gustavoguanabara` with 52,143 followers\n",
"9. `sindresorhus` with 51,542 followers\n",
"10. `tj` with 49,621 followers\n",
"\n",
"1. Linus Torvalds (`torvalds`) with 166,730 followers.\n",
"2. Evan You (`yyx990803`) with 86,239 followers.\n",
"3. Dan Abramov (`gaearon`) with 77,611 followers.\n",
"4. Ruan YiFeng (`ruanyf`) with 72,668 followers.\n",
"5. Jake Wharton (`JakeWharton`) with 65,415 followers.\n",
"6. Peng Zhihui (`peng-zhihui`) with 60,972 followers.\n",
"7. Brad Traversy (`bradtraversy`) with 58,172 followers.\n",
"8. Gustavo Guanabara (`gustavoguanabara`) with 52,143 followers.\n",
"9. Sindre Sorhus (`sindresorhus`) with 51,542 followers.\n",
"10. TJ Holowaychuk (`tj`) with 49,621 followers.\n",
"These figures indicate the number of followers these developers had at the time of the analysis.\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
@@ -204,7 +227,7 @@
"--------------------------------------------------------------------------------\n",
"\u001b[33mOSS Analyst\u001b[0m (to user_proxy):\n",
"\n",
"It seems like there is no question or further instructions provided in your last response. How may I assist you further with open source project analysis or any other inquiries you might have?\n",
"It seems you haven't entered a question or a request. Could you please provide more details or specify how I can assist you further?\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n"
