From 0481483faa03b337faaa4e8c7b3b4a9d51e7db29 Mon Sep 17 00:00:00 2001
From: Hiftie <127197446+hiftielabs@users.noreply.github.com>
Date: Mon, 2 Oct 2023 18:31:14 +0530
Subject: [PATCH] Fixed MD Issue (#72)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index c552f5f1c4f..a8af692a479 100644
--- a/README.md
+++ b/README.md
@@ -65,7 +65,7 @@ By automating chat among multiple capable agents, one can easily make them colle
 from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
 # Load LLM inference endpoints from an env variable or a file
 # See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints
-# and OAI_CONFIG_LIST_sample.json
+# and OAI_CONFIG_LIST_sample
 config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
 assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
 user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"})
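
For reference, the hunk above touches the autogen two-agent quickstart in README.md. Below is a minimal runnable sketch built from the same calls, assuming a valid `OAI_CONFIG_LIST` file is available; the final `initiate_chat` call and its message are not part of the diff context and are included here as an illustrative continuation of the snippet:

```python
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json

# Load LLM inference endpoints from an env variable or a file
# (see the FAQ link and the OAI_CONFIG_LIST_sample file referenced
# in the patched comment).
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")

# An LLM-backed assistant agent, plus a user proxy that executes
# any generated code inside the ./coding working directory.
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent("user_proxy", code_execution_config={"work_dir": "coding"})

# Kick off the automated chat between the two agents; the task
# message here is a placeholder, not something this patch specifies.
user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.")
```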