From 5da9d92dfe6f6c7a737fb9e7826021f8f3db8d1f Mon Sep 17 00:00:00 2001
From: XiaoYun Zhang
Date: Sat, 29 Jun 2024 22:00:46 -0700
Subject: [PATCH 1/3] add instruction in ollama-litellm function call example

---
 .../Tool_Call_With_Ollama_And_LiteLLM.cs      | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs b/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
index f4fabe3c9e8..5271a404a57 100644
--- a/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
+++ b/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
@@ -20,6 +20,15 @@ public class Tool_Call_With_Ollama_And_LiteLLM
 {
     public static async Task RunAsync()
     {
+        // Before running this code, make sure you have
+        // - Ollama:
+        //   - Install dolphincoder:latest in Ollama
+        //   - Ollama running on http://localhost:11434
+        // - LiteLLM
+        //   - Install LiteLLM
+        //   - Start LiteLLM with the following command:
+        //   - litellm --model ollama_chat/dolphincoder --port 4000
+
         #region Create_Agent
         var liteLLMUrl = "http://localhost:4000";
         using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
@@ -43,7 +52,7 @@ public static async Task RunAsync()
         var agent = new OpenAIChatAgent(
             openAIClient: openAIClient,
             name: "assistant",
-            modelName: "placeholder",
+            modelName: "dolphincoder:latest",
             systemMessage: "You are a helpful AI assistant")
             .RegisterMessageConnector()
             .RegisterMiddleware(functionMiddleware)

From ca9b59a151c58f0e897e254e663a7d902e54d7b6 Mon Sep 17 00:00:00 2001
From: XiaoYun Zhang
Date: Sat, 29 Jun 2024 23:27:18 -0700
Subject: [PATCH 2/3] add tutorial

---
 .../Example13_OpenAIAgent_JsonMode.cs         | 67 +------------
 ...nAIChatAgent_ConnectToThirdPartyBackend.cs | 61 +-----------
 .../Tool_Call_With_Ollama_And_LiteLLM.cs      | 20 ++--
 .../AutoGen.OpenAI.Sample/Use_Json_Mode.cs    | 67 +++++++++++++
 .../Function-call-with-ollama-and-litellm.md  | 93 +++++++++++++++++++
 ...nAIChatAgent-connect-to-third-party-api.md | 11 +--
 .../articles/OpenAIChatAgent-use-json-mode.md | 11 +--
 dotnet/website/articles/toc.yml               |  2 +
 .../tutorial/Create-agent-with-tools.md       |  5 +-
 9 files changed, 189 insertions(+), 148 deletions(-)
 create mode 100644 dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs
 create mode 100644 dotnet/website/articles/Function-call-with-ollama-and-litellm.md

diff --git a/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs b/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs
index dadad7f00b9..9e5b91ecc12 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs
@@ -1,68 +1,3 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Example13_OpenAIAgent_JsonMode.cs
-
-using System.Text.Json;
-using System.Text.Json.Serialization;
-using AutoGen.Core;
-using AutoGen.OpenAI;
-using AutoGen.OpenAI.Extension;
-using Azure.AI.OpenAI;
-using FluentAssertions;
-
-namespace AutoGen.BasicSample;
-
-public class Example13_OpenAIAgent_JsonMode
-{
-    public static async Task RunAsync()
-    {
-        #region create_agent
-        var config = LLMConfiguration.GetAzureOpenAIGPT3_5_Turbo(deployName: "gpt-35-turbo"); // json mode only works with 0125 and later model.
-        var apiKey = config.ApiKey;
-        var endPoint = new Uri(config.Endpoint);
-
-        var openAIClient = new OpenAIClient(endPoint, new Azure.AzureKeyCredential(apiKey));
-        var openAIClientAgent = new OpenAIChatAgent(
-            openAIClient: openAIClient,
-            name: "assistant",
-            modelName: config.DeploymentName,
-            systemMessage: "You are a helpful assistant designed to output JSON.",
-            seed: 0, // explicitly set a seed to enable deterministic output
-            responseFormat: ChatCompletionsResponseFormat.JsonObject) // set response format to JSON object to enable JSON mode
-            .RegisterMessageConnector()
-            .RegisterPrintMessage();
-        #endregion create_agent
-
-        #region chat_with_agent
-        var reply = await openAIClientAgent.SendAsync("My name is John, I am 25 years old, and I live in Seattle.");
-
-        var person = JsonSerializer.Deserialize<Person>(reply.GetContent());
-        Console.WriteLine($"Name: {person.Name}");
-        Console.WriteLine($"Age: {person.Age}");
-
-        if (!string.IsNullOrEmpty(person.Address))
-        {
-            Console.WriteLine($"Address: {person.Address}");
-        }
-
-        Console.WriteLine("Done.");
-        #endregion chat_with_agent
-
-        person.Name.Should().Be("John");
-        person.Age.Should().Be(25);
-        person.Address.Should().BeNullOrEmpty();
-    }
-}
-
-#region person_class
-public class Person
-{
-    [JsonPropertyName("name")]
-    public string Name { get; set; }
-
-    [JsonPropertyName("age")]
-    public int Age { get; set; }
-
-    [JsonPropertyName("address")]
-    public string Address { get; set; }
-}
-#endregion person_class
+// this example has been moved to https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs
\ No newline at end of file
diff --git a/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs b/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
index eb8bcb179be..f676e22a2d4 100644
--- a/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
+++ b/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
@@ -1,62 +1,3 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs
-#region using_statement
-using AutoGen.Core;
-using AutoGen.OpenAI;
-using AutoGen.OpenAI.Extension;
-using Azure.AI.OpenAI;
-using Azure.Core.Pipeline;
-#endregion using_statement
-
-namespace AutoGen.BasicSample;
-
-#region CustomHttpClientHandler
-public sealed class CustomHttpClientHandler : HttpClientHandler
-{
-    private string _modelServiceUrl;
-
-    public CustomHttpClientHandler(string modelServiceUrl)
-    {
-        _modelServiceUrl = modelServiceUrl;
-    }
-
-    protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
-    {
-        request.RequestUri = new Uri($"{_modelServiceUrl}{request.RequestUri.PathAndQuery}");
-
-        return base.SendAsync(request, cancellationToken);
-    }
-}
-#endregion CustomHttpClientHandler
-
-public class Example16_OpenAIChatAgent_ConnectToThirdPartyBackend
-{
-    public static async Task RunAsync()
-    {
-        #region create_agent
-        using var client = new HttpClient(new CustomHttpClientHandler("http://localhost:11434"));
-        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
-        {
-            Transport = new HttpClientTransport(client),
-        };
-
-        // api-key is not required for local server
-        // so you can use any string here
-        var openAIClient = new OpenAIClient("api-key", option);
-        var model = "llama3";
-
-        var agent = new OpenAIChatAgent(
-            openAIClient: openAIClient,
-            name: "assistant",
-            modelName: model,
-            systemMessage: "You are a helpful assistant designed to output JSON.",
-            seed: 0)
-            .RegisterMessageConnector()
-            .RegisterPrintMessage();
-        #endregion create_agent
-
-        #region send_message
-        await agent.SendAsync("Can you write a piece of C# code to calculate 100th of fibonacci?");
-        #endregion send_message
-    }
-}
+// this example has been moved to https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs
\ No newline at end of file
diff --git a/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs b/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
index 5271a404a57..b0b0adc0e6f 100644
--- a/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
+++ b/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs
@@ -8,6 +8,7 @@
 namespace AutoGen.OpenAI.Sample;
 
+#region Function
 public partial class Function
 {
     [Function]
     public async Task<string> GetWeatherAsync(string city)
     {
         return await Task.FromResult("The weather in " + city + " is 72 degrees and sunny.");
     }
 }
+#endregion Function
+
 public class Tool_Call_With_Ollama_And_LiteLLM
 {
     public static async Task RunAsync()
@@ -29,14 +32,7 @@ public static async Task RunAsync()
         //   - Start LiteLLM with the following command:
         //   - litellm --model ollama_chat/dolphincoder --port 4000
 
-        #region Create_Agent
-        var liteLLMUrl = "http://localhost:4000";
-        using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
-        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
-        {
-            Transport = new HttpClientTransport(httpClient),
-        };
-
+        #region Create_tools
         var functions = new Function();
         var functionMiddleware = new FunctionCallMiddleware(
             functions: [functions.GetWeatherAsyncFunctionContract],
             functionMap: new Dictionary<string, Func<string, Task<string>>>
             {
                 { functions.GetWeatherAsyncFunctionContract.Name!, functions.GetWeatherAsyncWrapper },
             });
+        #endregion Create_tools
+        #region Create_Agent
+        var liteLLMUrl = "http://localhost:4000";
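+        // CustomHttpClientHandler rewrites the URI of each outgoing request so that
+        // calls made through the OpenAI client below are redirected to the local
+        // LiteLLM endpoint above instead of api.openai.com.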
+        using var httpClient = new HttpClient(new CustomHttpClientHandler(liteLLMUrl));
+        var option = new OpenAIClientOptions(OpenAIClientOptions.ServiceVersion.V2024_04_01_Preview)
+        {
+            Transport = new HttpClientTransport(httpClient),
+        };
 
         // api-key is not required for local server
         // so you can use any string here
diff --git a/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs b/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs
new file mode 100644
index 00000000000..3bf88be7256
--- /dev/null
+++ b/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs
@@ -0,0 +1,67 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Use_Json_Mode.cs
+
+using System.Text.Json;
+using System.Text.Json.Serialization;
+using AutoGen.Core;
+using AutoGen.OpenAI;
+using AutoGen.OpenAI.Extension;
+using Azure.AI.OpenAI;
+using FluentAssertions;
+
+namespace AutoGen.BasicSample;
+
+public class Use_Json_Mode
+{
+    public static async Task RunAsync()
+    {
+        #region create_agent
+        var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
+        var model = "gpt-3.5-turbo";
+
+        var openAIClient = new OpenAIClient(apiKey);
+        var openAIClientAgent = new OpenAIChatAgent(
+            openAIClient: openAIClient,
+            name: "assistant",
+            modelName: model,
+            systemMessage: "You are a helpful assistant designed to output JSON.",
+            seed: 0, // explicitly set a seed to enable deterministic output
+            responseFormat: ChatCompletionsResponseFormat.JsonObject) // set response format to JSON object to enable JSON mode
+            .RegisterMessageConnector()
+            .RegisterPrintMessage();
+        #endregion create_agent
+
+        #region chat_with_agent
+        var reply = await openAIClientAgent.SendAsync("My name is John, I am 25 years old, and I live in Seattle.");
+
+        var person = JsonSerializer.Deserialize<Person>(reply.GetContent());
+        Console.WriteLine($"Name: {person.Name}");
+        Console.WriteLine($"Age: {person.Age}");
+
+        if (!string.IsNullOrEmpty(person.Address))
+        {
+            Console.WriteLine($"Address: {person.Address}");
+        }
+
+        Console.WriteLine("Done.");
+        #endregion chat_with_agent
+
+        person.Name.Should().Be("John");
+        person.Age.Should().Be(25);
+        person.Address.Should().BeNullOrEmpty();
+    }
+}
+
+#region person_class
+public class Person
+{
+    [JsonPropertyName("name")]
+    public string Name { get; set; }
+
+    [JsonPropertyName("age")]
+    public int Age { get; set; }
+
+    [JsonPropertyName("address")]
+    public string Address { get; set; }
+}
+#endregion person_class
diff --git a/dotnet/website/articles/Function-call-with-ollama-and-litellm.md b/dotnet/website/articles/Function-call-with-ollama-and-litellm.md
new file mode 100644
index 00000000000..2dc595ba3ad
--- /dev/null
+++ b/dotnet/website/articles/Function-call-with-ollama-and-litellm.md
@@ -0,0 +1,93 @@
+This example shows how to use function calls with local LLM models, using [Ollama](https://ollama.com/) as the local model provider and [LiteLLM](https://docs.litellm.ai/docs/) as a proxy server that exposes an OpenAI-compatible API.
+
+[![](https://img.shields.io/badge/Open%20on%20Github-grey?logo=github)](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs)
+
+To run this example, the following prerequisites are required:
+- Install [Ollama](https://ollama.com/) and [LiteLLM](https://docs.litellm.ai/docs/) on your local machine.
+- A local model that supports function calling. In this example, `dolphincoder:latest` is used.
+
+## Install Ollama and pull the `dolphincoder:latest` model
+First, install Ollama by following the instructions on the [Ollama website](https://ollama.com/).
+
+After installing Ollama, pull the `dolphincoder:latest` model by running the following command:
+```bash
+ollama pull dolphincoder:latest
+```
+
+## Install LiteLLM and start the proxy server
+
+You can install LiteLLM by following the instructions on the [LiteLLM website](https://docs.litellm.ai/docs/):
+```bash
+pip install 'litellm[proxy]'
+```
+
+Then, start the proxy server by running the following command:
+
+```bash
+litellm --model ollama_chat/dolphincoder --port 4000
+```
+
+This will start an OpenAI-compatible proxy server at `http://localhost:4000`. You can verify that the server is running by observing the following output in the terminal:
+
+```bash
+#------------------------------------------------------------#
+#                                                             #
+#       'The worst thing about this product is...'            #
+#        https://github.com/BerriAI/litellm/issues/new        #
+#                                                             #
+#------------------------------------------------------------#
+
+INFO:     Application startup complete.
+INFO:     Uvicorn running on http://0.0.0.0:4000 (Press CTRL+C to quit)
+```
+
+## Install AutoGen and AutoGen.SourceGenerator
+In your project, install the AutoGen and AutoGen.SourceGenerator packages using the following commands:
+
+```bash
+dotnet add package AutoGen
+dotnet add package AutoGen.SourceGenerator
+```
+
+The `AutoGen.SourceGenerator` package automatically generates type-safe `FunctionContract` definitions so that you don't have to write them by hand. For more information, please check out [Create type-safe function](Create-type-safe-function-call.md).
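+
+For the `GetWeatherAsync` function defined later in this article, the source generator emits (roughly) a `GetWeatherAsyncFunctionContract` property and a `GetWeatherAsyncWrapper` method on the partial class; these are the same members the middleware below consumes. A minimal sketch of how they can be used directly (member shapes are simplified here for illustration):
+
+```csharp
+var functions = new Function();
+
+// The generated contract carries the function name, description and parameter
+// schema that are advertised to the model as an available tool.
+var contract = functions.GetWeatherAsyncFunctionContract;
+
+// The generated wrapper accepts the JSON arguments produced by the model,
+// invokes GetWeatherAsync, and returns the result as a string.
+var result = await functions.GetWeatherAsyncWrapper("{\"city\": \"Seattle\"}");
+```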
+
+Then, in your project file, enable structured XML documentation by setting the `GenerateDocumentationFile` property to `true`:
+
+```xml
+<PropertyGroup>
+    <GenerateDocumentationFile>true</GenerateDocumentationFile>
+</PropertyGroup>
+```
+
+## Define the `GetWeatherAsync` function and create @AutoGen.Core.FunctionCallMiddleware
+
+Create a `public partial` class to host the methods you want to use in AutoGen agents. Each method has to be a `public` instance method and its return type must be `Task<string>`. After the methods are defined, mark them with the `AutoGen.Core.FunctionAttribute` attribute.
+
+[!code-csharp[Define GetWeatherAsync function](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Function)]
+
+Then create a @AutoGen.Core.FunctionCallMiddleware and add the `GetWeatherAsync` function to the middleware. The middleware will pass the `FunctionContract` to the agent when generating a response, and process the tool call response when receiving a `ToolCallMessage`.
+[!code-csharp[Create FunctionCallMiddleware](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Create_tools)]
+
+## Create @AutoGen.OpenAI.OpenAIChatAgent with the `GetWeatherAsync` tool and chat with it
+
+Because the LiteLLM proxy server is OpenAI-compatible, we can use @AutoGen.OpenAI.OpenAIChatAgent to connect to it as a third-party OpenAI API provider. The agent is also registered with a @AutoGen.Core.FunctionCallMiddleware which contains the `GetWeatherAsync` tool, so the agent can call that tool when generating a response.
+
+[!code-csharp[Create an agent with tools](../../sample/AutoGen.OpenAI.Sample/Tool_Call_With_Ollama_And_LiteLLM.cs?name=Create_Agent)]
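+
+With the agent in place, you can send it a question that the tool can answer. A minimal sketch (the prompt text is illustrative):
+
+```csharp
+// The function call middleware intercepts the model's tool call, invokes
+// GetWeatherAsync, and folds the tool output into the agent's reply.
+var reply = await agent.SendAsync("what is the weather in new york?");
+```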
+
+The reply from the agent will be similar to the following:
+```bash
+AggregateMessage from assistant
+--------------------
+ToolCallMessage:
+ToolCallMessage from assistant
+--------------------
+- GetWeatherAsync: {"city": "new york"}
+--------------------
+
+ToolCallResultMessage:
+ToolCallResultMessage from assistant
+--------------------
+- GetWeatherAsync: The weather in new york is 72 degrees and sunny.
+--------------------
+```
\ No newline at end of file
diff --git a/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md b/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md
index 8321fc87a5c..0873765b1a6 100644
--- a/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md
+++ b/dotnet/website/articles/OpenAIChatAgent-connect-to-third-party-api.md
@@ -1,7 +1,6 @@
 The following example shows how to connect to third-party OpenAI API using @AutoGen.OpenAI.OpenAIChatAgent.
 
-> [!NOTE]
-> You can find the complete code of this example in [Example16_OpenAIChatAgent_ConnectToThirdPartyBackend](https://github.com/microsoft/autogen/tree/main/dotnet/sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs).
+[![](https://img.shields.io/badge/Open%20on%20Github-grey?logo=github)](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs)
 
 ## Overview
 A lot of LLM applications/platforms support spinning up a chat server that is compatible with OpenAI API, such as LM Studio, Ollama, Mistral etc. This means that you can connect to these servers using the @AutoGen.OpenAI.OpenAIChatAgent.
@@ -25,24 +24,24 @@ ollama serve
 ## Steps
 - Import the required namespaces:
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs?name=using_statement)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs?name=using_statement)]
 
 - Create a `CustomHttpClientHandler` class.
 
 The `CustomHttpClientHandler` class is used to customize the HttpClientHandler. In this example, we override the `SendAsync` method to redirect the request to local Ollama server, which is running on `http://localhost:11434`.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs?name=CustomHttpClientHandler)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs?name=CustomHttpClientHandler)]
 
 - Create an `OpenAIChatAgent` instance and connect to the third-party API.
 
 Then create an @AutoGen.OpenAI.OpenAIChatAgent instance and connect to the OpenAI API from Ollama. You can customize the transport behavior of `OpenAIClient` by passing a customized `HttpClientTransport` instance. In the customized `HttpClientTransport` instance, we pass the `CustomHttpClientHandler` we just created which redirects all openai chat requests to the local Ollama server.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs?name=create_agent)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs?name=create_agent)]
 
 - Chat with the `OpenAIChatAgent`.
 Finally, you can start chatting with the agent. In this example, we send a coding question to the agent and get the response.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example16_OpenAIChatAgent_ConnectToThirdPartyBackend.cs?name=send_message)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Connect_To_Ollama.cs?name=send_message)]
 
 ## Sample Output
 The following is the sample output of the code snippet above:
diff --git a/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md b/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md
index a822cb04633..22f0ced0046 100644
--- a/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md
+++ b/dotnet/website/articles/OpenAIChatAgent-use-json-mode.md
@@ -1,5 +1,7 @@
 The following example shows how to enable JSON mode in @AutoGen.OpenAI.OpenAIChatAgent.
 
+[![](https://img.shields.io/badge/Open%20on%20Github-grey?logo=github)](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs)
+
 ## What is JSON mode?
 JSON mode is a new feature in OpenAI which allows you to instruct model to always respond with a valid JSON object. This is useful when you want to constrain the model output to JSON format only.
@@ -8,20 +10,17 @@ JSON mode is a new feature in OpenAI which allows you to instruct model to alway
 
 ## How to enable JSON mode in OpenAIChatAgent.
 
-> [!NOTE]
-> You can find the complete example in the [Example13_OpenAIAgent_JsonMode](https://github.com/microsoft/autogen/tree/main/dotnet/sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs).
-
 To enable JSON mode for @AutoGen.OpenAI.OpenAIChatAgent, set `responseFormat` to `ChatCompletionsResponseFormat.JsonObject` when creating the agent. Note that when enabling JSON mode, you also need to instruct the agent to output JSON format in its system message.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs?name=create_agent)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs?name=create_agent)]
 
 After enabling JSON mode, the `openAIClientAgent` will always respond in JSON format when it receives a message.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs?name=chat_with_agent)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs?name=chat_with_agent)]
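+
+When consuming the reply in your own code, you may also want to guard against replies that are not valid JSON. A minimal sketch (the error handling here is illustrative and not part of the sample):
+
+```csharp
+Person? person = null;
+try
+{
+    // reply.GetContent() returns the raw reply text, which JSON mode
+    // constrains to a single JSON object.
+    person = JsonSerializer.Deserialize<Person>(reply.GetContent());
+}
+catch (JsonException ex)
+{
+    Console.WriteLine($"The model did not return valid JSON: {ex.Message}");
+}
+```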
 
 When running the example, the output from `openAIClientAgent` will be a valid JSON object which can be parsed as `Person` class defined below. Note that in the output, the `address` field is missing because the address information is not provided in user input.
 
-[!code-csharp[](../../sample/AutoGen.BasicSamples/Example13_OpenAIAgent_JsonMode.cs?name=person_class)]
+[!code-csharp[](../../sample/AutoGen.OpenAI.Sample/Use_Json_Mode.cs?name=person_class)]
 
 The output will be:
 ```bash
diff --git a/dotnet/website/articles/toc.yml b/dotnet/website/articles/toc.yml
index 6d3787ae3d0..2335ebf092b 100644
--- a/dotnet/website/articles/toc.yml
+++ b/dotnet/website/articles/toc.yml
@@ -26,6 +26,8 @@
       href: Create-type-safe-function-call.md
     - name: Use function call in an agent
       href: Use-function-call.md
+    - name: Function call with local model
+      href: Function-call-with-ollama-and-litellm.md
   - name: middleware
     items:
     - name: middleware overview
diff --git a/dotnet/website/tutorial/Create-agent-with-tools.md b/dotnet/website/tutorial/Create-agent-with-tools.md
index d3af7bff6f6..5d631890308 100644
--- a/dotnet/website/tutorial/Create-agent-with-tools.md
+++ b/dotnet/website/tutorial/Create-agent-with-tools.md
@@ -98,7 +98,8 @@ Some LLM models support parallel tool call, which returns multiple tool calls in
 [!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=parallel_tool_call)]
 
 ## Further Reading
-- [Function-call-with-openai](../articles/OpenAIChatAgent-use-function-call.md)
-- [Function-call-with-gemini](../articles/AutoGen.Gemini/Function-call-with-gemini.md)
+- [Function call with openai](../articles/OpenAIChatAgent-use-function-call.md)
+- [Function call with gemini](../articles/AutoGen.Gemini/Function-call-with-gemini.md)
+- [Function call with local model](../articles/Function-call-with-ollama-and-litellm.md)
 - [Use kernel plugin in other agents](../articles/AutoGen.SemanticKernel/Use-kernel-plugin-in-other-agents.md)
 - [function call in mistral](../articles/MistralChatAgent-use-function-call.md)
\ No newline at end of file

From 58d958cb895bea77eedcd3b089d476926ac58610 Mon Sep 17 00:00:00 2001
From: XiaoYun Zhang
Date: Sat, 29 Jun 2024 23:33:02 -0700
Subject: [PATCH 3/3] fix tests

---
 dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj | 1 +
 dotnet/test/AutoGen.Tests/BasicSampleTest.cs                 | 5 -----
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj b/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj
index ba499232beb..04800a631ee 100644
--- a/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj
+++ b/dotnet/test/AutoGen.OpenAI.Tests/AutoGen.OpenAI.Tests.csproj
@@ -8,6 +8,7 @@
+
diff --git a/dotnet/test/AutoGen.Tests/BasicSampleTest.cs b/dotnet/test/AutoGen.Tests/BasicSampleTest.cs
index 8f2b9b2de51..8da87d7902b 100644
--- a/dotnet/test/AutoGen.Tests/BasicSampleTest.cs
+++ b/dotnet/test/AutoGen.Tests/BasicSampleTest.cs
@@ -37,11 +37,6 @@ public async Task AgentFunctionCallTestAsync()
         await Example03_Agent_FunctionCall.RunAsync();
     }
 
-    [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
-    public async Task OpenAIAgent_JsonMode()
-    {
-        await Example13_OpenAIAgent_JsonMode.RunAsync();
-    }
-
     [ApiKeyFact("MISTRAL_API_KEY")]
     public async Task MistralClientAgent_TokenCount()