Skip to content

Commit

Permalink
[.Net] Update website with Tutorial section (microsoft#2982)
Browse files Browse the repository at this point in the history
* update

* Update -> Releaes Notes

* add ImageChat

* update

* update
  • Loading branch information
LittleLittleCloud authored and luxzoli committed Jun 27, 2024
1 parent bf6d9e2 commit 1ea5537
Show file tree
Hide file tree
Showing 10 changed files with 277 additions and 25 deletions.
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Image_Chat_With_Agent.cs

#region Using
using AutoGen.Core;
using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using Azure.AI.OpenAI;
#endregion Using
using FluentAssertions;

namespace AutoGen.BasicSample;
Expand Down Expand Up @@ -33,16 +35,17 @@ public static async Task RunAsync()
var imageMessage = new ImageMessage(Role.User, BinaryData.FromBytes(imageBytes, "image/png"));
#endregion Prepare_Image_Input

#region Chat_With_Agent
var reply = await agent.SendAsync("what's in the picture", chatHistory: [imageMessage]);
#endregion Chat_With_Agent

#region Prepare_Multimodal_Input
var textMessage = new TextMessage(Role.User, "what's in the picture");
var multimodalMessage = new MultiModalMessage(Role.User, [textMessage, imageMessage]);
reply = await agent.SendAsync(multimodalMessage);
#endregion Prepare_Multimodal_Input

#region Chat_With_Agent
var reply = await agent.SendAsync("what's in the picture", chatHistory: [imageMessage]);
// or use multimodal message to generate reply
reply = await agent.SendAsync(multimodalMessage);
#endregion Chat_With_Agent

#region verify_reply
reply.Should().BeOfType<TextMessage>();
#endregion verify_reply
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@

namespace AutoGen.BasicSample;

#region Tools
public partial class Tools
{
/// <summary>
Expand All @@ -23,6 +24,8 @@ public async Task<string> GetWeather(string city)
return $"The weather in {city} is sunny.";
}
}
#endregion Tools

public class Use_Tools_With_Agent
{
public static async Task RunAsync()
Expand All @@ -31,37 +34,53 @@ public static async Task RunAsync()
var tools = new Tools();
#endregion Create_tools

#region Create_Agent
var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var model = "gpt-3.5-turbo";
var openaiClient = new OpenAIClient(apiKey);
var functionCallMiddleware = new FunctionCallMiddleware(
#region Create_auto_invoke_middleware
var autoInvokeMiddleware = new FunctionCallMiddleware(
functions: [tools.GetWeatherFunctionContract],
functionMap: new Dictionary<string, Func<string, Task<string>>>()
{
{ tools.GetWeatherFunctionContract.Name!, tools.GetWeatherWrapper },
});
#endregion Create_auto_invoke_middleware

#region Create_no_invoke_middleware
var noInvokeMiddleware = new FunctionCallMiddleware(
functions: [tools.GetWeatherFunctionContract]);
#endregion Create_no_invoke_middleware

#region Create_Agent
var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var model = "gpt-3.5-turbo";
var openaiClient = new OpenAIClient(apiKey);
var agent = new OpenAIChatAgent(
openAIClient: openaiClient,
name: "agent",
modelName: model,
systemMessage: "You are a helpful AI assistant")
.RegisterMessageConnector() // convert OpenAI message to AutoGen message
.RegisterMiddleware(functionCallMiddleware) // pass function definition to agent.
.RegisterPrintMessage(); // print the message content
.RegisterMessageConnector(); // convert OpenAI message to AutoGen message
#endregion Create_Agent

#region Single_Turn_Tool_Call
#region Single_Turn_Auto_Invoke
var autoInvokeAgent = agent
.RegisterMiddleware(autoInvokeMiddleware) // pass function definition to agent.
.RegisterPrintMessage(); // print the message content
var question = new TextMessage(Role.User, "What is the weather in Seattle?");
var toolCallReply = await agent.SendAsync(question);
#endregion Single_Turn_Tool_Call
var reply = await autoInvokeAgent.SendAsync(question);
reply.Should().BeOfType<ToolCallAggregateMessage>();
#endregion Single_Turn_Auto_Invoke

#region Single_Turn_No_Invoke
var noInvokeAgent = agent
.RegisterMiddleware(noInvokeMiddleware) // pass function definition to agent.
.RegisterPrintMessage(); // print the message content

#region verify_too_call_reply
toolCallReply.Should().BeOfType<ToolCallAggregateMessage>();
#endregion verify_too_call_reply
question = new TextMessage(Role.User, "What is the weather in Seattle?");
reply = await noInvokeAgent.SendAsync(question);
reply.Should().BeOfType<ToolCallMessage>();
#endregion Single_Turn_No_Invoke

#region Multi_Turn_Tool_Call
var finalReply = await agent.SendAsync(chatHistory: [question, toolCallReply]);
var finalReply = await agent.SendAsync(chatHistory: [question, reply]);
#endregion Multi_Turn_Tool_Call

#region verify_reply
Expand All @@ -70,16 +89,19 @@ public static async Task RunAsync()

#region parallel_tool_call
question = new TextMessage(Role.User, "What is the weather in Seattle, New York and Vancouver");
toolCallReply = await agent.SendAsync(question);
reply = await agent.SendAsync(question);
#endregion parallel_tool_call

#region verify_parallel_tool_call_reply
toolCallReply.Should().BeOfType<ToolCallAggregateMessage>();
(toolCallReply as ToolCallAggregateMessage)!.Message1.ToolCalls.Count().Should().Be(3);
reply.Should().BeOfType<ToolCallAggregateMessage>();
(reply as ToolCallAggregateMessage)!.Message1.ToolCalls.Count().Should().Be(3);
#endregion verify_parallel_tool_call_reply

#region Multi_Turn_Parallel_Tool_Call
finalReply = await agent.SendAsync(chatHistory: [question, toolCallReply]);
finalReply = await agent.SendAsync(chatHistory: [question, reply]);
finalReply.Should().BeOfType<ToolCallAggregateMessage>();
(finalReply as ToolCallAggregateMessage)!.Message1.ToolCalls.Count().Should().Be(3);
#endregion Multi_Turn_Parallel_Tool_Call
}

}
2 changes: 2 additions & 0 deletions dotnet/website/docfx.json
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@
"files": [
"articles/**.md",
"articles/**/toc.yml",
"tutorial/**.md",
"tutorial/**/toc.yml",
"toc.yml",
"*.md"
]
Expand Down
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
5 changes: 4 additions & 1 deletion dotnet/website/toc.yml
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
- name: Docs
href: articles/

- name: Tutorial
href: tutorial/

- name: API Reference
href: api/

- name: Update Log
- name: Release Notes
href: update.md

- name: Other Languages
Expand Down
53 changes: 53 additions & 0 deletions dotnet/website/tutorial/Chat-with-an-agent.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
This tutorial shows how to generate response using an @AutoGen.Core.IAgent by taking @AutoGen.OpenAI.OpenAIChatAgent as an example.

> [!NOTE]
> AutoGen.Net provides the following agents to connect to different LLM platforms. Generating responses using these agents is similar to the example shown below.
> - @AutoGen.OpenAI.OpenAIChatAgent
> - @AutoGen.SemanticKernel.SemanticKernelAgent
> - @AutoGen.LMStudio.LMStudioAgent
> - @AutoGen.Mistral.MistralClientAgent
> - @AutoGen.Anthropic.AnthropicClientAgent
> - @AutoGen.Ollama.OllamaAgent
> - @AutoGen.Gemini.GeminiChatAgent
> [!NOTE]
> The complete code example can be found in [Chat_With_Agent.cs](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs)
## Step 1: Install AutoGen

First, install the AutoGen package using the following command:

```bash
dotnet add package AutoGen
```

## Step 2: Add Using Statements

[!code-csharp[Using Statements](../../sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs?name=Using)]

## Step 3: Create an @AutoGen.OpenAI.OpenAIChatAgent

> [!NOTE]
> The @AutoGen.OpenAI.Extension.OpenAIAgentExtension.RegisterMessageConnector* method registers an @AutoGen.OpenAI.OpenAIChatRequestMessageConnector middleware which converts OpenAI message types to AutoGen message types. This step is necessary when you want to use AutoGen built-in message types like @AutoGen.Core.TextMessage, @AutoGen.Core.ImageMessage, etc.
> For more information, see [Built-in-messages](../articles/Built-in-messages.md)
[!code-csharp[Create an OpenAIChatAgent](../../sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs?name=Create_Agent)]

## Step 4: Generate Response
To generate a response, you can use one of the overloaded methods of @AutoGen.Core.AgentExtension.SendAsync*. The following code shows how to generate a response from a text message:

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs?name=Chat_With_Agent)]

To generate response with chat history, you can pass the chat history to the @AutoGen.Core.AgentExtension.SendAsync* method:

[!code-csharp[Generate Response with Chat History](../../sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs?name=Chat_With_History)]

To generate a streaming response, use @AutoGen.Core.IStreamingAgent.GenerateStreamingReplyAsync*:

[!code-csharp[Generate Streaming Response](../../sample/AutoGen.BasicSamples/GettingStart/Chat_With_Agent.cs?name=Streaming_Chat)]

## Further Reading
- [Chat with google gemini](../articles/AutoGen.Gemini/Chat-with-google-gemini.md)
- [Chat with vertex gemini](../articles/AutoGen.Gemini/Chat-with-vertex-gemini.md)
- [Chat with Ollama](../articles/AutoGen.Ollama/Chat-with-llama.md)
- [Chat with Semantic Kernel Agent](../articles/AutoGen.SemanticKernel/SemanticKernelAgent-simple-chat.md)
105 changes: 105 additions & 0 deletions dotnet/website/tutorial/Create-agent-with-tools.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
This tutorial shows how to use tools in an agent.

## What is a tool
Tools are pre-defined functions in a user's project that an agent can invoke. An agent can use tools to perform actions like searching the web, performing calculations, etc. Tools can greatly extend the capabilities of an agent.

> [!NOTE]
> To use tools with agent, the backend LLM model used by the agent needs to support tool calling. Here are some of the LLM models that support tool calling as of 06/21/2024
> - GPT-3.5-turbo with version >= 0613
> - GPT-4 series
> - Gemini series
> - OPEN_MISTRAL_7B
> - ...
>
> This tutorial uses the latest `GPT-3.5-turbo` as example.
> [!NOTE]
> The complete code example can be found in [Use_Tools_With_Agent.cs](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs)
## Key Concepts
- @AutoGen.Core.FunctionContract: The contract of a function that agent can invoke. It contains the function name, description, parameters schema, and return type.
- @AutoGen.Core.ToolCallMessage: A message type that represents a tool call request in AutoGen.Net.
- @AutoGen.Core.ToolCallResultMessage: A message type that represents a tool call result in AutoGen.Net.
- @AutoGen.Core.ToolCallAggregateMessage: An aggregate message type that represents a tool call request and its result in a single message in AutoGen.Net.
- @AutoGen.Core.FunctionCallMiddleware: A middleware that pass the @AutoGen.Core.FunctionContract to the agent when generating response, and process the tool call response when receiving a @AutoGen.Core.ToolCallMessage.

> [!Tip]
> You can use AutoGen.SourceGenerator to automatically generate type-safe @AutoGen.Core.FunctionContract instances instead of manually defining them. For more information, please check out [Create type-safe function](../articles/Create-type-safe-function-call.md).
## Install AutoGen and AutoGen.SourceGenerator
First, install the AutoGen and AutoGen.SourceGenerator package using the following command:

```bash
dotnet add package AutoGen
dotnet add package AutoGen.SourceGenerator
```

Also, you might need to enable structural xml document support by setting `GenerateDocumentationFile` property to true in your project file. This allows source generator to leverage the documentation of the function when generating the function definition.

```xml
<PropertyGroup>
<!-- This enables structural xml document support -->
<GenerateDocumentationFile>true</GenerateDocumentationFile>
</PropertyGroup>
```

## Add Using Statements

[!code-csharp[Using Statements](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Using)]

## Define `Tool` class
Create a `public partial` class to host the tools you want to use in AutoGen agents. Each method has to be a `public` instance method and its return type must be `Task<string>`. After the methods are defined, mark them with the @AutoGen.Core.FunctionAttribute attribute.

In the following example, we define a `GetWeather` tool that returns the weather information of a city.

[!code-csharp[Define Tool class](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Tools)]

## Tool call without auto-invoke
In this case, when receiving a @AutoGen.Core.ToolCallMessage, the agent will not automatically invoke the tool. Instead, the agent will return the original message back to the user. The user can then decide whether to invoke the tool or not.

![single-turn tool call without auto-invoke](../images/articles/CreateAgentWithTools/single-turn-tool-call-without-auto-invoke.png)

To implement this, you can create the @AutoGen.Core.FunctionCallMiddleware without passing the `functionMap` parameter to the constructor so that the middleware will not automatically invoke the tool once it receives a @AutoGen.Core.ToolCallMessage from its inner agent.

[!code-csharp[Create_tools](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Create_tools)]

[!code-csharp[Single-turn tool call without auto-invoke](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Create_no_invoke_middleware)]

## Tool call with auto-invoke
In this case, the agent will automatically invoke the tool when receiving a @AutoGen.Core.ToolCallMessage and return the @AutoGen.Core.ToolCallAggregateMessage which contains both the tool call request and the tool call result.

![single-turn tool call with auto-invoke](../images/articles/CreateAgentWithTools/single-turn-tool-call-with-auto-invoke.png)

To implement this, you can create the @AutoGen.Core.FunctionCallMiddleware with the `functionMap` parameter so that the middleware will automatically invoke the tool once it receives a @AutoGen.Core.ToolCallMessage from its inner agent.

[!code-csharp[Single-turn tool call with auto-invoke](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Create_auto_invoke_middleware)]

## Create an agent with the @AutoGen.Core.FunctionCallMiddleware and chat with the agent

To use tools with an agent, you can create an agent and register the @AutoGen.Core.FunctionCallMiddleware to the agent that was just created above.

[!code-csharp[Create an agent with tools](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Create_Agent)]

To automatically invoke the tool, you can register the agent with `autoInvokeMiddleware`. In this situation, the agent will automatically invoke the tool when receiving a @AutoGen.Core.ToolCallMessage. And return the @AutoGen.Core.ToolCallAggregateMessage which contains both the tool call request and the tool call result.

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Single_Turn_Auto_Invoke)]

To retrieve the raw @AutoGen.Core.ToolCallMessage without invoking the tool, you can register the agent with `noInvokeMiddleware`. In this situation, the agent will return the original message back to the user when receiving a @AutoGen.Core.ToolCallMessage.

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Single_Turn_No_Invoke)]

## Send the tool call result back to LLM to generate further response
In some cases, you may want to send the tool call result back to the LLM to generate further response. To do this, you can send the tool call response from agent back to the LLM by calling the `SendAsync` method of the agent.

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=Multi_Turn_Tool_Call)]

## Parallel tool call
Some LLM models support parallel tool calls, returning multiple tool calls in one single message. Note that @AutoGen.Core.FunctionCallMiddleware already handles parallel tool calls for you. When it receives a @AutoGen.Core.ToolCallMessage that contains multiple tool calls, it will automatically invoke all the tools in sequential order and return a @AutoGen.Core.ToolCallAggregateMessage which contains all the tool call requests and results.

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Use_Tools_With_Agent.cs?name=parallel_tool_call)]

## Further Reading
- [Function-call-with-openai](../articles/OpenAIChatAgent-use-function-call.md)
- [Function-call-with-gemini](../articles/AutoGen.Gemini/Function-call-with-gemini.md)
- [Use kernel plugin in other agents](../articles/AutoGen.SemanticKernel/Use-kernel-plugin-in-other-agents.md)
- [function call in mistral](../articles/MistralChatAgent-use-function-call.md)
50 changes: 50 additions & 0 deletions dotnet/website/tutorial/Image-chat-with-agent.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
This tutorial shows how to perform image chat with an agent using the @AutoGen.OpenAI.OpenAIChatAgent as an example.

> [!NOTE]
> To chat about images with an agent, the model behind the agent needs to support image input. Here is a partial list of models that support image input:
> - gpt-4o
> - gemini-1.5
> - llava
> - claude-3
> - ...
>
> In this example, we are using the gpt-4o model as the backend model for the agent.
> [!NOTE]
> The complete code example can be found in [Image_Chat_With_Agent.cs](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs)
## Step 1: Install AutoGen

First, install the AutoGen package using the following command:

```bash
dotnet add package AutoGen
```

## Step 2: Add Using Statements

[!code-csharp[Using Statements](../../sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs?name=Using)]

## Step 3: Create an @AutoGen.OpenAI.OpenAIChatAgent

[!code-csharp[Create an OpenAIChatAgent](../../sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs?name=Create_Agent)]

## Step 4: Prepare Image Message

In AutoGen, you can create an image message using either @AutoGen.Core.ImageMessage or @AutoGen.Core.MultiModalMessage. The @AutoGen.Core.ImageMessage takes a single image as input, whereas the @AutoGen.Core.MultiModalMessage allows you to pass multiple modalities like text or image.

Here is how to create an image message using @AutoGen.Core.ImageMessage:
[!code-csharp[Create Image Message](../../sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs?name=Prepare_Image_Input)]

Here is how to create a multimodal message using @AutoGen.Core.MultiModalMessage:
[!code-csharp[Create MultiModal Message](../../sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs?name=Prepare_Multimodal_Input)]

## Step 5: Generate Response

To generate response, you can use one of the overloaded methods of @AutoGen.Core.AgentExtension.SendAsync* method. The following code shows how to generate response with an image message:

[!code-csharp[Generate Response](../../sample/AutoGen.BasicSamples/GettingStart/Image_Chat_With_Agent.cs?name=Chat_With_Agent)]

## Further Reading
- [Image chat with gemini](../articles/AutoGen.Gemini/Image-chat-with-gemini.md)
- [Image chat with llava](../articles/AutoGen.Ollama/Chat-with-llava.md)
Loading

0 comments on commit 1ea5537

Please sign in to comment.