Skip to content

Commit

Permalink
[.Net] Update website for AutoGen.SemanticKernel and AutoGen.Ollama (#…
Browse files Browse the repository at this point in the history
…2814)

* update sk documents

* add ollama doc
  • Loading branch information
LittleLittleCloud committed May 28, 2024
1 parent f9d3fda commit 7f635b4
Show file tree
Hide file tree
Showing 15 changed files with 175 additions and 15 deletions.
6 changes: 5 additions & 1 deletion dotnet/sample/AutoGen.Ollama.Sample/Chat_With_LLaMA.cs
Original file line number Diff line number Diff line change
@@ -1,18 +1,21 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Chat_With_LLaMA.cs

#region Using
using AutoGen.Core;
using AutoGen.Ollama.Extension;
#endregion Using

namespace AutoGen.Ollama.Sample;

public class Chat_With_LLaMA
{
public static async Task RunAsync()
{
#region Create_Ollama_Agent
using var httpClient = new HttpClient()
{
BaseAddress = new Uri("https://2xbvtxd1-11434.usw2.devtunnels.ms")
BaseAddress = new Uri("http://localhost:11434"),
};

var ollamaAgent = new OllamaAgent(
Expand All @@ -24,5 +27,6 @@ public static async Task RunAsync()
.RegisterPrintMessage();

var reply = await ollamaAgent.SendAsync("Can you write a piece of C# code to calculate 100th of fibonacci?");
#endregion Create_Ollama_Agent
}
}
10 changes: 9 additions & 1 deletion dotnet/sample/AutoGen.Ollama.Sample/Chat_With_LLaVA.cs
Original file line number Diff line number Diff line change
@@ -1,18 +1,21 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Chat_With_LLaVA.cs

#region Using
using AutoGen.Core;
using AutoGen.Ollama.Extension;
#endregion Using

namespace AutoGen.Ollama.Sample;

public class Chat_With_LLaVA
{
public static async Task RunAsync()
{
#region Create_Ollama_Agent
using var httpClient = new HttpClient()
{
BaseAddress = new Uri("https://2xbvtxd1-11434.usw2.devtunnels.ms")
BaseAddress = new Uri("http://localhost:11434"),
};

var ollamaAgent = new OllamaAgent(
Expand All @@ -22,19 +25,24 @@ public static async Task RunAsync()
systemMessage: "You are a helpful AI assistant")
.RegisterMessageConnector()
.RegisterPrintMessage();
#endregion Create_Ollama_Agent

#region Send_Message
var image = Path.Combine("images", "background.png");
var binaryData = BinaryData.FromBytes(File.ReadAllBytes(image), "image/png");
var imageMessage = new ImageMessage(Role.User, binaryData);
var textMessage = new TextMessage(Role.User, "what's in this image?");
var reply = await ollamaAgent.SendAsync(chatHistory: [textMessage, imageMessage]);
#endregion Send_Message

#region Send_MultiModal_Message
// You can also use MultiModalMessage to put text and image together in one message
// In this case, all the messages in the multi-modal message will be put into single piece of message
// where the text is the concatenation of all the text messages seperated by \n
// and the images are all the images in the multi-modal message
var multiModalMessage = new MultiModalMessage(Role.User, [textMessage, imageMessage]);

reply = await ollamaAgent.SendAsync(chatHistory: [multiModalMessage]);
#endregion Send_MultiModal_Message
}
}
Original file line number Diff line number Diff line change
@@ -1,35 +1,44 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Create_Semantic_Kernel_Chat_Agent.cs

#region Using
using AutoGen.Core;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Agents;

#endregion Using
namespace AutoGen.SemanticKernel.Sample;

public class Create_Semantic_Kernel_Chat_Agent
{
public static async Task RunAsync()
{
#region Create_Kernel
var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var modelId = "gpt-3.5-turbo";
var kernel = Kernel.CreateBuilder()
.AddOpenAIChatCompletion(modelId: modelId, apiKey: openAIKey)
.Build();
#endregion Create_Kernel

#region Create_ChatCompletionAgent
// The built-in ChatCompletionAgent from semantic kernel.
var chatAgent = new ChatCompletionAgent()
{
Kernel = kernel,
Name = "assistant",
Description = "You are a helpful AI assistant",
};
#endregion Create_ChatCompletionAgent

#region Create_SemanticKernelChatCompletionAgent
var messageConnector = new SemanticKernelChatMessageContentConnector();
var skAgent = new SemanticKernelChatCompletionAgent(chatAgent)
.RegisterMiddleware(messageConnector) // register message connector so it support AutoGen built-in message types like TextMessage.
.RegisterPrintMessage(); // pretty print the message to the console
#endregion Create_SemanticKernelChatCompletionAgent

#region Send_Message
await skAgent.SendAsync("Hey tell me a long tedious joke");
#endregion Send_Message
}
}
Original file line number Diff line number Diff line change
@@ -1,18 +1,21 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Use_Kernel_Functions_With_Other_Agent.cs

#region Using
using AutoGen.Core;
using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using Azure.AI.OpenAI;
using Microsoft.SemanticKernel;
#endregion Using

namespace AutoGen.SemanticKernel.Sample;

public class Use_Kernel_Functions_With_Other_Agent
{
public static async Task RunAsync()
{
#region Create_plugin
var openAIKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new Exception("Please set OPENAI_API_KEY environment variable.");
var modelId = "gpt-3.5-turbo";
var kernelBuilder = Kernel.CreateBuilder();
Expand All @@ -22,7 +25,9 @@ public static async Task RunAsync()
functionName: "GetWeather",
description: "Get the weather for a location.");
var plugin = kernel.CreatePluginFromFunctions("my_plugin", [getWeatherFunction]);
#endregion Create_plugin

#region Use_plugin
// Create a middleware to handle the plugin functions
var kernelPluginMiddleware = new KernelPluginMiddleware(kernel, plugin);

Expand All @@ -34,11 +39,14 @@ public static async Task RunAsync()
.RegisterMessageConnector() // register message connector so it support AutoGen built-in message types like TextMessage.
.RegisterMiddleware(kernelPluginMiddleware) // register the middleware to handle the plugin functions
.RegisterPrintMessage(); // pretty print the message to the console
#endregion Use_plugin

#region Send_message
var toolAggregateMessage = await openAIAgent.SendAsync("Tell me the weather in Seattle");

// The aggregate message will be converted to [ToolCallMessage, ToolCallResultMessage] when flowing into the agent
// send the aggregated message to llm to generate the final response
var finalReply = await openAIAgent.SendAsync(toolAggregateMessage);
#endregion Send_message
}
}
10 changes: 10 additions & 0 deletions dotnet/src/AutoGen.Ollama/AutoGen.Ollama.csproj
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,16 @@
<GenerateDocumentationFile>True</GenerateDocumentationFile>
</PropertyGroup>

<Import Project="$(RepoRoot)/nuget/nuget-package.props" />

<PropertyGroup>
<!-- NuGet Package Settings -->
<Title>AutoGen.Ollama</Title>
<Description>
Provide support for Ollama server in AutoGen
</Description>
</PropertyGroup>

<ItemGroup>
<ProjectReference Include="..\AutoGen.Core\AutoGen.Core.csproj" />
</ItemGroup>
Expand Down
4 changes: 2 additions & 2 deletions dotnet/website/articles/Agent-overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
## Create an agent
- Create an @AutoGen.AssistantAgent: [Create an assistant agent](./Create-an-agent.md)
- Create an @AutoGen.OpenAI.OpenAIChatAgent: [Create an OpenAI chat agent](./OpenAIChatAgent-simple-chat.md)
- Create a @AutoGen.SemanticKernel.SemanticKernelAgent: [Create a semantic kernel agent](./SemanticKernelAgent-simple-chat.md)
- Create a @AutoGen.SemanticKernel.SemanticKernelAgent: [Create a semantic kernel agent](./AutoGen.SemanticKernel/SemanticKernelAgent-simple-chat.md)
- Create a @AutoGen.LMStudio.LMStudioAgent: [Connect to LM Studio](./Consume-LLM-server-from-LM-Studio.md)
- Create your own agent: [Create your own agent](./Create-your-own-agent.md)

Expand All @@ -33,7 +33,7 @@ If an agent implements @AutoGen.Core.IStreamingAgent, you can use @AutoGen.Core.

- Middleware overview: [Middleware overview](./Middleware-overview.md)
- Write message to console: [Print message middleware](./Print-message-middleware.md)
- Convert message type: [SemanticKernelChatMessageContentConnector](./SemanticKernelAgent-support-more-messages.md) and [OpenAIChatRequestMessageConnector](./OpenAIChatAgent-support-more-messages.md)
- Convert message type: [SemanticKernelChatMessageContentConnector](./AutoGen.SemanticKernel/SemanticKernelAgent-support-more-messages.md) and [OpenAIChatRequestMessageConnector](./OpenAIChatAgent-support-more-messages.md)
- Create your own middleware: [Create your own middleware](./Create-your-own-middleware.md)

## Group chat
Expand Down
27 changes: 27 additions & 0 deletions dotnet/website/articles/AutoGen.Ollama/Chat-with-llama.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
This example shows how to use @AutoGen.Ollama.OllamaAgent to connect to an Ollama server and chat with the LLaMA model.

To run this example, you need to have an Ollama server running alongside and the `llama3:latest` model installed. For how to set up an Ollama server, please refer to [Ollama](https://ollama.com/).

> [!NOTE]
> You can find the complete sample code [here](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.Ollama.Sample/Chat_With_LLaMA.cs)
### Step 1: Install AutoGen.Ollama

First, install the AutoGen.Ollama package using the following command:

```bash
dotnet add package AutoGen.Ollama
```

For how to install from nightly build, please refer to [Installation](../Installation.md).

### Step 2: Add using statement

[!code-csharp[](../../../sample/AutoGen.Ollama.Sample/Chat_With_LLaMA.cs?name=Using)]

### Step 3: Create and chat @AutoGen.Ollama.OllamaAgent

In this step, we create an @AutoGen.Ollama.OllamaAgent and connect it to the Ollama server.

[!code-csharp[](../../../sample/AutoGen.Ollama.Sample/Chat_With_LLaMA.cs?name=Create_Ollama_Agent)]

29 changes: 29 additions & 0 deletions dotnet/website/articles/AutoGen.Ollama/Chat-with-llava.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
This sample shows how to use @AutoGen.Ollama.OllamaAgent to chat with LLaVA model.

To run this example, you need to have an Ollama server running alongside and the `llava:latest` model installed. For how to set up an Ollama server, please refer to [Ollama](https://ollama.com/).

> [!NOTE]
> You can find the complete sample code [here](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.Ollama.Sample/Chat_With_LLaVA.cs)
### Step 1: Install AutoGen.Ollama

First, install the AutoGen.Ollama package using the following command:

```bash
dotnet add package AutoGen.Ollama
```

For how to install from nightly build, please refer to [Installation](../Installation.md).

### Step 2: Add using statement

[!code-csharp[](../../../sample/AutoGen.Ollama.Sample/Chat_With_LLaVA.cs?name=Using)]

### Step 3: Create @AutoGen.Ollama.OllamaAgent

[!code-csharp[](../../../sample/AutoGen.Ollama.Sample/Chat_With_LLaVA.cs?name=Create_Ollama_Agent)]

### Step 4: Start MultiModal Chat
LLaVA is a multimodal model that supports both text and image inputs. In this step, we create an image message along with a question about the image.

[!code-csharp[](../../../sample/AutoGen.Ollama.Sample/Chat_With_LLaVA.cs?name=Send_Message)]
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
## AutoGen.SemanticKernel Overview

AutoGen.SemanticKernel is a package that provides seamless integration with Semantic Kernel. It provides the following agent:
AutoGen.SemanticKernel is a package that provides seamless integration with Semantic Kernel. It provides the following agents:
- @AutoGen.SemanticKernel.SemanticKernelAgent: A slim wrapper agent over `Kernel` that only supports the original `ChatMessageContent` type via `IMessage<ChatMessageContent>`. To support more AutoGen built-in message types, register the agent with @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector.
- @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent: A slim wrapper agent over `Microsoft.SemanticKernel.Agents.ChatCompletionAgent`.

AutoGen.SemanticKernel also provides the following middleware:
- @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector: A connector that converts messages from AutoGen built-in message types to `ChatMessageContent` and vice versa. At the current stage, it only supports conversion between @AutoGen.Core.TextMessage, @AutoGen.Core.ImageMessage and @AutoGen.Core.MultiModalMessage. Function call message types like @AutoGen.Core.ToolCallMessage and @AutoGen.Core.ToolCallResultMessage are not supported yet.
- @AutoGen.SemanticKernel.KernelPluginMiddleware: A middleware that allows you to use semantic kernel plugins in other AutoGen agents like @AutoGen.OpenAI.OpenAIChatAgent.

### Get started with AutoGen.SemanticKernel

To get start with AutoGen.SemanticKernel, firstly, follow the [installation guide](Installation.md) to make sure you add the AutoGen feed correctly. Then add `AutoGen.SemanticKernel` package to your project file.
To get started with AutoGen.SemanticKernel, first follow the [installation guide](../Installation.md) to make sure you add the AutoGen feed correctly. Then add the `AutoGen.SemanticKernel` package to your project file.

```xml
<ItemGroup>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@ You can chat with @AutoGen.SemanticKernel.SemanticKernelAgent using both streami

The following example shows how to create an @AutoGen.SemanticKernel.SemanticKernelAgent and chat with it using non-streaming method:

[!code-csharp[](../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=create_semantic_kernel_agent)]
[!code-csharp[](../../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=create_semantic_kernel_agent)]

@AutoGen.SemanticKernel.SemanticKernelAgent also supports streaming chat via @AutoGen.Core.IStreamingAgent.GenerateStreamingReplyAsync*.

[!code-csharp[](../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=create_semantic_kernel_agent_streaming)]
[!code-csharp[](../../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=create_semantic_kernel_agent_streaming)]
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@
>
> Function call message type like @AutoGen.Core.ToolCallMessage and @AutoGen.Core.ToolCallResultMessage are not supported yet.
[!code-csharp[](../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=register_semantic_kernel_chat_message_content_connector)]
[!code-csharp[](../../../sample/AutoGen.BasicSamples/CodeSnippet/SemanticKernelCodeSnippet.cs?name=register_semantic_kernel_chat_message_content_connector)]
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
`AutoGen.SemanticKernel` provides built-in support for `ChatCompletionAgent` via @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent. By default the @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent only supports the original `ChatMessageContent` type via `IMessage<ChatMessageContent>`. To support more AutoGen built-in message types like @AutoGen.Core.TextMessage, @AutoGen.Core.ImageMessage, @AutoGen.Core.MultiModalMessage, you can register the agent with @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector. The @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector will convert the message from AutoGen built-in message types to `ChatMessageContent` and vice versa.

The following step-by-step example shows how to create an @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent and chat with it:

> [!NOTE]
> You can find the complete sample code [here](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs).
### Step 1: add using statement
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs?name=Using)]

### Step 2: create kernel
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs?name=Create_Kernel)]

### Step 3: create ChatCompletionAgent
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs?name=Create_ChatCompletionAgent)]

### Step 4: create @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent
In this step, we create an @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent and register it with @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector. The @AutoGen.SemanticKernel.SemanticKernelChatMessageContentConnector will convert the message from AutoGen built-in message types to `ChatMessageContent` and vice versa.
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs?name=Create_SemanticKernelChatCompletionAgent)]

### Step 5: chat with @AutoGen.SemanticKernel.SemanticKernelChatCompletionAgent
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Create_Semantic_Kernel_Chat_Agent.cs?name=Send_Message)]
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
In semantic kernel, a kernel plugin is a collection of kernel functions that can be invoked during LLM calls. Semantic kernel provides a list of built-in plugins, like [core plugins](https://github.com/microsoft/semantic-kernel/tree/main/dotnet/src/Plugins/Plugins.Core), [web search plugin](https://github.com/microsoft/semantic-kernel/tree/main/dotnet/src/Plugins/Plugins.Web) and many more. You can also create your own plugins and use them in semantic kernel. Kernel plugins greatly extend the capabilities of semantic kernel and can be used to perform various tasks like web search, image search, text summarization, etc.

`AutoGen.SemanticKernel` provides a middleware called @AutoGen.SemanticKernel.KernelPluginMiddleware that allows you to use semantic kernel plugins in other AutoGen agents like @AutoGen.OpenAI.OpenAIChatAgent. The following example shows how to define a simple plugin with a single `GetWeather` function and use it in @AutoGen.OpenAI.OpenAIChatAgent.

> [!NOTE]
> You can find the complete sample code [here](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.SemanticKernel.Sample/Use_Kernel_Functions_With_Other_Agent.cs)
### Step 1: add using statement
[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Use_Kernel_Functions_With_Other_Agent.cs?name=Using)]

### Step 2: create plugin

In this step, we create a simple plugin with a single `GetWeather` function that takes a location as input and returns the weather information for that location.

[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Use_Kernel_Functions_With_Other_Agent.cs?name=Create_plugin)]

### Step 3: create OpenAIChatAgent and use the plugin

In this step, we first create a @AutoGen.SemanticKernel.KernelPluginMiddleware and register the previous plugin with it. The `KernelPluginMiddleware` will load the plugin and make its functions available for use in other agents. We then create an @AutoGen.OpenAI.OpenAIChatAgent and register it with the `KernelPluginMiddleware`.

[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Use_Kernel_Functions_With_Other_Agent.cs?name=Use_plugin)]

### Step 4: chat with OpenAIChatAgent

In this final step, we start the chat with the @AutoGen.OpenAI.OpenAIChatAgent by asking the weather in Seattle. The `OpenAIChatAgent` will use the `GetWeather` function from the plugin to get the weather information for Seattle.

[!code-csharp[](../../../sample/AutoGen.SemanticKernel.Sample/Use_Kernel_Functions_With_Other_Agent.cs?name=Send_message)]
2 changes: 2 additions & 0 deletions dotnet/website/articles/Installation.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ AutoGen.Net provides the following packages, you can choose to install one or mo
- `AutoGen.Core`: The core package, this package provides the abstraction for message type, agent and group chat.
- `AutoGen.OpenAI`: This package provides the integration agents over openai models.
- `AutoGen.Mistral`: This package provides the integration agents for Mistral.AI models.
- `AutoGen.Ollama`: This package provides the integration agents for [Ollama](https://ollama.com/).
- `AutoGen.Anthropic`: This package provides the integration agents for [Anthropic](https://www.anthropic.com/api)
- `AutoGen.LMStudio`: This package provides the integration agents from LM Studio.
- `AutoGen.SemanticKernel`: This package provides the integration agents over semantic kernel.
- `AutoGen.SourceGenerator`: This package carries a source generator that adds support for type-safe function definition generation.
Expand Down
Loading

0 comments on commit 7f635b4

Please sign in to comment.