Merge pull request #250 from elbruno/main
Update the CsharpOllamaCodeSpaces C# samples to use phi3.5 and fix RAG errors
leestott authored Jan 27, 2025
2 parents 65cf4b5 + 9e8dab5 commit 6717c95
Showing 10 changed files with 324 additions and 246 deletions.
83 changes: 39 additions & 44 deletions .devcontainer/csollamaphi3.5/devcontainer.json
@@ -1,49 +1,44 @@
 {
     "name": "Ollama with Phi-3.5 for C#",
     "image": "mcr.microsoft.com/devcontainers/dotnet:9.0",
     "features": {
         "ghcr.io/devcontainers/features/docker-in-docker:2": {},
         "ghcr.io/devcontainers/features/github-cli:1": {},
         "ghcr.io/devcontainers/features/common-utils:2": {},
-        "ghcr.io/devcontainers/features/dotnet:2": {
-            "version": "none",
-            "dotnetRuntimeVersions": "8.0",
-            "aspNetCoreRuntimeVersions": "8.0"
-        },
         "ghcr.io/prulloac/devcontainer-features/ollama:1": {
-            "pull": "phi3"
+            "pull": "phi3.5"
         },
         "sshd": "latest"
     },
     "customizations": {
         "vscode": {
             "extensions": [
                 "ms-vscode.vscode-node-azure-pack",
                 "github.vscode-github-actions",
                 "ms-dotnettools.csdevkit",
                 "ms-dotnettools.vscode-dotnet-runtime",
                 "github.copilot",
                 "ms-azuretools.vscode-docker"
             ]
         }
     },
     "forwardPorts": [
         32000,
         32001
     ],
     "postCreateCommand": "sudo dotnet workload update",
-    "postStartCommand": "ollama pull phi3.5",
+    "postStartCommand": "ollama pull all-minilm",
     "remoteUser": "vscode",
     "hostRequirements": {
         "memory": "8gb",
         "cpus": 4
     },
     "portsAttributes": {
         "32001": {
             "label": "Back End"
         },
         "32000": {
             "label": "Front End"
         }
     }
 }
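With this change the Ollama devcontainer feature pulls phi3.5 when the container is created, and postStartCommand pulls the all-minilm embedding model used by the RAG sample. A quick sanity check (illustrative only, not part of the commit) is to list the locally pulled models through Ollama's /api/tags REST endpoint, assuming the same http://localhost:11434 endpoint the samples use:

// Illustrative check: list the models the container pulled via Ollama's /api/tags endpoint.
using System.Net.Http.Json;
using System.Text.Json;

using var http = new HttpClient { BaseAddress = new Uri("http://localhost:11434") };
var tags = await http.GetFromJsonAsync<JsonElement>("/api/tags");
foreach (var model in tags.GetProperty("models").EnumerateArray())
{
    // Expect to see phi3.5 and all-minilm listed here.
    Console.WriteLine(model.GetProperty("name").GetString());
}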
17 changes: 8 additions & 9 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample01/Program.cs
@@ -21,20 +21,19 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.

-#pragma warning disable SKEXP0001, SKEXP0003, SKEXP0010, SKEXP0011, SKEXP0050, SKEXP0052
+#pragma warning disable SKEXP0001, SKEXP0003, SKEXP0010, SKEXP0011, SKEXP0050, SKEXP0052, SKEXP0070

 using Microsoft.SemanticKernel;
 using Microsoft.SemanticKernel.ChatCompletion;
 using Microsoft.SemanticKernel.Connectors.OpenAI;

+var ollamaEndpoint = "http://localhost:11434";
+var modelIdChat = "phi3.5";
+
 // Create kernel with a custom http address
-var builder = Kernel.CreateBuilder();
-builder.AddOpenAIChatCompletion(
-    modelId: "phi3.5",
-    endpoint: new Uri("http://localhost:11434"),
-    apiKey: "apikey");
-var kernel = builder.Build();
+var kernel = Kernel.CreateBuilder()
+    .AddOllamaChatCompletion(modelId: modelIdChat, endpoint: new Uri(ollamaEndpoint))
+    .Build();

 // 14 - define prompt execution settings
 var settings = new OpenAIPromptExecutionSettings
 {
     MaxTokens = 100,
(remainder of the file not shown)
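The hunk above is truncated after the MaxTokens setting, so here is a minimal, self-contained sketch of the pattern the new code uses: a kernel backed by the local Ollama server through the Ollama connector, invoked with a streaming prompt. The prompt text is illustrative, not taken from the sample.

#pragma warning disable SKEXP0070 // the Ollama connector is experimental

using Microsoft.SemanticKernel;

var ollamaEndpoint = "http://localhost:11434";
var modelIdChat = "phi3.5";

// Build a kernel that talks to the local Ollama server.
var kernel = Kernel.CreateBuilder()
    .AddOllamaChatCompletion(modelId: modelIdChat, endpoint: new Uri(ollamaEndpoint))
    .Build();

// Stream the completion to the console as it is generated.
var response = kernel.InvokePromptStreamingAsync("Write a short joke about kittens.");
await foreach (var chunk in response)
{
    Console.Write(chunk.ToString());
}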
10 changes: 4 additions & 6 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample01/Sample01.csproj
@@ -1,14 +1,12 @@
 <Project Sdk="Microsoft.NET.Sdk">

   <PropertyGroup>
     <OutputType>Exe</OutputType>
-    <TargetFramework>net8.0</TargetFramework>
+    <TargetFramework>net9.0</TargetFramework>
     <ImplicitUsings>enable</ImplicitUsings>
     <Nullable>enable</Nullable>
   </PropertyGroup>

   <ItemGroup>
-    <PackageReference Include="Microsoft.SemanticKernel" Version="1.17.2" />
+    <PackageReference Include="Microsoft.SemanticKernel" Version="1.34.0" />
+    <PackageReference Include="Microsoft.SemanticKernel.Connectors.Ollama" Version="1.34.0-alpha" />
   </ItemGroup>
-
 </Project>
35 changes: 22 additions & 13 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample02/Program.cs
@@ -21,34 +21,43 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.

-#pragma warning disable SKEXP0001, SKEXP0003, SKEXP0010, SKEXP0011, SKEXP0050, SKEXP0052
+#pragma warning disable SKEXP0001, SKEXP0003, SKEXP0010, SKEXP0011, SKEXP0050, SKEXP0052, SKEXP0070

 using Microsoft.SemanticKernel;
 using Microsoft.SemanticKernel.ChatCompletion;
 using Microsoft.SemanticKernel.Connectors.OpenAI;
+using System.Text;

+var ollamaEndpoint = "http://localhost:11434";
+var modelIdChat = "phi3.5";
+
 // Create kernel with a custom http address
-var builder = Kernel.CreateBuilder();
-builder.AddOpenAIChatCompletion(
-    modelId: "phi3.5",
-    endpoint: new Uri("http://localhost:11434"),
-    apiKey: "apikey");
-var kernel = builder.Build();
+var kernel = Kernel.CreateBuilder()
+    .AddOllamaChatCompletion(modelId: modelIdChat, endpoint: new Uri(ollamaEndpoint))
+    .Build();

 var chat = kernel.GetRequiredService<IChatCompletionService>();
 var history = new ChatHistory();
-history.AddSystemMessage("You are a useful chatbot. If you don't know an answer, say 'I don't know!'. Always reply in a funny ways. Use emojis if possible.");
+history.AddSystemMessage("You always respond in 1 sentence in a funny way. Use emojis if possible.");

 while (true)
 {
-    Console.Write("Q:");
+    Console.Write("Q: ");
     var userQ = Console.ReadLine();
     if (string.IsNullOrEmpty(userQ))
     {
         break;
     }
     history.AddUserMessage(userQ);

-    var result = await chat.GetChatMessageContentsAsync(history);
-    Console.WriteLine(result[^1].Content);
-    history.Add(result[^1]);
+    Console.Write($"{modelIdChat}: ");
+    var response = chat.GetStreamingChatMessageContentsAsync(history);
+    var assistantResponse = new StringBuilder();
+    await foreach (var message in response)
+    {
+        Console.Write(message.ToString());
+        assistantResponse.Append(message.ToString());
+    }
+    history.AddAssistantMessage(assistantResponse.ToString());
+    Console.WriteLine();
 }
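The key change in this sample: instead of waiting for GetChatMessageContentsAsync to return the whole reply, the loop streams chunks to the console while buffering them, then appends the buffered text to the history so the next turn keeps context. The same pattern, factored into a helper for clarity (the method name is hypothetical, not from the sample):

using System.Text;
using Microsoft.SemanticKernel.ChatCompletion;

// Stream the assistant reply to the console while buffering it, and
// return the full text so the caller can append it to the ChatHistory:
//   history.AddAssistantMessage(await StreamReplyAsync(chat, history));
static async Task<string> StreamReplyAsync(IChatCompletionService chat, ChatHistory history)
{
    var buffer = new StringBuilder();
    await foreach (var chunk in chat.GetStreamingChatMessageContentsAsync(history))
    {
        Console.Write(chunk.ToString()); // print tokens as they arrive
        buffer.Append(chunk.ToString()); // keep the full reply
    }
    return buffer.ToString();
}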
14 changes: 6 additions & 8 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample02/Sample02.csproj
@@ -1,15 +1,13 @@
 <Project Sdk="Microsoft.NET.Sdk">

   <PropertyGroup>
     <OutputType>Exe</OutputType>
-    <TargetFramework>net8.0</TargetFramework>
+    <TargetFramework>net9.0</TargetFramework>
     <ImplicitUsings>enable</ImplicitUsings>
     <Nullable>enable</Nullable>
     <UserSecretsId>506e8050-acbd-476d-ab7d-bbebc8238bfa</UserSecretsId>
   </PropertyGroup>

   <ItemGroup>
-    <PackageReference Include="Microsoft.SemanticKernel" Version="1.17.2" />
+    <PackageReference Include="Microsoft.SemanticKernel" Version="1.34.0" />
+    <PackageReference Include="Microsoft.SemanticKernel.Connectors.Ollama" Version="1.34.0-alpha" />
   </ItemGroup>
-
 </Project>
91 changes: 32 additions & 59 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample03/Program.cs
@@ -23,87 +23,60 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.

-#pragma warning disable SKEXP0001
-#pragma warning disable SKEXP0003
-#pragma warning disable SKEXP0010
-#pragma warning disable SKEXP0011
-#pragma warning disable SKEXP0050
-#pragma warning disable SKEXP0052
+#pragma warning disable SKEXP0001, SKEXP0003, SKEXP0010, SKEXP0011, SKEXP0050, SKEXP0052, SKEXP0070

 using Microsoft.Extensions.DependencyInjection;
+using Microsoft.KernelMemory;
+using Microsoft.KernelMemory.AI.Ollama;
 using Microsoft.SemanticKernel;
-using Microsoft.SemanticKernel.Connectors.OpenAI;
-using Microsoft.SemanticKernel.Embeddings;
-using Microsoft.SemanticKernel.Memory;
-using Microsoft.SemanticKernel.Plugins.Memory;
+
+var ollamaEndpoint = "http://localhost:11434";
+var modelIdChat = "phi3.5";
+var modelIdEmbeddings = "all-minilm";

 // questions
 var question = "What is Bruno's favourite super hero?";

 Console.WriteLine($"This program will answer the following question: {question}");
-Console.WriteLine("1st approach will be to ask the question directly to the Phi-3 model.");
+Console.WriteLine($"1st approach will be to ask the question directly to the {modelIdChat} model.");
 Console.WriteLine("2nd approach will be to add facts to a semantic memory and ask the question again");
 Console.WriteLine("");

 // Create a chat completion service
-var builder = Kernel.CreateBuilder();
-builder.AddOpenAIChatCompletion(
-    modelId: "phi3.5",
-    endpoint: new Uri("http://localhost:11434"),
-    apiKey: "apikey");
-builder.AddLocalTextEmbeddingGeneration();
-Kernel kernel = builder.Build();
+Kernel kernel = Kernel.CreateBuilder()
+    .AddOllamaChatCompletion(modelId: modelIdChat, endpoint: new Uri(ollamaEndpoint))
+    .Build();

 Console.WriteLine($"Phi-3 response (no memory).");
 var response = kernel.InvokePromptStreamingAsync(question);
 await foreach (var result in response)
 {
-    Console.Write(result);
+    Console.Write(result.ToString());
 }

 // separator
 Console.WriteLine("");
 Console.WriteLine("==============");
 Console.WriteLine("");
+Console.WriteLine($"{modelIdChat} response (using semantic memory).");

-// get the embeddings generator service
-var embeddingGenerator = kernel.Services.GetRequiredService<ITextEmbeddingGenerationService>();
-var memory = new SemanticTextMemory(new VolatileMemoryStore(), embeddingGenerator);
-
-// add facts to the collection
-const string MemoryCollectionName = "fanFacts";
-
-await memory.SaveInformationAsync(MemoryCollectionName, id: "info1", text: "Gisela's favourite super hero is Batman");
-await memory.SaveInformationAsync(MemoryCollectionName, id: "info2", text: "The last super hero movie watched by Gisela was Guardians of the Galaxy Vol 3");
-await memory.SaveInformationAsync(MemoryCollectionName, id: "info3", text: "Bruno's favourite super hero is Invincible");
-await memory.SaveInformationAsync(MemoryCollectionName, id: "info4", text: "The last super hero movie watched by Bruno was Aquaman II");
-await memory.SaveInformationAsync(MemoryCollectionName, id: "info5", text: "Bruno don't like the super hero movie: Eternals");
-
-TextMemoryPlugin memoryPlugin = new(memory);
-
-// Import the text memory plugin into the Kernel.
-kernel.ImportPluginFromObject(memoryPlugin);
-
-OpenAIPromptExecutionSettings settings = new()
+var configOllamaKernelMemory = new OllamaConfig
 {
-    ToolCallBehavior = null,
+    Endpoint = ollamaEndpoint,
+    TextModel = new OllamaModelConfig(modelIdChat),
+    EmbeddingModel = new OllamaModelConfig(modelIdEmbeddings, 2048)
 };

-var prompt = @"
-Question: {{$input}}
-Answer the question using the memory content: {{Recall}}";
+var memory = new KernelMemoryBuilder()
+    .WithOllamaTextGeneration(configOllamaKernelMemory)
+    .WithOllamaTextEmbeddingGeneration(configOllamaKernelMemory)
+    .Build();

-var arguments = new KernelArguments(settings)
-{
-    { "input", question },
-    { "collection", MemoryCollectionName }
-};
+await memory.ImportTextAsync("Gisela's favourite super hero is Batman");
+await memory.ImportTextAsync("The last super hero movie watched by Gisela was Guardians of the Galaxy Vol 3");
+await memory.ImportTextAsync("Bruno's favourite super hero is Invincible");
+await memory.ImportTextAsync("The last super hero movie watched by Bruno was Deadpool and Wolverine");
+await memory.ImportTextAsync("Bruno don't like the super hero movie: Eternals");

-Console.WriteLine($"Phi-3 response (using semantic memory).");
-
-response = kernel.InvokePromptStreamingAsync(prompt, arguments);
-await foreach (var result in response)
+var answer = memory.AskStreamingAsync(question);
+await foreach (var result in answer)
 {
-    Console.Write(result);
+    Console.Write(result.ToString());
 }
-
-Console.WriteLine($"");
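This is the RAG fix from the commit title: the old SemanticTextMemory/TextMemoryPlugin recall pipeline is replaced by Kernel Memory, with phi3.5 for text generation and all-minilm for embeddings, so the RAG step collapses to ImportTextAsync plus AskStreamingAsync. For reference, a minimal non-streaming sketch of the same question, assuming the memory instance built above (MemoryAnswer.Result carries the grounded answer text):

// Sketch only: same `memory` object as above, without streaming.
var memoryAnswer = await memory.AskAsync(question);
Console.WriteLine(memoryAnswer.Result);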
23 changes: 13 additions & 10 deletions md/07.Labs/CsharpOllamaCodeSpaces/src/Sample03/Sample03.csproj
@@ -1,17 +1,20 @@
 <Project Sdk="Microsoft.NET.Sdk">

   <PropertyGroup>
     <OutputType>Exe</OutputType>
-    <TargetFramework>net8.0</TargetFramework>
+    <TargetFramework>net9.0</TargetFramework>
     <RootNamespace>sample03</RootNamespace>
     <ImplicitUsings>enable</ImplicitUsings>
     <Nullable>enable</Nullable>
   </PropertyGroup>

-  <ItemGroup>
-    <PackageReference Include="Microsoft.SemanticKernel" Version="1.19.0" />
-    <PackageReference Include="Microsoft.SemanticKernel.Plugins.Memory" Version="1.19.0-alpha" />
-    <PackageReference Include="SmartComponents.LocalEmbeddings.SemanticKernel" Version="0.1.0-preview10148" />
-  </ItemGroup>
-
-</Project>
+  <ItemGroup>
+    <PackageReference Include="Microsoft.KernelMemory.Abstractions" Version="0.96.250120.1" />
+    <PackageReference Include="Microsoft.KernelMemory.AI.Ollama" Version="0.96.250120.1" />
+    <PackageReference Include="Microsoft.KernelMemory.Core" Version="0.96.250120.1" />
+    <PackageReference Include="Microsoft.KernelMemory.SemanticKernelPlugin" Version="0.96.250120.1" />
+    <PackageReference Include="Microsoft.SemanticKernel" Version="1.34.0" />
+    <PackageReference Include="Microsoft.SemanticKernel.Connectors.Ollama" Version="1.34.0-alpha" />
+    <PackageReference Include="Microsoft.SemanticKernel.Plugins.Memory" Version="1.34.0-alpha" />
+    <PackageReference Include="SmartComponents.LocalEmbeddings.SemanticKernel" Version="0.1.0-preview10148" />
+    <PackageReference Include="System.Linq.Async" Version="6.0.1" />
+  </ItemGroup>
+</Project>
(The remaining changed files are not shown.)
