// Program.cs
using Microsoft.Extensions.Configuration;
using MistralSharp;
using MistralSharp.Models;
var configurationBuilder = new ConfigurationBuilder();
configurationBuilder.AddUserSecrets<Program>();
var configuration = configurationBuilder.Build();
// The API key is stored in .NET user secrets
var apiKey = configuration["MistralAPIKey"];
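
// A minimal guard (an optional sketch): fail fast when the secret has not been set,
// e.g. via `dotnet user-secrets set "MistralAPIKey" "<your key>"`.
if (string.IsNullOrWhiteSpace(apiKey))
{
    throw new InvalidOperationException(
        "MistralAPIKey was not found in user secrets; " +
        "set it with: dotnet user-secrets set \"MistralAPIKey\" \"<your key>\"");
}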
// Create a new instance of MistralClient and pass your API key
var mistralClient = new MistralClient(apiKey);
// Get the models endpoint response and print the result to console
var models = await mistralClient.GetAvailableModelsAsync();
Console.WriteLine($"Object: {models.Object}");
foreach (var modelData in models.Data)
{
    Console.WriteLine($"\nModel ID: {modelData.Id}\n" +
                      $"Model Object: {modelData.Object}\n" +
                      $"Model Created: {modelData.Created}\n" +
                      $"Model OwnedBy: {modelData.OwnedBy}\n" +
                      $"Model Root: {modelData.Root}\n" +
                      $"Model Parent: {modelData.Parent}"
    );

    Console.WriteLine("Permissions:");
    foreach (var permission in modelData.Permission)
    {
        Console.WriteLine($"ID: {permission.Id}\n" +
                          $"Object: {permission.Object}\n" +
                          $"Created: {permission.Created}\n" +
                          $"Organization: {permission.Organization}\n" +
                          $"Group: {permission.Group}\n" +
                          $"IsBlocking: {permission.IsBlocking}\n" +
                          $"AllowCreateEngine: {permission.AllowCreateEngine}\n" +
                          $"AllowFineTuning: {permission.AllowFineTuning}\n" +
                          $"AllowLogprobs: {permission.AllowLogprobs}\n" +
                          $"AllowSampling: {permission.AllowSampling}\n" +
                          $"AllowSearchIndices: {permission.AllowSearchIndices}\n" +
                          $"AllowView: {permission.AllowView}"
        );
    }
}
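
// A small availability check (a sketch; "mistral-medium" is an assumed model id):
// scan the returned list before hard-coding a model below.
var mediumAvailable = false;
foreach (var modelData in models.Data)
{
    if (modelData.Id == "mistral-medium")
    {
        mediumAvailable = true;
        break;
    }
}
Console.WriteLine($"\nmistral-medium available: {mediumAvailable}");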
// Create a new chat request
var chatRequest = new ChatRequest()
{
    // The ID of the model to use. You can use GetAvailableModelsAsync() to get the list of available models
    Model = ModelType.MistralMedium,

    // Pass a list of messages to the model.
    // The role can be "system", "user", or "assistant";
    // Content is the message text.
    Messages =
    [
        new Message()
        {
            Role = "user",
            Content = "How can Mistral AI assist programmers?"
        }
    ],

    // The maximum number of tokens to generate in the completion.
    // The token count of your prompt plus max_tokens cannot exceed the model's context length.
    MaxTokens = 16,

    // Default: 0.7
    // What sampling temperature to use, between 0.0 and 2.0.
    // Higher values like 0.8 make the output more random, while lower values like 0.2 make
    // it more focused and deterministic.
    Temperature = 0.7,

    // Default: 1
    // Nucleus sampling: the model considers only the tokens comprising the top_p probability mass,
    // so 0.1 means only the tokens in the top 10% probability mass are considered.
    // Mistral generally recommends altering this or temperature, but not both.
    TopP = 1,

    // Default: false
    // Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events
    // as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will
    // hold the request open until the timeout or until completion, with the response containing the full
    // result as JSON.
    Stream = false,

    // Default: false
    // Whether to inject a safety prompt before all conversations.
    SafeMode = false,

    // Default: null
    // The seed to use for random sampling. If set, repeated calls with the same inputs produce deterministic results.
    RandomSeed = null
};
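
// Note: with MaxTokens = 16 the reply will almost certainly be cut off, and the
// choice's FinishReason printed below should reflect a length stop; raise
// MaxTokens for complete answers.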
// Call the chat endpoint and pass our ChatRequest object
var sampleChat = await mistralClient.ChatAsync(chatRequest);
Console.WriteLine($"\nChat Response ID: {sampleChat.Id}\n" +
                  $"Chat Response Created: {sampleChat.Created}\n" +
                  $"Chat Response Object: {sampleChat.ObjectPropertyName}\n" +
                  $"Chat Response Model: {sampleChat.Model}\n" +
                  $"Chat Response Usage:\n" +
                  $"Prompt Tokens: {sampleChat.Usage.PromptTokens}\n" +
                  $"Completion Tokens: {sampleChat.Usage.CompletionTokens}\n" +
                  $"Total Tokens: {sampleChat.Usage.TotalTokens}\n");
Console.WriteLine("Choices:");
foreach (var choice in sampleChat.Choices)
{
    Console.WriteLine($"Finish Reason: {choice.FinishReason}\nIndex: {choice.Index}\n");
    Console.WriteLine("Response Messages:");
    Console.WriteLine($"Role: {choice.Message.Role}\nContent: {choice.Message.Content}\n");
}
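
// Convenience sketch: capture just the first choice's text (the usual case when a
// single completion is requested), without assuming anything about the list type.
string? firstAnswer = null;
foreach (var choice in sampleChat.Choices)
{
    firstAnswer = choice.Message.Content;
    break;
}
Console.WriteLine($"First answer: {firstAnswer}");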
// Create a new EmbeddingRequest object
var embeddings = new EmbeddingRequest()
{
    // The ID of the model to use for this request.
    Model = ModelType.MistralEmbed,
    // The format of the output data.
    EncodingFormat = "float",
    // The list of strings to embed.
    Input = new List<string>()
    {
        "Hello",
        "World"
    }
};
// Create the embeddings by passing our EmbeddingRequest object
var embeddedResponse = await mistralClient.CreateEmbeddingsAsync(embeddings);
// Print the embedding to the console
Console.WriteLine("\n---Example Embedding Response---");
Console.WriteLine("EmbeddingResponse:\nId: {0}\nObject: {1}\nModel: {2}\nPromptTokens: {3}\nTotalTokens: {4}",
    embeddedResponse.Id,
    embeddedResponse.Object,
    embeddedResponse.Model,
    embeddedResponse.TokenUsage?.PromptTokens,
    embeddedResponse.TokenUsage?.TotalTokens);
foreach (var embedding in embeddedResponse.Data)
{
    Console.WriteLine(" - Object: {0}\n Index: {1}\n EmbeddingList: {2}",
        embedding.Object,
        embedding.Index,
        string.Join(", ", embedding.EmbeddingList));
}
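
// A worked sketch: cosine similarity between the two embeddings above ("Hello" and
// "World"). This assumes EmbeddingList is an in-order IEnumerable<double> and that
// both vectors have the same length, as the embeddings endpoint returns.
var vectors = new List<List<double>>();
foreach (var embedding in embeddedResponse.Data)
{
    vectors.Add(new List<double>(embedding.EmbeddingList));
}
if (vectors.Count >= 2 && vectors[0].Count == vectors[1].Count)
{
    double dot = 0, normA = 0, normB = 0;
    for (var i = 0; i < vectors[0].Count; i++)
    {
        dot += vectors[0][i] * vectors[1][i];
        normA += vectors[0][i] * vectors[0][i];
        normB += vectors[1][i] * vectors[1][i];
    }
    var cosine = dot / (Math.Sqrt(normA) * Math.Sqrt(normB));
    Console.WriteLine($"\nCosine similarity(\"Hello\", \"World\"): {cosine}");
}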