Add Mistral instruct prompt template.
lgrammel committed Dec 29, 2023
1 parent 55c10b0 commit d2c2cc9
Showing 16 changed files with 322 additions and 24 deletions.
@@ -0,0 +1,20 @@
import { MistralInstructPrompt, llamacpp, streamText } from "modelfusion";

// example assumes you are running a mistral instruct model with llama.cpp
async function main() {
  const textStream = await streamText(
    llamacpp
      .TextGenerator({ maxGenerationTokens: 512 })
      .withTextPromptTemplate(MistralInstructPrompt.instruction()),
    {
      system: "You are a celebrated poet.",
      instruction: "Write a short story about a robot learning to love.",
    }
  );

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
  }
}

main().catch(console.error);
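
For context, Mistral instruct models expect each user turn to be wrapped in [INST] ... [/INST] tags. The lines below are a minimal, hypothetical sketch of that general shape, not the template added by this commit; in particular, how the system message and responsePrefix are folded in here is an assumption, and the actual behavior is defined by MistralInstructPromptTemplate and its snapshot tests further down.

// Illustrative sketch only -- not the ModelFusion implementation.
function sketchMistralInstruction(prompt: {
  system?: string;
  instruction: string;
  responsePrefix?: string;
}): string {
  // Assumption: the system text is prepended inside the [INST] block,
  // since the base Mistral instruct format has no dedicated system slot.
  const body =
    prompt.system != null
      ? `${prompt.system}\n\n${prompt.instruction}`
      : prompt.instruction;
  return `<s>[INST] ${body} [/INST]${prompt.responsePrefix ?? ""}`;
}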

@@ -27,9 +27,7 @@ async function main() {
    [
      new ImageGeneratorTool({
        model: automatic1111
-         .ImageGenerator({
-           model: "rpg_v5",
-         })
+         .ImageGenerator({ model: "rpg_v5" })
          .withTextPrompt(),
      }),
    ],

@@ -1,18 +1,19 @@
import dotenv from "dotenv";
- import { generateText, ollama } from "modelfusion";
+ import { MistralInstructPrompt, generateText, ollama } from "modelfusion";

dotenv.config();

async function main() {
  const text = await generateText(
    ollama
      .CompletionTextGenerator({
-       model: "mistral:text",
+       model: "mistral",
        maxGenerationTokens: 120,
+       raw: true, // use prompt template below
      })
-     .withTextPrompt(),
+     .withTextPromptTemplate(MistralInstructPrompt.text()),

-   "Write a short story about a robot learning to love:\n\n"
+   "Write a short story about a robot learning to love."
  );

  console.log(text);

@@ -18,7 +18,7 @@ async function main() {
        maxGenerationTokens: 1024,
        temperature: 0,
        format: "json",
-       raw: true,
+       raw: true, // use prompt template below
        stopSequences: ["\n\n"], // prevent infinite generation
      })
      .withTextPromptTemplate(ChatMLPrompt.instruction()) // needed for jsonStructurePrompt.text()

@@ -1,16 +1,17 @@
import dotenv from "dotenv";
- import { ollama, streamText } from "modelfusion";
+ import { MistralInstructPrompt, ollama, streamText } from "modelfusion";

dotenv.config();

async function main() {
  const textStream = await streamText(
    ollama
      .CompletionTextGenerator({
-       model: "mistral:text",
+       model: "mistral",
        maxGenerationTokens: 500,
+       raw: true, // use prompt template below
      })
-     .withTextPrompt(),
+     .withTextPromptTemplate(MistralInstructPrompt.text()),

    "Write a short story about a robot learning to love:\n\n"
  );

@@ -1,5 +1,5 @@
import dotenv from "dotenv";
- import { ollama, streamText } from "modelfusion";
+ import { MistralInstructPrompt, ollama, streamText } from "modelfusion";

dotenv.config();

@@ -10,8 +10,9 @@ async function main() {
        model: "mistral",
        maxGenerationTokens: 500,
        format: "json",
+       raw: true, // use prompt template below
      })
-     .withTextPrompt(),
+     .withTextPromptTemplate(MistralInstructPrompt.text()),

    "Generate 3 character descriptions for a fantasy role playing game. " +
      "Respond using JSON."

@@ -1,12 +1,12 @@
import dotenv from "dotenv";
- import { ollama, streamText } from "modelfusion";
+ import { MistralInstructPrompt, ollama, streamText } from "modelfusion";

dotenv.config();

async function main() {
  const textStream = await streamText(
    ollama.CompletionTextGenerator({
-     model: "mistral:text",
+     model: "mistral:text", // raw mistral model without instruct fine-tuning
      maxGenerationTokens: 500,
    }),
    { prompt: "Write a short story about a robot learning to love:\n\n" }

@@ -1,6 +1,11 @@
import { MathJsTool } from "@modelfusion/mathjs-tool";
import dotenv from "dotenv";
- import { Llama2Prompt, jsonToolCallPrompt, ollama, useTool } from "modelfusion";
+ import {
+   MistralInstructPrompt,
+   jsonToolCallPrompt,
+   ollama,
+   useTool,
+ } from "modelfusion";

dotenv.config();

@@ -13,9 +18,10 @@ async function main() {
        model: "mistral",
        format: "json",
        temperature: 0,
-       raw: true,
+       raw: true, // use prompt template below
+       stopSequences: ["\n\n"], // prevent infinite generation
      })
-     .withTextPromptTemplate(Llama2Prompt.instruction()) // TODO Mistral prompt template
+     .withTextPromptTemplate(MistralInstructPrompt.instruction())
      .asToolCallGenerationModel(jsonToolCallPrompt.text()),

    new MathJsTool({ name: "calculator" }),

@@ -1,6 +1,6 @@
import dotenv from "dotenv";
import {
-   Llama2Prompt,
+   MistralInstructPrompt,
  modelfusion,
  ollama,
  useToolsOrGenerateText,

@@ -19,9 +19,10 @@ async function main() {
      .CompletionTextGenerator({
        model: "mixtral",
        temperature: 0,
-       raw: true,
+       raw: true, // use prompt template below
+       stopSequences: ["\n\n"], // prevent infinite generation
      })
-     .withTextPromptTemplate(Llama2Prompt.instruction()) // TODO mistral
+     .withTextPromptTemplate(MistralInstructPrompt.instruction())
      .asToolCallsOrTextGenerationModel(mistralMultiToolCallPromptTemplate),

    [calculator, weather],

@@ -0,0 +1,72 @@
import { chat, instruction, text } from "./MistralInstructPromptTemplate.js";

describe("text prompt", () => {
it("should format prompt", () => {
const prompt = text().format("prompt");

expect(prompt).toMatchSnapshot();
});
});

describe("instruction prompt", () => {
it("should format prompt with instruction", () => {
const prompt = instruction().format({
instruction: "instruction",
});

expect(prompt).toMatchSnapshot();
});

it("should format prompt with system and instruction", () => {
const prompt = instruction().format({
system: "system",
instruction: "instruction",
});

expect(prompt).toMatchSnapshot();
});

it("should format prompt with instruction and response prefix", () => {
const prompt = instruction().format({
instruction: "instruction",
responsePrefix: "response prefix",
});

expect(prompt).toMatchSnapshot();
});
});

describe("chat prompt", () => {
it("should format prompt with user message", () => {
const prompt = chat().format({
messages: [{ role: "user", content: "user message" }],
});

expect(prompt).toMatchSnapshot();
});

it("should format prompt with user-assistant-user messages", () => {
const prompt = chat().format({
messages: [
{ role: "user", content: "1st user message" },
{ role: "assistant", content: "assistant message" },
{ role: "user", content: "2nd user message" },
],
});

expect(prompt).toMatchSnapshot();
});

it("should format prompt with system message and user-assistant-user messages", () => {
const prompt = chat().format({
system: "you are a chatbot",
messages: [
{ role: "user", content: "1st user message" },
{ role: "assistant", content: "assistant message" },
{ role: "user", content: "2nd user message" },
],
});

expect(prompt).toMatchSnapshot();
});
});
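
As a usage note, the chat template exercised by these tests can also be called directly. A minimal sketch follows; it assumes chat() is exposed as MistralInstructPrompt.chat() alongside text() and instruction(), and the message values mirror the snapshot tests above.

import { MistralInstructPrompt } from "modelfusion";

// Minimal sketch (assumes MistralInstructPrompt.chat() is exported alongside
// text() and instruction()); message values mirror the snapshot tests above.
const chatPrompt = MistralInstructPrompt.chat().format({
  system: "you are a chatbot",
  messages: [
    { role: "user", content: "1st user message" },
    { role: "assistant", content: "assistant message" },
    { role: "user", content: "2nd user message" },
  ],
});

console.log(chatPrompt);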