Skip to content

Commit

Permalink
feat(inject-prefix): Allows you to add a prefix before generated output (#62)

Browse files Browse the repository at this point in the history
  • Loading branch information
briansunter authored Dec 15, 2022
1 parent 41f4c37 commit e62bdf7
Show file tree
Hide file tree
Showing 5 changed files with 39 additions and 6 deletions.
9 changes: 9 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,15 @@ This will generate an image using the DALL-E model, save the image to the `asset

![logseq dalle](docs/dalle.gif)

### Inject Prefix
Allows you to inject a prefix into the GPT-3 output before it is inserted into the block, such as a [[gpt3]] tag or markdown formatting like `>` for a blockquote. This is useful for identifying blocks that were generated by GPT-3.
Use the `Inject Prefix` option in the settings to set the prefix. You can add a space or `\n` newline to separate the prefix from the generated text.

#### Inject Tag
![inject tag](docs/inject-tag.gif)

#### Inject Markdown Blockquote
![inject quote](docs/inject-quote.gif)

### OpenAI Examples
[See here for example usages](https://beta.openai.com/examples).

Expand Down
Binary file added docs/inject-quote.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/inject-tag.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
6 changes: 5 additions & 1 deletion src/lib/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,12 @@ export async function openAI(

const choices = response.data.choices;
if (choices && choices[0] && choices[0].text && choices[0].text.length > 0) {
return choices[0].text;
return trimLeadingWhitespace(choices[0].text);
} else {
return null;
}
}

/**
 * Strips leading whitespace (spaces, tabs, newlines) that the OpenAI
 * completion API often prepends to generated text.
 */
function trimLeadingWhitespace(s: string): string {
  // String.prototype.trimStart removes the same character set as /^\s+/
  // (WhiteSpace + LineTerminator) and avoids a hand-rolled regex.
  return s.trimStart();
}
30 changes: 25 additions & 5 deletions src/main.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,13 @@ const settingsSchema: SettingSchemaDesc[] = [
description:
"The maximum amount of tokens to generate. Tokens can be words or just chunks of characters. The number of tokens processed in a given API request depends on the length of both your inputs and outputs. As a rough rule of thumb, 1 token is approximately 4 characters or 0.75 words for English text. One limitation to keep in mind is that your text prompt and generated completion combined must be no more than the model's maximum context length (for most models this is 2048 tokens, or about 1500 words).",
},
{
key: "injectPrefix",
type: "string",
default: "",
title: "Output prefix",
description: "Prepends the output with this string. Such as a tag like [[gpt3]] or markdown like > to blockquote. Add a space at the end if you want a space between the prefix and the output or \\n for a linebreak.",
},
{
key: "dalleImageSize",
type: "number",
Expand All @@ -46,15 +53,20 @@ const settingsSchema: SettingSchemaDesc[] = [
},
];

/**
 * OpenAI request options plus plugin-only settings.
 * `injectPrefix` is prepended to generated output before it is inserted
 * into a block — e.g. a `[[gpt3]]` tag or `> ` for a markdown blockquote.
 */
interface PluginOptions extends OpenAIOptions {
  injectPrefix?: string;
}

logseq.useSettingsSchema(settingsSchema);

function getOpenaiSettings(): OpenAIOptions {
function getOpenaiSettings(): PluginOptions {
const apiKey = logseq.settings!["openAIKey"];
const completionEngine = logseq.settings!["openAICompletionEngine"];
const injectPrefix = unescapeNewlines(logseq.settings!["injectPrefix"]);
const temperature = Number.parseFloat(logseq.settings!["openAITemperature"]);
const maxTokens = Number.parseInt(logseq.settings!["openAIMaxTokens"]);
const dalleImageSize = Number.parseInt(logseq.settings!["dalleImageSize"]) as DalleImageSize
return { apiKey, completionEngine, temperature, maxTokens, dalleImageSize };
return { apiKey, completionEngine, temperature, maxTokens, dalleImageSize, injectPrefix };
}

function handleOpenAIError(e: any) {
Expand Down Expand Up @@ -132,19 +144,24 @@ async function runGptBlock(b: IHookEvent) {
}

try {
const result = await openAI(currentBlock.content, openAISettings);
let result = await openAI(currentBlock.content, openAISettings);
if (!result) {
logseq.App.showMsg("No OpenAI results.", "warning");
return;
}

if (openAISettings.injectPrefix) {
result = openAISettings.injectPrefix + result;
}
await logseq.Editor.insertBlock(currentBlock.uuid, result, {
sibling: false,
});
} catch (e: any) {
handleOpenAIError(e);
}
}
/**
 * Converts literal `\n` escape sequences (as typed by the user in the
 * settings UI) into real newline characters.
 */
function unescapeNewlines(s: string) {
  // Splitting on the two-character sequence backslash + "n" and re-joining
  // with a newline is equivalent to a global replace of /\\n/g.
  return s.split("\\n").join("\n");
}

async function runGptPage(b: IHookEvent) {
const openAISettings = getOpenaiSettings();
Expand All @@ -170,12 +187,15 @@ async function runGptPage(b: IHookEvent) {
}

try {
const result = await openAI(pageContents, openAISettings);
let result = await openAI(pageContents, openAISettings);

if (!result) {
logseq.App.showMsg("No OpenAI results.", "warning");
return;
}
if (openAISettings.injectPrefix){
result = openAISettings.injectPrefix + result;
}

await logseq.Editor.appendBlockInPage(page.uuid, result);
} catch (e: any) {
Expand Down

0 comments on commit e62bdf7

Please sign in to comment.