diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b496ae6 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,29 @@ +name: ci + +on: [push, pull_request] + +jobs: + build: + name: tests (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macOS-latest] + steps: + - uses: actions/checkout@v2 + - name: download deno + uses: denoland/setup-deno@v1 + with: + deno-version: v1.x + + - name: check format + if: matrix.os == 'ubuntu-latest' + run: deno fmt --check + + - name: check linting + if: matrix.os == 'ubuntu-latest' + run: deno lint + + # TODO: Testing + # - name: run tests + # run: deno task test \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..600d2d3 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.vscode \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..c6abc49 --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +MIT License + +Copyright (c) 2023 Dean Srebnik + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index 7b9a44b..2386110 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,30 @@ # unofficial Deno wrapper for the Open Ai api -### usage: +[![Tags](https://img.shields.io/github/release/load1n9/openai)](https://github.com/load1n9/openai/releases) +[![Doc](https://doc.deno.land/badge.svg)](https://doc.deno.land/https/deno.land/x/openai/mod.ts) +[![Checks](https://github.com/load1n9/openai/actions/workflows/ci.yml/badge.svg)](https://github.com/load1n9/openai/actions/workflows/ci.yml) +[![License](https://img.shields.io/github/license/load1n9/openai)](https://github.com/load1n9/openai/blob/master/LICENSE) + +### Usage + ```ts -import { OpenAI } from 'https://deno.land/x/openai/mod.ts'; +import { OpenAI } from "https://deno.land/x/openai/mod.ts"; -const instance = new OpenAI('YOUR_API_KEY'); +const openAI = new OpenAI("YOUR_API_KEY"); -console.log(await instance.createCompletion('The meaning of life is')) +const completion = await openAI.createCompletion({ + model: "davinci", + prompt: "The meaning of life is", +}); + +console.log(completion.choices); ``` + +### Maintainers + +- Dean Srebnik ([@load1n9](https://github.com/load1n9)) +- Lino Le Van ([@lino-levan](https://github.com/lino-levan)) + +### License + +MIT diff --git a/deno.json b/deno.json new file mode 100644 index 0000000..90faa72 --- /dev/null +++ b/deno.json @@ -0,0 +1,3 @@ +{ + "tasks": {} +} diff --git a/examples/chat_completion.ts b/examples/chat_completion.ts new file mode 100644 index 0000000..142dbf0 --- /dev/null +++ b/examples/chat_completion.ts @@ -0,0 +1,18 @@ +import { OpenAI } from "../mod.ts"; + +const openAI = new OpenAI("YOUR_API_KEY"); + +const chatCompletion = await openAI.createChatCompletion({ + 
model: "gpt-3.5-turbo", + messages: [ + { "role": "system", "content": "You are a helpful assistant." }, + { "role": "user", "content": "Who won the world series in 2020?" }, + { + "role": "assistant", + "content": "The Los Angeles Dodgers won the World Series in 2020.", + }, + { "role": "user", "content": "Where was it played?" }, + ], +}); + +console.log(chatCompletion); diff --git a/examples/classification.ts b/examples/classification.ts deleted file mode 100644 index 6da25a1..0000000 --- a/examples/classification.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { OpenAI } from '../mod.ts'; - -const instance = new OpenAI('YOUR_API_KEY'); - - -console.log(await instance.createClassification( - [ - ["A happy moment", "Positive"], - ["I am sad.", "Negative"], - ["I am feeling awesome", "Positive"] - ], - "It is a raining day :(", - ["Positive", "Negative", "Neutral"] -)) \ No newline at end of file diff --git a/examples/completion.ts b/examples/completion.ts index 3134361..4d597a6 100644 --- a/examples/completion.ts +++ b/examples/completion.ts @@ -1,5 +1,10 @@ -import { OpenAI } from '../mod.ts'; +import { OpenAI } from "../mod.ts"; -const instance = new OpenAI('YOUR_API_KEY'); +const openAI = new OpenAI("YOUR_API_KEY"); -console.log(await instance.createCompletion('The meaning of life is')) +const completion = await openAI.createCompletion({ + model: "davinci", + prompt: "The meaning of life is", +}); + +console.log(completion); diff --git a/examples/image.ts b/examples/image.ts new file mode 100644 index 0000000..d0c4fa7 --- /dev/null +++ b/examples/image.ts @@ -0,0 +1,9 @@ +import { OpenAI } from "../mod.ts"; + +const openAI = new OpenAI("YOUR_API_KEY"); + +const image = await openAI.createImage({ + prompt: "A unicorn in space", +}); + +console.log(image); diff --git a/examples/search.ts b/examples/search.ts deleted file mode 100644 index 326ab93..0000000 --- a/examples/search.ts +++ /dev/null @@ -1,5 +0,0 @@ -import { OpenAI } from '../mod.ts'; - -const instance = 
new OpenAI('YOUR_API_KEY'); - -console.log(await instance.createSearch(['White house', 'hospital', 'school'],'the president')) diff --git a/mod.ts b/mod.ts index 763f822..403c428 100644 --- a/mod.ts +++ b/mod.ts @@ -1 +1,3 @@ -export { OpenAI } from './src/openai.ts'; +export { OpenAI } from "./src/openai.ts"; + +export * from "./src/types.ts"; diff --git a/src/openai.ts b/src/openai.ts index 0c0f5c8..619ecab 100644 --- a/src/openai.ts +++ b/src/openai.ts @@ -1,140 +1,375 @@ +import type { + ChatCompletion, + ChatCompletionOptions, + Completion, + CompletionOptions, + DeletedFile, + DeletedFineTune, + Edit, + EditOptions, + Embedding, + EmbeddingsOptions, + File, + FileList, + FineTune, + FineTuneEvent, + FineTuneEventList, + FineTuneList, + FineTuneOptions, + Image, + ImageEditOptions, + ImageOptions, + ImageVariationOptions, + Model, + ModelList, + Moderation, + Transcription, + TranscriptionOptions, + Translation, + TranslationOptions, +} from "./types.ts"; + +const baseUrl = "https://api.openai.com/v1"; + export class OpenAI { - private privKey: string; + #privateKey: string; constructor(privateKey: string) { - this.privKey = privateKey; - } - - public async createCompletion( - prompt: string, - engine = "davinci", - temperature = 0.3, - maxTokens = 60, - topP = 1, - frequencyPenalty = 0.5, - presencePenalty = 0, - ): Promise { - const response = await fetch( - `https://api.openai.com/v1/engines/${engine}/completions`, - { - body: JSON.stringify({ - prompt: prompt, - temperature: temperature, - max_tokens: maxTokens, - top_p: topP, - frequency_penalty: frequencyPenalty, - presence_penalty: presencePenalty, - }), - headers: { - Authorization: `Bearer ${this.privKey}`, - "Content-Type": "application/json", - }, - method: "POST", - }, - ); - return response.json(); - } - - public async createSearch( - documents: string[], - query: string, - engine = "davinci", - ): Promise { - const response = await fetch( - `https://api.openai.com/v1/engines/${engine}/search`, - 
{ - body: JSON.stringify({ - documents: documents, - query: query, - }), - headers: { - Authorization: `Bearer ${this.privKey}`, - "Content-Type": "application/json", - }, - method: "POST", - }, - ); - return response.json(); - } - - public async createClassification( - examples: string[][], - query: string, - labels: string[], - model = "curie", - searchModel = "ada", - ): Promise { - const response = await fetch("https://api.openai.com/v1/classifications", { - body: JSON.stringify({ - examples: examples, - query: query, - search_model: searchModel, - model: model, - labels: labels, - }), + this.#privateKey = privateKey; + } + + // deno-lint-ignore no-explicit-any + async #request(url: string, body: any, method = "POST") { + const response = await fetch(`${baseUrl}${url}`, { + body: body ? JSON.stringify(body) : undefined, headers: { - Authorization: `Bearer ${this.privKey}`, + Authorization: `Bearer ${this.#privateKey}`, "Content-Type": "application/json", }, - method: "POST", + method, + }); + + return await response.json(); + } + + /** + * Lists the currently available models, and provides basic information about each one such as the owner and availability. + * + * https://platform.openai.com/docs/api-reference/models/list + */ + async listModels(): Promise { + return await this.#request("/models", undefined, "GET"); + } + + /** + * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+ * + * https://platform.openai.com/docs/api-reference/models/retrieve + */ + async getModel(model: string): Promise { + return await this.#request(`/models/${model}`, undefined, "GET"); + } + + /** + * Creates a completion for the provided prompt and parameters + * + * https://platform.openai.com/docs/api-reference/completions/create + */ + async createCompletion(options: CompletionOptions): Promise { + // TODO: make options.stream work + return await this.#request(`/completions`, { + model: options.model, + prompt: options.prompt, + suffix: options.suffix, + max_tokens: options.maxTokens, + temperature: options.temperature, + top_p: options.topP, + n: options.n, + stream: options.stream, + logprobs: options.logprobs, + echo: options.echo, + stop: options.stop, + presence_penalty: options.presencePenalty, + frequency_penalty: options.frequencyPenalty, + best_of: options.bestOf, + logit_bias: options.logitBias, + user: options.user, + }); + } + + /** + * Creates a completion for the chat message + * + * https://platform.openai.com/docs/api-reference/chat/create + */ + async createChatCompletion( + options: ChatCompletionOptions, + ): Promise { + return await this.#request(`/chat/completions`, { + model: options.model, + messages: options.messages, + temperature: options.temperature, + top_p: options.topP, + n: options.n, + stream: options.stream, + stop: options.stop, + max_tokens: options.maxTokens, + presence_penalty: options.presencePenalty, + frequency_penalty: options.frequencyPenalty, + logit_bias: options.logitBias, + user: options.user, + }); + } + + /** + * Creates a new edit for the provided input, instruction, and parameters. 
+ * + * https://platform.openai.com/docs/api-reference/edits/create + */ + async createEdit(options: EditOptions): Promise { + return await this.#request(`/edits`, { + model: options.model, + input: options.input, + instruction: options.instruction, + n: options.n, + temperature: options.temperature, + top_p: options.topP, + }); + } + + /** + * Creates an image given a prompt. + * + * https://platform.openai.com/docs/api-reference/images/create + */ + async createImage(options: ImageOptions): Promise { + return await this.#request(`/images/generations`, { + prompt: options.prompt, + n: options.n, + size: options.size, + response_format: options.responseFormat, + user: options.user, + }); + } + + /** + * Creates an edited or extended image given an original image and a prompt. + * + * https://platform.openai.com/docs/api-reference/images/create-edit + */ + async createImageEdit(options: ImageEditOptions): Promise { + return await this.#request(`/images/edits`, { + image: options.image, + mask: options.mask, + prompt: options.prompt, + n: options.n, + size: options.size, + response_format: options.responseFormat, + user: options.user, + }); + } + + /** + * Creates a variation of a given image. + * + * https://platform.openai.com/docs/api-reference/images/create-variation + */ + async createImageVariation(options: ImageVariationOptions): Promise { + return await this.#request(`/images/variations`, { + image: options.image, + n: options.n, + size: options.size, + response_format: options.responseFormat, + user: options.user, + }); + } + + /** + * Creates an embedding vector representing the input text. + * + * https://platform.openai.com/docs/api-reference/embeddings/create + */ + async createEmbeddings(options: EmbeddingsOptions): Promise { + return await this.#request(`/embeddings`, options); + } + + /** + * Transcribes audio into the input language. 
+ * + * https://platform.openai.com/docs/api-reference/audio/create + */ + async createTranscription( + options: TranscriptionOptions, + ): Promise { + return await this.#request(`/audio/transcriptions`, { + file: options.file, + model: options.model, + prompt: options.prompt, + response_format: options.responseFormat, + temperature: options.temperature, + language: options.language, }); - return response.json(); - } - - public async createAnswer( - documents: string[], - question: string, - examplesContext: string, - examples: string[][], - maxTokens = 5, - model = "curie", - searchModel = "ada", - ): Promise { - const response = await fetch("https://api.openai.com/v1/classifications", { - body: JSON.stringify({ - documents: documents, - question: question, - search_model: searchModel, - model: model, - examples_context: examplesContext, - examples: examples, - max_tokens: maxTokens, - }), + } + + /** + * Translates audio into English. + * + * https://platform.openai.com/docs/api-reference/audio/create + */ + async createTranslation(options: TranslationOptions): Promise { + return await this.#request(`/audio/translations`, { + file: options.file, + model: options.model, + prompt: options.prompt, + response_format: options.responseFormat, + temperature: options.temperature, + }); + } + + /** + * Returns a list of files that belong to the user's organization. + * + * https://platform.openai.com/docs/api-reference/files/list + */ + async listFiles(): Promise { + return await this.#request(`/files`, undefined, "GET"); + } + + /** + * Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
+ * + * https://platform.openai.com/docs/api-reference/files/upload + */ + async uploadFile(file: string, purpose: string): Promise { + return await this.#request(`/files`, { + file, + purpose, + }); + } + + /** + * Delete a file. + * + * https://platform.openai.com/docs/api-reference/files/delete + */ + async deleteFile(fileId: string): Promise { + return await this.#request(`/files/${fileId}`, undefined, "DELETE"); + } + + /** + * Returns information about a specific file. + * + * https://platform.openai.com/docs/api-reference/files/retrieve + */ + async retrieveFile(fileId: string): Promise { + return await this.#request(`/files/${fileId}`, undefined, "GET"); + } + + /** + * Returns the contents of the specified file + * + * https://platform.openai.com/docs/api-reference/files/retrieve-content + */ + async retrieveFileContent(fileId: string) { + const response = await fetch(`${baseUrl}/files/${fileId}/content`, { headers: { - Authorization: `Bearer ${this.privKey}`, + Authorization: `Bearer ${this.#privateKey}`, "Content-Type": "application/json", }, - method: "POST", }); - return response.json(); + return response.body; } - public async retrieveEngine(engine: string): Promise { - const response = await fetch( - `https://api.openai.com/v1/engines/${engine}`, - { - headers: { - Authorization: `Bearer ${this.privKey}`, - }, - }, + /** + * Creates a job that fine-tunes a specified model from a given dataset. 
+ * + * https://platform.openai.com/docs/api-reference/fine-tunes/create + */ + async createFineTune( + options: FineTuneOptions, + ): Promise<(FineTune & { events: FineTuneEvent[] })> { + return await this.#request(`/fine-tunes`, { + training_file: options.trainingFile, + validation_file: options.validationFile, + model: options.model, + n_epochs: options.nEpochs, + batch_size: options.batchSize, + learning_rate_multiplier: options.learningRateMultiplier, + prompt_loss_weight: options.promptLossWeight, + compute_classification_metrics: options.computeClassificationMetrics, + classification_n_classes: options.classificationNClasses, + classification_positive_class: options.classificationPositiveClass, + classification_betas: options.classificationBetas, + suffix: options.suffix, + }); + } + + /** + * List your organization's fine-tuning jobs + * + * https://platform.openai.com/docs/api-reference/fine-tunes/list + */ + async listFineTunes(): Promise { + return await this.#request(`/fine-tunes`, undefined, "GET"); + } + + /** + * Gets info about the fine-tune job. + * + * https://platform.openai.com/docs/api-reference/fine-tunes/retrieve + */ + async retrieveFineTune( + fineTuneId: string, + ): Promise<(FineTune & { events: FineTuneEvent[] })> { + return await this.#request(`/fine-tunes/${fineTuneId}`, undefined, "GET"); + } + + /** + * Immediately cancel a fine-tune job. + * + * https://platform.openai.com/docs/api-reference/fine-tunes/cancel + */ + async cancelFineTune( + fineTuneId: string, + ): Promise<(FineTune & { events: FineTuneEvent[] })> { + return await this.#request(`/fine-tunes/${fineTuneId}/cancel`, undefined); + } + + /** + * Get fine-grained status updates for a fine-tune job. 
+ * + * https://platform.openai.com/docs/api-reference/fine-tunes/events + */ + async listFineTuneEvents(fineTuneId: string): Promise { + // TODO: stream query parameter + return await this.#request( + `/fine-tunes/${fineTuneId}/events`, + undefined, + "GET", ); - return response.json(); } - public async listEngines(): Promise { - const response = await fetch("https://api.openai.com/v1/engines", { - headers: { - Authorization: `Bearer ${this.privKey}`, - }, - }); - return response.json(); + /** + * Delete a fine-tuned model. You must have the Owner role in your organization. + * + * https://platform.openai.com/docs/api-reference/fine-tunes/delete-model + */ + async deleteFineTuneModel(model: string): Promise { + return await this.#request(`/models/${model}`, undefined, "DELETE"); } - public async listFiles(): Promise { - const response = await fetch("https://api.openai.com/v1/files", { - headers: { - Authorization: `Bearer ${this.privKey}`, - }, + + /** + * Classifies if text violates OpenAI's Content Policy + * + * https://platform.openai.com/docs/api-reference/moderations/create + */ + async createModeration( + input: string | string[], + model?: string, + ): Promise { + return await this.#request(`/moderations`, { + input, + model, }); - return response.json(); } } diff --git a/src/types.ts b/src/types.ts new file mode 100644 index 0000000..1b2ac04 --- /dev/null +++ b/src/types.ts @@ -0,0 +1,757 @@ +export interface CompletionOptions { + /** + * ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-model + */ + model: string; + + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
+ * Note that <|endoftext|> is the document separator that the model sees during training, + * so if a prompt is not specified the model will generate as if from the beginning of a new document. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-prompt + */ + prompt: string | string[]; + + /** + * The suffix that comes after a completion of inserted text. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-suffix + */ + suffix?: string; + + /** + * The maximum number of tokens to generate in the completion. + * The token count of your prompt plus max_tokens cannot exceed the model's context length. + * Most models have a context length of 2048 tokens (except for the newest models, which support 4096). + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-max_tokens + */ + maxTokens?: number; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. + * We generally recommend altering this or top_p but not both. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature + */ + temperature?: number; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-top_p + */ + topP?: number; + + /** + * How many completions to generate for each prompt. + * Note: Because this parameter generates many completions, it can quickly consume your token quota. + * Use carefully and ensure that you have reasonable settings for max_tokens and stop. 
+ * https://platform.openai.com/docs/api-reference/completions/create#completions/create-n + */ + n?: number; + + /** + * Whether to stream back partial progress. + * If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-stream + */ + stream?: boolean; + + /** + * Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens. + * For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. + * The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. + * The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-logprobs + */ + logprobs?: number; + + /** + * Echo back the prompt in addition to the completion + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-echo + */ + echo?: boolean; + + /** + * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-stop + */ + stop?: string | string[]; + + /** + * Number between -2.0 and 2.0. + * Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty + */ + presencePenalty?: number; + + /** + * Number between -2.0 and 2.0. + * Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
+ * https://platform.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty + */ + frequencyPenalty?: number; + + /** + * Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + * When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n. + * Note: Because this parameter generates many completions, it can quickly consume your token quota. + * Use carefully and ensure that you have reasonable settings for max_tokens and stop. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-best_of + */ + bestOf?: number; + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. + * You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. + * Mathematically, the bias is added to the logits generated by the model prior to sampling. + * The exact effect will vary per model, + * but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias + */ + logitBias?: Record; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/completions/create#completions/create-user + */ + user?: string; +} + +export interface ChatCompletionOptions { + /** + * ID of the model to use. 
Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-model + */ + model: string; + + /** + * The messages to generate chat completions for, in the chat format. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-messages + */ + messages: { + role: string; + content: string; + }[]; + + /** + * What sampling temperature to use, between 0 and 2. + * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * We generally recommend altering this or top_p but not both. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature + */ + temperature?: number; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * We generally recommend altering this or temperature but not both. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p + */ + topP?: number; + + /** + * How many chat completion choices to generate for each input message. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-n + */ + n?: number; + + /** + * If set, partial message deltas will be sent, like in ChatGPT. + * Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream + */ + stream?: boolean; + + /** + * Up to 4 sequences where the API will stop generating further tokens. 
+ * https://platform.openai.com/docs/api-reference/chat/create#chat/create-stop + */ + stop?: string | string[]; + + /** + * The maximum number of tokens allowed for the generated answer. + * By default, the number of tokens the model can return will be (4096 - prompt tokens). + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens + */ + maxTokens?: number; + + /** + * Number between -2.0 and 2.0. + * Positive values penalize new tokens based on whether they appear in the text so far, + * increasing the model's likelihood to talk about new topics. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty + */ + presencePenalty?: number; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, + * decreasing the model's likelihood to repeat the same line verbatim. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty + */ + frequencyPenalty?: number; + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. + * Mathematically, the bias is added to the logits generated by the model prior to sampling. + * The exact effect will vary per model, + * but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias + */ + logitBias?: Record; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/chat/create#chat/create-user + */ + user?: string; +} + +export interface EditOptions { + /** + * ID of the model to use. 
You can use the text-davinci-edit-001 or code-davinci-edit-001 model with this endpoint. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-model + */ + model: string; + + /** + * The input text to use as a starting point for the edit. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-input + */ + input?: string; + + /** + * The instruction that tells the model how to edit the prompt. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-instruction + */ + instruction: string; + + /** + * How many edits to generate for the input and instruction. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-n + */ + n?: number; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. + * We generally recommend altering this or top_p but not both. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-temperature + */ + temperature?: number; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + * https://platform.openai.com/docs/api-reference/edits/create#edits/create-top_p + */ + topP?: number; +} + +export interface ImageOptions { + /** + * A text description of the desired image(s). The maximum length is 1000 characters. + * https://platform.openai.com/docs/api-reference/images/create#images/create-prompt + */ + prompt: string; + + /** + * The number of images to generate. Must be between 1 and 10. + * https://platform.openai.com/docs/api-reference/images/create#images/create-n + */ + n?: number; + + /** + * The size of the generated images. 
Must be one of 256x256, 512x512, or 1024x1024. + * https://platform.openai.com/docs/api-reference/images/create#images/create-size + */ + size?: "256x256" | "512x512" | "1024x1024"; + + /** + * The format in which the generated images are returned. Must be one of url or b64_json. + * https://platform.openai.com/docs/api-reference/images/create#images/create-response_format + */ + responseFormat?: "url" | "b64_json"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/images/create#images/create-user + */ + user?: string; +} + +export interface ImageEditOptions { + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. + * If mask is not provided, image must have transparency, which will be used as the mask. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-image + */ + image: string; + + /** + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. + * Must be a valid PNG file, less than 4MB, and have the same dimensions as image. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-mask + */ + mask?: string; + + /** + * A text description of the desired image(s). The maximum length is 1000 characters. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-prompt + */ + prompt: string; + + /** + * The number of images to generate. Must be between 1 and 10. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-n + */ + n?: number; + + /** + * The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-size + */ + size?: "256x256" | "512x512" | "1024x1024"; + + /** + * The format in which the generated images are returned. 
Must be one of url or b64_json. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-response_format + */ + responseFormat?: "url" | "b64_json"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/images/create-edit#images/create-edit-user + */ + user?: string; +} + +export interface ImageVariationOptions { + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. + * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-image + */ + image: string; + + /** + * The number of images to generate. Must be between 1 and 10. + * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-n + */ + n?: number; + + /** + * The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. + * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-size + */ + size?: "256x256" | "512x512" | "1024x1024"; + + /** + * The format in which the generated images are returned. Must be one of url or b64_json. + * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-response_format + */ + responseFormat?: "url" | "b64_json"; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/images/create-variation#images/create-variation-user + */ + user?: string; +} + +export interface EmbeddingsOptions { + /** + * ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. + * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-model + */ + model: string; + + /** + * Input text to get embeddings for, encoded as a string or array of tokens. 
+ * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. + * Each input must not exceed 8192 tokens in length. + * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-input + */ + input: string | string[]; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + * https://platform.openai.com/docs/api-reference/embeddings/create#embeddings/create-user + */ + user?: string; +} + +export interface TranscriptionOptions { + /** + * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-file + */ + file: string; + + /** + * ID of the model to use. Only whisper-1 is currently available. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-model + */ + model: string; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-prompt + */ + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-response_format + */ + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-temperature + */ + temperature?: number; + + /** + * The language of the input audio. 
Supplying the input language in ISO-639-1 format will improve accuracy and latency. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-language + */ + language?: string; +} + +export interface TranslationOptions { + /** + * The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-file + */ + file: string; + + /** + * ID of the model to use. Only whisper-1 is currently available. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-model + */ + model: string; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-prompt + */ + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-response_format + */ + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-temperature + */ + temperature?: number; + + /** + * The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. + * https://platform.openai.com/docs/api-reference/audio/create#audio/create-language + */ + language?: string; +} + +export interface FineTuneOptions { + /** + * The ID of an uploaded file that contains training data. 
+ * Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion". + * Additionally, you must upload your file with the purpose fine-tune. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file + */ + trainingFile: string; + + /** + * The ID of an uploaded file that contains validation data. + * If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. + * These metrics can be viewed in the fine-tuning results file. Your train and validation data should be mutually exclusive. + * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". + * Additionally, you must upload your file with the purpose fine-tune. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-training_file + */ + validationFile: string; + + /** + * The name of the base model to fine-tune. + * You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. + * To learn more about these models, see the Models documentation. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-model + */ + model: string; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-n_epochs + */ + nEpochs?: number; + + /** + * The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. + * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, + * we've found that larger batch sizes tend to work better for larger datasets. 
+ */ + batchSize?: number; + + /** + * The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. + * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes). + * We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-learning_rate_multiplier + */ + learningRateMultiplier?: number; + + /** + * The weight to use for loss on the prompt tokens. + * This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + * and can add a stabilizing effect to training when completions are short. + * If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-prompt_loss_weight + */ + promptLossWeight?: number; + + /** + * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. + * These metrics can be viewed in the results file. + * In order to compute classification metrics, you must provide a validation_file. + * Additionally, you must specify classification_n_classes for multiclass classification or classification_positive_class for binary classification. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-compute_classification_metrics + */ + computeClassificationMetrics?: boolean; + + /** + * The number of classes in a classification task. + * This parameter is required for multiclass classification. 
+ * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_n_classes + */ + classificationNClasses?: number; + + /** + * The positive class in binary classification. + * This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_positive_class + */ + classificationPositiveClass?: string; + + /** + * If this is provided, we calculate F-beta scores at the specified beta values. + * The F-beta score is a generalization of F-1 score. This is only used for binary classification. + * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. + * A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. + * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-classification_betas + */ + classificationBetas?: number[]; + + /** + * A string of up to 40 characters that will be added to your fine-tuned model name. + * For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04. 
+ * https://platform.openai.com/docs/api-reference/fine-tunes/create#fine-tunes/create-suffix + */ + suffix?: string; +} + +export interface Model { + id: string; + object: "model"; + created: number; + owned_by: string; + permission: { + id: string; + object: "model_permission"; + created: number; + allow_create_engine: boolean; + allow_sampling: boolean; + allow_logprobs: boolean; + allow_search_indices: boolean; + allow_view: boolean; + allow_fine_tuning: boolean; + organization: string; + group: null | string; + is_blocking: boolean; + }[]; + root: string; + parent: null | string; +} + +export interface ModelList { + object: "list"; + data: Model[]; +} + +export interface Completion { + id: string; + object: "text_completion"; + created: number; + model: string; + choices: { + text: string; + index: number; + logprobs: number | null; + finish_reason: string; + }[]; + usage: { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + }; +} + +export interface ChatCompletion { + id: string; + object: "chat.completion"; + created: number; + choices: { + index: number; + message: { + role: string; + content: string; + }; + finish_reason: string; + }[]; + usage: { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + }; +} + +export interface Edit { + object: "edit"; + created: number; + choices: { + text: string; + index: number; + }[]; + usage: { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + }; +} + +export interface Image { + created: number; + data: { + url: string; + b64_json: string; + }[]; +} + +export interface Embedding { + object: "list"; + data: { + object: "embedding"; + embedding: number[]; + index: number; + }[]; + model: string; + usage: { + prompt_tokens: number; + total_tokens: number; + }; +} + +export interface Transcription { + text: string; +} + +export interface Translation { + text: string; +} + +export interface File { + id: string; + object: "file"; + bytes: 
number; + created_at: number; + filename: string; + purpose: string; +} + +export interface FileList { + data: File[]; + object: "list"; +} + +export interface DeletedFile { + id: string; + object: "file"; + deleted: boolean; +} + +export interface FineTuneEvent { + object: "fine-tune-event"; + created_at: number; + level: string; + message: string; +} + +export interface FineTuneEventList { + object: "list"; + data: FineTuneEvent[]; +} + +export interface FineTune { + id: string; + object: "fine-tune"; + model: string; + created_at: number; + fine_tuned_model: null | string; + hyperparams: { + batch_size: number; + learning_rate_multiplier: number; + n_epochs: number; + prompt_loss_weight: number; + }; + organization_id: string; + result_files: File[]; + status: "pending" | "succeeded" | "cancelled"; + validation_files: File[]; + training_files: File[]; + updated_at: number; +} + +export interface FineTuneList { + object: "list"; + data: FineTune[]; +} + +export interface DeletedFineTune { + id: string; + object: "model"; + deleted: boolean; +} + +export interface Moderation { + id: string; + model: string; + results: { + categories: { + hate: boolean; + "hate/threatening": boolean; + "self-harm": boolean; + sexual: boolean; + "sexual/minors": boolean; + violence: boolean; + "violence/graphic": boolean; + }; + category_scores: { + hate: number; + "hate/threatening": number; + "self-harm": number; + sexual: number; + "sexual/minors": number; + violence: number; + "violence/graphic": number; + }; + flagged: boolean; + }[]; +}