From 940d5695f4cacddbb58e3bfc50fec28c468c7e63 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 29 Feb 2024 17:39:28 +0100
Subject: [PATCH 01/13] docs(contributing): improve wording (#696)

---
 CONTRIBUTING.md | 6 +++---
 README.md       | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 61f37370f..693e9ea70 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,7 @@
 This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable).
 Other package managers may work but are not officially supported for development.

-To setup the repository, run:
+To set up the repository, run:

 ```bash
 yarn
 ```
@@ -65,7 +65,7 @@ pnpm link -—global openai

 ## Running tests

-Most tests will require you to [setup a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.

 ```bash
 npx prism path/to/your/openapi.yml
 ```
@@ -99,7 +99,7 @@ the changes aren't made through the automated pipeline, you may want to make rel

 ### Publish with a GitHub workflow

-You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This will require a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This requires an organization or repository secret to be set up.

 ### Publish manually

diff --git a/README.md b/README.md
index ef174634e..e8ff603e9 100644
--- a/README.md
+++ b/README.md
@@ -424,7 +424,7 @@ import OpenAI from 'openai';
 ```

 To do the inverse, add `import "openai/shims/node"` (which does import polyfills).
-This can also be useful if you are getting the wrong TypeScript types for `Response` - more details [here](https://github.com/openai/openai-node/tree/master/src/_shims#readme).
+This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/openai/openai-node/tree/master/src/_shims#readme)).

 You may also provide a custom `fetch` function when instantiating the client,
 which can be used to inspect or alter the `Request` or `Response` before/after each request:
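The README section touched by this patch describes swapping runtime shims. As a rough sketch of the usage it documents (not part of the patch itself, and assuming a Node.js runtime), opting into the Node shims looks like this:

```ts
// Load the Node shims before the main entrypoint so that `openai`
// uses Node polyfills and the matching `Response` type.
import 'openai/shims/node';
import OpenAI from 'openai';

const client = new OpenAI();
```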
From 64041fd33da569eccae64afe4e50ee803017b20b Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 29 Feb 2024 21:56:48 +0100
Subject: [PATCH 02/13] docs(readme): fix typo in custom fetch implementation (#698)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e8ff603e9..68d356f8f 100644
--- a/README.md
+++ b/README.md
@@ -434,7 +434,7 @@ import { fetch } from 'undici'; // as one example
 import OpenAI from 'openai';

 const client = new OpenAI({
-  fetch: async (url: RequestInfo, init?: RequestInfo): Promise<Response> => {
+  fetch: async (url: RequestInfo, init?: RequestInit): Promise<Response> => {
     console.log('About to make a request', url, init);
     const response = await fetch(url, init);
     console.log('Got response', response);

From ac417a2db31919d2b52f2eb2e38f9c67a8f73254 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 1 Mar 2024 01:32:50 +0100
Subject: [PATCH 03/13] fix(ChatCompletionStream): abort on async iterator break and handle errors (#699)

`break`-ing the async iterator did not previously abort the request,
which could increase usage. Errors are now handled more effectively in
the async iterator.
---
 src/lib/ChatCompletionRunFunctions.test.ts | 53 +++++++++++++++++++++-
 src/lib/ChatCompletionStream.ts            | 35 +++++++++++---
 2 files changed, 81 insertions(+), 7 deletions(-)

diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/src/lib/ChatCompletionRunFunctions.test.ts
index bb360b217..b524218ae 100644
--- a/src/lib/ChatCompletionRunFunctions.test.ts
+++ b/src/lib/ChatCompletionRunFunctions.test.ts
@@ -1,5 +1,5 @@
 import OpenAI from 'openai';
-import { OpenAIError } from 'openai/error';
+import { OpenAIError, APIConnectionError } from 'openai/error';
 import { PassThrough } from 'stream';
 import {
   ParsingToolFunction,
@@ -2207,6 +2207,7 @@ describe('resource completions', () => {
       await listener.sanityCheck();
     });
   });
+
   describe('stream', () => {
     test('successful flow', async () => {
       const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
@@ -2273,5 +2274,55 @@ describe('resource completions', () => {
       expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
       await listener.sanityCheck();
     });
+    test('handles network errors', async () => {
+      const { fetch, handleRequest } = mockFetch();
+
+      const openai = new OpenAI({ apiKey: '...', fetch });
+
+      const stream = openai.beta.chat.completions.stream(
+        {
+          max_tokens: 1024,
+          model: 'gpt-3.5-turbo',
+          messages: [{ role: 'user', content: 'Say hello there!' }],
+        },
+        { maxRetries: 0 },
+      );
+
+      handleRequest(async () => {
+        throw new Error('mock request error');
+      }).catch(() => {});
+
+      async function runStream() {
+        await stream.done();
+      }
+
+      await expect(runStream).rejects.toThrow(APIConnectionError);
+    });
+    test('handles network errors on async iterator', async () => {
+      const { fetch, handleRequest } = mockFetch();
+
+      const openai = new OpenAI({ apiKey: '...', fetch });
+
+      const stream = openai.beta.chat.completions.stream(
+        {
+          max_tokens: 1024,
+          model: 'gpt-3.5-turbo',
+          messages: [{ role: 'user', content: 'Say hello there!' }],
+        },
+        { maxRetries: 0 },
+      );
+
+      handleRequest(async () => {
+        throw new Error('mock request error');
+      }).catch(() => {});
+
+      async function runStream() {
+        for await (const _event of stream) {
+          continue;
+        }
+      }
+
+      await expect(runStream).rejects.toThrow(APIConnectionError);
+    });
   });
 });
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
index a2aa7032e..2ea040383 100644
--- a/src/lib/ChatCompletionStream.ts
+++ b/src/lib/ChatCompletionStream.ts
@@ -210,13 +210,16 @@ export class ChatCompletionStream
   [Symbol.asyncIterator](): AsyncIterator<ChatCompletionChunk> {
     const pushQueue: ChatCompletionChunk[] = [];
-    const readQueue: ((chunk: ChatCompletionChunk | undefined) => void)[] = [];
+    const readQueue: {
+      resolve: (chunk: ChatCompletionChunk | undefined) => void;
+      reject: (err: unknown) => void;
+    }[] = [];
     let done = false;

     this.on('chunk', (chunk) => {
       const reader = readQueue.shift();
       if (reader) {
-        reader(chunk);
+        reader.resolve(chunk);
       } else {
         pushQueue.push(chunk);
       }
     });
@@ -225,7 +228,23 @@ export class ChatCompletionStream
     this.on('end', () => {
       done = true;
       for (const reader of readQueue) {
-        reader(undefined);
+        reader.resolve(undefined);
+      }
+      readQueue.length = 0;
+    });
+
+    this.on('abort', (err) => {
+      done = true;
+      for (const reader of readQueue) {
+        reader.reject(err);
+      }
+      readQueue.length = 0;
+    });
+
+    this.on('error', (err) => {
+      done = true;
+      for (const reader of readQueue) {
+        reader.reject(err);
       }
       readQueue.length = 0;
     });
@@ -236,13 +255,17 @@ export class ChatCompletionStream
         if (done) {
           return { value: undefined, done: true };
         }
-        return new Promise<ChatCompletionChunk | undefined>((resolve) => readQueue.push(resolve)).then(
-          (chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }),
-        );
+        return new Promise<ChatCompletionChunk | undefined>((resolve, reject) =>
+          readQueue.push({ resolve, reject }),
+        ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
       }
       const chunk = pushQueue.shift()!;
       return { value: chunk, done: false };
     },
+    return: async () => {
+      this.abort();
+      return { value: undefined, done: true };
+    },
   };
 }
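From the consumer's side, the fix above changes what happens when a `for await` loop exits early. A minimal sketch (illustrative only; the prompt is a placeholder):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const stream = openai.beta.chat.completions.stream({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello there!' }],
  });

  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
    // `break`-ing out of the loop now triggers the iterator's new `return`
    // handler, which aborts the in-flight HTTP request instead of letting
    // it keep streaming (and accruing usage) in the background.
    break;
  }
}

main();
```

Request errors likewise reject the pending `next()` promise through the new `abort`/`error` handlers, so iteration surfaces an `APIConnectionError` rather than hanging.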
From c081bdbb55585e63370496d324dc6f94d86424d1 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 1 Mar 2024 14:57:12 +0100
Subject: [PATCH 04/13] chore(docs): mention install from git repo (#700)

---
 CONTRIBUTING.md | 2 ++
 README.md       | 1 +
 2 files changed, 3 insertions(+)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 693e9ea70..297322d17 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -43,6 +43,8 @@ To install via git:

 ```bash
 npm install --save git+ssh://git@github.com:openai/openai-node.git
+# or
+yarn add git+ssh://git@github.com:openai/openai-node.git
 ```

 Alternatively, to link a local copy of the repo:
diff --git a/README.md b/README.md
index 68d356f8f..dd3ac15c0 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
 ## Installation

 ```sh
+# install from NPM
 npm install --save openai
 # or
 yarn add openai
 ```

From e1db98bef29d200e2e401e3f5d7b2db6839c7836 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 4 Mar 2024 19:17:09 +0100
Subject: [PATCH 05/13] chore(api): update docs (#703)

---
 src/resources/audio/speech.ts           |  9 +++------
 src/resources/audio/transcriptions.ts   | 18 ++++++++++++++----
 src/resources/audio/translations.ts     |  3 ++-
 src/resources/beta/threads/runs/runs.ts |  4 ++--
 src/resources/chat/completions.ts       | 14 +++++++++-----
 src/resources/images.ts                 |  9 ++++++---
 src/resources/moderations.ts            |  8 +++-----
 7 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index d5ef09118..7d0ee2195 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -35,13 +35,10 @@ export interface SpeechCreateParams {
   voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';

   /**
-   * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
-   * `flac`, `pcm`, and `wav`.
-   *
-   * The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
-   * sample rate, mono channel, and 16-bit depth in signed little-endian format.
+   * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+   * `wav`, and `pcm`.
    */
-  response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'pcm' | 'wav';
+  response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';

   /**
    * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index 7f381c5a3..ab2079ed6 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -14,7 +14,14 @@ export class Transcriptions extends APIResource {
   }
 }

+/**
+ * Represents a transcription response returned by the model, based on the provided
+ * input.
+ */
 export interface Transcription {
+  /**
+   * The transcribed text.
+   */
   text: string;
 }

@@ -26,7 +33,8 @@ export interface TranscriptionCreateParams {
   file: Uploadable;

   /**
-   * ID of the model to use. Only `whisper-1` is currently available.
+   * ID of the model to use. Only `whisper-1` (which is powered by our open source
+   * Whisper V2 model) is currently available.
    */
   model: (string & {}) | 'whisper-1';

@@ -61,9 +69,11 @@
   temperature?: number;

   /**
-   * The timestamp granularities to populate for this transcription. Any of these
-   * options: `word`, or `segment`. Note: There is no additional latency for segment
-   * timestamps, but generating word timestamps incurs additional latency.
+   * The timestamp granularities to populate for this transcription.
+   * `response_format` must be set to `verbose_json` to use timestamp granularities.
+   * Either or both of these options are supported: `word`, or `segment`. Note: There
+   * is no additional latency for segment timestamps, but generating word timestamps
+   * incurs additional latency.
    */
   timestamp_granularities?: Array<'word' | 'segment'>;
 }
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index 54583ce1f..e68a714fb 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -26,7 +26,8 @@ export interface TranslationCreateParams {
   file: Uploadable;

   /**
-   * ID of the model to use. Only `whisper-1` is currently available.
+   * ID of the model to use. Only `whisper-1` (which is powered by our open source
+   * Whisper V2 model) is currently available.
    */
   model: (string & {}) | 'whisper-1';
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 9582a060b..9a0bc00dd 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -270,9 +270,9 @@ export namespace Run {
    */
   export interface LastError {
     /**
-     * One of `server_error` or `rate_limit_exceeded`.
+     * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
      */
-    code: 'server_error' | 'rate_limit_exceeded';
+    code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt';

     /**
      * A human-readable description of the error.
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 2a5216745..44627eb85 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -546,7 +546,9 @@ export interface ChatCompletionTokenLogprob {
   bytes: Array<number> | null;

   /**
-   * The log probability of this token.
+   * The log probability of this token, if it is within the top 20 most likely
+   * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+   * unlikely.
    */
   logprob: number;

@@ -574,7 +576,9 @@ export namespace ChatCompletionTokenLogprob {
     bytes: Array<number> | null;

     /**
-     * The log probability of this token.
+     * The log probability of this token, if it is within the top 20 most likely
+     * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+     * unlikely.
      */
     logprob: number;
   }
@@ -827,9 +831,9 @@ export interface ChatCompletionCreateParamsBase {
   tools?: Array<ChatCompletionTool>;

   /**
-   * An integer between 0 and 5 specifying the number of most likely tokens to return
-   * at each token position, each with an associated log probability. `logprobs` must
-   * be set to `true` if this parameter is used.
+   * An integer between 0 and 20 specifying the number of most likely tokens to
+   * return at each token position, each with an associated log probability.
+   * `logprobs` must be set to `true` if this parameter is used.
    */
   top_logprobs?: number | null;
diff --git a/src/resources/images.ts b/src/resources/images.ts
index 4bc654903..bc5b9edc0 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -80,7 +80,8 @@ export interface ImageCreateVariationParams {
   /**
    * The format in which the generated images are returned. Must be one of `url` or
-   * `b64_json`.
+   * `b64_json`. URLs are only valid for 60 minutes after the image has been
+   * generated.
    */
   response_format?: 'url' | 'b64_json' | null;

@@ -131,7 +132,8 @@ export interface ImageEditParams {
   /**
    * The format in which the generated images are returned. Must be one of `url` or
-   * `b64_json`.
+   * `b64_json`. URLs are only valid for 60 minutes after the image has been
+   * generated.
    */
   response_format?: 'url' | 'b64_json' | null;

@@ -176,7 +178,8 @@ export interface ImageGenerateParams {
   /**
    * The format in which the generated images are returned. Must be one of `url` or
-   * `b64_json`.
+   * `b64_json`. URLs are only valid for 60 minutes after the image has been
+   * generated.
    */
   response_format?: 'url' | 'b64_json' | null;
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index 8bde6ecca..a43006ccf 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -6,7 +6,7 @@ import * as ModerationsAPI from 'openai/resources/moderations';

 export class Moderations extends APIResource {
   /**
-   * Classifies if text violates OpenAI's Content Policy
+   * Classifies if text is potentially harmful.
    */
   create(
     body: ModerationCreateParams,
@@ -28,8 +28,7 @@ export interface Moderation {
   category_scores: Moderation.CategoryScores;

   /**
-   * Whether the content violates
-   * [OpenAI's usage policies](/policies/usage-policies).
+   * Whether any of the below categories are flagged.
    */
   flagged: boolean;
 }
@@ -170,8 +169,7 @@
 }

 /**
- * Represents policy compliance report by OpenAI's content moderation model against
- * a given input.
+ * Represents if a given text input is potentially harmful.
  */
 export interface ModerationCreateResponse {
   /**
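The transcription doc updates above correspond to request shapes like the following sketch (hedged: the file name is a placeholder; per the updated comments, `timestamp_granularities` requires `response_format: 'verbose_json'`, and word-level timestamps add latency):

```ts
import fs from 'fs';
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  // Word and segment timestamps are only populated for `verbose_json`.
  const transcription = await openai.audio.transcriptions.create({
    file: fs.createReadStream('speech.mp3'),
    model: 'whisper-1',
    response_format: 'verbose_json',
    timestamp_granularities: ['word', 'segment'],
  });

  console.log(transcription.text);
}

main();
```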
From 4ff790a67cf876191e04ad0e369e447e080b78a7 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:53:09 +0100
Subject: [PATCH 06/13] chore: fix error handler in readme (#704)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index dd3ac15c0..1cfb2537a 100644
--- a/README.md
+++ b/README.md
@@ -275,7 +275,7 @@ a subclass of `APIError` will be thrown:
 async function main() {
   const job = await openai.fineTuning.jobs
     .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
-    .catch((err) => {
+    .catch(async (err) => {
       if (err instanceof OpenAI.APIError) {
         console.log(err.status); // 400
         console.log(err.name); // BadRequestError

From d1447890a556d37928b628f6449bb80de224d207 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 5 Mar 2024 12:10:53 +0100
Subject: [PATCH 07/13] docs(readme): fix https proxy example (#705)

---
 README.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 1cfb2537a..1207d5d24 100644
--- a/README.md
+++ b/README.md
@@ -456,7 +456,7 @@ If you would like to disable or customize this behavior, for example to use the

 ```ts
 import http from 'http';
-import HttpsProxyAgent from 'https-proxy-agent';
+import { HttpsProxyAgent } from 'https-proxy-agent';

 // Configure the default for all requests:
 const openai = new OpenAI({
@@ -465,9 +465,8 @@ const openai = new OpenAI({

 // Override per-request:
 await openai.models.list({
-  baseURL: 'http://localhost:8080/test-api',
   httpAgent: new http.Agent({ keepAlive: false }),
-})
+});
 ```

 ## Semantic Versioning

From 4753be272b1d1dade7a769cf350b829fc639f36e Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 17:42:34 +0100
Subject: [PATCH 08/13] fix(streaming): correctly handle trailing new lines in byte chunks (#708)

---
 src/streaming.ts        |  8 +++++++-
 tests/streaming.test.ts | 42 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+), 1 deletion(-)
 create mode 100644 tests/streaming.test.ts

diff --git a/src/streaming.ts b/src/streaming.ts
index 7d8b4442a..1b59bce20 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -267,7 +267,7 @@ class SSEDecoder {
  *
  * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258
  */
-class LineDecoder {
+export class LineDecoder {
   // prettier-ignore
   static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']);
   static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g;
@@ -300,6 +300,12 @@ class LineDecoder {
     const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || '');
     let lines = text.split(LineDecoder.NEWLINE_REGEXP);

+    // if there is a trailing new line then the last entry will be an empty
+    // string which we don't care about
+    if (trailingNewline) {
+      lines.pop();
+    }
+
     if (lines.length === 1 && !trailingNewline) {
       this.buffer.push(lines[0]!);
       return [];
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
new file mode 100644
index 000000000..45cf6f6cd
--- /dev/null
+++ b/tests/streaming.test.ts
@@ -0,0 +1,42 @@
+import { LineDecoder } from 'openai/streaming';
+
+function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] {
+  if (!decoder) {
+    decoder = new LineDecoder();
+  }
+
+  const lines = [];
+  for (const chunk of chunks) {
+    lines.push(...decoder.decode(chunk));
+  }
+
+  return lines;
+}
+
+describe('line decoder', () => {
+  test('basic', () => {
+    // baz is not included because the line hasn't ended yet
+    expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']);
+  });
+
+  test('basic with \\r', () => {
+    // baz is not included because the line hasn't ended yet
+    expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']);
+  });
+
+  test('trailing new lines', () => {
+    expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']);
+  });
+
+  test('trailing new lines with \\r', () => {
+    expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']);
+  });
+
+  test('escaped new lines', () => {
+    expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']);
+  });
+
+  test('escaped new lines with \\r', () => {
+    expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']);
+  });
+});

From 0323ecb98ddbd8910fc5719c8bab5175b945d2ab Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 19:13:10 +0100
Subject: [PATCH 09/13] chore(types): fix accidental exposure of Buffer type to cloudflare (#709)

---
 src/streaming.ts        | 13 ++++++++++++-
 tests/streaming.test.ts | 15 +--------------
 2 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/src/streaming.ts b/src/streaming.ts
index 1b59bce20..7b0466a3c 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -267,7 +267,7 @@ class SSEDecoder {
  *
  * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258
  */
-export class LineDecoder {
+class LineDecoder {
   // prettier-ignore
   static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']);
   static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g;
@@ -372,6 +372,17 @@ export class LineDecoder {
   }
 }

+/** This is an internal helper function that's just used for testing */
+export function _decodeChunks(chunks: string[]): string[] {
+  const decoder = new LineDecoder();
+  const lines = [];
+  for (const chunk of chunks) {
+    lines.push(...decoder.decode(chunk));
+  }
+
+  return lines;
+}
+
 function partition(str: string, delimiter: string): [string, string, string] {
   const index = str.indexOf(delimiter);
   if (index !== -1) {
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
index 45cf6f6cd..479b2a341 100644
--- a/tests/streaming.test.ts
+++ b/tests/streaming.test.ts
@@ -1,17 +1,4 @@
-import { LineDecoder } from 'openai/streaming';
-
-function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] {
-  if (!decoder) {
-    decoder = new LineDecoder();
-  }
-
-  const lines = [];
-  for (const chunk of chunks) {
-    lines.push(...decoder.decode(chunk));
-  }
-
-  return lines;
-}
+import { _decodeChunks as decodeChunks } from 'openai/streaming';

 describe('line decoder', () => {
   test('basic', () => {
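With the helper now exported from `openai/streaming`, the decoder can be exercised without touching the `LineDecoder` class directly. A small sketch of the behavior the tests above pin down (`_decodeChunks` is an internal testing helper, so this is illustrative rather than supported API):

```ts
import { _decodeChunks } from 'openai/streaming';

// Chunks are buffered until a newline arrives; a trailing newline no
// longer yields a spurious empty entry (the patch 08 fix).
console.log(_decodeChunks(['foo', ' bar\n'])); // ['foo bar']
console.log(_decodeChunks(['baz\n', 'thing\n'])); // ['baz', 'thing']
```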
From 8ec216d6b72ee4d67e26786f06c93af18d042117 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 21:12:48 +0100
Subject: [PATCH 10/13] docs: remove extraneous --save and yarn install instructions (#710)

---
 CONTRIBUTING.md | 4 +---
 README.md       | 5 +----
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 297322d17..d9e64025d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -42,9 +42,7 @@ If you’d like to use the repository from source, you can either install from g
 To install via git:

 ```bash
-npm install --save git+ssh://git@github.com:openai/openai-node.git
-# or
-yarn add git+ssh://git@github.com:openai/openai-node.git
+npm install git+ssh://git@github.com:openai/openai-node.git
 ```

 Alternatively, to link a local copy of the repo:
diff --git a/README.md b/README.md
index 1207d5d24..28262aaca 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
 ## Installation

 ```sh
-# install from NPM
-npm install --save openai
-# or
-yarn add openai
+npm install openai
 ```

 You can import in Deno via:

From 4688ef4b36e9f383a3abf6cdb31d498163a7bb9e Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 7 Mar 2024 19:13:22 +0100
Subject: [PATCH 11/13] docs: use @deprecated decorator for deprecated params (#711)

---
 src/resources/chat/completions.ts | 30 ++++++++++++++++++------------
 src/resources/files.ts            |  8 ++++----
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 44627eb85..c2d6da0be 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -133,8 +133,8 @@ export interface ChatCompletionAssistantMessageParam {
   content?: string | null;

   /**
-   * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-   * that should be called, as generated by the model.
+   * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+   * a function that should be called, as generated by the model.
    */
   function_call?: ChatCompletionAssistantMessageParam.FunctionCall;

@@ -152,8 +152,8 @@ export interface ChatCompletionAssistantMessageParam {

 export namespace ChatCompletionAssistantMessageParam {
   /**
-   * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-   * that should be called, as generated by the model.
+   * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+   * a function that should be called, as generated by the model.
    */
   export interface FunctionCall {
     /**
@@ -250,8 +250,8 @@ export namespace ChatCompletionChunk {
       content?: string | null;

       /**
-       * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-       * that should be called, as generated by the model.
+       * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+       * a function that should be called, as generated by the model.
        */
       function_call?: Delta.FunctionCall;

@@ -265,8 +265,8 @@ export namespace ChatCompletionChunk {

     export namespace Delta {
       /**
-       * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-       * that should be called, as generated by the model.
+       * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+       * a function that should be called, as generated by the model.
        */
       export interface FunctionCall {
         /**
@@ -378,6 +378,9 @@ export interface ChatCompletionFunctionCallOption {
   name: string;
 }

+/**
+ * @deprecated
+ */
 export interface ChatCompletionFunctionMessageParam {
   /**
    * The contents of the function message.
@@ -410,8 +413,8 @@ export interface ChatCompletionMessage {
   role: 'assistant';

   /**
-   * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-   * that should be called, as generated by the model.
+   * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+   * a function that should be called, as generated by the model.
    */
   function_call?: ChatCompletionMessage.FunctionCall;

@@ -423,8 +426,8 @@ export namespace ChatCompletionMessage {
   /**
-   * Deprecated and replaced by `tool_calls`. The name and arguments of a function
-   * that should be called, as generated by the model.
+   * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+   * a function that should be called, as generated by the model.
    */
   export interface FunctionCall {
     /**
@@ -855,6 +858,9 @@ export interface ChatCompletionCreateParamsBase {
 }

 export namespace ChatCompletionCreateParams {
+  /**
+   * @deprecated
+   */
   export interface Function {
     /**
      * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
diff --git a/src/resources/files.ts b/src/resources/files.ts
index db8f3a66a..cda487a63 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -154,14 +154,14 @@ export interface FileObject {
   purpose: 'fine-tune' | 'fine-tune-results' | 'assistants' | 'assistants_output';

   /**
-   * Deprecated. The current status of the file, which can be either `uploaded`,
-   * `processed`, or `error`.
+   * @deprecated: Deprecated. The current status of the file, which can be either
+   * `uploaded`, `processed`, or `error`.
    */
   status: 'uploaded' | 'processed' | 'error';

   /**
-   * Deprecated. For details on why a fine-tuning training file failed validation,
-   * see the `error` field on `fine_tuning.job`.
+   * @deprecated: Deprecated. For details on why a fine-tuning training file failed
+   * validation, see the `error` field on `fine_tuning.job`.
    */
   status_details?: string;
 }

From d728e9923554e4c72c9efa3bd528561400d50ad8 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 7 Mar 2024 21:10:51 +0100
Subject: [PATCH 12/13] chore(internal): add explicit type annotation to decoder (#712)

---
 src/streaming.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/streaming.ts b/src/streaming.ts
index 7b0466a3c..f90c5d89a 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -375,7 +375,7 @@ class LineDecoder {
 /** This is an internal helper function that's just used for testing */
 export function _decodeChunks(chunks: string[]): string[] {
   const decoder = new LineDecoder();
-  const lines = [];
+  const lines: string[] = [];
   for (const chunk of chunks) {
     lines.push(...decoder.decode(chunk));
   }

From ad4a5fa5f11ce79b4710952307e4e4852ca1d728 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 13 Mar 2024 01:06:20 -0400
Subject: [PATCH 13/13] release: 4.28.5

---
 .release-please-manifest.json |  2 +-
 CHANGELOG.md                  | 27 +++++++++++++++++++++++++++
 README.md                     |  2 +-
 build-deno                    |  2 +-
 package.json                  |  2 +-
 src/version.ts                |  2 +-
 6 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 5934251e9..2813cb972 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "4.28.4"
+  ".": "4.28.5"
 }
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68ebe3767..8798e4b66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,32 @@
 # Changelog

+## 4.28.5 (2024-03-13)
+
+Full Changelog: [v4.28.4...v4.28.5](https://github.com/openai/openai-node/compare/v4.28.4...v4.28.5)
+
+### Bug Fixes
+
+* **ChatCompletionStream:** abort on async iterator break and handle errors ([#699](https://github.com/openai/openai-node/issues/699)) ([ac417a2](https://github.com/openai/openai-node/commit/ac417a2db31919d2b52f2eb2e38f9c67a8f73254))
+* **streaming:** correctly handle trailing new lines in byte chunks ([#708](https://github.com/openai/openai-node/issues/708)) ([4753be2](https://github.com/openai/openai-node/commit/4753be272b1d1dade7a769cf350b829fc639f36e))
+
+
+### Chores
+
+* **api:** update docs ([#703](https://github.com/openai/openai-node/issues/703)) ([e1db98b](https://github.com/openai/openai-node/commit/e1db98bef29d200e2e401e3f5d7b2db6839c7836))
+* **docs:** mention install from git repo ([#700](https://github.com/openai/openai-node/issues/700)) ([c081bdb](https://github.com/openai/openai-node/commit/c081bdbb55585e63370496d324dc6f94d86424d1))
+* fix error handler in readme ([#704](https://github.com/openai/openai-node/issues/704)) ([4ff790a](https://github.com/openai/openai-node/commit/4ff790a67cf876191e04ad0e369e447e080b78a7))
+* **internal:** add explicit type annotation to decoder ([#712](https://github.com/openai/openai-node/issues/712)) ([d728e99](https://github.com/openai/openai-node/commit/d728e9923554e4c72c9efa3bd528561400d50ad8))
+* **types:** fix accidental exposure of Buffer type to cloudflare ([#709](https://github.com/openai/openai-node/issues/709)) ([0323ecb](https://github.com/openai/openai-node/commit/0323ecb98ddbd8910fc5719c8bab5175b945d2ab))
+
+
+### Documentation
+
+* **contributing:** improve wording ([#696](https://github.com/openai/openai-node/issues/696)) ([940d569](https://github.com/openai/openai-node/commit/940d5695f4cacddbb58e3bfc50fec28c468c7e63))
+* **readme:** fix https proxy example ([#705](https://github.com/openai/openai-node/issues/705)) ([d144789](https://github.com/openai/openai-node/commit/d1447890a556d37928b628f6449bb80de224d207))
+* **readme:** fix typo in custom fetch implementation ([#698](https://github.com/openai/openai-node/issues/698)) ([64041fd](https://github.com/openai/openai-node/commit/64041fd33da569eccae64afe4e50ee803017b20b))
+* remove extraneous --save and yarn install instructions ([#710](https://github.com/openai/openai-node/issues/710)) ([8ec216d](https://github.com/openai/openai-node/commit/8ec216d6b72ee4d67e26786f06c93af18d042117))
+* use [@deprecated](https://github.com/deprecated) decorator for deprecated params ([#711](https://github.com/openai/openai-node/issues/711)) ([4688ef4](https://github.com/openai/openai-node/commit/4688ef4b36e9f383a3abf6cdb31d498163a7bb9e))
+
 ## 4.28.4 (2024-02-28)

 Full Changelog: [v4.28.3...v4.28.4](https://github.com/openai/openai-node/compare/v4.28.3...v4.28.4)
diff --git a/README.md b/README.md
index 28262aaca..24d38ac79 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:

 ```ts
-import OpenAI from 'https://deno.land/x/openai@v4.28.4/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.28.5/mod.ts';
 ```

diff --git a/build-deno b/build-deno
index 74d994d08..fb739cc50 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
 Usage:

 \`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.28.4/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.28.5/mod.ts";

 const client = new OpenAI();
 \`\`\`
diff --git a/package.json b/package.json
index 65d6046f6..d51c4ca96 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "openai",
-  "version": "4.28.4",
+  "version": "4.28.5",
   "description": "The official TypeScript library for the OpenAI API",
   "author": "OpenAI <support@openai.com>",
   "types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 9dd894067..516e764d1 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.28.4'; // x-release-please-version
+export const VERSION = '4.28.5'; // x-release-please-version