diff --git a/README.md b/README.md
index 3ad75fbda..3d303e5b0 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ The full API of this library can be found in [api.md file](api.md) along with ma
 ```js
 import OpenAI from 'openai';
 
-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
 });
 
@@ -53,7 +53,7 @@ We provide support for streaming responses using Server Sent Events (SSE).
 ```ts
 import OpenAI from 'openai';
 
-const openai = new OpenAI();
+const client = new OpenAI();
 
 async function main() {
-  const stream = await openai.chat.completions.create({
+  const stream = await client.chat.completions.create({
@@ -80,7 +80,7 @@ This library includes TypeScript definitions for all request params and response
 ```ts
 import OpenAI from 'openai';
 
-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
 });
 
@@ -301,7 +301,7 @@ import fs from 'fs';
 import fetch from 'node-fetch';
 import OpenAI, { toFile } from 'openai';
 
-const openai = new OpenAI();
+const client = new OpenAI();
 
 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
-await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
@@ -399,7 +399,7 @@ You can use the `maxRetries` option to configure or disable this:
 
 ```js
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   maxRetries: 0, // default is 2
 });
 
@@ -416,7 +416,7 @@ Requests time out after 10 minutes by default. You can configure this with a `ti
 
 ```ts
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   timeout: 20 * 1000, // 20 seconds (default is 10 minutes)
 });
 
@@ -471,7 +471,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 
 ```ts
-const openai = new OpenAI();
+const client = new OpenAI();
 
-const response = await openai.chat.completions
+const response = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
@@ -582,7 +582,7 @@ import http from 'http';
 import { HttpsProxyAgent } from 'https-proxy-agent';
 
 // Configure the default for all requests:
-const openai = new OpenAI({
+const client = new OpenAI({
   httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
 });
 