openai[patch],api_refs[patch]: Improve OpenAI JSDoc & API refs #6451

Merged 6 commits on Aug 8, 2024
1 change: 1 addition & 0 deletions docs/api_refs/package.json
@@ -30,6 +30,7 @@
"prettier": "^2.8.3",
"tailwindcss": "^3.3.0",
"typedoc": "^0.26.0",
"typedoc-plugin-expand-object-like-types": "^0.1.2",
"typescript": "~5.1.6"
}
}
7 changes: 5 additions & 2 deletions docs/api_refs/scripts/create-entrypoints.js
@@ -11,12 +11,15 @@ const BASE_TYPEDOC_CONFIG = {
"required-first",
"alphabetical",
],
plugin: ["./typedoc_plugins/hide_underscore_lc.js"],
plugin: [
"./typedoc_plugins/hide_underscore_lc.js",
"typedoc-plugin-expand-object-like-types",
],
tsconfig: "../../tsconfig.json",
readme: "none",
excludePrivate: true,
excludeInternal: true,
- excludeExternals: true,
excludeExternals: false,
excludeNotDocumented: false,
includeVersion: true,
sourceLinkTemplate:
259 changes: 237 additions & 22 deletions libs/langchain-openai/src/chat_models.ts
@@ -367,37 +367,252 @@ export interface ChatOpenAIFields
}

/**
- * Wrapper around OpenAI large language models that use the Chat endpoint.
 * OpenAI chat model integration.
 *
- * To use you should have the `OPENAI_API_KEY` environment variable set.
 * Setup:
 * Install `@langchain/openai` and set the `OPENAI_API_KEY` environment variable.
*
- * To use with Azure you should have the:
- * `AZURE_OPENAI_API_KEY`,
- * `AZURE_OPENAI_API_INSTANCE_NAME`,
- * `AZURE_OPENAI_API_DEPLOYMENT_NAME`
- * and `AZURE_OPENAI_API_VERSION` environment variables set.
- * `AZURE_OPENAI_BASE_PATH` is optional and will override `AZURE_OPENAI_API_INSTANCE_NAME` if you need to use a custom endpoint.
* ```bash
* npm install @langchain/openai
* export OPENAI_API_KEY="your-api-key"
* ```
*
* ## Key args
*
* ### [Init args](/classes/langchain_openai.ChatOpenAI.html#constructor)
*
* ### [Runtime args](/interfaces/langchain_openai.ChatOpenAICallOptions.html)
*
* > See full list of supported init args and their descriptions in the [`constructor`](/classes/langchain_openai.ChatOpenAI.html#constructor) section.
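 *
 * Runtime args can be passed per call as the second argument to `invoke`, or bound
 * up front with `bind`. A minimal sketch (`stop` is one of the supported call options):
 *
 * ```typescript
 * // Per-call: pass options alongside the input
 * await llm.invoke(messages, { stop: ["\n"] });
 *
 * // Bound: fix the options once and reuse the resulting runnable
 * const llmWithStop = llm.bind({ stop: ["\n"] });
 * await llmWithStop.invoke(messages);
 * ```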
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
- * @remarks
- * Any parameters that are valid to be passed to {@link
- * https://platform.openai.com/docs/api-reference/chat/create |
- * `openai.createChatCompletion`} can be passed through {@link modelKwargs}, even
- * if not explicitly available on this class.
- * @example
- * ```typescript
- * // Create a new instance of ChatOpenAI with specific temperature and model name settings
- * const model = new ChatOpenAI({
- *   temperature: 0.9,
- *   model: "ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}",
 * ```typescript
* import { ChatOpenAI } from '@langchain/openai';
*
* const llm = new ChatOpenAI({
* model: "gpt-4o",
* temperature: 0,
* maxTokens: undefined,
* timeout: undefined,
* maxRetries: 2,
* // apiKey: "...",
* // baseUrl: "...",
* // organization: "...",
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const messages = [
* {
* type: "system" as const,
* content: "You are a helpful translator. Translate the user sentence to French.",
* },
* {
* type: "human" as const,
* content: "I love programming.",
* },
* ];
* const result = await llm.invoke(messages);
* console.log(result);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(messages)) {
* console.log(chunk);
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Aggregate Streamed Chunks</strong></summary>
*
* ```typescript
* import { AIMessageChunk } from '@langchain/core/messages';
* import { concat } from '@langchain/core/utils/stream';
*
* const stream = await llm.stream(messages);
* let full: AIMessageChunk | undefined;
* for await (const chunk of stream) {
* full = !full ? chunk : concat(full, chunk);
* }
* console.log(full);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
- * // Invoke the model with a message and await the response
- * const message = await model.invoke("Hi there!");
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
* name: "GetWeather",
* description: "Get the current weather in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const GetPopulation = {
* name: "GetPopulation",
* description: "Get the current population in a given location",
* schema: z.object({
* location: z.string().describe("The city and state, e.g. San Francisco, CA")
* }),
* }
*
* const llmWithTools = llm.bindTools(
* [GetWeather, GetPopulation],
* {
* // strict: true // enforce tool args schema is respected
* }
* );
* const aiMsg = await llmWithTools.invoke(
* "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
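 *
 * Each element of `aiMsg.tool_calls` carries the called tool's `name` and its parsed
 * `args`, so results can be dispatched by hand. A minimal sketch (the weather lookup
 * is a hypothetical stand-in):
 *
 * ```typescript
 * for (const toolCall of aiMsg.tool_calls ?? []) {
 *   if (toolCall.name === "GetWeather") {
 *     // Hypothetical: call a real weather API with the parsed arguments
 *     console.log(`Would fetch weather for ${toolCall.args.location}`);
 *   }
 * }
 * ```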
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
- * // Log the response to the console
- * console.log(message);
* const Joke = z.object({
* setup: z.string().describe("The setup of the joke"),
* punchline: z.string().describe("The punchline to the joke"),
* rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke);
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>JSON Object Response Format</strong></summary>
*
* ```typescript
* const jsonLlm = llm.bind({ response_format: { type: "json_object" } });
* const jsonLlmAiMsg = await jsonLlm.invoke(
* "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]"
* );
* console.log(jsonLlmAiMsg.content);
* ```
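 *
 * In this mode `content` comes back as a JSON string, so it can be parsed directly
 * (a sketch, assuming the model honored the requested format):
 *
 * ```typescript
 * const parsed = JSON.parse(jsonLlmAiMsg.content as string);
 * console.log(parsed.randomInts);
 * ```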
* </details>
*
* <br />
*
* <details>
* <summary><strong>Multimodal</strong></summary>
*
* ```typescript
* import { HumanMessage } from '@langchain/core/messages';
*
* const imageUrl = "https://example.com/image.jpg";
* const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
* const base64Image = Buffer.from(imageData).toString('base64');
*
* const message = new HumanMessage({
* content: [
* { type: "text", text: "describe the weather in this image" },
* {
* type: "image_url",
* image_url: { url: `data:image/jpeg;base64,${base64Image}` },
* },
* ]
* });
*
* const imageDescriptionAiMsg = await llm.invoke([message]);
* console.log(imageDescriptionAiMsg.content);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(messages);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
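 *
 * The `usage_metadata` field reports `input_tokens`, `output_tokens`, and
 * `total_tokens` for the call.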
* </details>
*
* <br />
*
* <details>
* <summary><strong>Stream Usage Metadata</strong></summary>
*
* ```typescript
* const streamForMetadata = await llm.stream(
* messages,
* {
* stream_options: {
* include_usage: true
* }
* }
* );
* let fullForMetadata: AIMessageChunk | undefined;
* for await (const chunk of streamForMetadata) {
* fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
* }
* console.log(fullForMetadata?.usage_metadata);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Logprobs</strong></summary>
*
* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(messages);
* console.log(aiMsgForLogprobs.response_metadata.logprobs);
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Response Metadata</strong></summary>
*
* ```typescript
* const aiMsgForResponseMetadata = await llm.invoke(messages);
* console.log(aiMsgForResponseMetadata.response_metadata);
* ```
* </details>
*
* <br />
*/
export class ChatOpenAI<
CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions
10 changes: 10 additions & 0 deletions yarn.lock
@@ -20250,6 +20250,7 @@ __metadata:
react-dom: ^18
tailwindcss: ^3.3.0
typedoc: ^0.26.0
typedoc-plugin-expand-object-like-types: ^0.1.2
typescript: ~5.1.6
languageName: unknown
linkType: soft
@@ -39863,6 +39864,15 @@ __metadata:
languageName: node
linkType: hard

"typedoc-plugin-expand-object-like-types@npm:^0.1.2":
version: 0.1.2
resolution: "typedoc-plugin-expand-object-like-types@npm:0.1.2"
peerDependencies:
typedoc: 0.22.x || 0.23.x
checksum: 9dd58ce283386709c821f7a30267edddff984cac69fc8dc994185378403d586ac53a58b790b553693a572ea5d417e5a9bb90c02d4e73dbf595ec5a3156b5252e
languageName: node
linkType: hard

"typedoc-plugin-markdown@npm:4.0.0-next.6":
version: 4.0.0-next.6
resolution: "typedoc-plugin-markdown@npm:4.0.0-next.6"