From cf90a2542aef1ef7addd7f7fabf5631e2d97f0ff Mon Sep 17 00:00:00 2001 From: Walter Korman Date: Thu, 19 Dec 2024 09:40:40 -0800 Subject: [PATCH] chore (docs): Further OpenAI compatible doc updates. (#4149) --- .../02-providers-and-models.mdx | 2 +- .../10-perplexity.mdx | 24 +++++++----- .../30-lmstudio.mdx | 39 ++++++++++--------- .../40-baseten.mdx | 34 +++++++++------- .../02-openai-compatible-providers/index.mdx | 10 +++-- 5 files changed, 63 insertions(+), 46 deletions(-) diff --git a/content/docs/02-foundations/02-providers-and-models.mdx b/content/docs/02-foundations/02-providers-and-models.mdx index 337f76977f6c..6c93744702d2 100644 --- a/content/docs/02-foundations/02-providers-and-models.mdx +++ b/content/docs/02-foundations/02-providers-and-models.mdx @@ -34,12 +34,12 @@ The AI SDK comes with several providers that you can use to interact with differ - [xAI Grok Provider](/providers/ai-sdk-providers/xai) (`@ai-sdk/xai`) - [Together.ai Provider](/providers/ai-sdk-providers/togetherai) (`@ai-sdk/togetherai`) - [Cohere Provider](/providers/ai-sdk-providers/cohere) (`@ai-sdk/cohere`) +- [Fireworks](/providers/ai-sdk-providers/fireworks) - [Groq](/providers/ai-sdk-providers/groq) (`@ai-sdk/groq`) You can also use the OpenAI provider with OpenAI-compatible APIs: - [Perplexity](/providers/ai-sdk-providers/perplexity) -- [Fireworks](/providers/ai-sdk-providers/fireworks) - [LM Studio](/providers/openai-compatible-providers/lmstudio) - [Baseten](/providers/openai-compatible-providers/baseten) diff --git a/content/providers/02-openai-compatible-providers/10-perplexity.mdx b/content/providers/02-openai-compatible-providers/10-perplexity.mdx index 98ad6b7b075a..273f803116fd 100644 --- a/content/providers/02-openai-compatible-providers/10-perplexity.mdx +++ b/content/providers/02-openai-compatible-providers/10-perplexity.mdx @@ -10,18 +10,18 @@ It offers an OpenAI compatible API that you can use with the AI SDK. 
## Setup
 
-The Perplexity provider is available via the `@ai-sdk/openai` module as it is compatible with the OpenAI API. +The Perplexity provider is available via the `@ai-sdk/openai-compatible` module as it is compatible with the OpenAI API. You can install it with - + - + - + @@ -30,11 +30,13 @@ You can install it with -To use Perplexity, you can create a custom provider instance with the `createOpenAI` function from `@ai-sdk/openai`: +To use Perplexity, you can create a custom provider instance with the `createOpenAICompatible` function from `@ai-sdk/openai-compatible`: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; -const perplexity = createOpenAI({ +const perplexity = createOpenAICompatible({ name: 'perplexity', - apiKey: process.env.PERPLEXITY_API_KEY ?? '', + headers: { + Authorization: `Bearer ${process.env.PERPLEXITY_API_KEY}`, + }, baseURL: 'https://api.perplexity.ai/', }); ``` @@ -53,12 +55,14 @@ const model = perplexity('llama-3.1-sonar-large-32k-online'); You can use Perplexity language models to generate text with the `generateText` function: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { generateText } from 'ai'; -const perplexity = createOpenAI({ +const perplexity = createOpenAICompatible({ name: 'perplexity', - apiKey: process.env.PERPLEXITY_API_KEY ?? '', + headers: { + Authorization: `Bearer ${process.env.PERPLEXITY_API_KEY}`, + }, baseURL: 'https://api.perplexity.ai/', }); diff --git a/content/providers/02-openai-compatible-providers/30-lmstudio.mdx b/content/providers/02-openai-compatible-providers/30-lmstudio.mdx index ef15eac6719f..60e9a8790dd7 100644 --- a/content/providers/02-openai-compatible-providers/30-lmstudio.mdx +++ b/content/providers/02-openai-compatible-providers/30-lmstudio.mdx @@ -5,36 +5,36 @@ description: Use the LM Studio OpenAI compatible API with the AI SDK. # LM Studio Provider -[LM Studio](https://lmstudio.ai/) is user interface for running local models. 
+[LM Studio](https://lmstudio.ai/) is a user interface for running local models. It contains an OpenAI compatible API server that you can use with the AI SDK. You can start the local server under the [Local Server tab](https://lmstudio.ai/docs/basics/server) in the LM Studio UI ("Start Server" button). ## Setup -The LM Studio provider is available via the `@ai-sdk/openai` module as it is compatible with the OpenAI API. +The LM Studio provider is available via the `@ai-sdk/openai-compatible` module as it is compatible with the OpenAI API. You can install it with - + - + - + ## Provider Instance -To use LM Studio, you can create a custom provider instance with the `createOpenAI` function from `@ai-sdk/openai`: +To use LM Studio, you can create a custom provider instance with the `createOpenAICompatible` function from `@ai-sdk/openai-compatible`: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; -const lmstudio = createOpenAI({ +const lmstudio = createOpenAICompatible({ name: 'lmstudio', baseURL: 'http://localhost:1234/v1', maxRetries: 1, // immediately error if the server is not running @@ -62,12 +62,11 @@ const model = lmstudio('llama-3.2-1b'); You can use LM Studio language models to generate text with the `generateText` function: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { generateText } from 'ai'; -const lmstudio = createOpenAI({ +const lmstudio = createOpenAICompatible({ name: 'lmstudio', - apiKey: 'not-needed', baseURL: 'https://localhost:1234/v1', }); @@ -91,18 +90,17 @@ const model = lmstudio.embedding('text-embedding-nomic-embed-text-v1.5'); ### Example - Embedding a Single Value ```tsx -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { embed } from 'ai'; -const lmstudio = createOpenAI({ +const lmstudio = 
createOpenAICompatible({ name: 'lmstudio', - apiKey: 'not-needed', baseURL: 'https://localhost:1234/v1', }); // 'embedding' is a single embedding object (number[]) const { embedding } = await embed({ - model: lmstudio.embedding('text-embedding-nomic-embed-text-v1.5'), + model: lmstudio.textEmbeddingModel('text-embedding-nomic-embed-text-v1.5'), value: 'sunny day at the beach', }); ``` @@ -114,16 +112,21 @@ it is often useful to embed many values at once (batch embedding). The AI SDK provides the [`embedMany`](/docs/reference/ai-sdk-core/embed-many) function for this purpose. Similar to `embed`, you can use it with embeddings models, -e.g. `lmstudio.embedding('text-embedding-nomic-embed-text-v1.5')` or `lmstudio.embedding('text-embedding-bge-small-en-v1.5')`. +e.g. `lmstudio.textEmbeddingModel('text-embedding-nomic-embed-text-v1.5')` or `lmstudio.textEmbeddingModel('text-embedding-bge-small-en-v1.5')`. ```tsx -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { embedMany } from 'ai'; +const lmstudio = createOpenAICompatible({ + name: 'lmstudio', + baseURL: 'https://localhost:1234/v1', +}); + // 'embeddings' is an array of embedding objects (number[][]). // It is sorted in the same order as the input values. 
const { embeddings } = await embedMany({ - model: lmstudio.embedding('text-embedding-nomic-embed-text-v1.5'), + model: lmstudio.textEmbeddingModel('text-embedding-nomic-embed-text-v1.5'), values: [ 'sunny day at the beach', 'rainy afternoon in the city', diff --git a/content/providers/02-openai-compatible-providers/40-baseten.mdx b/content/providers/02-openai-compatible-providers/40-baseten.mdx index 2bbfe68429a2..cab0e6614633 100644 --- a/content/providers/02-openai-compatible-providers/40-baseten.mdx +++ b/content/providers/02-openai-compatible-providers/40-baseten.mdx @@ -10,27 +10,27 @@ It allows you to deploy models that are OpenAI API compatible that you can use w ## Setup -The Baseten provider is available via the `@ai-sdk/openai` module as it is compatible with the OpenAI API. +The Baseten provider is available via the `@ai-sdk/openai-compatible` module as it is compatible with the OpenAI API. You can install it with - + - + - + ## Provider Instance -To use Baseten, you can create a custom provider instance with the `createOpenAI` function from `@ai-sdk/openai`: +To use Baseten, you can create a custom provider instance with the `createOpenAICompatible` function from `@ai-sdk/openai-compatible`: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; const BASETEN_MODEL_ID = ''; const BASETEN_DEPLOYMENT_ID = null; @@ -41,9 +41,11 @@ const basetenExtraPayload = { deployment_id: BASETEN_DEPLOYMENT_ID, }; -const baseten = createOpenAI({ +const baseten = createOpenAICompatible({ name: 'baseten', - apiKey: process.env.BASETEN_API_KEY ?? 
'', + headers: { + Authorization: `Bearer ${process.env.BASETEN_API_KEY}`, + }, baseURL: 'https://bridge.baseten.co/v1/direct', fetch: async (url, request) => { const bodyWithBasetenPayload = JSON.stringify({ @@ -71,7 +73,7 @@ const model = baseten('ultravox'); You can use Baseten language models to generate text with the `generateText` function: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { generateText } from 'ai'; const BASETEN_MODEL_ID = ''; @@ -83,9 +85,11 @@ const basetenExtraPayload = { deployment_id: BASETEN_DEPLOYMENT_ID, }; -const baseten = createOpenAI({ +const baseten = createOpenAICompatible({ name: 'baseten', - apiKey: process.env.BASETEN_API_KEY ?? '', + headers: { + Authorization: `Bearer ${process.env.BASETEN_API_KEY}`, + }, baseURL: 'https://bridge.baseten.co/v1/direct', fetch: async (url, request) => { const bodyWithBasetenPayload = JSON.stringify({ @@ -107,7 +111,7 @@ console.log(text); Baseten language models are also able to generate text in a streaming fashion with the `streamText` function: ```ts -import { createOpenAI } from '@ai-sdk/openai'; +import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { streamText } from 'ai'; const BASETEN_MODEL_ID = ''; @@ -119,9 +123,11 @@ const basetenExtraPayload = { deployment_id: BASETEN_DEPLOYMENT_ID, }; -const baseten = createOpenAI({ +const baseten = createOpenAICompatible({ name: 'baseten', - apiKey: process.env.BASETEN_API_KEY ?? 
'', + headers: { + Authorization: `Bearer ${process.env.BASETEN_API_KEY}`, + }, baseURL: 'https://bridge.baseten.co/v1/direct', fetch: async (url, request) => { const bodyWithBasetenPayload = JSON.stringify({ diff --git a/content/providers/02-openai-compatible-providers/index.mdx b/content/providers/02-openai-compatible-providers/index.mdx index 3d4b1b1ebf72..5e80553a32fb 100644 --- a/content/providers/02-openai-compatible-providers/index.mdx +++ b/content/providers/02-openai-compatible-providers/index.mdx @@ -42,7 +42,9 @@ import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; const provider = createOpenAICompatible({ name: 'provider-name', - apiKey: process.env.PROVIDER_API_KEY ?? '', + headers: { + Authorization: `Bearer ${process.env.PROVIDER_API_KEY}`, + }, baseURL: 'https://api.provider.com/v1', }); ``` @@ -66,7 +68,9 @@ import { generateText } from 'ai' const provider = createOpenAICompatible({ name: 'provider-name', - apiKey: process.env.PROVIDER_API_KEY ?? '', + headers: { + Authorization: `Bearer ${process.env.PROVIDER_API_KEY}`, + }, baseURL: 'https://api.provider.com/v1' }) @@ -102,11 +106,11 @@ const model = createOpenAICompatible< ExampleCompletionModelIds, ExampleEmbeddingModelIds >({ - baseURL: 'https://api.example.com/v1', name: 'example', headers: { Authorization: `Bearer ${process.env.MY_API_KEY}`, }, + baseURL: 'https://api.example.com/v1', }); // Subsequent calls to e.g. `model.chatModel` will auto-complete the model id