diff --git a/docs/pages/docs/getting-started.mdx b/docs/pages/docs/getting-started.mdx index 7712fd833555..22ec9d5cfc8f 100644 --- a/docs/pages/docs/getting-started.mdx +++ b/docs/pages/docs/getting-started.mdx @@ -9,16 +9,10 @@ import { InlinePrompt } from '@/components/home/inline-prompt'; The Vercel AI SDK is a collection of tools to help you build AI-powered user interfaces. -In this quickstart tutorial, you'll build a simple sample AI-powered slogan generator with a streaming user interface. Along the way, you'll learn key guides and techniques that are fundamental to using the SDK in your own projects. +In this quickstart tutorial, you'll build a simple AI chatbot with a streaming user interface. Along the way, you'll learn key concepts and techniques that are fundamental to using the SDK in your own projects. If you are unfamiliar with the concepts of [Prompt Engineering](/docs/concepts/prompt-engineering) and [HTTP Streaming](/docs/concepts/streaming), you can optionally read these documents first. - - The below example uses a completion API and the `useCompletion` hook. If you - want to see a chat example (using gpt-3.5-turbo), see a [chat - guide](https://sdk.vercel.ai/docs/guides/providers/openai#guide-chat-bot). - - ### Build your app @@ -89,11 +83,11 @@ We've written some code to get you started — follow the instructions below to - Create a Next.js Route Handler, `app/api/completion/route.ts`. This handler will be using the Edge Runtime to generate a text completion via OpenAI, which will then be streamed back to Next.js. + Create a Next.js Route Handler, `app/api/chat/route.ts`. This handler will be using the Edge Runtime to generate a chat completion via OpenAI, which will then be streamed back to Next.js. 
Here's what the route handler should look like: - ```tsx filename="app/api/completion/route.ts" + ```tsx filename="app/api/chat/route.ts" import OpenAI from 'openai'; import { OpenAIStream, StreamingTextResponse } from 'ai'; @@ -106,23 +100,15 @@ We've written some code to get you started — follow the instructions below to export const runtime = 'edge'; export async function POST(req: Request) { - const { prompt } = await req.json(); + const { messages } = await req.json(); - // Ask OpenAI for a streaming completion given the prompt + // Ask OpenAI for a streaming chat completion given the prompt const response = await openai.chat.completions.create({ - model: 'gpt-3.5-turbo-instruct', + model: 'gpt-3.5-turbo', stream: true, - temperature: 0.6, - max_tokens: 300, - prompt: `Create three slogans for a business with unique features. - - Business: Bookstore with cats - Slogans: "Purr-fect Pages", "Books and Whiskers", "Novels and Nuzzles" - Business: Gym with rock climbing - Slogans: "Peak Performance", "Reach New Heights", "Climb Your Way Fit" - Business: ${prompt} - Slogans:`, + messages, }); + // Convert the response into a friendly text-stream const stream = OpenAIStream(response); // Respond with the stream @@ -132,11 +118,11 @@ We've written some code to get you started — follow the instructions below to - Create a SvelteKit Endpoint, `src/routes/api/completion/+server.js`. This handler will be using the Edge Runtime to generate a text completion via OpenAI, which will then be streamed back to SvelteKit. + Create a SvelteKit Endpoint, `src/routes/api/chat/+server.js`. This handler will be using the Edge Runtime to generate a chat completion via OpenAI, which will then be streamed back to SvelteKit. 
Here's what the endpoint should look like: - ```tsx filename="src/routes/api/completion/+server.js" + ```tsx filename="src/routes/api/chat/+server.js" import OpenAI from 'openai'; import { OpenAIStream, StreamingTextResponse } from 'ai'; import { OPENAI_API_KEY } from '$env/static/private'; @@ -152,22 +138,13 @@ We've written some code to get you started — follow the instructions below to }; export async function POST({ request }) { - const { prompt } = await request.json(); + const { messages } = await request.json(); - // Ask OpenAI for a streaming completion given the prompt + // Ask OpenAI for a streaming chat completion given the prompt const response = await openai.chat.completions.create({ - model: 'gpt-3.5-turbo-instruct', + model: 'gpt-3.5-turbo', stream: true, - temperature: 0.6, - max_tokens: 300, - prompt: `Create three slogans for a business with unique features. - - Business: Bookstore with cats - Slogans: "Purr-fect Pages", "Books and Whiskers", "Novels and Nuzzles" - Business: Gym with rock climbing - Slogans: "Peak Performance", "Reach New Heights", "Climb Your Way Fit" - Business: ${prompt} - Slogans:`, + messages, }); // Convert the response into a friendly text-stream @@ -184,62 +161,65 @@ We've written some code to get you started — follow the instructions below to #### Wire up a UI - Finally, create a client component with a form to collect the prompt from the user and stream back the completion. + Finally, create a client chat component that shows a list of chat messages and provides a user message input. ```tsx filename="app/page.tsx" showLineNumbers - 'use client' - - import { useCompletion } from 'ai/react'; + 'use client'; - export default function SloganGenerator() { - const { completion, input, handleInputChange, handleSubmit } = useCompletion(); + import { useChat } from 'ai/react'; + export default function Chat() { + const { messages, input, handleInputChange, handleSubmit } = useChat(); return ( -
+
+ {messages.map(m => ( +
+ {m.role === 'user' ? 'User: ' : 'AI: '} + {m.content} +
+ ))} +
- {completion ? ( -
{completion}
- ) : ( -
Enter a business description and click enter to generate slogans.
- )}
); } ``` - This component utilizes the `useCompletion` hook, which will, by default, use the `POST` route handler we created earlier. The hook provides functions and state for handling user input and form submission. The `useCompletion` hook provides multiple utility functions and state variables: + This component utilizes the `useChat` hook, which will, by default, use the `POST` route handler we created earlier. The hook provides functions and state for handling user input and form submission. The `useChat` hook provides multiple utility functions and state variables: - - `completion` - This is the current completion result, a string value representing the generated text. + - `messages` - The current chat messages, an array of objects with `id`, `role`, and `content` properties (among others). - `input` - This is the current value of the user's input field. - `handleInputChange` and `handleSubmit` - These functions handle user interactions such as typing into the input field and submitting the form, respectively. - `isLoading` This boolean indicates whether the API request is in progress or not. - Finally, create a Svelte component with a form to collect the prompt from the user and display the completion. + Finally, create a Svelte component that shows a list of chat messages and provides a user message input. ```svelte filename="src/routes/+page.svelte"
+
    + {#each $messages as message} +
  • {message.role}: {message.content}
  • + {/each} +
- - + +
-

{$completion}

```
@@ -253,8 +233,8 @@ We've written some code to get you started — follow the instructions below to pnpm run dev ``` - Now your application is up and running! Test it by entering a business description and see the AI-generated slogans in real-time. + Now your application is up and running! Test it by entering a message and see the AI chatbot respond in real-time. - Nice! You've built a streaming slogan generator using the Vercel AI SDK. Remember, your imagination is the limit when it comes to using AI to build apps, so feel free to experiment and extend the functionality of this application further. In the next section of the tutorial, we're going to pivot our little company to chat. + Nice! You've built an AI chatbot using the Vercel AI SDK. Remember, your imagination is the limit when it comes to using AI to build apps, so feel free to experiment and extend the functionality of this application further. In the next section of the tutorial, we'll explore the fundamental concepts in more detail.