route.ts
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { SerpAPI } from "@langchain/community/tools/serpapi";
import { Calculator } from "@langchain/community/tools/calculator";
import {
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
} from "@langchain/core/messages";
export const runtime = "edge";
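/**
 * Converts a Vercel AI SDK message into the corresponding LangChain message
 * class: "user" becomes a HumanMessage, "assistant" an AIMessage, and any
 * other role falls back to a generic ChatMessage.
 */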
const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
if (message.role === "user") {
return new HumanMessage(message.content);
} else if (message.role === "assistant") {
return new AIMessage(message.content);
} else {
return new ChatMessage(message.content, message.role);
}
};
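/**
 * Converts a LangChain message back into a plain object the Vercel AI SDK
 * understands, preserving tool_calls on AI messages.
 */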
const convertLangChainMessageToVercelMessage = (message: BaseMessage) => {
if (message._getType() === "human") {
return { content: message.content, role: "user" };
} else if (message._getType() === "ai") {
return {
content: message.content,
role: "assistant",
tool_calls: (message as AIMessage).tool_calls,
};
} else {
return { content: message.content, role: message._getType() };
}
};
const AGENT_SYSTEM_TEMPLATE = `You are a talking parrot named Polly. All final responses must be how a talking parrot would respond. Squawk often!`;
/**
 * This handler initializes and calls a tool-calling ReAct agent.
 * See the docs for more information:
 *
 * https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
 */
export async function POST(req: NextRequest) {
try {
const body = await req.json();
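    // Expected body shape: { messages: VercelChatMessage[], show_intermediate_steps?: boolean }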
const returnIntermediateSteps = body.show_intermediate_steps;
/**
* We represent intermediate steps as system messages for display purposes,
* but don't want them in the chat history.
*/
const messages = (body.messages ?? [])
.filter(
(message: VercelChatMessage) =>
message.role === "user" || message.role === "assistant",
)
.map(convertVercelMessageToLangChainMessage);
// Requires process.env.SERPAPI_API_KEY to be set: https://serpapi.com/
// You can remove this or use a different tool instead.
const tools = [new Calculator(), new SerpAPI()];
const chat = new ChatOpenAI({
model: "gpt-3.5-turbo-0125",
temperature: 0,
});
/**
* Use a prebuilt LangGraph agent.
*/
const agent = createReactAgent({
llm: chat,
tools,
/**
* Modify the stock prompt in the prebuilt agent. See docs
* for how to customize your agent:
*
* https://langchain-ai.github.io/langgraphjs/tutorials/quickstart/
*/
messageModifier: new SystemMessage(AGENT_SYSTEM_TEMPLATE),
});
if (!returnIntermediateSteps) {
      /**
       * Stream back all generated tokens and steps from the agent run.
       *
       * We do some filtering of the generated events and only stream back
       * the final response as a string.
       *
       * For this specific type of tool-calling ReAct agent with OpenAI, we can
       * tell the agent is ready to stream back final output when it stops
       * calling tools and instead streams back content.
       *
       * See: https://langchain-ai.github.io/langgraphjs/how-tos/stream-tokens/
       */
const eventStream = await agent.streamEvents(
{ messages },
{ version: "v2" },
);
const textEncoder = new TextEncoder();
const transformStream = new ReadableStream({
async start(controller) {
for await (const { event, data } of eventStream) {
if (event === "on_chat_model_stream") {
// Intermediate chat model generations will contain tool calls and no content
              if (data.chunk.content) {
controller.enqueue(textEncoder.encode(data.chunk.content));
}
}
}
controller.close();
},
});
return new StreamingTextResponse(transformStream);
} else {
/**
* We could also pick intermediate steps out from `streamEvents` chunks, but
* they are generated as JSON objects, so streaming and displaying them with
* the AI SDK is more complicated.
*/
const result = await agent.invoke({ messages });
return NextResponse.json(
{
messages: result.messages.map(convertLangChainMessageToVercelMessage),
},
{ status: 200 },
);
}
} catch (e: any) {
return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
}
}
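/**
 * Example client call (a minimal sketch; assumes this file lives at
 * `app/api/chat/route.ts`, so the route is served from `/api/chat`):
 *
 *   const res = await fetch("/api/chat", {
 *     method: "POST",
 *     headers: { "Content-Type": "application/json" },
 *     body: JSON.stringify({
 *       show_intermediate_steps: false,
 *       messages: [{ role: "user", content: "What is 13 * 7?" }],
 *     }),
 *   });
 *   // With `show_intermediate_steps` false, the response body is a plain
 *   // text stream; with it true, it is JSON of the form { messages: [...] }.
 */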