Commit

google-genai[patch]: Hookup callbacks to stream & generate methods (#3708)

* google-genai[patch]: Hookup callbacks to stream & generate methods

* chore: lint files
bracesproul authored Dec 18, 2023
1 parent 1167c76 commit 90743d0
Showing 2 changed files with 48 additions and 4 deletions.
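
What the change means for callers: a handleLLMNewToken callback handler attached to ChatGoogleGenerativeAI now fires, once with the full completion text on the generate path and once per chunk on the streaming path. Below is a minimal usage sketch (illustrative, not part of the commit; it assumes GOOGLE_API_KEY is set in the environment and passes the handler through the standard callbacks call option):

import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";

// Assumes GOOGLE_API_KEY is available in the environment, as the integration tests below expect.
const model = new ChatGoogleGenerativeAI({});

let tokens = "";
const res = await model.invoke([new HumanMessage("what is 1 + 1?")], {
  callbacks: [
    {
      // Generate path: with this commit the handler receives the completion text
      // as a single token; the streaming path reports one token per chunk instead.
      handleLLMNewToken(token: string) {
        tokens += token;
      },
    },
  ],
});

console.log({ tokens, content: res.content });

The two new integration tests in this commit exercise the same behavior through model.call and model.stream.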
libs/langchain-google-genai/src/chat_models.ts (12 changes: 8 additions & 4 deletions)
@@ -247,7 +247,7 @@ export class ChatGoogleGenerativeAI
   async _generate(
     messages: BaseMessage[],
     options: this["ParsedCallOptions"],
-    _runManager?: CallbackManagerForLLMRun
+    runManager?: CallbackManagerForLLMRun
   ): Promise<ChatResult> {
     const prompt = convertBaseMessagesToContent(
       messages,
@@ -272,14 +272,17 @@ export class ChatGoogleGenerativeAI
         return output;
       }
     );
-
-    return mapGenerateContentResultToChatResult(res.response);
+    const generationResult = mapGenerateContentResultToChatResult(res.response);
+    await runManager?.handleLLMNewToken(
+      generationResult.generations[0].text ?? ""
+    );
+    return generationResult;
   }

   async *_streamResponseChunks(
     messages: BaseMessage[],
     options: this["ParsedCallOptions"],
-    _runManager?: CallbackManagerForLLMRun
+    runManager?: CallbackManagerForLLMRun
   ): AsyncGenerator<ChatGenerationChunk> {
     const prompt = convertBaseMessagesToContent(
       messages,
@@ -302,6 +305,7 @@ export class ChatGoogleGenerativeAI
       }

       yield chunk;
+      await runManager?.handleLLMNewToken(chunk.text ?? "");
     }
   }
 }
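
Here runManager is the per-run CallbackManagerForLLMRun that the base chat model hands to _generate and _streamResponseChunks; calling handleLLMNewToken on it forwards the token to every handler the caller registered. The following is a simplified sketch of the streaming pattern the commit follows, not the actual implementation; hypotheticalBackendStream is a stand-in for the Google Generative AI SDK stream:

import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatGenerationChunk } from "@langchain/core/outputs";

// Stand-in for the SDK's streamed responses; not the real Google client.
async function* hypotheticalBackendStream(): AsyncGenerator<string> {
  yield "1 + 1 ";
  yield "= 2";
}

async function* streamResponseChunksSketch(
  runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
  for await (const text of hypotheticalBackendStream()) {
    const chunk = new ChatGenerationChunk({
      text,
      message: new AIMessageChunk({ content: text }),
    });
    yield chunk;
    // Mirrors the diff above: report each yielded chunk's text to registered callbacks.
    await runManager?.handleLLMNewToken(chunk.text ?? "");
  }
}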
libs/langchain-google-genai/src/tests/chat_models.int.test.ts (40 changes: 40 additions & 0 deletions)
@@ -73,3 +73,43 @@ test("Test Google AI multimodal generation", async () => {
   console.log(JSON.stringify(res, null, 2));
   expect(res).toBeTruthy();
 });
+
+test("Test Google AI handleLLMNewToken callback", async () => {
+  const model = new ChatGoogleGenerativeAI({});
+  let tokens = "";
+  const res = await model.call(
+    [new HumanMessage("what is 1 + 1?")],
+    undefined,
+    [
+      {
+        handleLLMNewToken(token: string) {
+          tokens += token;
+        },
+      },
+    ]
+  );
+  console.log({ tokens });
+  const responseContent = typeof res.content === "string" ? res.content : "";
+  expect(tokens).toBe(responseContent);
+});
+
+test("Test Google AI handleLLMNewToken callback with streaming", async () => {
+  const model = new ChatGoogleGenerativeAI({});
+  let tokens = "";
+  const res = await model.stream([new HumanMessage("what is 1 + 1?")], {
+    callbacks: [
+      {
+        handleLLMNewToken(token: string) {
+          tokens += token;
+        },
+      },
+    ],
+  });
+  console.log({ tokens });
+  let responseContent = "";
+  for await (const streamItem of res) {
+    responseContent += streamItem.content;
+  }
+  console.log({ tokens });
+  expect(tokens).toBe(responseContent);
+});
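
The inline objects passed as callbacks in these tests are anonymous handler shorthands; the same token collection could live in a reusable handler class. A sketch under the assumption that the handler extends BaseCallbackHandler from @langchain/core (the TokenCollector name is made up for illustration):

import { BaseCallbackHandler } from "@langchain/core/callbacks/base";

// Hypothetical reusable equivalent of the inline handlers used in the tests above.
class TokenCollector extends BaseCallbackHandler {
  name = "token_collector";

  tokens = "";

  handleLLMNewToken(token: string) {
    this.tokens += token;
  }
}

An instance could then be passed in the callbacks array of either test and inspected afterwards via its tokens field.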

2 comments on commit 90743d0

@vercel vercel bot commented on 90743d0 Dec 19, 2023

Successfully deployed to the following URLs:

langchainjs-docs – ./docs/core_docs/

langchainjs-docs-langchain.vercel.app
langchainjs-docs-ruddy.vercel.app
langchainjs-docs-git-main-langchain.vercel.app
js.langchain.com
