From b16913f8eb6583611dc97f6c888deadd05c530e3 Mon Sep 17 00:00:00 2001
From: Steph Milovic
Date: Tue, 14 Nov 2023 15:49:31 -0700
Subject: [PATCH 1/2] comment the code better

---
 .../actions/server/lib/gen_ai_token_tracking.ts      | 10 +++++++++-
 .../lib/get_token_count_from_bedrock_invoke.ts       |  8 ++++++++
 .../server/lib/get_token_count_from_invoke_stream.ts |  8 ++++++++
 .../server/connector_types/bedrock/bedrock.ts        |  5 +++++
 .../server/connector_types/openai/openai.ts          | 12 ++++++++++++
 5 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts b/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
index 15343999b3f00..79437ff77dfbf 100644
--- a/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
+++ b/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
@@ -18,7 +18,15 @@ interface OwnProps {
   result: ActionTypeExecutorRawResult<unknown>;
   validatedParams: Record<string, unknown>;
 }
-
+/*
+ * Calculates the total, prompt, and completion token counts from different types of responses.
+ * It handles streamed responses from OpenAI, and both streamed and non-streamed responses from Bedrock.
+ * It returns null if it cannot calculate the token counts.
+ * @param actionTypeId the action type id
+ * @param logger the logger
+ * @param result the result from the action executor
+ * @param validatedParams the validated params from the action executor
+ */
 export const getGenAiTokenTracking = async ({
   actionTypeId,
   logger,
diff --git a/x-pack/plugins/actions/server/lib/get_token_count_from_bedrock_invoke.ts b/x-pack/plugins/actions/server/lib/get_token_count_from_bedrock_invoke.ts
index ae5c2a2e745e3..26e320200830b 100644
--- a/x-pack/plugins/actions/server/lib/get_token_count_from_bedrock_invoke.ts
+++ b/x-pack/plugins/actions/server/lib/get_token_count_from_bedrock_invoke.ts
@@ -10,6 +10,14 @@ import { encode } from 'gpt-tokenizer';
 export interface InvokeBody {
   prompt: string;
 }
+
+/**
+ * Takes the Bedrock `run` and `test` sub action response and the request prompt as inputs.
+ * Uses gpt-tokenizer encoding to calculate the number of tokens in the prompt and completion.
+ * Returns an object containing the total, prompt, and completion token counts.
+ * @param response (string) - the response completion from the `run` or `test` sub action
+ * @param body - the stringified request prompt
+ */
 export async function getTokenCountFromBedrockInvoke({
   response,
   body,
diff --git a/x-pack/plugins/actions/server/lib/get_token_count_from_invoke_stream.ts b/x-pack/plugins/actions/server/lib/get_token_count_from_invoke_stream.ts
index 78af02cc2413f..4d98d0f618827 100644
--- a/x-pack/plugins/actions/server/lib/get_token_count_from_invoke_stream.ts
+++ b/x-pack/plugins/actions/server/lib/get_token_count_from_invoke_stream.ts
@@ -15,6 +15,14 @@ export interface InvokeBody {
     content: string;
   }>;
 }
+
+/**
+ * Takes the OpenAI and Bedrock `invokeStream` sub action response stream and the request messages array as inputs.
+ * Uses gpt-tokenizer encoding to calculate the number of tokens in the prompt and completion parts of the response stream
+ * Returns an object containing the total, prompt, and completion token counts.
+ * @param responseStream the response stream from the `invokeStream` sub action
+ * @param body the request messages array
+ */
 export async function getTokenCountFromInvokeStream({
   responseStream,
   body,
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/bedrock/bedrock.ts b/x-pack/plugins/stack_connectors/server/connector_types/bedrock/bedrock.ts
index 3813b245f0d40..5a42c4abaf412 100644
--- a/x-pack/plugins/stack_connectors/server/connector_types/bedrock/bedrock.ts
+++ b/x-pack/plugins/stack_connectors/server/connector_types/bedrock/bedrock.ts
@@ -224,6 +224,11 @@ const formatBedrockBody = ({
   };
 };
 
+/**
+ * Takes in a readable stream of data and returns a Transform stream that
+ * uses the AWS proprietary codec to parse the proprietary bedrock response into
+ * a string of the response text alone, returning the response string to the stream
+ */
 const transformToString = () =>
   new Transform({
     transform(chunk, encoding, callback) {
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/openai/openai.ts b/x-pack/plugins/stack_connectors/server/connector_types/openai/openai.ts
index 52fccc84fd7a0..02ad7bcdec6a9 100644
--- a/x-pack/plugins/stack_connectors/server/connector_types/openai/openai.ts
+++ b/x-pack/plugins/stack_connectors/server/connector_types/openai/openai.ts
@@ -192,6 +192,12 @@ export class OpenAIConnector extends SubActionConnector<Config, Secrets> {
     return { available: response.success };
   }
 
+  /**
+   * Responsible for invoking the streamApi method with the provided body and
+   * stream parameters set to true. It then returns a Transform stream that processes
+   * the response from the streamApi method and returns the response string alone.
+   * @param body - the OpenAI Invoke request body
+   */
   public async invokeStream(body: InvokeAIActionParams): Promise<Transform> {
     const res = (await this.streamApi({
       body: JSON.stringify(body),
@@ -222,6 +228,12 @@
     };
   }
 }
+
+/**
+ * Takes in a readable stream of data and returns a Transform stream that
+ * parses the proprietary OpenAI response into a string of the response text alone,
+ * returning the response string to the stream
+ */
 const transformToString = () =>
   new Transform({
     transform(chunk, encoding, callback) {

From 6875c637ecbc0de1c000b7fedb239597202993b8 Mon Sep 17 00:00:00 2001
From: Steph Milovic
Date: Tue, 14 Nov 2023 15:51:26 -0700
Subject: [PATCH 2/2] fix comment

---
 x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts b/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
index 79437ff77dfbf..ded2cab48a0f6 100644
--- a/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
+++ b/x-pack/plugins/actions/server/lib/gen_ai_token_tracking.ts
@@ -20,7 +20,7 @@ interface OwnProps {
 }
 /*
  * Calculates the total, prompt, and completion token counts from different types of responses.
- * It handles streamed responses from OpenAI, and both streamed and non-streamed responses from Bedrock.
+ * It handles both streamed and non-streamed responses from OpenAI and Bedrock.
  * It returns null if it cannot calculate the token counts.
  * @param actionTypeId the action type id
  * @param logger the logger
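
Note, appended after the patch series and not part of either commit: the token accounting these comments describe reduces to running gpt-tokenizer's encode over the prompt and completion strings and summing the counts. A minimal TypeScript sketch under that assumption; the sample strings and the snake_case result keys are illustrative, not taken from the patch.

import { encode } from 'gpt-tokenizer';

// Illustrative only: encode() returns an array of token ids, so its length is
// the token count. The total is the prompt count plus the completion count.
const promptTokens = encode('What is Elastic?').length;
const completionTokens = encode('Elastic is a search company.').length;

console.log({
  total_tokens: promptTokens + completionTokens,
  prompt_tokens: promptTokens,
  completion_tokens: completionTokens,
});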