diff --git a/packages/pro b/packages/pro
index 0d7fa31d4c4..7b8789efd94 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 0d7fa31d4c4019690e2200323421025cdc74b89e
+Subproject commit 7b8789efd940d9f8e5be9927243b19f07361c445
diff --git a/packages/server/src/automations/steps/openai.ts b/packages/server/src/automations/steps/openai.ts
index 6f9adec1dc0..8464c497631 100644
--- a/packages/server/src/automations/steps/openai.ts
+++ b/packages/server/src/automations/steps/openai.ts
@@ -106,13 +106,13 @@ export async function run({
     (await features.flags.isEnabled(FeatureFlag.BUDIBASE_AI)) &&
     (await pro.features.isBudibaseAIEnabled())
 
-  let llm
+  let llmWrapper
   if (budibaseAIEnabled || customConfigsEnabled) {
-    llm = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
+    llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
   }
 
-  response = llm?.initialised
-    ? await llm.run(inputs.prompt)
+  response = llmWrapper?.llm
+    ? await llmWrapper.run(inputs.prompt)
     : await legacyOpenAIPrompt(inputs)
 
   return {
diff --git a/packages/server/src/utilities/rowProcessor/utils.ts b/packages/server/src/utilities/rowProcessor/utils.ts
index 3728401ab82..09d3324ded7 100644
--- a/packages/server/src/utilities/rowProcessor/utils.ts
+++ b/packages/server/src/utilities/rowProcessor/utils.ts
@@ -126,7 +126,9 @@ export async function processAIColumns(
     const numRows = Array.isArray(inputRows) ? inputRows.length : 1
     span?.addTags({ table_id: table._id, numRows })
     const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-    const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
+    const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+      "gpt-4o-mini"
+    )
     if (rows && llmWrapper.llm) {
       // Ensure we have snippet context
       await context.ensureSnippetContext()
@@ -151,14 +153,14 @@ export async function processAIColumns(
         }
       }
 
-      const prompt = llm.buildPromptFromAIOperation({
+      const prompt = llmWrapper.buildPromptFromAIOperation({
         schema: aiSchema,
         row,
       })
 
      return tracer.trace("processAIColumn", {}, async span => {
        span?.addTags({ table_id: table._id, column })
-        const llmResponse = await llm.run(prompt!)
+        const llmResponse = await llmWrapper.run(prompt!)
        return {
          ...row,
          [column]: llmResponse,
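
Note on the refactor above: both call sites stop tracking readiness with a separate `initialised` flag and instead branch on whether the wrapper actually holds an underlying client (`llmWrapper.llm`), falling back to `legacyOpenAIPrompt` when it does not. Below is a minimal sketch of the wrapper shape implied by these call sites; the real `pro.ai.LargeLanguageModel` lives in the closed-source `packages/pro` submodule, so the interface, the `resolveProvider` helper, and all internals here are assumptions inferred from the diff, not the actual implementation.

```ts
// Hypothetical sketch, inferred only from the call sites in this diff.
interface LLMClient {
  run(prompt: string): Promise<string>
}

class LargeLanguageModel {
  // Presence of `llm` doubles as the readiness check that used to be the
  // `initialised` boolean: it is only set when a provider could be resolved.
  llm?: LLMClient

  static async forCurrentTenant(model: string): Promise<LargeLanguageModel> {
    const wrapper = new LargeLanguageModel()
    // Assumed step: look up the tenant's AI config and build a provider
    // client, leaving `llm` undefined when no config applies.
    // wrapper.llm = await resolveProvider(model)
    return wrapper
  }

  buildPromptFromAIOperation(opts: { schema: unknown; row: unknown }): string {
    // Assumed: serialises the AI column schema and row into a prompt string.
    return JSON.stringify(opts)
  }

  async run(prompt: string): Promise<string> {
    if (!this.llm) {
      throw new Error("LLM not initialised for this tenant")
    }
    return this.llm.run(prompt)
  }
}

// Call-site pattern from openai.ts: branch on `llm` presence, not a flag.
// response = llmWrapper?.llm
//   ? await llmWrapper.run(inputs.prompt)
//   : await legacyOpenAIPrompt(inputs)
```

One upside of this pattern is that the truthiness check and the thing being used are the same value, so the "initialised but `llm` missing" inconsistency the old flag allowed can no longer occur.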