diff --git a/packages/pro b/packages/pro
index e60f4b1b364..7b8789efd94 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit e60f4b1b364fd49d2bb082f298757f83cb2032f0
+Subproject commit 7b8789efd940d9f8e5be9927243b19f07361c445
diff --git a/packages/server/src/api/routes/tests/row.spec.ts b/packages/server/src/api/routes/tests/row.spec.ts
index 1f4c4bc7cbb..fb728a3fea4 100644
--- a/packages/server/src/api/routes/tests/row.spec.ts
+++ b/packages/server/src/api/routes/tests/row.spec.ts
@@ -48,7 +48,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),
diff --git a/packages/server/src/api/routes/tests/search.spec.ts b/packages/server/src/api/routes/tests/search.spec.ts
index c66197334e2..dd1221d6fb9 100644
--- a/packages/server/src/api/routes/tests/search.spec.ts
+++ b/packages/server/src/api/routes/tests/search.spec.ts
@@ -52,7 +52,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),
diff --git a/packages/server/src/automations/steps/openai.ts b/packages/server/src/automations/steps/openai.ts
index 6f9adec1dc0..48eaa93057c 100644
--- a/packages/server/src/automations/steps/openai.ts
+++ b/packages/server/src/automations/steps/openai.ts
@@ -106,13 +106,15 @@ export async function run({
       (await features.flags.isEnabled(FeatureFlag.BUDIBASE_AI)) &&
       (await pro.features.isBudibaseAIEnabled())
 
-    let llm
+    let llmWrapper
     if (budibaseAIEnabled || customConfigsEnabled) {
-      llm = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
+      llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+        inputs.model
+      )
     }
 
-    response = llm?.initialised
-      ? await llm.run(inputs.prompt)
+    response = llmWrapper?.llm
+      ? await llmWrapper.run(inputs.prompt)
       : await legacyOpenAIPrompt(inputs)
 
     return {
diff --git a/packages/server/src/automations/tests/openai.spec.ts b/packages/server/src/automations/tests/openai.spec.ts
index 9f2bc50599a..1985465fc02 100644
--- a/packages/server/src/automations/tests/openai.spec.ts
+++ b/packages/server/src/automations/tests/openai.spec.ts
@@ -27,7 +27,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: jest.fn().mockImplementation(() => ({
-        initialised: true,
+        llm: {},
         init: jest.fn(),
         run: jest.fn(),
       })),
diff --git a/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts b/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
index 3638b628927..fa674fcc520 100644
--- a/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
+++ b/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
@@ -18,7 +18,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => "response from LLM"),
         buildPromptFromAIOperation: buildPromptMock,
       }),
diff --git a/packages/server/src/utilities/rowProcessor/utils.ts b/packages/server/src/utilities/rowProcessor/utils.ts
index 9dbeb8ebb27..09d3324ded7 100644
--- a/packages/server/src/utilities/rowProcessor/utils.ts
+++ b/packages/server/src/utilities/rowProcessor/utils.ts
@@ -126,8 +126,10 @@ export async function processAIColumns(
     const numRows = Array.isArray(inputRows) ? inputRows.length : 1
     span?.addTags({ table_id: table._id, numRows })
     const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-    const llm = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
-    if (rows && llm.initialised) {
+    const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+      "gpt-4o-mini"
+    )
+    if (rows && llmWrapper.llm) {
       // Ensure we have snippet context
       await context.ensureSnippetContext()
 
@@ -151,14 +153,14 @@
           }
         }
 
-        const prompt = llm.buildPromptFromAIOperation({
+        const prompt = llmWrapper.buildPromptFromAIOperation({
           schema: aiSchema,
           row,
         })
 
         return tracer.trace("processAIColumn", {}, async span => {
           span?.addTags({ table_id: table._id, column })
-          const llmResponse = await llm.run(prompt!)
+          const llmResponse = await llmWrapper.run(prompt!)
           return {
             ...row,
             [column]: llmResponse,
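
The convention the diff converges on: the tenant wrapper is always constructed, and callers branch on whether its inner `llm` client is present (`llmWrapper?.llm`) rather than on a separate `initialised` flag, falling back to the legacy prompt path otherwise. A minimal TypeScript sketch of that call-site pattern, assuming a hypothetical `LLMWrapper` shape with the same `llm`/`run` members as the mocks above; `getWrapper` and `legacyPrompt` are illustrative stand-ins, not Budibase APIs:

// Sketch only: mirrors the `llmWrapper?.llm` guard used in the diff.
// `LLMWrapper`, `getWrapper`, and `legacyPrompt` are hypothetical stand-ins.
interface LLMWrapper {
  llm?: object // set only when a provider is configured for the tenant
  run(prompt: string): Promise<string>
}

async function generate(
  getWrapper: () => Promise<LLMWrapper>,
  legacyPrompt: (prompt: string) => Promise<string>,
  prompt: string
): Promise<string> {
  const llmWrapper = await getWrapper()
  // Branch on the inner client, not a boolean flag, as in the diff.
  return llmWrapper?.llm ? llmWrapper.run(prompt) : legacyPrompt(prompt)
}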