From 7fcff8a628bb1bd414e962f46ac504cbd57f785e Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Wed, 4 Dec 2024 10:15:22 +0000
Subject: [PATCH 1/5] rely on llm status rather than initialised variable

---
 packages/pro                                        | 2 +-
 packages/server/src/utilities/rowProcessor/utils.ts | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/packages/pro b/packages/pro
index e60f4b1b364..0d7fa31d4c4 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit e60f4b1b364fd49d2bb082f298757f83cb2032f0
+Subproject commit 0d7fa31d4c4019690e2200323421025cdc74b89e
diff --git a/packages/server/src/utilities/rowProcessor/utils.ts b/packages/server/src/utilities/rowProcessor/utils.ts
index 9dbeb8ebb27..3728401ab82 100644
--- a/packages/server/src/utilities/rowProcessor/utils.ts
+++ b/packages/server/src/utilities/rowProcessor/utils.ts
@@ -126,8 +126,8 @@ export async function processAIColumns(
   const numRows = Array.isArray(inputRows) ? inputRows.length : 1
   span?.addTags({ table_id: table._id, numRows })
   const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-  const llm = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
-  if (rows && llm.initialised) {
+  const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
+  if (rows && llmWrapper.llm) {
     // Ensure we have snippet context
     await context.ensureSnippetContext()

From e119e310efe30fa2fdaf8f5516206578cfe8385b Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Wed, 4 Dec 2024 10:19:57 +0000
Subject: [PATCH 2/5] pro ref

---
 packages/pro                                        | 2 +-
 packages/server/src/automations/steps/openai.ts     | 8 ++++----
 packages/server/src/utilities/rowProcessor/utils.ts | 8 +++++---
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/packages/pro b/packages/pro
index 0d7fa31d4c4..7b8789efd94 160000
--- a/packages/pro
+++ b/packages/pro
@@ -1 +1 @@
-Subproject commit 0d7fa31d4c4019690e2200323421025cdc74b89e
+Subproject commit 7b8789efd940d9f8e5be9927243b19f07361c445
diff --git a/packages/server/src/automations/steps/openai.ts b/packages/server/src/automations/steps/openai.ts
index 6f9adec1dc0..8464c497631 100644
--- a/packages/server/src/automations/steps/openai.ts
+++ b/packages/server/src/automations/steps/openai.ts
@@ -106,13 +106,13 @@ export async function run({
     (await features.flags.isEnabled(FeatureFlag.BUDIBASE_AI)) &&
     (await pro.features.isBudibaseAIEnabled())

-  let llm
+  let llmWrapper
   if (budibaseAIEnabled || customConfigsEnabled) {
-    llm = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
+    llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
   }

-  response = llm?.initialised
-    ? await llm.run(inputs.prompt)
+  response = llmWrapper?.llm
+    ? await llmWrapper.run(inputs.prompt)
     : await legacyOpenAIPrompt(inputs)

   return {
diff --git a/packages/server/src/utilities/rowProcessor/utils.ts b/packages/server/src/utilities/rowProcessor/utils.ts
index 3728401ab82..09d3324ded7 100644
--- a/packages/server/src/utilities/rowProcessor/utils.ts
+++ b/packages/server/src/utilities/rowProcessor/utils.ts
@@ -126,7 +126,9 @@ export async function processAIColumns(
   const numRows = Array.isArray(inputRows) ? inputRows.length : 1
   span?.addTags({ table_id: table._id, numRows })
   const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-  const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
+  const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+    "gpt-4o-mini"
+  )
   if (rows && llmWrapper.llm) {
     // Ensure we have snippet context
     await context.ensureSnippetContext()
@@ -151,14 +153,14 @@
         }
       }

-      const prompt = llm.buildPromptFromAIOperation({
+      const prompt = llmWrapper.buildPromptFromAIOperation({
         schema: aiSchema,
         row,
       })

       return tracer.trace("processAIColumn", {}, async span => {
         span?.addTags({ table_id: table._id, column })
-        const llmResponse = await llm.run(prompt!)
+        const llmResponse = await llmWrapper.run(prompt!)
         return {
           ...row,
           [column]: llmResponse,
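Taken together, patches 1 and 2 replace the old `initialised` boolean with a truthiness check on the wrapper's `llm` property: `forCurrentTenant` always returns a wrapper object, and a populated `llm` field is what signals that a model was actually configured. A minimal TypeScript sketch of that contract, using hypothetical names in place of the real @budibase/pro internals, which these patches do not show:

    // Illustrative only: the real wrapper lives in @budibase/pro, so the
    // field and type names below are assumptions, not the actual API.
    interface LLMWrapper {
      llm?: object // set only when a model was successfully configured
      run(prompt: string): Promise<string>
    }

    async function runWithFallback(
      llmWrapper: LLMWrapper | undefined,
      prompt: string,
      legacyPrompt: () => Promise<string>
    ): Promise<string> {
      // The `llm` check replaces the old `initialised` flag: fall back to
      // the legacy OpenAI path whenever no model is available.
      return llmWrapper?.llm ? llmWrapper.run(prompt) : legacyPrompt()
    }

One upside of gating on the model object itself rather than a separate boolean is that the status flag and the thing it describes can no longer drift apart.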
From 46bd790b3a2950ff3b1ace16354e4fe3c3ca2008 Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Wed, 4 Dec 2024 10:35:04 +0000
Subject: [PATCH 3/5] update tests

---
 packages/server/src/api/routes/tests/row.spec.ts | 2 +-
 packages/server/src/automations/steps/openai.ts  | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/packages/server/src/api/routes/tests/row.spec.ts b/packages/server/src/api/routes/tests/row.spec.ts
index 1f4c4bc7cbb..fb728a3fea4 100644
--- a/packages/server/src/api/routes/tests/row.spec.ts
+++ b/packages/server/src/api/routes/tests/row.spec.ts
@@ -48,7 +48,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),
diff --git a/packages/server/src/automations/steps/openai.ts b/packages/server/src/automations/steps/openai.ts
index 8464c497631..48eaa93057c 100644
--- a/packages/server/src/automations/steps/openai.ts
+++ b/packages/server/src/automations/steps/openai.ts
@@ -108,7 +108,9 @@ export async function run({
   let llmWrapper
   if (budibaseAIEnabled || customConfigsEnabled) {
-    llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
+    llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+      inputs.model
+    )
   }

   response = llmWrapper?.llm

From 48a367c2c2affa98e231132abaeb47716c139b1c Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Wed, 4 Dec 2024 10:43:44 +0000
Subject: [PATCH 4/5] fix openai automation tests

---
 packages/server/src/automations/tests/openai.spec.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/server/src/automations/tests/openai.spec.ts b/packages/server/src/automations/tests/openai.spec.ts
index 9f2bc50599a..1985465fc02 100644
--- a/packages/server/src/automations/tests/openai.spec.ts
+++ b/packages/server/src/automations/tests/openai.spec.ts
@@ -27,7 +27,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: jest.fn().mockImplementation(() => ({
-        initialised: true,
+        llm: {},
         init: jest.fn(),
         run: jest.fn(),
       })),
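Patches 3 and 4 bring the Jest mocks in line with the new contract: each mocked wrapper now exposes a truthy `llm: {}` in place of `initialised: true`, so the production guards still take the LLM branch under test. Conversely, a test could exercise the legacy fallback path by returning a wrapper without `llm` — a hypothetical sketch, assuming the same `@budibase/pro` mock shape used throughout these specs:

    // Hypothetical test double: with no `llm` property, `llmWrapper?.llm`
    // is undefined, so code under test should route to the legacy prompt.
    const mockUninitialisedWrapper = {
      run: jest.fn(),
      buildPromptFromAIOperation: jest.fn(),
    }

    jest.mock("@budibase/pro", () => ({
      ai: {
        LargeLanguageModel: {
          forCurrentTenant: async () => mockUninitialisedWrapper,
        },
      },
    }))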
From 3d8e15abc3a5840f82816385ba5257a96316e57d Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Wed, 4 Dec 2024 10:59:31 +0000
Subject: [PATCH 5/5] more usages of initialised

---
 packages/server/src/api/routes/tests/search.spec.ts            | 2 +-
 packages/server/src/utilities/rowProcessor/tests/utils.spec.ts | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/server/src/api/routes/tests/search.spec.ts b/packages/server/src/api/routes/tests/search.spec.ts
index c66197334e2..dd1221d6fb9 100644
--- a/packages/server/src/api/routes/tests/search.spec.ts
+++ b/packages/server/src/api/routes/tests/search.spec.ts
@@ -52,7 +52,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),
diff --git a/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts b/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
index 3638b628927..fa674fcc520 100644
--- a/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
+++ b/packages/server/src/utilities/rowProcessor/tests/utils.spec.ts
@@ -18,7 +18,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => "response from LLM"),
         buildPromptFromAIOperation: buildPromptMock,
       }),
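With patch 5, the remaining mocks in search.spec.ts and utils.spec.ts follow suit, and every call site and test double agrees on one flow: fetch the tenant's wrapper, gate on `llmWrapper.llm`, build a prompt per row, and run it. A condensed sketch of that per-row flow from the patched processAIColumns, with simplified stand-in types rather than the actual Budibase schema and row definitions:

    // Illustrative condensation of the patched processAIColumns flow;
    // AIOperationArgs and the wrapper interface are assumptions, not the
    // actual @budibase/pro types.
    interface AIOperationArgs {
      schema: object
      row: Record<string, unknown>
    }

    interface LLMWrapper {
      llm?: object
      run(prompt: string): Promise<string>
      buildPromptFromAIOperation(args: AIOperationArgs): string
    }

    async function processAIColumn(
      llmWrapper: LLMWrapper,
      aiSchema: object,
      row: Record<string, unknown>,
      column: string
    ): Promise<Record<string, unknown>> {
      if (!llmWrapper.llm) {
        // No configured model for this tenant: leave the row unchanged.
        return row
      }
      const prompt = llmWrapper.buildPromptFromAIOperation({
        schema: aiSchema,
        row,
      })
      const llmResponse = await llmWrapper.run(prompt)
      return { ...row, [column]: llmResponse }
    }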