Skip to content

Commit

Permalink
feat: Azure Open AI chat model & embeddings (n8n-io#8522)
Browse files Browse the repository at this point in the history
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
  • Loading branch information
OlegIvaniv authored Feb 1, 2024
1 parent 254700a commit 934d0d3
Show file tree
Hide file tree
Showing 8 changed files with 354 additions and 5 deletions.
11 changes: 8 additions & 3 deletions cypress/composables/workflow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -106,14 +106,19 @@ export function addSupplementalNodeToParent(
nodeName: string,
endpointType: EndpointType,
parentNodeName: string,
exactMatch = false,
) {
getAddInputEndpointByType(parentNodeName, endpointType).click({ force: true });
getNodeCreatorItems().contains(nodeName).click();
if (exactMatch) {
getNodeCreatorItems().contains(new RegExp("^" + nodeName + "$", "g")).click();
} else {
getNodeCreatorItems().contains(nodeName).click();
}
getConnectionBySourceAndTarget(parentNodeName, nodeName).should('exist');
}

export function addLanguageModelNodeToParent(nodeName: string, parentNodeName: string) {
addSupplementalNodeToParent(nodeName, 'ai_languageModel', parentNodeName);
export function addLanguageModelNodeToParent(nodeName: string, parentNodeName: string, exactMatch = false) {
addSupplementalNodeToParent(nodeName, 'ai_languageModel', parentNodeName, exactMatch);
}

export function addMemoryNodeToParent(nodeName: string, parentNodeName: string) {
Expand Down
5 changes: 3 additions & 2 deletions cypress/e2e/30-langchain.cy.ts
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ describe('Langchain Integration', () => {
addLanguageModelNodeToParent(
AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME,
BASIC_LLM_CHAIN_NODE_NAME,
true
);

clickCreateNewCredential();
Expand Down Expand Up @@ -121,7 +122,7 @@ describe('Langchain Integration', () => {
addNodeToCanvas(MANUAL_CHAT_TRIGGER_NODE_NAME, true);
addNodeToCanvas(AGENT_NODE_NAME, true);

addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME);
addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME, true);

clickCreateNewCredential();
setCredentialValues({
Expand Down Expand Up @@ -159,7 +160,7 @@ describe('Langchain Integration', () => {
addNodeToCanvas(MANUAL_CHAT_TRIGGER_NODE_NAME, true);
addNodeToCanvas(AGENT_NODE_NAME, true);

addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME);
addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME, true);

clickCreateNewCredential();
setCredentialValues({
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import type { IAuthenticateGeneric, ICredentialType, INodeProperties } from 'n8n-workflow';

/**
 * Credential type for the Azure OpenAI service.
 *
 * Holds the API key, the Azure resource name, and the API version; requests
 * are authenticated generically by injecting the key into the `api-key` header.
 */
export class AzureOpenAiApi implements ICredentialType {
	name = 'azureOpenAiApi';

	displayName = 'Azure Open AI';

	documentationUrl = 'azureopenai';

	// Fields the user fills in when creating the credential in the UI.
	properties: INodeProperties[] = [
		{
			name: 'apiKey',
			displayName: 'API Key',
			type: 'string',
			default: '',
			required: true,
			typeOptions: { password: true },
		},
		{
			name: 'resourceName',
			displayName: 'Resource Name',
			type: 'string',
			default: '',
			required: true,
		},
		{
			name: 'apiVersion',
			displayName: 'API Version',
			type: 'string',
			default: '2023-05-15',
			required: true,
		},
	];

	// Generic header-based authentication: the key travels as `api-key`.
	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {
			headers: {
				'api-key': '={{$credentials.apiKey}}',
			},
		},
	};
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType,
type IExecuteFunctions,
type INodeType,
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';

import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';

/**
 * n8n cluster sub-node exposing Azure OpenAI embeddings to AI workflows.
 *
 * The node has no regular inputs; it emits a single `AiEmbedding` output that
 * a parent node (e.g. a vector store) consumes. `supplyData` builds a
 * LangChain `OpenAIEmbeddings` instance configured for Azure from the
 * `azureOpenAiApi` credential.
 */
export class EmbeddingsAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Embeddings Azure OpenAI',
		name: 'embeddingsAzureOpenAi',
		icon: 'file:azure.svg',
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		group: ['transform'],
		version: 1,
		description: 'Use Embeddings Azure OpenAI',
		defaults: {
			name: 'Embeddings Azure OpenAI',
		},

		// Metadata used by the node-creator panel for categorisation and docs links.
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Embeddings'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsazureopenai/',
					},
				],
			},
		},
		// Sub-node shape: no regular inputs, one AiEmbedding output.
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiEmbedding],
		outputNames: ['Embeddings'],
		properties: [
			// UI notice hinting where this sub-node should be connected.
			getConnectionHintNoticeField([NodeConnectionType.AiVectorStore]),
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model(deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Batch Size',
						name: 'batchSize',
						default: 512,
						typeOptions: { maxValue: 2048 },
						description: 'Maximum number of documents to send in each request',
						type: 'number',
					},
					{
						displayName: 'Strip New Lines',
						name: 'stripNewLines',
						default: true,
						description: 'Whether to strip new lines from the input text',
						type: 'boolean',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: -1,
						description:
							'Maximum amount of time a request is allowed to take in seconds. Set to -1 for no timeout.',
						type: 'number',
					},
				],
			},
		],
	};

	/**
	 * Builds the LangChain embeddings instance handed to the parent node.
	 *
	 * Reads the `azureOpenAiApi` credential plus the `model` and `options`
	 * parameters for the given item, and wraps the resulting embeddings in
	 * logWrapper for n8n execution logging.
	 */
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		this.logger.verbose('Supply data for embeddings');
		const credentials = (await this.getCredentials('azureOpenAiApi')) as {
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		};
		const modelName = this.getNodeParameter('model', itemIndex) as string;

		const options = this.getNodeParameter('options', itemIndex, {}) as {
			batchSize?: number;
			stripNewLines?: boolean;
			timeout?: number;
		};

		// The UI sentinel -1 means "no timeout"; the client expects undefined for that.
		if (options.timeout === -1) {
			options.timeout = undefined;
		}

		// Azure-specific fields first; user options (batchSize, stripNewLines,
		// timeout) are spread on top.
		const embeddings = new OpenAIEmbeddings({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
		});

		return {
			response: logWrapper(embeddings, this),
		};
	}
}
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType,
type IExecuteFunctions,
type INodeType,
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';

import type { ClientOptions } from 'openai';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';

/**
 * n8n cluster sub-node exposing an Azure OpenAI chat model to AI workflows.
 *
 * The node has no regular inputs; it emits a single `AiLanguageModel` output
 * consumed by chains and agents. `supplyData` builds a LangChain `ChatOpenAI`
 * instance configured for Azure from the `azureOpenAiApi` credential.
 */
export class LmChatAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Azure OpenAI Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatAzureOpenAi',
		icon: 'file:azure.svg',
		group: ['transform'],
		version: 1,
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'Azure OpenAI Chat Model',
		},
		// Metadata used by the node-creator panel for categorisation and docs links.
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/',
					},
				],
			},
		},
		// Sub-node shape: no regular inputs, one AiLanguageModel output.
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		properties: [
			// UI notice hinting where this sub-node should be connected.
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model(deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 60000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};

	/**
	 * Builds the LangChain chat model instance handed to the parent node.
	 *
	 * Reads the `azureOpenAiApi` credential plus the `model` and `options`
	 * parameters for the given item, and wraps the resulting model in
	 * logWrapper for n8n execution logging.
	 */
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = (await this.getCredentials('azureOpenAiApi')) as {
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		};

		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
		};

		// Currently no extra client configuration; passed through as-is.
		const configuration: ClientOptions = {};

		// Azure-specific fields first, then user options; `timeout` and
		// `maxRetries` come AFTER the spread so their ?? fallbacks win when the
		// user left the options collection empty.
		const model = new ChatOpenAI({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			configuration,
		});

		return {
			response: logWrapper(model, this),
		};
	}
}
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading

0 comments on commit 934d0d3

Please sign in to comment.