diff --git a/extensions/inference-cohere-extension/resources/settings.json b/extensions/inference-cohere-extension/resources/settings.json
index 9d9fb60dec..2a32b57f8b 100644
--- a/extensions/inference-cohere-extension/resources/settings.json
+++ b/extensions/inference-cohere-extension/resources/settings.json
@@ -12,7 +12,7 @@
   {
     "key": "cohere-api-key",
     "title": "API Key",
-    "description": "The Cohere API uses API keys for authentication. Visit your [API Keys](https://platform.openai.com/account/api-keys) page to retrieve the API key you'll use in your requests.",
+    "description": "The Cohere API uses API keys for authentication. Visit your [API Keys](https://dashboard.cohere.com/api-keys) page to retrieve the API key you'll use in your requests.",
     "controllerType": "input",
     "controllerProps": {
       "placeholder": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
diff --git a/extensions/inference-openrouter-extension/README.md b/extensions/inference-openrouter-extension/README.md
new file mode 100644
index 0000000000..aab10755d4
--- /dev/null
+++ b/extensions/inference-openrouter-extension/README.md
@@ -0,0 +1,79 @@
+# OpenRouter Engine Extension
+
+Created using the Jan extension example.
+
+# Create a Jan Extension using TypeScript
+
+Use this template to bootstrap the creation of a TypeScript Jan extension. 🚀
+
+## Create Your Own Extension
+
+To create your own extension, you can use this repository as a template! Just follow the instructions below:
+
+1. Click the **Use this template** button at the top of the repository
+2. Select **Create a new repository**
+3. Select an owner and name for your new repository
+4. Click **Create repository**
+5. Clone your new repository
+
+## Initial Setup
+
+After you've cloned the repository to your local machine or codespace, you'll need to perform some initial setup steps before you can develop your extension.
+
+> [!NOTE]
+>
+> You'll need to have a reasonably modern version of
+> [Node.js](https://nodejs.org) handy. If you are using a version manager like
+> [`nodenv`](https://github.com/nodenv/nodenv) or
+> [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the
+> root of your repository to install the version specified in
+> [`package.json`](./package.json). Otherwise, 20.x or later should work!
+
+1. :hammer_and_wrench: Install the dependencies
+
+   ```bash
+   npm install
+   ```
+
+1. :building_construction: Package the TypeScript for distribution
+
+   ```bash
+   npm run bundle
+   ```
+
+1. :white_check_mark: Check your artifact
+
+   There will be a `.tgz` file in your extension directory now.
+
+## Update the Extension Metadata
+
+The [`package.json`](package.json) file defines metadata about your extension, such as
+the extension name, main entry, description, and version.
+
+When you copy this repository, update `package.json` with the name and description of your extension.
+
+## Update the Extension Code
+
+The [`src/`](./src/) directory is the heart of your extension! This contains the
+source code that will be run when your extension functions are invoked. You can replace the
+contents of this directory with your own code.
+
+There are a few things to keep in mind when writing your extension code:
+
+- Most Jan Extension functions are processed asynchronously.
+  In `index.ts`, you will see that the extension function will return a `Promise`.
+
+  ```typescript
+  import { events, MessageEvent, MessageRequest } from '@janhq/core'
+
+  function onStart(): Promise<void> {
+    return events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
+      this.inference(data)
+    )
+  }
+  ```
+
+  For more information about the Jan Extension Core module, see the
+  [documentation](https://github.com/janhq/jan/blob/main/core/README.md).
+
+So, what are you waiting for? Go ahead and start customizing your extension!
diff --git a/extensions/inference-openrouter-extension/package.json b/extensions/inference-openrouter-extension/package.json
new file mode 100644
index 0000000000..9d3d68d470
--- /dev/null
+++ b/extensions/inference-openrouter-extension/package.json
@@ -0,0 +1,43 @@
+{
+  "name": "@janhq/inference-openrouter-extension",
+  "productName": "OpenRouter Inference Engine",
+  "version": "1.0.0",
+  "description": "This extension enables OpenRouter chat completion API calls",
+  "main": "dist/index.js",
+  "module": "dist/module.js",
+  "engine": "openrouter",
+  "author": "Jan",
+  "license": "AGPL-3.0",
+  "scripts": {
+    "build": "tsc -b . && webpack --config webpack.config.js",
+    "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install",
+    "sync:core": "cd ../.. && yarn build:core && cd extensions && rm yarn.lock && cd inference-openrouter-extension && yarn && yarn build:publish"
+  },
+  "exports": {
+    ".": "./dist/index.js",
+    "./main": "./dist/module.js"
+  },
+  "devDependencies": {
+    "cpx": "^1.5.0",
+    "rimraf": "^3.0.2",
+    "webpack": "^5.88.2",
+    "webpack-cli": "^5.1.4",
+    "ts-loader": "^9.5.0"
+  },
+  "dependencies": {
+    "@janhq/core": "file:../../core",
+    "fetch-retry": "^5.0.6",
+    "ulidx": "^2.3.0"
+  },
+  "engines": {
+    "node": ">=18.0.0"
+  },
+  "files": [
+    "dist/*",
+    "package.json",
+    "README.md"
+  ],
+  "bundleDependencies": [
+    "fetch-retry"
+  ]
+}
diff --git a/extensions/inference-openrouter-extension/resources/models.json b/extensions/inference-openrouter-extension/resources/models.json
new file mode 100644
index 0000000000..d89c07e5af
--- /dev/null
+++ b/extensions/inference-openrouter-extension/resources/models.json
@@ -0,0 +1,28 @@
+[
+  {
+    "sources": [
+      {
+        "url": "https://openrouter.ai"
+      }
+    ],
+    "id": "open-router-auto",
+    "object": "model",
+    "name": "OpenRouter",
+    "version": "1.0",
+    "description": "OpenRouter scouts for the lowest prices and best latencies/throughputs across dozens of providers, and lets you choose how to prioritize them.",
+    "format": "api",
+    "settings": {},
+    "parameters": {
+      "max_tokens": 1024,
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "frequency_penalty": 0,
+      "presence_penalty": 0
+    },
+    "metadata": {
+      "author": "OpenRouter",
+      "tags": ["General", "Big Context Length"]
+    },
+    "engine": "openrouter"
+  }
+]
diff --git a/extensions/inference-openrouter-extension/resources/settings.json b/extensions/inference-openrouter-extension/resources/settings.json
new file mode 100644
index 0000000000..85040e96bd
--- /dev/null
+++ b/extensions/inference-openrouter-extension/resources/settings.json
@@ -0,0 +1,23 @@
+[
+  {
+    "key": "chat-completions-endpoint",
+    "title": "Chat Completions Endpoint",
+    "description": "The endpoint to use for chat completions. See the [OpenRouter API documentation](https://openrouter.ai/docs) for more information.",
+    "controllerType": "input",
+    "controllerProps": {
+      "placeholder": "https://openrouter.ai/api/v1/chat/completions",
+      "value": "https://openrouter.ai/api/v1/chat/completions"
+    }
+  },
+  {
+    "key": "openrouter-api-key",
+    "title": "API Key",
+    "description": "The OpenRouter API uses API keys for authentication. Visit your [API Keys](https://openrouter.ai/keys) page to retrieve the API key you'll use in your requests.",
+    "controllerType": "input",
+    "controllerProps": {
+      "placeholder": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+      "value": "",
+      "type": "password"
+    }
+  }
+]
diff --git a/extensions/inference-openrouter-extension/src/index.ts b/extensions/inference-openrouter-extension/src/index.ts
new file mode 100644
index 0000000000..5417503e5d
--- /dev/null
+++ b/extensions/inference-openrouter-extension/src/index.ts
@@ -0,0 +1,76 @@
+/**
+ * @file This file exports a class that implements the InferenceExtension interface from the @janhq/core package.
+ * The class provides methods for initializing and stopping a model, and for making inference requests.
+ * It also subscribes to events emitted by the @janhq/core package and handles new message requests.
+ * @version 1.0.0
+ * @module inference-openrouter-extension/src/index
+ */
+
+import { RemoteOAIEngine } from '@janhq/core'
+import { PayloadType } from '@janhq/core'
+import { ChatCompletionRole } from '@janhq/core'
+
+declare const SETTINGS: Array<any>
+declare const MODELS: Array<any>
+
+enum Settings {
+  apiKey = 'openrouter-api-key',
+  chatCompletionsEndPoint = 'chat-completions-endpoint',
+}
+
+enum RoleType {
+  user = 'USER',
+  chatbot = 'CHATBOT',
+  system = 'SYSTEM',
+}
+
+/**
+ * A class that implements the InferenceExtension interface from the @janhq/core package.
+ * The class provides methods for initializing and stopping a model, and for making inference requests.
+ * It also subscribes to events emitted by the @janhq/core package and handles new message requests.
+ */ +export default class JanInferenceOpenRouterExtension extends RemoteOAIEngine { + inferenceUrl: string = '' + provider: string = 'openrouter' + + override async onLoad(): Promise { + super.onLoad() + + // Register Settings + this.registerSettings(SETTINGS) + this.registerModels(MODELS) + + this.apiKey = await this.getSetting(Settings.apiKey, '') + this.inferenceUrl = await this.getSetting( + Settings.chatCompletionsEndPoint, + '' + ) + if (this.inferenceUrl.length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } + } + + onSettingUpdate(key: string, value: T): void { + if (key === Settings.apiKey) { + this.apiKey = value as string + } else if (key === Settings.chatCompletionsEndPoint) { + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } + } + } + + transformPayload = (payload: PayloadType)=>({...payload,model:"openrouter/auto"}) +} diff --git a/extensions/inference-openrouter-extension/tsconfig.json b/extensions/inference-openrouter-extension/tsconfig.json new file mode 100644 index 0000000000..2477d58ce5 --- /dev/null +++ b/extensions/inference-openrouter-extension/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "ES6", + "moduleResolution": "node", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": false, + "skipLibCheck": true, + "rootDir": "./src" + }, + "include": ["./src"] +} diff --git a/extensions/inference-openrouter-extension/webpack.config.js b/extensions/inference-openrouter-extension/webpack.config.js new file mode 100644 index 0000000000..cd5e65c725 --- /dev/null +++ b/extensions/inference-openrouter-extension/webpack.config.js @@ -0,0 +1,37 @@ +const webpack = require('webpack') +const packageJson = require('./package.json') +const settingJson = require('./resources/settings.json') +const modelsJson = require('./resources/models.json') + +module.exports = { + experiments: { outputModule: true }, + entry: './src/index.ts', // Adjust the entry point to match your project's main file + mode: 'production', + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/, + }, + ], + }, + plugins: [ + new webpack.DefinePlugin({ + MODELS: JSON.stringify(modelsJson), + SETTINGS: JSON.stringify(settingJson), + ENGINE: JSON.stringify(packageJson.engine), + }), + ], + output: { + filename: 'index.js', // Adjust the output file name as needed + library: { type: 'module' }, // Specify ESM output format + }, + resolve: { + extensions: ['.ts', '.js'], + }, + optimization: { + minimize: false, + }, + // Add loaders and other configuration as needed for your project +}