
Commit

refactor: model names
arielweinberger committed Oct 22, 2023
1 parent da1c903 commit 7f9fcf7
Showing 18 changed files with 60 additions and 50 deletions.
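At a glance: the commit renames model identifiers from colon-separated to slash-separated strings, and Azure OpenAI identifiers now carry the deployment name explicitly. A minimal before/after sketch (identifiers taken from the diffs below; the deployment name is a placeholder):

```ts
import { UniLLM } from "unillm";

const uniLLM = new UniLLM();
const messages = [{ role: "user" as const, content: "Hello!" }];

// Before this commit: colon-separated identifiers
// await uniLLM.createChatCompletion("openai:gpt-3.5-turbo", { messages });
// await uniLLM.createChatCompletion("azure:openai", { messages });

// After this commit: slash-separated, with the Azure deployment spelled out
await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", { messages });
await uniLLM.createChatCompletion("azure/openai/my-deployment", { messages }); // "my-deployment" is a placeholder
```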
12 changes: 6 additions & 6 deletions README.md
@@ -55,15 +55,15 @@ import { UniLLM } from 'unillm';
 const uniLLM = new UniLLM();

 // OpenAI
-const response = await uniLLM.createChatCompletion("openai:gpt-3.5-turbo", { messages: ... });
-const response = await uniLLM.createChatCompletion("openai:gpt-4", { messages: ... });
+const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
+const response = await uniLLM.createChatCompletion("openai/gpt-4", { messages: ... });

 // Anthropic
-const response = await uniLLM.createChatCompletion("anthropic:claude-2", { messages: ... });
-const response = await uniLLM.createChatCompletion("anthropic:claude-1-instant", { messages: ... });
+const response = await uniLLM.createChatCompletion("anthropic/claude-2", { messages: ... });
+const response = await uniLLM.createChatCompletion("anthropic/claude-1-instant", { messages: ... });

 // Azure OpenAI
-const response = await uniLLM.createChatCompletion("azure:openai", { messages: ... });
+const response = await uniLLM.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });

 // More coming soon!
 ```
@@ -75,7 +75,7 @@ Want to see more examples? Check out the **[interactive docs](https://docs.unill
 To enable streaming, simply provide `stream: true` in the options object. Here is an example:

 ```ts
-const response = await uniLLM.createChatCompletion("openai:gpt-3.5-turbo", {
+const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", {
   messages: ...,
   stream: true
 });
8 changes: 4 additions & 4 deletions apps/demo/components/LLMSelector.tsx
@@ -8,19 +8,19 @@ const llms: {
 }[] = [
   {
     name: "OpenAI GPT-3.5 Turbo",
-    value: LLMs["openai:gpt-3.5-turbo"],
+    value: LLMs["openai/gpt-3.5-turbo"],
   },
   {
     name: "OpenAI GPT-4",
-    value: LLMs["openai:gpt-4"],
+    value: LLMs["openai/gpt-4"],
   },
   {
     name: "Anthropic Claude-2",
-    value: LLMs["anthropic:claude-2"],
+    value: LLMs["anthropic/claude-2"],
   },
   {
     name: "Azure OpenAI",
-    value: LLMs["azure:gpt35turbo"],
+    value: LLMs["azure/openai/gpt35turbo"],
   },
 ];

8 changes: 4 additions & 4 deletions apps/demo/utils/types.ts
@@ -1,6 +1,6 @@
 export const LLMs = {
-  "openai:gpt-3.5-turbo": "openai:gpt-3.5-turbo",
-  "openai:gpt-4": "openai:gpt-4",
-  "anthropic:claude-2": "anthropic:claude-2",
-  "azure:gpt35turbo": "azure:gpt35turbo",
+  "openai/gpt-3.5-turbo": "openai/gpt-3.5-turbo",
+  "openai/gpt-4": "openai/gpt-4",
+  "anthropic/claude-2": "anthropic/claude-2",
+  "azure/openai/gpt35turbo": "azure/openai/gpt35turbo",
 };
2 changes: 1 addition & 1 deletion apps/docs/components/DynamicCodeExample.tsx
@@ -72,7 +72,7 @@ export function DynamicCodeExample({ children, defaultLLM, allowedProvider }: Pr
   const ref = useRef<any>();
   const setupRef = useRef<any>();
   const modelRef = useRef<any>();
-  const [selectedLLM, setSelectedLLM] = useState(defaultLLM ?? "openai:gpt-3.5-turbo");
+  const [selectedLLM, setSelectedLLM] = useState(defaultLLM ?? "openai/gpt-3.5-turbo");

   // Find the corresponding token from the DOM
   useEffect(() => {
2 changes: 1 addition & 1 deletion apps/docs/pages/providers-and-models/anthropic.mdx
@@ -9,7 +9,7 @@ import { DynamicCodeExample } from "../../components/DynamicCodeExample";

 ## Usage

-<DynamicCodeExample defaultLLM="anthropic:claude-2" allowedProvider="anthropic">
+<DynamicCodeExample defaultLLM="anthropic/claude-2" allowedProvider="anthropic">
 ```ts copy
 import { UniLLM } from 'unillm';

2 changes: 1 addition & 1 deletion apps/docs/pages/providers-and-models/azure-openai.mdx
@@ -13,7 +13,7 @@ import { DynamicCodeExample } from "../../components/DynamicCodeExample";

 ## Usage

-<DynamicCodeExample defaultLLM="azure:openai" allowedProvider="azure">
+<DynamicCodeExample defaultLLM="azure/openai" allowedProvider="azure">
 ```ts copy
 import { UniLLM } from 'unillm';

2 changes: 1 addition & 1 deletion apps/docs/pages/providers-and-models/openai.mdx
@@ -9,7 +9,7 @@ import { DynamicCodeExample } from "../../components/DynamicCodeExample";

 ## Usage

-<DynamicCodeExample defaultLLM="openai:gpt-3.5-turbo" allowedProvider="openai">
+<DynamicCodeExample defaultLLM="openai/gpt-3.5-turbo" allowedProvider="openai">
 ```ts copy
 import { UniLLM } from 'unillm';

6 changes: 5 additions & 1 deletion package-lock.json

Some generated files are not rendered by default.

10 changes: 5 additions & 5 deletions packages/llm-repo/index.ts
@@ -41,27 +41,27 @@ const azureSetup = ` Make sure the following environment variables are set:
 export const models: {
   [key: string]: ModelDefinition;
 } = {
-  "openai:gpt-3.5-turbo": {
+  "openai/gpt-3.5-turbo": {
     provider: "openai",
     name: "GPT-3.5 Turbo",
     setup: openaiSetup,
   },
-  "openai:gpt-4": {
+  "openai/gpt-4": {
     provider: "openai",
     name: "GPT-4",
     setup: openaiSetup,
   },
-  "anthropic:claude-2": {
+  "anthropic/claude-2": {
     provider: "anthropic",
     name: "Claude 2",
     setup: anthropicSetup,
   },
-  "anthropic:claude-1-instant": {
+  "anthropic/claude-1-instant": {
     provider: "anthropic",
     name: "Claude 1 Instant",
     setup: anthropicSetup,
   },
-  "azure:openai": {
+  "azure/openai": {
     provider: "azure",
     name: "Azure OpenAI",
     setup: azureSetup,
3 changes: 2 additions & 1 deletion packages/unillm-node/index.ts
@@ -38,7 +38,8 @@ export class UniLLM {
   ):
     | Promise<UnifiedCreateChatCompletionNonStreamResult>
     | Promise<UnifiedCreateChatCompletionStreamResult> {
-    const [providerName, model] = providerAndModel.split(":");
+    const [providerName, ...rest] = providerAndModel.split("/");
+    const model = rest.join("/");
     const provider = providers[providerName];

     if (!provider) {
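The split above keeps everything after the first `/` as the model portion, which is what lets Azure identifiers carry a deployment name. A standalone sketch of that behavior (the helper name is illustrative, not an export of the package):

```ts
// Split "provider/model" on the first slash only, so identifiers such as
// "azure/openai/<deployment>" keep their inner slashes in the model part.
function parseModelIdentifier(providerAndModel: string) {
  const [providerName, ...rest] = providerAndModel.split("/");
  return { providerName, model: rest.join("/") };
}

// parseModelIdentifier("openai/gpt-4")
//   => { providerName: "openai", model: "gpt-4" }
// parseModelIdentifier("azure/openai/my-deployment")
//   => { providerName: "azure", model: "openai/my-deployment" }
```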
9 changes: 5 additions & 4 deletions packages/unillm-node/package.json
@@ -1,6 +1,6 @@
 {
   "name": "unillm",
-  "version": "0.1.0-alpha.1",
+  "version": "0.1.0-alpha.2",
   "main": "./index.ts",
   "type": "commonjs",
   "license": "MIT",
@@ -14,7 +14,8 @@
     "lint": "eslint .",
     "test": "vitest --coverage",
     "format": "prettier --write .",
-    "build": "rimraf dist && rollup -c"
+    "build": "rimraf dist && rollup -c",
+    "build:watch": "rimraf dist && rollup -c --watch"
   },
   "devDependencies": {
     "@rollup/plugin-commonjs": "^25.0.7",
@@ -41,9 +42,9 @@
     "@anthropic-ai/sdk": "^0.6.2",
     "@azure/openai": "^1.0.0-beta.6",
     "@dqbd/tiktoken": "^1.0.7",
+    "llm-repo": "*",
     "node-fetch": "^3.3.2",
-    "openai": "^4.8.0",
-    "llm-repo": "*"
+    "openai": "^4.8.0"
   },
   "keywords": [
     "llm",
2 changes: 1 addition & 1 deletion packages/unillm-node/providers/anthropic.ts
@@ -12,7 +12,7 @@ import {
 } from "../utils/types";
 import { UnifiedErrorResponse } from "../utils/UnifiedErrorResponse";

-import { Tiktoken } from "@dqbd/tiktoken/lite";
+import { Tiktoken } from "@dqbd/tiktoken";
 import cl100k_base from "@dqbd/tiktoken/encoders/cl100k_base.json";
 import { Readable } from "stream";
 import { BaseProvider } from "./baseProvider";
5 changes: 3 additions & 2 deletions packages/unillm-node/providers/azure-openai.ts
@@ -33,9 +33,10 @@ export class AzureOpenAIProvider extends BaseProvider<Providers.AzureOpenAI> {
   );

   async createChatCompletionNonStreaming(
-    model: ModelTypes[Providers.AzureOpenAI],
+    _model: ModelTypes[Providers.AzureOpenAI],
     params: UnifiedCreateChatCompletionParamsNonStreaming,
   ): Promise<UnifiedCreateChatCompletionNonStreamResult> {
+    const [, model] = _model.split("/");
     const { baseParams } = this.processUnifiedParamsToAzureOpenAIFormat(params);

     let nativeResult: ChatCompletions;
@@ -198,7 +199,7 @@ export class AzureOpenAIProvider extends BaseProvider<Providers.AzureOpenAI> {

     return new UnifiedErrorResponse(
       {
-        model: `azure:openai:${deployment}`,
+        model: `azure/openai/${deployment}`,
       },
       status,
       error,
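Since `UniLLM` has already stripped the `azure` prefix, this provider receives `openai/<deployment>`, and the destructure added above discards the `openai` segment to recover the deployment name. A tiny sketch (helper name is illustrative):

```ts
// "_model" arrives as "openai/<deployment>"; skip the first segment.
function extractDeployment(model: string): string {
  const [, deployment] = model.split("/");
  return deployment; // e.g. "my-deployment" for "openai/my-deployment"
}
```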
2 changes: 2 additions & 0 deletions packages/unillm-node/rollup.config.js
@@ -31,6 +31,7 @@ module.exports = defineConfig({
       sourcemap: true,
     },
   ],
+  external: ["@dqbd/tiktoken"],
   plugins: [
     nodeResolve(),
     typescript2({
@@ -44,6 +45,7 @@
     generatePackageJson({
       baseContents: () => ({
         ...localPackageJson,
+        devDependencies: {},
         main: "./index.cjs.js",
         module: "./index.esm.js",
         types: "./types/packages/unillm-node/index.d.ts",
2 changes: 1 addition & 1 deletion packages/unillm-node/tests/anthropic.test.ts
@@ -7,7 +7,7 @@ import { testParams } from "./utils/test-data.util";
 const uniLLM = new UniLLM();

 describe("#createChatCompletion - Anthropic", () => {
-  const model = "anthropic:claude-2";
+  const model = "anthropic/claude-2";

   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
17 changes: 10 additions & 7 deletions packages/unillm-node/tests/azure-openai.test.ts
@@ -11,7 +11,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
       const response = await uniLLM.createChatCompletion(
-        `azure:${deployment}`,
+        `azure/openai/${deployment}`,
         {
           ...testParams,
           stream: false,
@@ -24,7 +24,7 @@

     it("Should return a valid function calling response", async () => {
       const response = await uniLLM.createChatCompletion(
-        `azure:${deployment}`,
+        `azure/openai/${deployment}`,
         {
           ...testParams,
           stream: false,
@@ -39,7 +39,7 @@
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(`azure:${deployment}`, {
+        await uniLLM.createChatCompletion(`azure/openai/${deployment}`, {
           ...testParams,
           stream: false,
           messages: [],
@@ -56,10 +56,13 @@

   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(`azure:${deployment}`, {
-        ...testParams,
-        stream: true,
-      });
+      const stream = await uniLLM.createChatCompletion(
+        `azure/openai/${deployment}`,
+        {
+          ...testParams,
+          stream: true,
+        },
+      );

       let testChunk: ChatCompletionChunk;

2 changes: 1 addition & 1 deletion packages/unillm-node/tests/openai.test.ts
@@ -7,7 +7,7 @@ import { testParams, testFunctions } from "./utils/test-data.util";
 const uniLLM = new UniLLM();

 describe("#createChatCompletion - OpenAI", () => {
-  const model = "openai:gpt-3.5-turbo";
+  const model = "openai/gpt-3.5-turbo";

   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
16 changes: 7 additions & 9 deletions packages/unillm-node/utils/types.ts
@@ -1,19 +1,17 @@
 import OpenAI from "openai";
 import Anthropic from "@anthropic-ai/sdk";

-type AzurePrefix = `azure:${string}`;
-
 type StaticParamValues = {
-  "openai:gpt-3.5-turbo": "openai:gpt-3.5-turbo";
-  "openai:gpt-4": "openai:gpt-4";
-  "anthropic:claude-2": "anthropic:claude-2";
+  "openai/gpt-3.5-turbo": "openai/gpt-3.5-turbo";
+  "openai/gpt-4": "openai/gpt-4";
+  "anthropic/claude-2": "anthropic/claude-2";
 };

-type DynamicParamValues = {
-  [key in AzurePrefix]?: string;
+type AzureOpenAIDynamicValue = {
+  [key in `azure/openai/${string}`]?: string;
 };

-export type ModelParamValues = StaticParamValues & DynamicParamValues;
+export type ModelParamValues = StaticParamValues & AzureOpenAIDynamicValue;

 export enum Providers {
   OpenAI,
@@ -24,7 +22,7 @@ export enum Providers {
 export type ModelTypes = {
   [Providers.OpenAI]: OpenAI.CompletionCreateParams["model"];
   [Providers.Anthropic]: Anthropic.CompletionCreateParams["model"];
-  [Providers.AzureOpenAI]: string; // deployment name
+  [Providers.AzureOpenAI]: string;
 };

 export type UnifiedCreateChatCompletionParamsBase = Omit<
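The rewritten mapped type uses a TypeScript template-literal key, so any `azure/openai/<deployment>` string is accepted while the static identifiers stay exact. A self-contained sketch of how the new shape behaves (mirroring the definitions above):

```ts
type StaticParamValues = {
  "openai/gpt-3.5-turbo": "openai/gpt-3.5-turbo";
  "openai/gpt-4": "openai/gpt-4";
  "anthropic/claude-2": "anthropic/claude-2";
};

type AzureOpenAIDynamicValue = {
  [key in `azure/openai/${string}`]?: string;
};

type ModelParamValues = StaticParamValues & AzureOpenAIDynamicValue;

// Any slash-suffixed Azure identifier is a valid key...
const ok: keyof ModelParamValues = "azure/openai/my-deployment";
// ...while the old colon form no longer type-checks:
// const bad: keyof ModelParamValues = "azure:openai"; // type error
```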
