diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/.eslintrc.json b/packages/typespec-test/test/openai_generic/generated/typespec-ts/.eslintrc.json new file mode 100644 index 0000000000..619797ac39 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/.eslintrc.json @@ -0,0 +1,11 @@ +{ + "plugins": ["@azure/azure-sdk"], + "extends": ["plugin:@azure/azure-sdk/azure-sdk-base"], + "rules": { + "@azure/azure-sdk/ts-modules-only-named": "warn", + "@azure/azure-sdk/ts-apiextractor-json-types": "warn", + "@azure/azure-sdk/ts-package-json-types": "warn", + "@azure/azure-sdk/ts-package-json-engine-is-present": "warn", + "tsdoc/syntax": "warn" + } +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/README.md b/packages/typespec-test/test/openai_generic/generated/typespec-ts/README.md new file mode 100644 index 0000000000..f9b2eb4926 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/README.md @@ -0,0 +1,56 @@ +# OpenAI REST client library for JavaScript + +The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. + +**Please rely heavily on our [REST client docs](https://github.com/Azure/azure-sdk-for-js/blob/main/documentation/rest-clients.md) to use this library** + +Key links: + +- [Package (NPM)](https://www.npmjs.com/package/@msinternal/openai-generic) + +## Getting started + +### Currently supported environments + +- LTS versions of Node.js + +### Prerequisites + +- You must have an [Azure subscription](https://azure.microsoft.com/free/) to use this package. + +### Install the `@msinternal/openai-generic` package + +Install the OpenAI REST client library for JavaScript with `npm`: + +```bash +npm install @msinternal/openai-generic +``` + +### Create and authenticate an `OpenAIClient` + +To use an [Azure Active Directory (AAD) token credential](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/samples/AzureIdentityExamples.md#authenticating-with-a-pre-fetched-access-token), +provide an instance of the desired credential type obtained from the +[@azure/identity](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#credentials) library. + +To authenticate with AAD, you must first `npm` install the [`@azure/identity`](https://www.npmjs.com/package/@azure/identity) package. + +After setup, you can choose which type of [credential](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#credentials) from `@azure/identity` to use. +As an example, [DefaultAzureCredential](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#defaultazurecredential) +can be used to authenticate the client. + +Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: +AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET. + +## Troubleshooting + +### Logging + +Enabling logging may help uncover useful information about failures. To see a log of HTTP requests and responses, set the `AZURE_LOG_LEVEL` environment variable to `info`. Alternatively, logging can be enabled at runtime by calling `setLogLevel` in the `@azure/logger`: + +```javascript +const { setLogLevel } = require("@azure/logger"); + +setLogLevel("info"); +``` + +For more detailed instructions on how to enable logs, you can look at the [@azure/logger package docs](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/core/logger).
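The README stops short of showing client construction. A minimal usage sketch follows; it is an editorial illustration, not part of the generated package. The generated `OpenAIClient` constructor (see `src/OpenAIClient.ts` below) takes a `KeyCredential`, so the sketch wraps an API key in `AzureKeyCredential` from `@azure/core-auth`; the `OPENAI_API_KEY` environment variable name is an assumption for the example.

```typescript
import { AzureKeyCredential } from "@azure/core-auth";
import { OpenAIClient } from "@msinternal/openai-generic";

// OPENAI_API_KEY is an assumed variable name for this sketch.
const client = new OpenAIClient(
  new AzureKeyCredential(process.env.OPENAI_API_KEY ?? "")
);

async function main(): Promise<void> {
  // createChatCompletion maps to POST /chat/completions; the client converts
  // camelCase request fields (maxTokens) to their snake_case wire form (max_tokens).
  const result = await client.createChatCompletion({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Say hello in one sentence." }],
    maxTokens: 32,
  });
  console.log(result.model, result.choices);
}

main().catch(console.error);
```

The same pattern applies to every method on the client: a camelCase request model in, a deserialized camelCase response model out (see the mapping code in `src/api/operations.ts` below).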
diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/api-extractor.json b/packages/typespec-test/test/openai_generic/generated/typespec-ts/api-extractor.json new file mode 100644 index 0000000000..9cb974b470 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/api-extractor.json @@ -0,0 +1,18 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json", + "mainEntryPointFilePath": "./types/src/index.d.ts", + "docModel": { "enabled": true }, + "apiReport": { "enabled": true, "reportFolder": "./review" }, + "dtsRollup": { + "enabled": true, + "untrimmedFilePath": "", + "publicTrimmedFilePath": "./types/openai-generic.d.ts" + }, + "messages": { + "tsdocMessageReporting": { "default": { "logLevel": "none" } }, + "extractorMessageReporting": { + "ae-missing-release-tag": { "logLevel": "none" }, + "ae-unresolved-link": { "logLevel": "none" } + } + } +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/karma.conf.js b/packages/typespec-test/test/openai_generic/generated/typespec-ts/karma.conf.js new file mode 100644 index 0000000000..a9d5f1b5fc --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/karma.conf.js @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// https://github.com/karma-runner/karma-chrome-launcher +process.env.CHROME_BIN = require("puppeteer").executablePath(); +require("dotenv").config(); +const { relativeRecordingsPath } = require("@azure-tools/test-recorder"); +process.env.RECORDINGS_RELATIVE_PATH = relativeRecordingsPath(); + +module.exports = function (config) { + config.set({ + // base path that will be used to resolve all patterns (eg. files, exclude) + basePath: "./", + + // frameworks to use + // available frameworks: https://npmjs.org/browse/keyword/karma-adapter + frameworks: ["source-map-support", "mocha"], + + plugins: [ + "karma-mocha", + "karma-mocha-reporter", + "karma-chrome-launcher", + "karma-firefox-launcher", + "karma-env-preprocessor", + "karma-coverage", + "karma-sourcemap-loader", + "karma-junit-reporter", + "karma-source-map-support", + ], + + // list of files / patterns to load in the browser + files: [ + "dist-test/index.browser.js", + { + pattern: "dist-test/index.browser.js.map", + type: "html", + included: false, + served: true, + }, + ], + + // list of files / patterns to exclude + exclude: [], + + // preprocess matching files before serving them to the browser + // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor + preprocessors: { + "**/*.js": ["sourcemap", "env"], + // IMPORTANT: COMMENT following line if you want to debug in your browsers!! 
+ // Preprocess source file to calculate code coverage, however this will make source file unreadable + // "dist-test/index.js": ["coverage"] + }, + + envPreprocessor: [ + "TEST_MODE", + "ENDPOINT", + "AZURE_CLIENT_SECRET", + "AZURE_CLIENT_ID", + "AZURE_TENANT_ID", + "SUBSCRIPTION_ID", + "RECORDINGS_RELATIVE_PATH", + ], + + // test results reporter to use + // possible values: 'dots', 'progress' + // available reporters: https://npmjs.org/browse/keyword/karma-reporter + reporters: ["mocha", "coverage", "junit"], + + coverageReporter: { + // specify a common output directory + dir: "coverage-browser/", + reporters: [ + { type: "json", subdir: ".", file: "coverage.json" }, + { type: "lcovonly", subdir: ".", file: "lcov.info" }, + { type: "html", subdir: "html" }, + { type: "cobertura", subdir: ".", file: "cobertura-coverage.xml" }, + ], + }, + + junitReporter: { + outputDir: "", // results will be saved as $outputDir/$browserName.xml + outputFile: "test-results.browser.xml", // if included, results will be saved as $outputDir/$browserName/$outputFile + suite: "", // suite will become the package name attribute in xml testsuite element + useBrowserName: false, // add browser name to report and classes names + nameFormatter: undefined, // function (browser, result) to customize the name attribute in xml testcase element + classNameFormatter: undefined, // function (browser, result) to customize the classname attribute in xml testcase element + properties: {}, // key value pair of properties to add to the <properties> section of the report + }, + + // web server port + port: 9876, + + // enable / disable colors in the output (reporters and logs) + colors: true, + + // level of logging + // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG + logLevel: config.LOG_INFO, + + // enable / disable watching file and executing tests whenever any file changes + autoWatch: false, + + // --no-sandbox allows our tests to run in Linux without having to change the system. + // --disable-web-security allows us to authenticate from the browser without having to write tests using interactive auth, which would be far more complex.
+ browsers: ["ChromeHeadlessNoSandbox"], + customLaunchers: { + ChromeHeadlessNoSandbox: { + base: "ChromeHeadless", + flags: ["--no-sandbox", "--disable-web-security"], + }, + }, + + // Continuous Integration mode + // if true, Karma captures browsers, runs the tests and exits + singleRun: false, + + // Concurrency level + // how many browser should be started simultaneous + concurrency: 1, + + browserNoActivityTimeout: 60000000, + browserDisconnectTimeout: 10000, + browserDisconnectTolerance: 3, + + client: { + mocha: { + // change Karma's debug.html to the mocha web reporter + reporter: "html", + timeout: "600000", + }, + }, + }); +}; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/package.json b/packages/typespec-test/test/openai_generic/generated/typespec-ts/package.json new file mode 100644 index 0000000000..d35be4ba54 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/package.json @@ -0,0 +1,125 @@ +{ + "name": "@msinternal/openai-generic", + "sdk-type": "client", + "author": "Microsoft Corporation", + "version": "1.0.0-beta.1", + "description": "OpenAI", + "keywords": ["node", "azure", "cloud", "typescript", "browser", "isomorphic"], + "license": "MIT", + "type": "module", + "main": "dist/index.js", + "module": "./dist-esm/src/index.js", + "types": "./types/openai-generic.d.ts", + "exports": { + ".": { + "types": "./types/src/index.d.ts", + "require": "./dist/index.cjs", + "import": "./dist-esm/src/index.js" + }, + "./api": { + "types": "./types/src/api/index.d.ts", + "import": "./dist-esm/src/api/index.js" + }, + "./models": { + "types": "./types/src/models/index.d.ts", + "import": "./dist-esm/src/models/index.js" + } + }, + "repository": "github:Azure/azure-sdk-for-js", + "bugs": { "url": "https://github.com/Azure/azure-sdk-for-js/issues" }, + "files": [ + "dist/", + "dist-esm/src/", + "types/openai-generic.d.ts", + "README.md", + "LICENSE", + "review/*" + ], + "engines": { "node": ">=16.0.0" }, + "scripts": { + "audit": "node ../../../common/scripts/rush-audit.js && rimraf node_modules package-lock.json && npm i --package-lock-only 2>&1 && npm audit", + "build:browser": "tsc -p . && cross-env ONLY_BROWSER=true rollup -c 2>&1", + "build:node": "tsc -p . && cross-env ONLY_NODE=true rollup -c 2>&1", + "build:samples": "echo skipped.", + "build:test": "tsc -p . 
&& rollup -c 2>&1", + "build:debug": "echo skipped.", + "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", + "clean": "rimraf --glob dist dist-browser dist-esm test-dist temp types *.tgz *.log", + "execute:samples": "echo skipped", + "extract-api": "rimraf review && mkdirp ./review && api-extractor run --local", + "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"*.{js,json}\" \"test/**/*.ts\"", + "generate:client": "echo skipped", + "integration-test:browser": "karma start --single-run", + "integration-test:node": "nyc mocha --require source-map-support/register.js --timeout 5000000 --full-trace \"dist-esm/test/{,!(browser)/**/}*.spec.js\"", + "integration-test": "npm run integration-test:node && npm run integration-test:browser", + "lint:fix": "eslint package.json api-extractor.json src test --ext .ts --fix --fix-type [problem,suggestion]", + "lint": "eslint package.json api-extractor.json src test --ext .ts", + "pack": "npm pack 2>&1", + "test:browser": "npm run clean && npm run build:test && npm run unit-test:browser", + "test:node": "npm run clean && npm run build:test && npm run unit-test:node", + "test": "npm run clean && npm run build:test && npm run unit-test", + "unit-test": "npm run unit-test:node && npm run unit-test:browser", + "unit-test:node": "mocha --full-trace \"test/{,!(browser)/**/}*.spec.ts\"", + "unit-test:browser": "karma start --single-run", + "build": "npm run clean && tsc && rollup -c 2>&1 && npm run minify && mkdirp ./review && npm run extract-api", + "minify": "uglifyjs -c -m --comments --source-map \"content='./dist/index.js.map'\" -o ./dist/index.min.js ./dist/index.js" + }, + "sideEffects": false, + "autoPublish": false, + "dependencies": { + "@azure/core-auth": "^1.3.0", + "@azure-rest/core-client": "^1.1.4", + "@azure/core-rest-pipeline": "^1.12.0", + "@azure/logger": "^1.0.0", + "tslib": "^2.2.0", + "@azure/core-util": "^1.4.0" + }, + "devDependencies": { + "@microsoft/api-extractor": "^7.31.1", + "autorest": "latest", + "@types/node": "^16.0.0", + "dotenv": "^16.0.0", + "eslint": "^8.0.0", + "mkdirp": "^2.1.2", + "prettier": "^2.5.1", + "rimraf": "^5.0.0", + "source-map-support": "^0.5.9", + "typescript": "~5.2.0", + "@rollup/plugin-commonjs": "^24.0.0", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-multi-entry": "^6.0.0", + "@rollup/plugin-node-resolve": "^13.1.3", + "rollup": "^2.66.1", + "rollup-plugin-sourcemaps": "^0.6.3", + "uglify-js": "^3.4.9", + "@azure-tools/test-credential": "^1.0.0", + "@azure/identity": "^3.3.0", + "@azure-tools/test-recorder": "^3.0.0", + "mocha": "^10.0.0", + "@types/mocha": "^10.0.0", + "mocha-junit-reporter": "^1.18.0", + "cross-env": "^7.0.2", + "@types/chai": "^4.2.8", + "chai": "^4.2.0", + "karma-chrome-launcher": "^3.0.0", + "karma-coverage": "^2.0.0", + "karma-env-preprocessor": "^0.1.1", + "karma-firefox-launcher": "^2.1.2", + "karma-junit-reporter": "^2.0.1", + "karma-mocha-reporter": "^2.2.5", + "karma-mocha": "^2.0.1", + "karma-source-map-support": "~1.4.0", + "karma-sourcemap-loader": "^0.4.0", + "karma": "^6.2.0", + "nyc": "^15.0.0", + "ts-node": "^10.0.0" + }, + "browser": { + "./dist-esm/test/public/utils/env.js": "./dist-esm/test/public/utils/env.browser.js" + }, + "mocha": { + "extension": ["ts"], + "timeout": "1200000", + "loader": "ts-node/esm" + } +} diff --git 
a/packages/typespec-test/test/openai_generic/generated/typespec-ts/review/openai-generic.api.md b/packages/typespec-test/test/openai_generic/generated/typespec-ts/review/openai-generic.api.md new file mode 100644 index 0000000000..a45211e955 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/review/openai-generic.api.md @@ -0,0 +1,620 @@ +## API Report File for "@msinternal/openai-generic" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; + +// @public (undocumented) +export interface CancelFineTuneOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CancelFineTuningJobOptions extends OperationOptions { +} + +// @public (undocumented) +export interface ChatCompletionFunctionCallOption { + name: string; +} + +// @public (undocumented) +export interface ChatCompletionFunctions { + description?: string; + name: string; + parameters: Record<string, any>; +} + +// @public (undocumented) +export interface ChatCompletionRequestMessage { + content: string | null; + functionCall?: any; + name?: string; + role: "system" | "user" | "assistant" | "function"; +} + +// @public +export interface CompletionUsage { + completionTokens: number; + promptTokens: number; + totalTokens: number; +} + +// @public (undocumented) +export interface CreateChatCompletionOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateChatCompletionRequest { + frequencyPenalty?: number | null; + functionCall?: "none" | "auto" | ChatCompletionFunctionCallOption; + functions?: ChatCompletionFunctions[]; + logitBias?: Record<string, number>; + maxTokens?: number | null; + messages: ChatCompletionRequestMessage[]; + model: string | "gpt4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k-0613"; + n?: number | null; + presencePenalty?: number | null; + stop?: string | string[] | null; + stream?: boolean | null; + temperature?: number | null; + topP?: number | null; + user?: string; +} + +// @public +export interface CreateChatCompletionResponse { + choices: any[]; + created: Date; + id: string; + model: string; + object: string; + // (undocumented) + usage?: CompletionUsage; +} + +// @public (undocumented) +export interface CreateCompletionOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateCompletionRequest { + bestOf?: number | null; + echo?: boolean | null; + frequencyPenalty?: number | null; + logitBias?: Record<string, number>; + logprobs?: number | null; + maxTokens?: number | null; + model: string | "babbage-002" | "davinci-002" | "text-davinci-003" | "text-davinci-002" | "text-davinci-001" | "code-davinci-002" | "text-curie-001" | "text-babbage-001" | "text-ada-001"; + n?: number | null; + presencePenalty?: number | null; + prompt: string | string[] | number[] | number[][] | null; + stop?: string | string[] | null; + stream?: boolean | null; + suffix?: string | null; + temperature?: number | null; + topP?: number | null; + user?: string; +} + +// @public +export interface CreateCompletionResponse { + choices: any[]; + created: Date; + id: string; + model: string; + object: string; + // (undocumented) + usage?:
CompletionUsage; +} + +// @public (undocumented) +export interface CreateEditOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateEditRequest { + input?: string | null; + instruction: string; + model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; + n?: number | null; + temperature?: number | null; + topP?: number | null; +} + +// @public (undocumented) +export interface CreateEditResponse { + choices: any[]; + created: Date; + object: "edit"; + // (undocumented) + usage: CompletionUsage; +} + +// @public (undocumented) +export interface CreateEmbeddingOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateEmbeddingRequest { + input: string | string[] | number[] | number[][]; + model: string | "text-embedding-ada-002"; + // (undocumented) + user?: string; +} + +// @public (undocumented) +export interface CreateEmbeddingResponse { + data: Embedding[]; + model: string; + object: "embedding"; + usage: any; +} + +// @public (undocumented) +export interface CreateFileOptions extends OperationOptions { + // (undocumented) + contentType?: string; +} + +// @public (undocumented) +export interface CreateFileRequest { + file: Uint8Array; + purpose: string; +} + +// @public (undocumented) +export interface CreateFineTuneOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateFineTuneRequest { + batchSize?: number | null; + classificationBetas?: number[] | null; + classificationNClasses?: number | null; + classificationPositiveClass?: string | null; + computeClassificationMetrics?: boolean | null; + learningRateMultiplier?: number | null; + model?: string | "ada" | "babbage" | "curie" | "davinci" | null; + nEpochs?: number | null; + promptLossRate?: number | null; + suffix?: string | null; + trainingFile: string; + validationFile?: string | null; +} + +// @public (undocumented) +export interface CreateFineTuningJobOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateFineTuningJobRequest { + hyperparameters?: any; + model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + suffix?: string | null; + trainingFile: string; + validationFile?: string | null; +} + +// @public (undocumented) +export interface CreateImageEditOptions extends OperationOptions { + // (undocumented) + contentType?: string; +} + +// @public (undocumented) +export interface CreateImageEditRequest { + image: Uint8Array; + mask?: Uint8Array; + n?: number | null; + prompt: string; + responseFormat?: "url" | "b64_json" | null; + size?: "256x256" | "512x512" | "1024x1024" | null; + // (undocumented) + user?: string; +} + +// @public (undocumented) +export interface CreateImageOptions extends OperationOptions { +} + +// @public (undocumented) +export interface CreateImageRequest { + n?: number | null; + prompt: string; + responseFormat?: "url" | "b64_json" | null; + size?: "256x256" | "512x512" | "1024x1024" | null; + // (undocumented) + user?: string; +} + +// @public (undocumented) +export interface CreateImageVariationOptions extends OperationOptions { + // (undocumented) + contentType?: string; +} + +// @public (undocumented) +export interface CreateImageVariationRequest { + image: Uint8Array; + n?: number | null; + responseFormat?: "url" | "b64_json" | null; + size?: "256x256" | "512x512" | "1024x1024" | null; + // (undocumented) + user?: string; +} + +// @public (undocumented) +export interface CreateModerationOptions extends OperationOptions { +} + +// @public 
(undocumented) +export interface CreateModerationRequest { + input: string | string[]; + model?: string | "text-moderation-latest" | "text-moderation-stable"; +} + +// @public (undocumented) +export interface CreateModerationResponse { + id: string; + model: string; + results: any[]; +} + +// @public (undocumented) +export interface CreateTranscriptionOptions extends OperationOptions { + // (undocumented) + contentType?: string; +} + +// @public (undocumented) +export interface CreateTranscriptionRequest { + file: Uint8Array; + language?: string; + model: string | "whisper-1"; + prompt?: string; + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + temperature?: number; +} + +// @public (undocumented) +export interface CreateTranscriptionResponse { + // (undocumented) + text: string; +} + +// @public (undocumented) +export interface CreateTranslationOptions extends OperationOptions { + // (undocumented) + contentType?: string; +} + +// @public (undocumented) +export interface CreateTranslationRequest { + file: Uint8Array; + model: string | "whisper-1"; + prompt?: string; + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + temperature?: number; +} + +// @public (undocumented) +export interface CreateTranslationResponse { + // (undocumented) + text: string; +} + +// @public (undocumented) +export interface DeleteFileOptions extends OperationOptions { +} + +// @public (undocumented) +export interface DeleteFileResponse { + // (undocumented) + deleted: boolean; + // (undocumented) + id: string; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface DeleteModelResponse { + // (undocumented) + deleted: boolean; + // (undocumented) + id: string; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface DeleteOptions extends OperationOptions { +} + +// @public (undocumented) +export interface DownloadFileOptions extends OperationOptions { +} + +// @public +export interface Embedding { + embedding: number[]; + index: number; + object: "embedding"; +} + +// @public (undocumented) +interface Error_2 { + // (undocumented) + code: string | null; + // (undocumented) + message: string; + // (undocumented) + param: string | null; + // (undocumented) + type: string; +} +export { Error_2 as Error } + +// @public +export interface FineTune { + createdAt: Date; + events?: FineTuneEvent[]; + fineTunedModel: string | null; + hyperparams: any; + id: string; + model: string; + object: "fine-tune"; + organizationId: string; + resultFiles: OpenAIFile[]; + status: "created" | "running" | "succeeded" | "failed" | "cancelled"; + trainingFiles: OpenAIFile[]; + updatedAt: Date; + validationFiles: OpenAIFile[]; +} + +// @public (undocumented) +export interface FineTuneEvent { + // (undocumented) + createdAt: Date; + // (undocumented) + level: string; + // (undocumented) + message: string; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface FineTuningJob { + createdAt: Date; + error: any; + fineTunedModel: string | null; + finishedAt: Date | null; + hyperparameters: any; + id: string; + model: string; + object: "fine_tuning.job"; + organizationId: string; + resultFiles: string[]; + status: "created" | "pending" | "running" | "succeeded" | "failed" | "cancelled"; + trainedTokens: number | null; + trainingFile: string; + validationFile: string | null; +} + +// @public (undocumented) +export interface FineTuningJobEvent { + // (undocumented) + createdAt: Date; + // (undocumented) + id: 
string; + // (undocumented) + level: "info" | "warn" | "error"; + // (undocumented) + message: string; + // (undocumented) + object: string; +} + +// @public +interface Image_2 { + b64Json?: Uint8Array; + url?: string; +} +export { Image_2 as Image } + +// @public (undocumented) +export interface ImagesResponse { + // (undocumented) + created: Date; + // (undocumented) + data: Image_2[]; +} + +// @public (undocumented) +export interface ListFilesOptions extends OperationOptions { +} + +// @public (undocumented) +export interface ListFilesResponse { + // (undocumented) + data: OpenAIFile[]; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface ListFineTuneEventsOptions extends OperationOptions { + stream?: boolean; +} + +// @public (undocumented) +export interface ListFineTuneEventsResponse { + // (undocumented) + data: FineTuneEvent[]; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface ListFineTunesOptions extends OperationOptions { +} + +// @public (undocumented) +export interface ListFineTunesResponse { + // (undocumented) + data: FineTune[]; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface ListFineTuningEventsOptions extends OperationOptions { + after?: string; + limit?: number; +} + +// @public (undocumented) +export interface ListFineTuningJobEventsResponse { + // (undocumented) + data: FineTuningJobEvent[]; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface ListModelsOptions extends OperationOptions { +} + +// @public (undocumented) +export interface ListModelsResponse { + // (undocumented) + data: Model[]; + // (undocumented) + object: string; +} + +// @public (undocumented) +export interface ListPaginatedFineTuningJobsOptions extends OperationOptions { + after?: string; + limit?: number; +} + +// @public (undocumented) +export interface ListPaginatedFineTuningJobsResponse { + // (undocumented) + data: FineTuningJob[]; + // (undocumented) + hasMore: boolean; + // (undocumented) + object: string; +} + +// @public +export interface Model { + created: Date; + id: string; + object: "model"; + ownedBy: string; +} + +// @public (undocumented) +export class OpenAIClient { + constructor(credential: KeyCredential, options?: OpenAIClientOptions); + // (undocumented) + cancelFineTune(fineTuneId: string, options?: CancelFineTuneOptions): Promise<FineTune>; + // (undocumented) + cancelFineTuningJob(fineTuningJobId: string, options?: CancelFineTuningJobOptions): Promise<FineTuningJob>; + // (undocumented) + createChatCompletion(body: CreateChatCompletionRequest, options?: CreateChatCompletionOptions): Promise<CreateChatCompletionResponse>; + // (undocumented) + createCompletion(body: CreateCompletionRequest, options?: CreateCompletionOptions): Promise<CreateCompletionResponse>; + // (undocumented) + createEdit(edit: CreateEditRequest, options?: CreateEditOptions): Promise<CreateEditResponse>; + // (undocumented) + createEmbedding(embedding: CreateEmbeddingRequest, options?: CreateEmbeddingOptions): Promise<CreateEmbeddingResponse>; + // (undocumented) + createFile(file: CreateFileRequest, options?: CreateFileOptions): Promise<OpenAIFile>; + // (undocumented) + createFineTune(fineTune: CreateFineTuneRequest, options?: CreateFineTuneOptions): Promise<FineTune>; + createFineTuningJob(job: CreateFineTuningJobRequest, options?: CreateFineTuningJobOptions): Promise<FineTuningJob>; + // (undocumented) + createImage(image: CreateImageRequest, options?: CreateImageOptions): Promise<ImagesResponse>; + // (undocumented) + createImageEdit(image: CreateImageEditRequest, options?: CreateImageEditOptions): Promise<ImagesResponse>; + // (undocumented) + createImageVariation(image: CreateImageVariationRequest, options?: CreateImageVariationOptions): Promise<ImagesResponse>; + // (undocumented) + createModeration(content: CreateModerationRequest, options?: CreateModerationOptions): Promise<CreateModerationResponse>; + // (undocumented) + createTranscription(audio: CreateTranscriptionRequest, options?: CreateTranscriptionOptions): Promise<CreateTranscriptionResponse>; + // (undocumented) + createTranslation(audio: CreateTranslationRequest, options?: CreateTranslationOptions): Promise<CreateTranslationResponse>; + // (undocumented) + deleteFile(fileId: string, options?: DeleteFileOptions): Promise<DeleteFileResponse>; + deleteOperation(model: string, options?: DeleteOptions): Promise<DeleteModelResponse>; + // (undocumented) + downloadFile(fileId: string, options?: DownloadFileOptions): Promise<string>; + // (undocumented) + listFiles(options?: ListFilesOptions): Promise<ListFilesResponse>; + // (undocumented) + listFineTuneEvents(fineTuneId: string, options?: ListFineTuneEventsOptions): Promise<ListFineTuneEventsResponse>; + // (undocumented) + listFineTunes(options?: ListFineTunesOptions): Promise<ListFineTunesResponse>; + // (undocumented) + listFineTuningEvents(fineTuningJobId: string, options?: ListFineTuningEventsOptions): Promise<ListFineTuningJobEventsResponse>; + // (undocumented) + listModels(options?: ListModelsOptions): Promise<ListModelsResponse>; + // (undocumented) + listPaginatedFineTuningJobs(options?: ListPaginatedFineTuningJobsOptions): Promise<ListPaginatedFineTuningJobsResponse>; + readonly pipeline: Pipeline; + // (undocumented) + retrieve(model: string, options?: RetrieveOptions): Promise<Model>; + // (undocumented) + retrieveFile(fileId: string, options?: RetrieveFileOptions): Promise<OpenAIFile>; + // (undocumented) + retrieveFineTune(fineTuneId: string, options?: RetrieveFineTuneOptions): Promise<FineTune>; + // (undocumented) + retrieveFineTuningJob(fineTuningJobId: string, options?: RetrieveFineTuningJobOptions): Promise<FineTuningJob>; +} + +// @public (undocumented) +export interface OpenAIClientOptions extends ClientOptions { +} + +// @public +export interface OpenAIFile { + bytes: number; + createdAt: Date; + filename: string; + id: string; + object: "file"; + purpose: string; + status: "uploaded" | "processed" | "pending" | "error" | "deleting" | "deleted"; + statusDetails?: string | null; +} + +// @public (undocumented) +export interface RetrieveFileOptions extends OperationOptions { +} + +// @public (undocumented) +export interface RetrieveFineTuneOptions extends OperationOptions { +} + +// @public (undocumented) +export interface RetrieveFineTuningJobOptions extends OperationOptions { +} + +// @public (undocumented) +export interface RetrieveOptions extends OperationOptions { +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/rollup.config.js b/packages/typespec-test/test/openai_generic/generated/typespec-ts/rollup.config.js new file mode 100644 index 0000000000..61251d7a8d --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/rollup.config.js @@ -0,0 +1,118 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import nodeResolve from "@rollup/plugin-node-resolve"; +import cjs from "@rollup/plugin-commonjs"; +import sourcemaps from "rollup-plugin-sourcemaps"; +import multiEntry from "@rollup/plugin-multi-entry"; +import json from "@rollup/plugin-json"; + +import nodeBuiltins from "builtin-modules"; + +// #region Warning Handler + +/** + * A function that can determine whether a rollup warning should be ignored. If + * the function returns `true`, then the warning will not be displayed.
+ */ + +function ignoreNiseSinonEval(warning) { + return ( + warning.code === "EVAL" && + warning.id && + (warning.id.includes("node_modules/nise") || + warning.id.includes("node_modules/sinon")) === true + ); +} + +function ignoreChaiCircularDependency(warning) { + return ( + warning.code === "CIRCULAR_DEPENDENCY" && + warning.importer && + warning.importer.includes("node_modules/chai") === true + ); +} + +const warningInhibitors = [ignoreChaiCircularDependency, ignoreNiseSinonEval]; + +/** + * Construct a warning handler for the shared rollup configuration + * that ignores certain warnings that are not relevant to testing. + */ +function makeOnWarnForTesting() { + return (warning, warn) => { + // If every inhibitor returns false (i.e. no inhibitors), then show the warning + if (warningInhibitors.every((inhib) => !inhib(warning))) { + warn(warning); + } + }; +} + +// #endregion + +function makeBrowserTestConfig() { + const config = { + input: { + include: ["dist-esm/test/**/*.spec.js"], + exclude: ["dist-esm/test/**/node/**"], + }, + output: { + file: `dist-test/index.browser.js`, + format: "umd", + sourcemap: true, + }, + preserveSymlinks: false, + plugins: [ + multiEntry({ exports: false }), + nodeResolve({ + mainFields: ["module", "browser"], + }), + cjs(), + json(), + sourcemaps(), + //viz({ filename: "dist-test/browser-stats.html", sourcemap: true }) + ], + onwarn: makeOnWarnForTesting(), + // Disable tree-shaking of test code. In rollup-plugin-node-resolve@5.0.0, + // rollup started respecting the "sideEffects" field in package.json. Since + // our package.json sets "sideEffects=false", this also applies to test + // code, which causes all tests to be removed by tree-shaking. + treeshake: false, + }; + + return config; +} + +const defaultConfigurationOptions = { + disableBrowserBundle: false, +}; + +export function makeConfig(pkg, options) { + options = { + ...defaultConfigurationOptions, + ...(options || {}), + }; + + const baseConfig = { + // Use the package's module field if it has one + input: pkg["module"] || "dist-esm/src/index.js", + external: [ + ...nodeBuiltins, + ...Object.keys(pkg.dependencies), + ...Object.keys(pkg.devDependencies), + ], + output: { file: "dist/index.js", format: "cjs", sourcemap: true }, + preserveSymlinks: false, + plugins: [sourcemaps(), nodeResolve()], + }; + + const config = [baseConfig]; + + if (!options.disableBrowserBundle) { + config.push(makeBrowserTestConfig()); + } + + return config; +} + +export default makeConfig(require("./package.json")); diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/OpenAIClient.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/OpenAIClient.ts new file mode 100644 index 0000000000..8fba8fed4f --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/OpenAIClient.ts @@ -0,0 +1,321 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import { KeyCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; +import { + CreateTranscriptionRequest, + CreateTranscriptionResponse, + CreateTranslationRequest, + CreateTranslationResponse, + CreateChatCompletionRequest, + CreateChatCompletionResponse, + CreateFineTuningJobRequest, + FineTuningJob, + ListPaginatedFineTuningJobsResponse, + ListFineTuningJobEventsResponse, + CreateCompletionRequest, + CreateCompletionResponse, + CreateEditRequest, + CreateEditResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + ListFilesResponse, + OpenAIFile, + CreateFileRequest, + DeleteFileResponse, + CreateFineTuneRequest, + FineTune, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateImageRequest, + ImagesResponse, + CreateImageEditRequest, + CreateImageVariationRequest, + CreateModerationRequest, + CreateModerationResponse, +} from "./models/models.js"; +import { + CreateTranscriptionOptions, + CreateTranslationOptions, + CreateChatCompletionOptions, + CreateFineTuningJobOptions, + ListPaginatedFineTuningJobsOptions, + RetrieveFineTuningJobOptions, + ListFineTuningEventsOptions, + CancelFineTuningJobOptions, + CreateCompletionOptions, + CreateEditOptions, + CreateEmbeddingOptions, + ListFilesOptions, + CreateFileOptions, + RetrieveFileOptions, + DeleteFileOptions, + DownloadFileOptions, + CreateFineTuneOptions, + ListFineTunesOptions, + RetrieveFineTuneOptions, + ListFineTuneEventsOptions, + CancelFineTuneOptions, + ListModelsOptions, + RetrieveOptions, + DeleteOptions, + CreateImageOptions, + CreateImageEditOptions, + CreateImageVariationOptions, + CreateModerationOptions, +} from "./models/options.js"; +import { + createOpenAI, + OpenAIClientOptions, + OpenAIContext, + createTranscription, + createTranslation, + createChatCompletion, + createFineTuningJob, + listPaginatedFineTuningJobs, + retrieveFineTuningJob, + listFineTuningEvents, + cancelFineTuningJob, + createCompletion, + createEdit, + createEmbedding, + listFiles, + createFile, + retrieveFile, + deleteFile, + downloadFile, + createFineTune, + listFineTunes, + retrieveFineTune, + listFineTuneEvents, + cancelFineTune, + listModels, + retrieve, + deleteOperation, + createImage, + createImageEdit, + createImageVariation, + createModeration, +} from "./api/index.js"; + +export { OpenAIClientOptions } from "./api/OpenAIContext.js"; + +export class OpenAIClient { + private _client: OpenAIContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + /** The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
*/ + constructor(credential: KeyCredential, options: OpenAIClientOptions = {}) { + this._client = createOpenAI(credential, options); + this.pipeline = this._client.pipeline; + } + + createTranscription( + audio: CreateTranscriptionRequest, + options: CreateTranscriptionOptions = { requestOptions: {} } + ): Promise<CreateTranscriptionResponse> { + return createTranscription(this._client, audio, options); + } + + createTranslation( + audio: CreateTranslationRequest, + options: CreateTranslationOptions = { requestOptions: {} } + ): Promise<CreateTranslationResponse> { + return createTranslation(this._client, audio, options); + } + + createChatCompletion( + body: CreateChatCompletionRequest, + options: CreateChatCompletionOptions = { requestOptions: {} } + ): Promise<CreateChatCompletionResponse> { + return createChatCompletion(this._client, body, options); + } + + /** + * Creates a job that fine-tunes a specified model from a given dataset. + * + * Response includes details of the enqueued job including job status and the name of the + * fine-tuned models once complete. + * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + */ + createFineTuningJob( + job: CreateFineTuningJobRequest, + options: CreateFineTuningJobOptions = { requestOptions: {} } + ): Promise<FineTuningJob> { + return createFineTuningJob(this._client, job, options); + } + + listPaginatedFineTuningJobs( + options: ListPaginatedFineTuningJobsOptions = { requestOptions: {} } + ): Promise<ListPaginatedFineTuningJobsResponse> { + return listPaginatedFineTuningJobs(this._client, options); + } + + retrieveFineTuningJob( + fineTuningJobId: string, + options: RetrieveFineTuningJobOptions = { requestOptions: {} } + ): Promise<FineTuningJob> { + return retrieveFineTuningJob(this._client, fineTuningJobId, options); + } + + listFineTuningEvents( + fineTuningJobId: string, + options: ListFineTuningEventsOptions = { requestOptions: {} } + ): Promise<ListFineTuningJobEventsResponse> { + return listFineTuningEvents(this._client, fineTuningJobId, options); + } + + cancelFineTuningJob( + fineTuningJobId: string, + options: CancelFineTuningJobOptions = { requestOptions: {} } + ): Promise<FineTuningJob> { + return cancelFineTuningJob(this._client, fineTuningJobId, options); + } + + createCompletion( + body: CreateCompletionRequest, + options: CreateCompletionOptions = { requestOptions: {} } + ): Promise<CreateCompletionResponse> { + return createCompletion(this._client, body, options); + } + + createEdit( + edit: CreateEditRequest, + options: CreateEditOptions = { requestOptions: {} } + ): Promise<CreateEditResponse> { + return createEdit(this._client, edit, options); + } + + createEmbedding( + embedding: CreateEmbeddingRequest, + options: CreateEmbeddingOptions = { requestOptions: {} } + ): Promise<CreateEmbeddingResponse> { + return createEmbedding(this._client, embedding, options); + } + + listFiles( + options: ListFilesOptions = { requestOptions: {} } + ): Promise<ListFilesResponse> { + return listFiles(this._client, options); + } + + createFile( + file: CreateFileRequest, + options: CreateFileOptions = { requestOptions: {} } + ): Promise<OpenAIFile> { + return createFile(this._client, file, options); + } + + retrieveFile( + fileId: string, + options: RetrieveFileOptions = { requestOptions: {} } + ): Promise<OpenAIFile> { + return retrieveFile(this._client, fileId, options); + } + + deleteFile( + fileId: string, + options: DeleteFileOptions = { requestOptions: {} } + ): Promise<DeleteFileResponse> { + return deleteFile(this._client, fileId, options); + } + + downloadFile( + fileId: string, + options: DownloadFileOptions = { requestOptions: {} } + ): Promise<string> { + return downloadFile(this._client, fileId, options); + } + + createFineTune( + fineTune: CreateFineTuneRequest, + options: CreateFineTuneOptions = { requestOptions: {} } + ): Promise<FineTune> { + return createFineTune(this._client, fineTune, options); + } + + listFineTunes( + options: ListFineTunesOptions = { requestOptions: {} } + ): Promise<ListFineTunesResponse> { + return listFineTunes(this._client, options); + } + + retrieveFineTune( + fineTuneId: string, + options: RetrieveFineTuneOptions = { requestOptions: {} } + ): Promise<FineTune> { + return retrieveFineTune(this._client, fineTuneId, options); + } + + listFineTuneEvents( + fineTuneId: string, + options: ListFineTuneEventsOptions = { requestOptions: {} } + ): Promise<ListFineTuneEventsResponse> { + return listFineTuneEvents(this._client, fineTuneId, options); + } + + cancelFineTune( + fineTuneId: string, + options: CancelFineTuneOptions = { requestOptions: {} } + ): Promise<FineTune> { + return cancelFineTune(this._client, fineTuneId, options); + } + + listModels( + options: ListModelsOptions = { requestOptions: {} } + ): Promise<ListModelsResponse> { + return listModels(this._client, options); + } + + retrieve( + model: string, + options: RetrieveOptions = { requestOptions: {} } + ): Promise<Model> { + return retrieve(this._client, model, options); + } + + /** + * @fixme delete is a reserved word that cannot be used as an operation name. Please add @projectedName( + * "javascript", "<JS-Specific-Name>") to the operation to override the generated name. + */ + deleteOperation( + model: string, + options: DeleteOptions = { requestOptions: {} } + ): Promise<DeleteModelResponse> { + return deleteOperation(this._client, model, options); + } + + createImage( + image: CreateImageRequest, + options: CreateImageOptions = { requestOptions: {} } + ): Promise<ImagesResponse> { + return createImage(this._client, image, options); + } + + createImageEdit( + image: CreateImageEditRequest, + options: CreateImageEditOptions = { requestOptions: {} } + ): Promise<ImagesResponse> { + return createImageEdit(this._client, image, options); + } + + createImageVariation( + image: CreateImageVariationRequest, + options: CreateImageVariationOptions = { requestOptions: {} } + ): Promise<ImagesResponse> { + return createImageVariation(this._client, image, options); + } + + createModeration( + content: CreateModerationRequest, + options: CreateModerationOptions = { requestOptions: {} } + ): Promise<CreateModerationResponse> { + return createModeration(this._client, content, options); + } +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/OpenAIContext.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/OpenAIContext.ts new file mode 100644 index 0000000000..5aa8ff7b97 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/OpenAIContext.ts @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { KeyCredential } from "@azure/core-auth"; +import { ClientOptions } from "@azure-rest/core-client"; +import { OpenAIContext } from "../rest/index.js"; +import getClient from "../rest/index.js"; + +export interface OpenAIClientOptions extends ClientOptions {} + +export { OpenAIContext } from "../rest/index.js"; + +/** The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
*/ +export function createOpenAI( + credential: KeyCredential, + options: OpenAIClientOptions = {} +): OpenAIContext { + const clientContext = getClient(credential, options); + return clientContext; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/index.ts new file mode 100644 index 0000000000..c0cda4a15e --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/index.ts @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export { + createOpenAI, + OpenAIClientOptions, + OpenAIContext, +} from "./OpenAIContext.js"; +export { + createTranscription, + createTranslation, + createChatCompletion, + createFineTuningJob, + listPaginatedFineTuningJobs, + retrieveFineTuningJob, + listFineTuningEvents, + cancelFineTuningJob, + createCompletion, + createEdit, + createEmbedding, + listFiles, + createFile, + retrieveFile, + deleteFile, + downloadFile, + createFineTune, + listFineTunes, + retrieveFineTune, + listFineTuneEvents, + cancelFineTune, + listModels, + retrieve, + deleteOperation, + createImage, + createImageEdit, + createImageVariation, + createModeration, +} from "./operations.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/operations.ts new file mode 100644 index 0000000000..05ccd4dff8 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/operations.ts @@ -0,0 +1,1717 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { + CreateTranscriptionRequest, + CreateTranscriptionResponse, + CreateTranslationRequest, + CreateTranslationResponse, + CreateChatCompletionRequest, + CreateChatCompletionResponse, + CreateFineTuningJobRequest, + FineTuningJob, + ListPaginatedFineTuningJobsResponse, + ListFineTuningJobEventsResponse, + CreateCompletionRequest, + CreateCompletionResponse, + CreateEditRequest, + CreateEditResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + ListFilesResponse, + OpenAIFile, + CreateFileRequest, + DeleteFileResponse, + CreateFineTuneRequest, + FineTune, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateImageRequest, + ImagesResponse, + CreateImageEditRequest, + CreateImageVariationRequest, + CreateModerationRequest, + CreateModerationResponse, +} from "../models/models.js"; +import { + CancelFineTune200Response, + CancelFineTuneDefaultResponse, + CancelFineTuningJob200Response, + CancelFineTuningJobDefaultResponse, + CreateChatCompletion200Response, + CreateChatCompletionDefaultResponse, + CreateCompletion200Response, + CreateCompletionDefaultResponse, + CreateEdit200Response, + CreateEditDefaultResponse, + CreateEmbedding200Response, + CreateEmbeddingDefaultResponse, + CreateFile200Response, + CreateFileDefaultResponse, + CreateFineTune200Response, + CreateFineTuneDefaultResponse, + CreateFineTuningJob200Response, + CreateFineTuningJobDefaultResponse, + CreateImage200Response, + CreateImageDefaultResponse, + CreateImageEdit200Response, + CreateImageEditDefaultResponse, + CreateImageVariation200Response, + CreateImageVariationDefaultResponse, + CreateModeration200Response, + CreateModerationDefaultResponse, + CreateTranscription200Response, + CreateTranscriptionDefaultResponse, + 
CreateTranslation200Response, + CreateTranslationDefaultResponse, + DeleteFile200Response, + DeleteFileDefaultResponse, + DeleteOperation200Response, + DeleteOperationDefaultResponse, + DownloadFile200Response, + DownloadFileDefaultResponse, + isUnexpected, + ListFiles200Response, + ListFilesDefaultResponse, + ListFineTuneEvents200Response, + ListFineTuneEventsDefaultResponse, + ListFineTunes200Response, + ListFineTunesDefaultResponse, + ListFineTuningEvents200Response, + ListFineTuningEventsDefaultResponse, + ListModels200Response, + ListModelsDefaultResponse, + ListPaginatedFineTuningJobs200Response, + ListPaginatedFineTuningJobsDefaultResponse, + OpenAIContext as Client, + Retrieve200Response, + RetrieveDefaultResponse, + RetrieveFile200Response, + RetrieveFileDefaultResponse, + RetrieveFineTune200Response, + RetrieveFineTuneDefaultResponse, + RetrieveFineTuningJob200Response, + RetrieveFineTuningJobDefaultResponse, +} from "../rest/index.js"; +import { + StreamableMethod, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; +import { uint8ArrayToString, stringToUint8Array } from "@azure/core-util"; +import { + CreateTranscriptionOptions, + CreateTranslationOptions, + CreateChatCompletionOptions, + CreateFineTuningJobOptions, + ListPaginatedFineTuningJobsOptions, + RetrieveFineTuningJobOptions, + ListFineTuningEventsOptions, + CancelFineTuningJobOptions, + CreateCompletionOptions, + CreateEditOptions, + CreateEmbeddingOptions, + ListFilesOptions, + CreateFileOptions, + RetrieveFileOptions, + DeleteFileOptions, + DownloadFileOptions, + CreateFineTuneOptions, + ListFineTunesOptions, + RetrieveFineTuneOptions, + ListFineTuneEventsOptions, + CancelFineTuneOptions, + ListModelsOptions, + RetrieveOptions, + DeleteOptions, + CreateImageOptions, + CreateImageEditOptions, + CreateImageVariationOptions, + CreateModerationOptions, +} from "../models/options.js"; + +export function _createTranscriptionSend( + context: Client, + audio: CreateTranscriptionRequest, + options: CreateTranscriptionOptions = { requestOptions: {} } +): StreamableMethod< + CreateTranscription200Response | CreateTranscriptionDefaultResponse +> { + return context + .path("/audio/transcriptions") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? 
"multipart/form-data", + body: { + file: uint8ArrayToString(audio["file"], "base64"), + model: audio["model"], + prompt: audio["prompt"], + response_format: audio["responseFormat"], + temperature: audio["temperature"], + language: audio["language"], + }, + }); +} + +export async function _createTranscriptionDeserialize( + result: CreateTranscription200Response | CreateTranscriptionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + text: result.body["text"], + }; +} + +export async function createTranscription( + context: Client, + audio: CreateTranscriptionRequest, + options: CreateTranscriptionOptions = { requestOptions: {} } +): Promise { + const result = await _createTranscriptionSend(context, audio, options); + return _createTranscriptionDeserialize(result); +} + +export function _createTranslationSend( + context: Client, + audio: CreateTranslationRequest, + options: CreateTranslationOptions = { requestOptions: {} } +): StreamableMethod< + CreateTranslation200Response | CreateTranslationDefaultResponse +> { + return context + .path("/audio/translations") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? "multipart/form-data", + body: { + file: uint8ArrayToString(audio["file"], "base64"), + model: audio["model"], + prompt: audio["prompt"], + response_format: audio["responseFormat"], + temperature: audio["temperature"], + }, + }); +} + +export async function _createTranslationDeserialize( + result: CreateTranslation200Response | CreateTranslationDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + text: result.body["text"], + }; +} + +export async function createTranslation( + context: Client, + audio: CreateTranslationRequest, + options: CreateTranslationOptions = { requestOptions: {} } +): Promise { + const result = await _createTranslationSend(context, audio, options); + return _createTranslationDeserialize(result); +} + +export function _createChatCompletionSend( + context: Client, + body: CreateChatCompletionRequest, + options: CreateChatCompletionOptions = { requestOptions: {} } +): StreamableMethod< + CreateChatCompletion200Response | CreateChatCompletionDefaultResponse +> { + return context + .path("/chat/completions") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + model: body["model"], + messages: (body["messages"] ?? []).map((p) => ({ + role: p["role"], + content: p["content"], + name: p["name"], + function_call: !p.functionCall + ? undefined + : { + name: p.functionCall?.["name"], + arguments: p.functionCall?.["arguments"], + }, + })), + functions: (body["functions"] ?? 
[]).map((p) => ({ + name: p["name"], + description: p["description"], + parameters: p["parameters"], + })), + function_call: body["functionCall"], + temperature: body["temperature"], + top_p: body["topP"], + n: body["n"], + max_tokens: body["maxTokens"], + stop: body["stop"], + presence_penalty: body["presencePenalty"], + frequency_penalty: body["frequencyPenalty"], + logit_bias: body["logitBias"], + user: body["user"], + stream: body["stream"], + }, + }); +} + +export async function _createChatCompletionDeserialize( + result: CreateChatCompletion200Response | CreateChatCompletionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + created: new Date(result.body["created"]), + model: result.body["model"], + choices: (result.body["choices"] ?? []).map((p) => ({ + index: p["index"], + message: { + role: p.message["role"] as any, + content: p.message["content"], + functionCall: !p.message.function_call + ? undefined + : { + name: p.message.function_call?.["name"], + arguments: p.message.function_call?.["arguments"], + }, + }, + finishReason: p["finish_reason"] as any, + })), + usage: !result.body.usage + ? undefined + : { + promptTokens: result.body.usage?.["prompt_tokens"], + completionTokens: result.body.usage?.["completion_tokens"], + totalTokens: result.body.usage?.["total_tokens"], + }, + }; +} + +export async function createChatCompletion( + context: Client, + body: CreateChatCompletionRequest, + options: CreateChatCompletionOptions = { requestOptions: {} } +): Promise { + const result = await _createChatCompletionSend(context, body, options); + return _createChatCompletionDeserialize(result); +} + +export function _createFineTuningJobSend( + context: Client, + job: CreateFineTuningJobRequest, + options: CreateFineTuningJobOptions = { requestOptions: {} } +): StreamableMethod< + CreateFineTuningJob200Response | CreateFineTuningJobDefaultResponse +> { + return context + .path("/fine_tuning/jobs") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + training_file: job["trainingFile"], + validation_file: job["validationFile"], + model: job["model"], + hyperparameters: !job.hyperparameters + ? undefined + : { n_epochs: job.hyperparameters?.["nEpochs"] }, + suffix: job["suffix"], + }, + }); +} + +export async function _createFineTuningJobDeserialize( + result: CreateFineTuningJob200Response | CreateFineTuningJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + finishedAt: + result.body["finished_at"] === null + ? null + : new Date(result.body["finished_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparameters: { + nEpochs: result.body.hyperparameters["n_epochs"] as any, + }, + trainingFile: result.body["training_file"], + validationFile: result.body["validation_file"], + resultFiles: result.body["result_files"], + trainedTokens: result.body["trained_tokens"], + error: + result.body.error === null + ? null + : { + message: result.body.error["message"], + code: result.body.error["code"], + param: result.body.error["param"], + }, + }; +} + +/** + * Creates a job that fine-tunes a specified model from a given dataset. 
+ * + * Response includes details of the enqueued job including job status and the name of the + * fine-tuned models once complete. + * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + */ +export async function createFineTuningJob( + context: Client, + job: CreateFineTuningJobRequest, + options: CreateFineTuningJobOptions = { requestOptions: {} } +): Promise { + const result = await _createFineTuningJobSend(context, job, options); + return _createFineTuningJobDeserialize(result); +} + +export function _listPaginatedFineTuningJobsSend( + context: Client, + options: ListPaginatedFineTuningJobsOptions = { requestOptions: {} } +): StreamableMethod< + | ListPaginatedFineTuningJobs200Response + | ListPaginatedFineTuningJobsDefaultResponse +> { + return context + .path("/fine_tuning/jobs") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { after: options?.after, limit: options?.limit }, + }); +} + +export async function _listPaginatedFineTuningJobsDeserialize( + result: + | ListPaginatedFineTuningJobs200Response + | ListPaginatedFineTuningJobsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + finishedAt: p["finished_at"] === null ? null : new Date(p["finished_at"]), + model: p["model"], + fineTunedModel: p["fine_tuned_model"], + organizationId: p["organization_id"], + status: p["status"] as any, + hyperparameters: { nEpochs: p.hyperparameters["n_epochs"] as any }, + trainingFile: p["training_file"], + validationFile: p["validation_file"], + resultFiles: p["result_files"], + trainedTokens: p["trained_tokens"], + error: + p.error === null + ? null + : { + message: p.error["message"], + code: p.error["code"], + param: p.error["param"], + }, + })), + hasMore: result.body["has_more"], + }; +} + +export async function listPaginatedFineTuningJobs( + context: Client, + options: ListPaginatedFineTuningJobsOptions = { requestOptions: {} } +): Promise { + const result = await _listPaginatedFineTuningJobsSend(context, options); + return _listPaginatedFineTuningJobsDeserialize(result); +} + +export function _retrieveFineTuningJobSend( + context: Client, + fineTuningJobId: string, + options: RetrieveFineTuningJobOptions = { requestOptions: {} } +): StreamableMethod< + RetrieveFineTuningJob200Response | RetrieveFineTuningJobDefaultResponse +> { + return context + .path("/fine_tuning/jobs/{fine_tuning_job_id}", fineTuningJobId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _retrieveFineTuningJobDeserialize( + result: + | RetrieveFineTuningJob200Response + | RetrieveFineTuningJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + finishedAt: + result.body["finished_at"] === null + ? 
null + : new Date(result.body["finished_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparameters: { + nEpochs: result.body.hyperparameters["n_epochs"] as any, + }, + trainingFile: result.body["training_file"], + validationFile: result.body["validation_file"], + resultFiles: result.body["result_files"], + trainedTokens: result.body["trained_tokens"], + error: + result.body.error === null + ? null + : { + message: result.body.error["message"], + code: result.body.error["code"], + param: result.body.error["param"], + }, + }; +} + +export async function retrieveFineTuningJob( + context: Client, + fineTuningJobId: string, + options: RetrieveFineTuningJobOptions = { requestOptions: {} } +): Promise { + const result = await _retrieveFineTuningJobSend( + context, + fineTuningJobId, + options + ); + return _retrieveFineTuningJobDeserialize(result); +} + +export function _listFineTuningEventsSend( + context: Client, + fineTuningJobId: string, + options: ListFineTuningEventsOptions = { requestOptions: {} } +): StreamableMethod< + ListFineTuningEvents200Response | ListFineTuningEventsDefaultResponse +> { + return context + .path("/fine_tuning/jobs/{fine_tuning_job_id}/events", fineTuningJobId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { after: options?.after, limit: options?.limit }, + }); +} + +export async function _listFineTuningEventsDeserialize( + result: ListFineTuningEvents200Response | ListFineTuningEventsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"] as any, + message: p["message"], + })), + }; +} + +export async function listFineTuningEvents( + context: Client, + fineTuningJobId: string, + options: ListFineTuningEventsOptions = { requestOptions: {} } +): Promise { + const result = await _listFineTuningEventsSend( + context, + fineTuningJobId, + options + ); + return _listFineTuningEventsDeserialize(result); +} + +export function _cancelFineTuningJobSend( + context: Client, + fineTuningJobId: string, + options: CancelFineTuningJobOptions = { requestOptions: {} } +): StreamableMethod< + CancelFineTuningJob200Response | CancelFineTuningJobDefaultResponse +> { + return context + .path("/fine_tuning/jobs/{fine_tuning_job_id}/cancel", fineTuningJobId) + .post({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _cancelFineTuningJobDeserialize( + result: CancelFineTuningJob200Response | CancelFineTuningJobDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + finishedAt: + result.body["finished_at"] === null + ? 
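+// Polling a job and reading its event log is a two-call sketch (placeholder
+// job ID):
+//
+//   const job = await retrieveFineTuningJob(context, "ftjob-abc123");
+//   if (job.status === "running") {
+//     const events = await listFineTuningEvents(context, job.id, { limit: 5 });
+//     for (const e of events.data) console.log(e.level, e.message);
+//   }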
null + : new Date(result.body["finished_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparameters: { + nEpochs: result.body.hyperparameters["n_epochs"] as any, + }, + trainingFile: result.body["training_file"], + validationFile: result.body["validation_file"], + resultFiles: result.body["result_files"], + trainedTokens: result.body["trained_tokens"], + error: + result.body.error === null + ? null + : { + message: result.body.error["message"], + code: result.body.error["code"], + param: result.body.error["param"], + }, + }; +} + +export async function cancelFineTuningJob( + context: Client, + fineTuningJobId: string, + options: CancelFineTuningJobOptions = { requestOptions: {} } +): Promise { + const result = await _cancelFineTuningJobSend( + context, + fineTuningJobId, + options + ); + return _cancelFineTuningJobDeserialize(result); +} + +export function _createCompletionSend( + context: Client, + body: CreateCompletionRequest, + options: CreateCompletionOptions = { requestOptions: {} } +): StreamableMethod< + CreateCompletion200Response | CreateCompletionDefaultResponse +> { + return context + .path("/completions") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + model: body["model"], + prompt: body["prompt"], + suffix: body["suffix"], + temperature: body["temperature"], + top_p: body["topP"], + n: body["n"], + max_tokens: body["maxTokens"], + stop: body["stop"], + presence_penalty: body["presencePenalty"], + frequency_penalty: body["frequencyPenalty"], + logit_bias: body["logitBias"], + user: body["user"], + stream: body["stream"], + logprobs: body["logprobs"], + echo: body["echo"], + best_of: body["bestOf"], + }, + }); +} + +export async function _createCompletionDeserialize( + result: CreateCompletion200Response | CreateCompletionDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + created: new Date(result.body["created"]), + model: result.body["model"], + choices: (result.body["choices"] ?? []).map((p) => ({ + index: p["index"], + text: p["text"], + logprobs: + p.logprobs === null + ? null + : { + tokens: p.logprobs["tokens"], + tokenLogprobs: p.logprobs["token_logprobs"], + topLogprobs: p.logprobs["top_logprobs"], + textOffset: p.logprobs["text_offset"], + }, + finishReason: p["finish_reason"] as any, + })), + usage: !result.body.usage + ? 
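+// Cancelling an in-progress job is a single call; the returned status should
+// then read "cancelled" (placeholder ID again):
+//
+//   const cancelled = await cancelFineTuningJob(context, "ftjob-abc123");
+//   console.log(cancelled.status);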
undefined + : { + promptTokens: result.body.usage?.["prompt_tokens"], + completionTokens: result.body.usage?.["completion_tokens"], + totalTokens: result.body.usage?.["total_tokens"], + }, + }; +} + +export async function createCompletion( + context: Client, + body: CreateCompletionRequest, + options: CreateCompletionOptions = { requestOptions: {} } +): Promise { + const result = await _createCompletionSend(context, body, options); + return _createCompletionDeserialize(result); +} + +export function _createEditSend( + context: Client, + edit: CreateEditRequest, + options: CreateEditOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/edits") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + model: edit["model"], + input: edit["input"], + instruction: edit["instruction"], + n: edit["n"], + temperature: edit["temperature"], + top_p: edit["topP"], + }, + }); +} + +export async function _createEditDeserialize( + result: CreateEdit200Response | CreateEditDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + created: new Date(result.body["created"]), + choices: (result.body["choices"] ?? []).map((p) => ({ + text: p["text"], + index: p["index"], + finishReason: p["finish_reason"] as any, + })), + usage: { + promptTokens: result.body.usage["prompt_tokens"], + completionTokens: result.body.usage["completion_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + }; +} + +export async function createEdit( + context: Client, + edit: CreateEditRequest, + options: CreateEditOptions = { requestOptions: {} } +): Promise { + const result = await _createEditSend(context, edit, options); + return _createEditDeserialize(result); +} + +export function _createEmbeddingSend( + context: Client, + embedding: CreateEmbeddingRequest, + options: CreateEmbeddingOptions = { requestOptions: {} } +): StreamableMethod< + CreateEmbedding200Response | CreateEmbeddingDefaultResponse +> { + return context + .path("/embeddings") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + model: embedding["model"], + input: embedding["input"], + user: embedding["user"], + }, + }); +} + +export async function _createEmbeddingDeserialize( + result: CreateEmbedding200Response | CreateEmbeddingDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + model: result.body["model"], + data: (result.body["data"] ?? []).map((p) => ({ + index: p["index"], + object: p["object"], + embedding: p["embedding"], + })), + usage: { + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + }; +} + +export async function createEmbedding( + context: Client, + embedding: CreateEmbeddingRequest, + options: CreateEmbeddingOptions = { requestOptions: {} } +): Promise { + const result = await _createEmbeddingSend(context, embedding, options); + return _createEmbeddingDeserialize(result); +} + +export function _listFilesSend( + context: Client, + options: ListFilesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/files") + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _listFilesDeserialize( + result: ListFiles200Response | ListFilesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? 
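+// A sketch of the completion and embedding operations defined above:
+//
+//   const completion = await createCompletion(context, {
+//     model: "babbage-002",
+//     prompt: "Once upon a time",
+//     maxTokens: 16,
+//   });
+//   const embeddings = await createEmbedding(context, {
+//     model: "text-embedding-ada-002",
+//     input: "hello world",
+//   });
+//   console.log(completion.choices[0].text, embeddings.data[0].embedding.length);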
[]).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + }; +} + +export async function listFiles( + context: Client, + options: ListFilesOptions = { requestOptions: {} } +): Promise { + const result = await _listFilesSend(context, options); + return _listFilesDeserialize(result); +} + +export function _createFileSend( + context: Client, + file: CreateFileRequest, + options: CreateFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/files") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? "multipart/form-data", + body: { + file: uint8ArrayToString(file["file"], "base64"), + purpose: file["purpose"], + }, + }); +} + +export async function _createFileDeserialize( + result: CreateFile200Response | CreateFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + bytes: result.body["bytes"], + createdAt: new Date(result.body["createdAt"]), + filename: result.body["filename"], + purpose: result.body["purpose"], + status: result.body["status"] as any, + statusDetails: result.body["status_details"], + }; +} + +export async function createFile( + context: Client, + file: CreateFileRequest, + options: CreateFileOptions = { requestOptions: {} } +): Promise { + const result = await _createFileSend(context, file, options); + return _createFileDeserialize(result); +} + +export function _retrieveFileSend( + context: Client, + fileId: string, + options: RetrieveFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/files/files/{file_id}", fileId) + .post({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _retrieveFileDeserialize( + result: RetrieveFile200Response | RetrieveFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + bytes: result.body["bytes"], + createdAt: new Date(result.body["createdAt"]), + filename: result.body["filename"], + purpose: result.body["purpose"], + status: result.body["status"] as any, + statusDetails: result.body["status_details"], + }; +} + +export async function retrieveFile( + context: Client, + fileId: string, + options: RetrieveFileOptions = { requestOptions: {} } +): Promise { + const result = await _retrieveFileSend(context, fileId, options); + return _retrieveFileDeserialize(result); +} + +export function _deleteFileSend( + context: Client, + fileId: string, + options: DeleteFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/files/files/{file_id}", fileId) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteFileDeserialize( + result: DeleteFile200Response | DeleteFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + deleted: result.body["deleted"], + }; +} + +export async function deleteFile( + context: Client, + fileId: string, + options: DeleteFileOptions = { requestOptions: {} } +): Promise { + const result = await _deleteFileSend(context, fileId, options); + return _deleteFileDeserialize(result); +} + +export function 
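+// File management round trip, assuming `data` holds the JSONL payload as a
+// Uint8Array (for example from node:fs/promises readFile):
+//
+//   const uploaded = await createFile(context, { file: data, purpose: "fine-tune" });
+//   const fetched = await retrieveFile(context, uploaded.id);
+//   await deleteFile(context, fetched.id);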
_downloadFileSend( + context: Client, + fileId: string, + options: DownloadFileOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/files/files/{file_id}/content", fileId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _downloadFileDeserialize( + result: DownloadFile200Response | DownloadFileDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return result.body; +} + +export async function downloadFile( + context: Client, + fileId: string, + options: DownloadFileOptions = { requestOptions: {} } +): Promise { + const result = await _downloadFileSend(context, fileId, options); + return _downloadFileDeserialize(result); +} + +export function _createFineTuneSend( + context: Client, + fineTune: CreateFineTuneRequest, + options: CreateFineTuneOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/fine-tunes") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + training_file: fineTune["trainingFile"], + validation_file: fineTune["validationFile"], + model: fineTune["model"], + n_epochs: fineTune["nEpochs"], + batch_size: fineTune["batchSize"], + learning_rate_multiplier: fineTune["learningRateMultiplier"], + prompt_loss_rate: fineTune["promptLossRate"], + compute_classification_metrics: + fineTune["computeClassificationMetrics"], + classification_n_classes: fineTune["classificationNClasses"], + classification_positive_class: fineTune["classificationPositiveClass"], + classification_betas: fineTune["classificationBetas"], + suffix: fineTune["suffix"], + }, + }); +} + +export async function _createFineTuneDeserialize( + result: CreateFineTune200Response | CreateFineTuneDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + updatedAt: new Date(result.body["updated_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparams: { + nEpochs: result.body.hyperparams["n_epochs"], + batchSize: result.body.hyperparams["batch_size"], + promptLossWeight: result.body.hyperparams["prompt_loss_weight"], + learningRateMultiplier: + result.body.hyperparams["learning_rate_multiplier"], + computeClassificationMetrics: + result.body.hyperparams["compute_classification_metrics"], + classificationPositiveClass: + result.body.hyperparams["classification_positive_class"], + classificationNClasses: + result.body.hyperparams["classification_n_classes"], + }, + trainingFiles: (result.body["training_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + validationFiles: (result.body["validation_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + resultFiles: (result.body["result_files"] ?? 
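+// Downloading a result file returns the raw response body unchanged, since
+// _downloadFileDeserialize above passes result.body straight through:
+//
+//   const contents = await downloadFile(context, "file-abc123");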
[]).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + events: (result.body["events"] ?? []).map((p) => ({ + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"], + message: p["message"], + })), + }; +} + +export async function createFineTune( + context: Client, + fineTune: CreateFineTuneRequest, + options: CreateFineTuneOptions = { requestOptions: {} } +): Promise { + const result = await _createFineTuneSend(context, fineTune, options); + return _createFineTuneDeserialize(result); +} + +export function _listFineTunesSend( + context: Client, + options: ListFineTunesOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/fine-tunes") + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _listFineTunesDeserialize( + result: ListFineTunes200Response | ListFineTunesDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + updatedAt: new Date(p["updated_at"]), + model: p["model"], + fineTunedModel: p["fine_tuned_model"], + organizationId: p["organization_id"], + status: p["status"] as any, + hyperparams: { + nEpochs: p.hyperparams["n_epochs"], + batchSize: p.hyperparams["batch_size"], + promptLossWeight: p.hyperparams["prompt_loss_weight"], + learningRateMultiplier: p.hyperparams["learning_rate_multiplier"], + computeClassificationMetrics: + p.hyperparams["compute_classification_metrics"], + classificationPositiveClass: + p.hyperparams["classification_positive_class"], + classificationNClasses: p.hyperparams["classification_n_classes"], + }, + trainingFiles: (p["training_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + validationFiles: (p["validation_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + resultFiles: (p["result_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + events: (p["events"] ?? 
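+// The legacy /fine-tunes operations mirror the /fine_tuning/jobs flow; a
+// minimal sketch with a placeholder file ID:
+//
+//   const fineTune = await createFineTune(context, { trainingFile: "file-abc123" });
+//   console.log(fineTune.status);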
[]).map((p) => ({ + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"], + message: p["message"], + })), + })), + }; +} + +export async function listFineTunes( + context: Client, + options: ListFineTunesOptions = { requestOptions: {} } +): Promise { + const result = await _listFineTunesSend(context, options); + return _listFineTunesDeserialize(result); +} + +export function _retrieveFineTuneSend( + context: Client, + fineTuneId: string, + options: RetrieveFineTuneOptions = { requestOptions: {} } +): StreamableMethod< + RetrieveFineTune200Response | RetrieveFineTuneDefaultResponse +> { + return context + .path("/fine-tunes/{fine_tune_id}", fineTuneId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _retrieveFineTuneDeserialize( + result: RetrieveFineTune200Response | RetrieveFineTuneDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + updatedAt: new Date(result.body["updated_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparams: { + nEpochs: result.body.hyperparams["n_epochs"], + batchSize: result.body.hyperparams["batch_size"], + promptLossWeight: result.body.hyperparams["prompt_loss_weight"], + learningRateMultiplier: + result.body.hyperparams["learning_rate_multiplier"], + computeClassificationMetrics: + result.body.hyperparams["compute_classification_metrics"], + classificationPositiveClass: + result.body.hyperparams["classification_positive_class"], + classificationNClasses: + result.body.hyperparams["classification_n_classes"], + }, + trainingFiles: (result.body["training_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + validationFiles: (result.body["validation_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + resultFiles: (result.body["result_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + events: (result.body["events"] ?? 
[]).map((p) => ({ + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"], + message: p["message"], + })), + }; +} + +export async function retrieveFineTune( + context: Client, + fineTuneId: string, + options: RetrieveFineTuneOptions = { requestOptions: {} } +): Promise { + const result = await _retrieveFineTuneSend(context, fineTuneId, options); + return _retrieveFineTuneDeserialize(result); +} + +export function _listFineTuneEventsSend( + context: Client, + fineTuneId: string, + options: ListFineTuneEventsOptions = { requestOptions: {} } +): StreamableMethod< + ListFineTuneEvents200Response | ListFineTuneEventsDefaultResponse +> { + return context + .path("/fine-tunes/{fine_tune_id}/events", fineTuneId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { stream: options?.stream }, + }); +} + +export async function _listFineTuneEventsDeserialize( + result: ListFineTuneEvents200Response | ListFineTuneEventsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? []).map((p) => ({ + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"], + message: p["message"], + })), + }; +} + +export async function listFineTuneEvents( + context: Client, + fineTuneId: string, + options: ListFineTuneEventsOptions = { requestOptions: {} } +): Promise { + const result = await _listFineTuneEventsSend(context, fineTuneId, options); + return _listFineTuneEventsDeserialize(result); +} + +export function _cancelFineTuneSend( + context: Client, + fineTuneId: string, + options: CancelFineTuneOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/fine-tunes/{fine_tune_id}/cancel", fineTuneId) + .post({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _cancelFineTuneDeserialize( + result: CancelFineTune200Response | CancelFineTuneDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + updatedAt: new Date(result.body["updated_at"]), + model: result.body["model"], + fineTunedModel: result.body["fine_tuned_model"], + organizationId: result.body["organization_id"], + status: result.body["status"] as any, + hyperparams: { + nEpochs: result.body.hyperparams["n_epochs"], + batchSize: result.body.hyperparams["batch_size"], + promptLossWeight: result.body.hyperparams["prompt_loss_weight"], + learningRateMultiplier: + result.body.hyperparams["learning_rate_multiplier"], + computeClassificationMetrics: + result.body.hyperparams["compute_classification_metrics"], + classificationPositiveClass: + result.body.hyperparams["classification_positive_class"], + classificationNClasses: + result.body.hyperparams["classification_n_classes"], + }, + trainingFiles: (result.body["training_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + validationFiles: (result.body["validation_files"] ?? 
[]).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + resultFiles: (result.body["result_files"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + bytes: p["bytes"], + createdAt: new Date(p["createdAt"]), + filename: p["filename"], + purpose: p["purpose"], + status: p["status"] as any, + statusDetails: p["status_details"], + })), + events: (result.body["events"] ?? []).map((p) => ({ + object: p["object"], + createdAt: new Date(p["created_at"]), + level: p["level"], + message: p["message"], + })), + }; +} + +export async function cancelFineTune( + context: Client, + fineTuneId: string, + options: CancelFineTuneOptions = { requestOptions: {} } +): Promise { + const result = await _cancelFineTuneSend(context, fineTuneId, options); + return _cancelFineTuneDeserialize(result); +} + +export function _listModelsSend( + context: Client, + options: ListModelsOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/models") + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _listModelsDeserialize( + result: ListModels200Response | ListModelsDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + object: result.body["object"], + data: (result.body["data"] ?? []).map((p) => ({ + id: p["id"], + object: p["object"], + created: new Date(p["created"]), + ownedBy: p["owned_by"], + })), + }; +} + +export async function listModels( + context: Client, + options: ListModelsOptions = { requestOptions: {} } +): Promise { + const result = await _listModelsSend(context, options); + return _listModelsDeserialize(result); +} + +export function _retrieveSend( + context: Client, + model: string, + options: RetrieveOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/models/{model}", model) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _retrieveDeserialize( + result: Retrieve200Response | RetrieveDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + created: new Date(result.body["created"]), + ownedBy: result.body["owned_by"], + }; +} + +export async function retrieve( + context: Client, + model: string, + options: RetrieveOptions = { requestOptions: {} } +): Promise { + const result = await _retrieveSend(context, model, options); + return _retrieveDeserialize(result); +} + +export function _deleteOperationSend( + context: Client, + model: string, + options: DeleteOptions = { requestOptions: {} } +): StreamableMethod< + DeleteOperation200Response | DeleteOperationDefaultResponse +> { + return context + .path("/models/{model}", model) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteOperationDeserialize( + result: DeleteOperation200Response | DeleteOperationDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + object: result.body["object"], + deleted: result.body["deleted"], + }; +} + +/** + * @fixme delete is a reserved word that cannot be used as an operation name. Please add @projectedName( + * "javascript", "") to the operation to override the generated name. 
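+// Listing models and retrieving one by ID with the helpers above:
+//
+//   const models = await listModels(context);
+//   const model = await retrieve(context, models.data[0].id);
+//   console.log(model.ownedBy);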
+ */ +export async function deleteOperation( + context: Client, + model: string, + options: DeleteOptions = { requestOptions: {} } +): Promise { + const result = await _deleteOperationSend(context, model, options); + return _deleteOperationDeserialize(result); +} + +export function _createImageSend( + context: Client, + image: CreateImageRequest, + options: CreateImageOptions = { requestOptions: {} } +): StreamableMethod { + return context + .path("/images/generations") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + prompt: image["prompt"], + n: image["n"], + size: image["size"], + response_format: image["responseFormat"], + user: image["user"], + }, + }); +} + +export async function _createImageDeserialize( + result: CreateImage200Response | CreateImageDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + created: new Date(result.body["created"]), + data: (result.body["data"] ?? []).map((p) => ({ + url: p["url"], + b64Json: + typeof p["b64_json"] === "string" + ? stringToUint8Array(p["b64_json"], "base64") + : p["b64_json"], + })), + }; +} + +export async function createImage( + context: Client, + image: CreateImageRequest, + options: CreateImageOptions = { requestOptions: {} } +): Promise { + const result = await _createImageSend(context, image, options); + return _createImageDeserialize(result); +} + +export function _createImageEditSend( + context: Client, + image: CreateImageEditRequest, + options: CreateImageEditOptions = { requestOptions: {} } +): StreamableMethod< + CreateImageEdit200Response | CreateImageEditDefaultResponse +> { + return context + .path("/images/edits") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? "multipart/form-data", + body: { + prompt: image["prompt"], + image: uint8ArrayToString(image["image"], "base64"), + mask: + image["mask"] !== undefined + ? uint8ArrayToString(image["mask"], "base64") + : undefined, + n: image["n"], + size: image["size"], + response_format: image["responseFormat"], + user: image["user"], + }, + }); +} + +export async function _createImageEditDeserialize( + result: CreateImageEdit200Response | CreateImageEditDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + created: new Date(result.body["created"]), + data: (result.body["data"] ?? []).map((p) => ({ + url: p["url"], + b64Json: + typeof p["b64_json"] === "string" + ? stringToUint8Array(p["b64_json"], "base64") + : p["b64_json"], + })), + }; +} + +export async function createImageEdit( + context: Client, + image: CreateImageEditRequest, + options: CreateImageEditOptions = { requestOptions: {} } +): Promise { + const result = await _createImageEditSend(context, image, options); + return _createImageEditDeserialize(result); +} + +export function _createImageVariationSend( + context: Client, + image: CreateImageVariationRequest, + options: CreateImageVariationOptions = { requestOptions: {} } +): StreamableMethod< + CreateImageVariation200Response | CreateImageVariationDefaultResponse +> { + return context + .path("/images/variations") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? 
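+// Image generation sketch; assuming responseFormat accepts "b64_json" as in
+// the public OpenAI API, the deserializer above decodes b64Json into a
+// Uint8Array:
+//
+//   const images = await createImage(context, {
+//     prompt: "a watercolor fox",
+//     n: 1,
+//     responseFormat: "b64_json",
+//   });
+//   console.log(images.data[0].b64Json?.length);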
"multipart/form-data", + body: { + image: uint8ArrayToString(image["image"], "base64"), + n: image["n"], + size: image["size"], + response_format: image["responseFormat"], + user: image["user"], + }, + }); +} + +export async function _createImageVariationDeserialize( + result: CreateImageVariation200Response | CreateImageVariationDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + created: new Date(result.body["created"]), + data: (result.body["data"] ?? []).map((p) => ({ + url: p["url"], + b64Json: + typeof p["b64_json"] === "string" + ? stringToUint8Array(p["b64_json"], "base64") + : p["b64_json"], + })), + }; +} + +export async function createImageVariation( + context: Client, + image: CreateImageVariationRequest, + options: CreateImageVariationOptions = { requestOptions: {} } +): Promise { + const result = await _createImageVariationSend(context, image, options); + return _createImageVariationDeserialize(result); +} + +export function _createModerationSend( + context: Client, + content: CreateModerationRequest, + options: CreateModerationOptions = { requestOptions: {} } +): StreamableMethod< + CreateModeration200Response | CreateModerationDefaultResponse +> { + return context + .path("/moderations") + .post({ + ...operationOptionsToRequestParameters(options), + body: { input: content["input"], model: content["model"] }, + }); +} + +export async function _createModerationDeserialize( + result: CreateModeration200Response | CreateModerationDefaultResponse +): Promise { + if (isUnexpected(result)) { + throw result.body; + } + + return { + id: result.body["id"], + model: result.body["model"], + results: (result.body["results"] ?? []).map((p) => ({ + flagged: p["flagged"], + categories: { + hate: p.categories["hate"], + "hate/threatening": p.categories["hate/threatening"], + harassment: p.categories["harassment"], + "harassment/threatening": p.categories["harassment/threatening"], + selfHarm: p.categories["self-harm"], + "selfHarm/intent": p.categories["self-harm/intent"], + "selfHarm/instructive": p.categories["self-harm/instructive"], + sexual: p.categories["sexual"], + "sexual/minors": p.categories["sexual/minors"], + violence: p.categories["violence"], + "violence/graphic": p.categories["violence/graphic"], + }, + categoryScores: { + hate: p.category_scores["hate"], + "hate/threatening": p.category_scores["hate/threatening"], + harassment: p.category_scores["harassment"], + "harassment/threatening": p.category_scores["harassment/threatening"], + selfHarm: p.category_scores["self-harm"], + "selfHarm/intent": p.category_scores["self-harm/intent"], + "selfHarm/instructive": p.category_scores["self-harm/instructive"], + sexual: p.category_scores["sexual"], + "sexual/minors": p.category_scores["sexual/minors"], + violence: p.category_scores["violence"], + "violence/graphic": p.category_scores["violence/graphic"], + }, + })), + }; +} + +export async function createModeration( + context: Client, + content: CreateModerationRequest, + options: CreateModerationOptions = { requestOptions: {} } +): Promise { + const result = await _createModerationSend(context, content, options); + return _createModerationDeserialize(result); +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts new file mode 100644 index 0000000000..01ea4b6d8f --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts @@ -0,0 
+1,76 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export { OpenAIClient, OpenAIClientOptions } from "./OpenAIClient.js"; +export { + CreateTranscriptionRequest, + CreateTranscriptionResponse, + Error, + CreateTranslationRequest, + CreateTranslationResponse, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + CompletionUsage, + CreateFineTuningJobRequest, + FineTuningJob, + ListPaginatedFineTuningJobsResponse, + ListFineTuningJobEventsResponse, + FineTuningJobEvent, + CreateCompletionRequest, + CreateCompletionResponse, + CreateEditRequest, + CreateEditResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + ListFilesResponse, + OpenAIFile, + CreateFileRequest, + DeleteFileResponse, + CreateFineTuneRequest, + FineTune, + FineTuneEvent, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateImageRequest, + ImagesResponse, + Image, + CreateImageEditRequest, + CreateImageVariationRequest, + CreateModerationRequest, + CreateModerationResponse, + CreateTranscriptionOptions, + CreateTranslationOptions, + CreateChatCompletionOptions, + CreateFineTuningJobOptions, + ListPaginatedFineTuningJobsOptions, + RetrieveFineTuningJobOptions, + ListFineTuningEventsOptions, + CancelFineTuningJobOptions, + CreateCompletionOptions, + CreateEditOptions, + CreateEmbeddingOptions, + ListFilesOptions, + CreateFileOptions, + RetrieveFileOptions, + DeleteFileOptions, + DownloadFileOptions, + CreateFineTuneOptions, + ListFineTunesOptions, + RetrieveFineTuneOptions, + ListFineTuneEventsOptions, + CancelFineTuneOptions, + ListModelsOptions, + RetrieveOptions, + DeleteOptions, + CreateImageOptions, + CreateImageEditOptions, + CreateImageVariationOptions, + CreateModerationOptions, +} from "./models/index.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/logger.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/logger.ts new file mode 100644 index 0000000000..6bbf0d62fc --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/logger.ts @@ -0,0 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { createClientLogger } from "@azure/logger"; +export const logger = createClientLogger("openai-generic"); diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts new file mode 100644 index 0000000000..26a4ddff63 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +export { + CreateTranscriptionRequest, + CreateTranscriptionResponse, + Error, + CreateTranslationRequest, + CreateTranslationResponse, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + CompletionUsage, + CreateFineTuningJobRequest, + FineTuningJob, + ListPaginatedFineTuningJobsResponse, + ListFineTuningJobEventsResponse, + FineTuningJobEvent, + CreateCompletionRequest, + CreateCompletionResponse, + CreateEditRequest, + CreateEditResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + ListFilesResponse, + OpenAIFile, + CreateFileRequest, + DeleteFileResponse, + CreateFineTuneRequest, + FineTune, + FineTuneEvent, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateImageRequest, + ImagesResponse, + Image, + CreateImageEditRequest, + CreateImageVariationRequest, + CreateModerationRequest, + CreateModerationResponse, +} from "./models.js"; +export { + CreateTranscriptionOptions, + CreateTranslationOptions, + CreateChatCompletionOptions, + CreateFineTuningJobOptions, + ListPaginatedFineTuningJobsOptions, + RetrieveFineTuningJobOptions, + ListFineTuningEventsOptions, + CancelFineTuningJobOptions, + CreateCompletionOptions, + CreateEditOptions, + CreateEmbeddingOptions, + ListFilesOptions, + CreateFileOptions, + RetrieveFileOptions, + DeleteFileOptions, + DownloadFileOptions, + CreateFineTuneOptions, + ListFineTunesOptions, + RetrieveFineTuneOptions, + ListFineTuneEventsOptions, + CancelFineTuneOptions, + ListModelsOptions, + RetrieveOptions, + DeleteOptions, + CreateImageOptions, + CreateImageEditOptions, + CreateImageVariationOptions, + CreateModerationOptions, +} from "./options.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts new file mode 100644 index 0000000000..70b641fc9e --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts @@ -0,0 +1,914 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export interface CreateTranscriptionRequest { + /** + * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + file: Uint8Array; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + temperature?: number; + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + * and latency. 
+ */ + language?: string; +} + +export interface CreateTranscriptionResponse { + text: string; +} + +export interface Error { + type: string; + message: string; + param: string | null; + code: string | null; +} + +export interface CreateTranslationRequest { + /** + * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + file: Uint8Array; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + temperature?: number; +} + +export interface CreateTranslationResponse { + text: string; +} + +export interface CreateChatCompletionRequest { + /** + * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + * table for details on which models work with the Chat API. + */ + model: + | string + | "gpt4" + | "gpt-4-0314" + | "gpt-4-0613" + | "gpt-4-32k" + | "gpt-4-32k-0314" + | "gpt-4-32k-0613" + | "gpt-3.5-turbo" + | "gpt-3.5-turbo-16k" + | "gpt-3.5-turbo-0301" + | "gpt-3.5-turbo-0613" + | "gpt-3.5-turbo-16k-0613"; + /** + * A list of messages comprising the conversation so far. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + */ + messages: ChatCompletionRequestMessage[]; + /** A list of functions the model may generate JSON inputs for. */ + functions?: ChatCompletionFunctions[]; + /** + * Controls how the model responds to function calls. `none` means the model does not call a + * function, and responds to the end-user. `auto` means the model can pick between an end-user or + * calling a function. Specifying a particular function via `{\"name":\ \"my_function\"}` forces the + * model to call that function. `none` is the default when no functions are present. `auto` is the + * default if functions are present. + */ + functionCall?: "none" | "auto" | ChatCompletionFunctionCallOption; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + topP?: number | null; + /** + * How many completions to generate for each prompt. 
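+// As the doc comments above recommend, adjust temperature or topP but not
+// both; a request literal might look like:
+//
+//   const request: CreateChatCompletionRequest = {
+//     model: "gpt-3.5-turbo",
+//     messages: [{ role: "user", content: "Summarize this file." }],
+//     temperature: 0.2, // leave topP at its default
+//   };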
+ * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + n?: number | null; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + maxTokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: string | string[] | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presencePenalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequencyPenalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logitBias?: Record; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: string; + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + */ + stream?: boolean | null; +} + +export interface ChatCompletionRequestMessage { + /** The role of the messages author. One of `system`, `user`, `assistant`, or `function`. */ + role: "system" | "user" | "assistant" | "function"; + /** + * The contents of the message. `content` is required for all messages, and may be null for + * assistant messages with function calls. + */ + content: string | null; + /** + * The name of the author of this message. `name` is required if role is `function`, and it + * should be the name of the function whose response is in the `content`. May contain a-z, + * A-Z, 0-9, and underscores, with a maximum length of 64 characters. + */ + name?: string; + /** The name and arguments of a function that should be called, as generated by the model. */ + functionCall?: any; +} + +export interface ChatCompletionFunctions { + /** + * The name of the function to be called. 
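+// Message construction sketch: content may be null only for assistant
+// messages that carry a functionCall:
+//
+//   const messages: ChatCompletionRequestMessage[] = [
+//     { role: "system", content: "You are terse." },
+//     { role: "user", content: "ping" },
+//   ];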
Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string; + /** + * A description of what the function does, used by the model to choose when and how to call the + * function. + */ + description?: string; + /** + * The parameters the functions accepts, described as a JSON Schema object. See the + * [guide](/docs/guides/gpt/function-calling) for examples, and the + * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + * about the format.\n\nTo describe a function that accepts no parameters, provide the value + * `{\"type\": \"object\", \"properties\": {}}`. + */ + parameters: Record; +} + +export interface ChatCompletionFunctionCallOption { + /** The name of the function to call. */ + name: string; +} + +/** Represents a chat completion response returned by model, based on the provided input. */ +export interface CreateChatCompletionResponse { + /** A unique identifier for the chat completion. */ + id: string; + /** The object type, which is always `chat.completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the chat completion was created. */ + created: Date; + /** The model used for the chat completion. */ + model: string; + /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + choices: any[]; + usage?: CompletionUsage; +} + +/** Usage statistics for the completion request. */ +export interface CompletionUsage { + /** Number of tokens in the prompt. */ + promptTokens: number; + /** Number of tokens in the generated completion */ + completionTokens: number; + /** Total number of tokens used in the request (prompt + completion). */ + totalTokens: number; +} + +export interface CreateFineTuningJobRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + trainingFile: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + validationFile?: string | null; + /** + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + */ + model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: any; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + */ + suffix?: string | null; +} + +export interface FineTuningJob { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine_tuning.job". 
*/ + object: "fine_tuning.job"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + createdAt: Date; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. + */ + finishedAt: Date | null; + /** The base model that is being fine-tuned. */ + model: string; + /** + * The name of the fine-tuned model that is being created. The value will be null if the + * fine-tuning job is still running. + */ + fineTunedModel: string | null; + /** The organization that owns the fine-tuning job. */ + organizationId: string; + /** + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: + | "created" + | "pending" + | "running" + | "succeeded" + | "failed" + | "cancelled"; + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + hyperparameters: any; + /** + * The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + trainingFile: string; + /** + * The file ID used for validation. You can retrieve the validation results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + validationFile: string | null; + /** + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + resultFiles: string[]; + /** + * The total number of billable tokens processed by this fine tuning job. The value will be null + * if the fine-tuning job is still running. + */ + trainedTokens: number | null; + /** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. + */ + error: any; +} + +export interface ListPaginatedFineTuningJobsResponse { + object: string; + data: FineTuningJob[]; + hasMore: boolean; +} + +export interface ListFineTuningJobEventsResponse { + object: string; + data: FineTuningJobEvent[]; +} + +export interface FineTuningJobEvent { + id: string; + object: string; + createdAt: Date; + level: "info" | "warn" | "error"; + message: string; +} + +export interface CreateCompletionRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + model: + | string + | "babbage-002" + | "davinci-002" + | "text-davinci-003" + | "text-davinci-002" + | "text-davinci-001" + | "code-davinci-002" + | "text-curie-001" + | "text-babbage-001" + | "text-ada-001"; + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. + * + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. + */ + prompt: string | string[] | number[] | number[][] | null; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. 
+ * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + topP?: number | null; + /** + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + n?: number | null; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + maxTokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: string | string[] | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presencePenalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequencyPenalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logitBias?: Record; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: string; + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + */ + stream?: boolean | null; + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + * elements in the response. + * + * The maximum value for `logprobs` is 5. 
*/ + logprobs?: number | null; + /** Echo back the prompt in addition to the completion. */ + echo?: boolean | null; + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + bestOf?: number | null; +} + +/** + * Represents a completion response from the API. Note: both the streamed and non-streamed response + * objects share the same shape (unlike the chat endpoint). + */ +export interface CreateCompletionResponse { + /** A unique identifier for the completion. */ + id: string; + /** The object type, which is always `text_completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the completion was created. */ + created: Date; + /** The model used for the completion. */ + model: string; + /** The list of completion choices the model generated for the input. */ + choices: any[]; + usage?: CompletionUsage; +} + +export interface CreateEditRequest { + /** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` + * model with this endpoint. + */ + model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; + /** The input text to use as a starting point for the edit. */ + input?: string | null; + /** The instruction that tells the model how to edit the prompt. */ + instruction: string; + /** How many edits to generate for the input and instruction. */ + n?: number | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + topP?: number | null; +} + +export interface CreateEditResponse { + /** The object type, which is always `edit`. */ + object: "edit"; + /** The Unix timestamp (in seconds) of when the edit was created. */ + created: Date; + /** A list of edit choices. Can be more than one if `n` is greater than 1. */ + choices: any[]; + usage: CompletionUsage; +} + +export interface CreateEmbeddingRequest { + /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ + model: string | "text-embedding-ada-002"; + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + * single request, pass an array of strings or array of token arrays. Each input must not exceed + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string.
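+ * + * An illustrative request body (hypothetical values): `{ "model": "text-embedding-ada-002", "input": ["first text", "second text"] }` + * embeds two inputs in a single call.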
+ * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + input: string | string[] | number[] | number[][]; + user?: string; +} + +export interface CreateEmbeddingResponse { + /** The object type, which is always "embedding". */ + object: "embedding"; + /** The name of the model used to generate the embedding. */ + model: string; + /** The list of embeddings generated by the model. */ + data: Embedding[]; + /** The usage information for the request. */ + usage: any; +} + +/** Represents an embedding vector returned by the embedding endpoint. */ +export interface Embedding { + /** The index of the embedding in the list of embeddings. */ + index: number; + /** The object type, which is always "embedding". */ + object: "embedding"; + /** + * The embedding vector, which is a list of floats. The length of the vector depends on the model, as + * listed in the [embedding guide](/docs/guides/embeddings). + */ + embedding: number[]; +} + +export interface ListFilesResponse { + object: string; + data: OpenAIFile[]; +} + +/** The `File` object represents a document that has been uploaded to OpenAI. */ +export interface OpenAIFile { + /** The file identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "file". */ + object: "file"; + /** The size of the file in bytes. */ + bytes: number; + /** The Unix timestamp (in seconds) for when the file was created. */ + createdAt: Date; + /** The name of the file. */ + filename: string; + /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ + purpose: string; + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, + * `error`, `deleting` or `deleted`. + */ + status: + | "uploaded" + | "processed" + | "pending" + | "error" + | "deleting" + | "deleted"; + /** + * Additional details about the status of the file. If the file is in the `error` state, this will + * include a message describing the error. + */ + statusDetails?: string | null; +} + +export interface CreateFileRequest { + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + */ + file: Uint8Array; + /** + * The intended purpose of the uploaded documents. Use "fine-tune" for + * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the + * uploaded file. + */ + purpose: string; +} + +export interface DeleteFileResponse { + id: string; + object: string; + deleted: boolean; +} + +export interface CreateFineTuneRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file, where each training example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + trainingFile: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning.
These metrics can be viewed in the + * [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * Your train and validation data should be mutually exclusive. + * + * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + validationFile?: string | null; + /** + * The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + * "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + * about these models, see the [Models](/docs/models) documentation. + */ + model?: string | "ada" | "babbage" | "curie" | "davinci" | null; + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the + * training dataset. + */ + nEpochs?: number | null; + /** + * The batch size to use for training. The batch size is the number of training examples used to + * train a single forward and backward pass. + * + * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + * in the training set, capped at 256; in general, we've found that larger batch sizes tend to + * work better for larger datasets. + */ + batchSize?: number | null; + /** + * The learning rate multiplier to use for training. The fine-tuning learning rate is the original + * learning rate used for pretraining multiplied by this value. + * + * By default, the learning rate multiplier is 0.05, 0.1, or 0.2, depending on the final + * `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + * recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + * results. + */ + learningRateMultiplier?: number | null; + /** + * The weight to use for loss on the prompt tokens. This controls how much the model tries to + * learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + * and can add a stabilizing effect to training when completions are short. + * + * If prompts are extremely long (relative to completions), it may make sense to reduce this + * weight so as to avoid over-prioritizing learning the prompt. + */ + promptLossRate?: number | null; + /** + * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + * validation set at the end of every epoch. These metrics can be viewed in the + * [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * + * In order to compute classification metrics, you must provide a `validation_file`. Additionally, + * you must specify `classification_n_classes` for multiclass classification or + * `classification_positive_class` for binary classification. + */ + computeClassificationMetrics?: boolean | null; + /** + * The number of classes in a classification task. + * + * This parameter is required for multiclass classification. + */ + classificationNClasses?: number | null; + /** + * The positive class in binary classification. + * + * This parameter is needed to generate precision, recall, and F1 metrics when doing binary + * classification. + */ + classificationPositiveClass?: string | null; + /** + * If this is provided, we calculate F-beta scores at the specified beta values.
The F-beta score + * is a generalization of F-1 score. This is only used for binary classification. + * + * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + * beta score puts more weight on recall and less on precision. A smaller beta score puts more + * weight on precision and less on recall. + */ + classificationBetas?: number[] | null; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + */ + suffix?: string | null; +} + +/** The `FineTune` object represents a legacy fine-tune job that has been created through the API. */ +export interface FineTune { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine-tune". */ + object: "fine-tune"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + createdAt: Date; + /** The Unix timestamp (in seconds) for when the fine-tuning job was last updated. */ + updatedAt: Date; + /** The base model that is being fine-tuned. */ + model: string; + /** The name of the fine-tuned model that is being created. */ + fineTunedModel: string | null; + /** The organization that owns the fine-tuning job. */ + organizationId: string; + /** + * The current status of the fine-tuning job, which can be either `created`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: "created" | "running" | "succeeded" | "failed" | "cancelled"; + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + */ + hyperparams: any; + /** The list of files used for training. */ + trainingFiles: OpenAIFile[]; + /** The list of files used for validation. */ + validationFiles: OpenAIFile[]; + /** The compiled results files for the fine-tuning job. */ + resultFiles: OpenAIFile[]; + /** The list of events that have been observed in the lifecycle of the FineTune job. */ + events?: FineTuneEvent[]; +} + +export interface FineTuneEvent { + object: string; + createdAt: Date; + level: string; + message: string; +} + +export interface ListFineTunesResponse { + object: string; + data: FineTune[]; +} + +export interface ListFineTuneEventsResponse { + object: string; + data: FineTuneEvent[]; +} + +export interface ListModelsResponse { + object: string; + data: Model[]; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +export interface Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "model". */ + object: "model"; + /** The Unix timestamp (in seconds) when the model was created. */ + created: Date; + /** The organization that owns the model. */ + ownedBy: string; +} + +export interface DeleteModelResponse { + id: string; + object: string; + deleted: boolean; +} + +export interface CreateImageRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. 
Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json" | null; + user?: string; +} + +export interface ImagesResponse { + created: Date; + data: Image[]; +} + +/** Represents the URL or the content of an image generated by the OpenAI API. */ +export interface Image { + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: string; + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + b64Json?: Uint8Array; +} + +export interface CreateImageEditRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + * provided, image must have transparency, which will be used as the mask. + */ + image: Uint8Array; + /** + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + * as `image`. + */ + mask?: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateImageVariationRequest { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + * and square. + */ + image: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateModerationRequest { + /** The input text to classify. */ + input: string | string[]; + /** + * Two content moderation models are available: `text-moderation-stable` and + * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + * upgraded over time. This ensures you are always using our most accurate model. If you use + * `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy + * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + */ + model?: string | "text-moderation-latest" | "text-moderation-stable"; +} + +export interface CreateModerationResponse { + /** The unique identifier for the moderation request. */ + id: string; + /** The model used to generate the moderation results. */ + model: string; + /** A list of moderation objects. */ + results: any[]; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/options.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/options.ts new file mode 100644 index 0000000000..3f45e59f9a --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/options.ts @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
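+ +/* + * Usage sketch (editorial note, not generated code): each options interface below extends + * OperationOptions and is passed as the last argument of the matching convenience method on a + * hypothetical OpenAIClient instance, e.g. + * const jobs = await client.listPaginatedFineTuningJobs({ after: "ftjob-abc123", limit: 10 }); + * where `after` and `limit` mirror ListPaginatedFineTuningJobsOptions; the client and method + * names are assumed for illustration. + */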
+ +import { OperationOptions } from "@azure-rest/core-client"; + +export interface CreateTranscriptionOptions extends OperationOptions { + contentType?: string; +} + +export interface CreateTranslationOptions extends OperationOptions { + contentType?: string; +} + +export interface CreateChatCompletionOptions extends OperationOptions {} + +export interface CreateFineTuningJobOptions extends OperationOptions {} + +export interface ListPaginatedFineTuningJobsOptions extends OperationOptions { + /** Identifier for the last job from the previous pagination request. */ + after?: string; + /** Number of fine-tuning jobs to retrieve. */ + limit?: number; +} + +export interface RetrieveFineTuningJobOptions extends OperationOptions {} + +export interface ListFineTuningEventsOptions extends OperationOptions { + /** Identifier for the last event from the previous pagination request. */ + after?: string; + /** Number of events to retrieve. */ + limit?: number; +} + +export interface CancelFineTuningJobOptions extends OperationOptions {} + +export interface CreateCompletionOptions extends OperationOptions {} + +export interface CreateEditOptions extends OperationOptions {} + +export interface CreateEmbeddingOptions extends OperationOptions {} + +export interface ListFilesOptions extends OperationOptions {} + +export interface CreateFileOptions extends OperationOptions { + contentType?: string; +} + +export interface RetrieveFileOptions extends OperationOptions {} + +export interface DeleteFileOptions extends OperationOptions {} + +export interface DownloadFileOptions extends OperationOptions {} + +export interface CreateFineTuneOptions extends OperationOptions {} + +export interface ListFineTunesOptions extends OperationOptions {} + +export interface RetrieveFineTuneOptions extends OperationOptions {} + +export interface ListFineTuneEventsOptions extends OperationOptions { + /** + * Whether to stream events for the fine-tune job. If set to true, events will be sent as + * data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available. The stream will terminate with a `data: [DONE]` message when the + * job is finished (succeeded, cancelled, or failed). + * + * If set to false, only events generated so far will be returned. + */ + stream?: boolean; +} + +export interface CancelFineTuneOptions extends OperationOptions {} + +export interface ListModelsOptions extends OperationOptions {} + +export interface RetrieveOptions extends OperationOptions {} + +export interface DeleteOptions extends OperationOptions {} + +export interface CreateImageOptions extends OperationOptions {} + +export interface CreateImageEditOptions extends OperationOptions { + contentType?: string; +} + +export interface CreateImageVariationOptions extends OperationOptions { + contentType?: string; +} + +export interface CreateModerationOptions extends OperationOptions {} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/clientDefinitions.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/clientDefinitions.ts new file mode 100644 index 0000000000..cbb44665c7 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/clientDefinitions.ts @@ -0,0 +1,358 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
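+ +/* + * Usage sketch (editorial note, not generated code): the Routes interface below is consumed + * through `client.path(...)` on an OpenAIContext, and `isUnexpected` from ./isUnexpected.js + * narrows the returned union to the error case, e.g. + * const response = await client.path("/models/{model}", "text-davinci-003").get(); + * if (isUnexpected(response)) { throw new Error("retrieving the model failed"); } + * The client instance and model ID are assumed for illustration. + */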
+ +import { + CreateTranscriptionParameters, + CreateTranslationParameters, + CreateChatCompletionParameters, + CreateFineTuningJobParameters, + ListPaginatedFineTuningJobsParameters, + RetrieveFineTuningJobParameters, + ListFineTuningEventsParameters, + CancelFineTuningJobParameters, + CreateCompletionParameters, + CreateEditParameters, + CreateEmbeddingParameters, + ListFilesParameters, + CreateFileParameters, + RetrieveFileParameters, + DeleteFileParameters, + DownloadFileParameters, + CreateFineTuneParameters, + ListFineTunesParameters, + RetrieveFineTuneParameters, + ListFineTuneEventsParameters, + CancelFineTuneParameters, + ListModelsParameters, + RetrieveParameters, + DeleteParameters, + CreateImageParameters, + CreateImageEditParameters, + CreateImageVariationParameters, + CreateModerationParameters, +} from "./parameters.js"; +import { + CreateTranscription200Response, + CreateTranscriptionDefaultResponse, + CreateTranslation200Response, + CreateTranslationDefaultResponse, + CreateChatCompletion200Response, + CreateChatCompletionDefaultResponse, + CreateFineTuningJob200Response, + CreateFineTuningJobDefaultResponse, + ListPaginatedFineTuningJobs200Response, + ListPaginatedFineTuningJobsDefaultResponse, + RetrieveFineTuningJob200Response, + RetrieveFineTuningJobDefaultResponse, + ListFineTuningEvents200Response, + ListFineTuningEventsDefaultResponse, + CancelFineTuningJob200Response, + CancelFineTuningJobDefaultResponse, + CreateCompletion200Response, + CreateCompletionDefaultResponse, + CreateEdit200Response, + CreateEditDefaultResponse, + CreateEmbedding200Response, + CreateEmbeddingDefaultResponse, + ListFiles200Response, + ListFilesDefaultResponse, + CreateFile200Response, + CreateFileDefaultResponse, + RetrieveFile200Response, + RetrieveFileDefaultResponse, + DeleteFile200Response, + DeleteFileDefaultResponse, + DownloadFile200Response, + DownloadFileDefaultResponse, + CreateFineTune200Response, + CreateFineTuneDefaultResponse, + ListFineTunes200Response, + ListFineTunesDefaultResponse, + RetrieveFineTune200Response, + RetrieveFineTuneDefaultResponse, + ListFineTuneEvents200Response, + ListFineTuneEventsDefaultResponse, + CancelFineTune200Response, + CancelFineTuneDefaultResponse, + ListModels200Response, + ListModelsDefaultResponse, + Retrieve200Response, + RetrieveDefaultResponse, + DeleteOperation200Response, + DeleteOperationDefaultResponse, + CreateImage200Response, + CreateImageDefaultResponse, + CreateImageEdit200Response, + CreateImageEditDefaultResponse, + CreateImageVariation200Response, + CreateImageVariationDefaultResponse, + CreateModeration200Response, + CreateModerationDefaultResponse, +} from "./responses.js"; +import { Client, StreamableMethod } from "@azure-rest/core-client"; + +export interface CreateTranscription { + post( + options: CreateTranscriptionParameters + ): StreamableMethod< + CreateTranscription200Response | CreateTranscriptionDefaultResponse + >; +} + +export interface CreateTranslation { + post( + options: CreateTranslationParameters + ): StreamableMethod< + CreateTranslation200Response | CreateTranslationDefaultResponse + >; +} + +export interface CreateChatCompletion { + post( + options?: CreateChatCompletionParameters + ): StreamableMethod< + CreateChatCompletion200Response | CreateChatCompletionDefaultResponse + >; +} + +export interface CreateFineTuningJob { + /** + * Creates a job that fine-tunes a specified model from a given dataset. 
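+ * + * An illustrative call, with a hypothetical file ID and a body shaped like + * CreateFineTuningJobRequest: + * `client.path("/fine_tuning/jobs").post({ body: { training_file: "file-abc123", model: "gpt-3.5-turbo" } })`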
+ * + * Response includes details of the enqueued job including job status and the name of the + * fine-tuned model once complete. + * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + */ + post( + options: CreateFineTuningJobParameters + ): StreamableMethod< + CreateFineTuningJob200Response | CreateFineTuningJobDefaultResponse + >; + get( + options?: ListPaginatedFineTuningJobsParameters + ): StreamableMethod< + | ListPaginatedFineTuningJobs200Response + | ListPaginatedFineTuningJobsDefaultResponse + >; +} + +export interface RetrieveFineTuningJob { + get( + options?: RetrieveFineTuningJobParameters + ): StreamableMethod< + RetrieveFineTuningJob200Response | RetrieveFineTuningJobDefaultResponse + >; +} + +export interface ListFineTuningEvents { + get( + options?: ListFineTuningEventsParameters + ): StreamableMethod< + ListFineTuningEvents200Response | ListFineTuningEventsDefaultResponse + >; +} + +export interface CancelFineTuningJob { + post( + options?: CancelFineTuningJobParameters + ): StreamableMethod< + CancelFineTuningJob200Response | CancelFineTuningJobDefaultResponse + >; +} + +export interface CreateCompletion { + post( + options?: CreateCompletionParameters + ): StreamableMethod< + CreateCompletion200Response | CreateCompletionDefaultResponse + >; +} + +export interface CreateEdit { + post( + options: CreateEditParameters + ): StreamableMethod<CreateEdit200Response | CreateEditDefaultResponse>; +} + +export interface CreateEmbedding { + post( + options: CreateEmbeddingParameters + ): StreamableMethod< + CreateEmbedding200Response | CreateEmbeddingDefaultResponse + >; +} + +export interface ListFiles { + get( + options?: ListFilesParameters + ): StreamableMethod<ListFiles200Response | ListFilesDefaultResponse>; + post( + options: CreateFileParameters + ): StreamableMethod<CreateFile200Response | CreateFileDefaultResponse>; +} + +export interface RetrieveFile { + post( + options?: RetrieveFileParameters + ): StreamableMethod<RetrieveFile200Response | RetrieveFileDefaultResponse>; + delete( + options?: DeleteFileParameters + ): StreamableMethod<DeleteFile200Response | DeleteFileDefaultResponse>; +} + +export interface DownloadFile { + get( + options?: DownloadFileParameters + ): StreamableMethod<DownloadFile200Response | DownloadFileDefaultResponse>; +} + +export interface CreateFineTune { + post( + options: CreateFineTuneParameters + ): StreamableMethod< + CreateFineTune200Response | CreateFineTuneDefaultResponse + >; + get( + options?: ListFineTunesParameters + ): StreamableMethod<ListFineTunes200Response | ListFineTunesDefaultResponse>; +} + +export interface RetrieveFineTune { + get( + options?: RetrieveFineTuneParameters + ): StreamableMethod< + RetrieveFineTune200Response | RetrieveFineTuneDefaultResponse + >; +} + +export interface ListFineTuneEvents { + get( + options?: ListFineTuneEventsParameters + ): StreamableMethod< + ListFineTuneEvents200Response | ListFineTuneEventsDefaultResponse + >; +} + +export interface CancelFineTune { + post( + options?: CancelFineTuneParameters + ): StreamableMethod< + CancelFineTune200Response | CancelFineTuneDefaultResponse + >; +} + +export interface ListModels { + get( + options?: ListModelsParameters + ): StreamableMethod<ListModels200Response | ListModelsDefaultResponse>; +} + +export interface Retrieve { + get( + options?: RetrieveParameters + ): StreamableMethod<Retrieve200Response | RetrieveDefaultResponse>; + delete( + options?: DeleteParameters + ): StreamableMethod< + DeleteOperation200Response | DeleteOperationDefaultResponse + >; +} + +export interface CreateImage { + post( + options: CreateImageParameters + ): StreamableMethod<CreateImage200Response | CreateImageDefaultResponse>; +} + +export interface CreateImageEdit { + post( + options: CreateImageEditParameters + ): StreamableMethod< + CreateImageEdit200Response | CreateImageEditDefaultResponse + >; +} + +export interface CreateImageVariation { + post( + options: CreateImageVariationParameters + ): StreamableMethod< + CreateImageVariation200Response |
CreateImageVariationDefaultResponse + >; +} + +export interface CreateModeration { + post( + options: CreateModerationParameters + ): StreamableMethod< + CreateModeration200Response | CreateModerationDefaultResponse + >; +} + +export interface Routes { + /** Resource for '/audio/transcriptions' has methods for the following verbs: post */ + (path: "/audio/transcriptions"): CreateTranscription; + /** Resource for '/audio/translations' has methods for the following verbs: post */ + (path: "/audio/translations"): CreateTranslation; + /** Resource for '/chat/completions' has methods for the following verbs: post */ + (path: "/chat/completions"): CreateChatCompletion; + /** Resource for '/fine_tuning/jobs' has methods for the following verbs: post, get */ + (path: "/fine_tuning/jobs"): CreateFineTuningJob; + /** Resource for '/fine_tuning/jobs/\{fine_tuning_job_id\}' has methods for the following verbs: get */ + ( + path: "/fine_tuning/jobs/{fine_tuning_job_id}", + fineTuningJobId: string + ): RetrieveFineTuningJob; + /** Resource for '/fine_tuning/jobs/\{fine_tuning_job_id\}/events' has methods for the following verbs: get */ + ( + path: "/fine_tuning/jobs/{fine_tuning_job_id}/events", + fineTuningJobId: string + ): ListFineTuningEvents; + /** Resource for '/fine_tuning/jobs/\{fine_tuning_job_id\}/cancel' has methods for the following verbs: post */ + ( + path: "/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + fineTuningJobId: string + ): CancelFineTuningJob; + /** Resource for '/completions' has methods for the following verbs: post */ + (path: "/completions"): CreateCompletion; + /** Resource for '/edits' has methods for the following verbs: post */ + (path: "/edits"): CreateEdit; + /** Resource for '/embeddings' has methods for the following verbs: post */ + (path: "/embeddings"): CreateEmbedding; + /** Resource for '/files' has methods for the following verbs: get, post */ + (path: "/files"): ListFiles; + /** Resource for '/files/files/\{file_id\}' has methods for the following verbs: post, delete */ + (path: "/files/files/{file_id}", fileId: string): RetrieveFile; + /** Resource for '/files/files/\{file_id\}/content' has methods for the following verbs: get */ + (path: "/files/files/{file_id}/content", fileId: string): DownloadFile; + /** Resource for '/fine-tunes' has methods for the following verbs: post, get */ + (path: "/fine-tunes"): CreateFineTune; + /** Resource for '/fine-tunes/\{fine_tune_id\}' has methods for the following verbs: get */ + (path: "/fine-tunes/{fine_tune_id}", fineTuneId: string): RetrieveFineTune; + /** Resource for '/fine-tunes/\{fine_tune_id\}/events' has methods for the following verbs: get */ + ( + path: "/fine-tunes/{fine_tune_id}/events", + fineTuneId: string + ): ListFineTuneEvents; + /** Resource for '/fine-tunes/\{fine_tune_id\}/cancel' has methods for the following verbs: post */ + ( + path: "/fine-tunes/{fine_tune_id}/cancel", + fineTuneId: string + ): CancelFineTune; + /** Resource for '/models' has methods for the following verbs: get */ + (path: "/models"): ListModels; + /** Resource for '/models/\{model\}' has methods for the following verbs: get, delete */ + (path: "/models/{model}", model: string): Retrieve; + /** Resource for '/images/generations' has methods for the following verbs: post */ + (path: "/images/generations"): CreateImage; + /** Resource for '/images/edits' has methods for the following verbs: post */ + (path: "/images/edits"): CreateImageEdit; + /** Resource for '/images/variations' has methods for the following verbs: post */ + 
(path: "/images/variations"): CreateImageVariation; + /** Resource for '/moderations' has methods for the following verbs: post */ + (path: "/moderations"): CreateModeration; +} + +export type OpenAIContext = Client & { + path: Routes; +}; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/index.ts new file mode 100644 index 0000000000..a2d1968f90 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/index.ts @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import OpenAIClient from "./openAIClient.js"; + +export * from "./openAIClient.js"; +export * from "./parameters.js"; +export * from "./responses.js"; +export * from "./clientDefinitions.js"; +export * from "./isUnexpected.js"; +export * from "./models.js"; +export * from "./outputModels.js"; + +export default OpenAIClient; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/isUnexpected.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/isUnexpected.ts new file mode 100644 index 0000000000..e2f3d5509b --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/isUnexpected.ts @@ -0,0 +1,356 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { + CreateTranscription200Response, + CreateTranscriptionDefaultResponse, + CreateTranslation200Response, + CreateTranslationDefaultResponse, + CreateChatCompletion200Response, + CreateChatCompletionDefaultResponse, + CreateFineTuningJob200Response, + CreateFineTuningJobDefaultResponse, + ListPaginatedFineTuningJobs200Response, + ListPaginatedFineTuningJobsDefaultResponse, + RetrieveFineTuningJob200Response, + RetrieveFineTuningJobDefaultResponse, + ListFineTuningEvents200Response, + ListFineTuningEventsDefaultResponse, + CancelFineTuningJob200Response, + CancelFineTuningJobDefaultResponse, + CreateCompletion200Response, + CreateCompletionDefaultResponse, + CreateEdit200Response, + CreateEditDefaultResponse, + CreateEmbedding200Response, + CreateEmbeddingDefaultResponse, + ListFiles200Response, + ListFilesDefaultResponse, + CreateFile200Response, + CreateFileDefaultResponse, + RetrieveFile200Response, + RetrieveFileDefaultResponse, + DeleteFile200Response, + DeleteFileDefaultResponse, + DownloadFile200Response, + DownloadFileDefaultResponse, + CreateFineTune200Response, + CreateFineTuneDefaultResponse, + ListFineTunes200Response, + ListFineTunesDefaultResponse, + RetrieveFineTune200Response, + RetrieveFineTuneDefaultResponse, + ListFineTuneEvents200Response, + ListFineTuneEventsDefaultResponse, + CancelFineTune200Response, + CancelFineTuneDefaultResponse, + ListModels200Response, + ListModelsDefaultResponse, + Retrieve200Response, + RetrieveDefaultResponse, + DeleteOperation200Response, + DeleteOperationDefaultResponse, + CreateImage200Response, + CreateImageDefaultResponse, + CreateImageEdit200Response, + CreateImageEditDefaultResponse, + CreateImageVariation200Response, + CreateImageVariationDefaultResponse, + CreateModeration200Response, + CreateModerationDefaultResponse, +} from "./responses.js"; + +const responseMap: Record = { + "POST /audio/transcriptions": ["200"], + "POST /audio/translations": ["200"], + "POST /chat/completions": ["200"], + "POST /fine_tuning/jobs": ["200"], + "GET /fine_tuning/jobs": ["200"], + "GET 
/fine_tuning/jobs/{fine_tuning_job_id}": ["200"], + "GET /fine_tuning/jobs/{fine_tuning_job_id}/events": ["200"], + "POST /fine_tuning/jobs/{fine_tuning_job_id}/cancel": ["200"], + "POST /completions": ["200"], + "POST /edits": ["200"], + "POST /embeddings": ["200"], + "GET /files": ["200"], + "POST /files": ["200"], + "POST /files/files/{file_id}": ["200"], + "DELETE /files/files/{file_id}": ["200"], + "GET /files/files/{file_id}/content": ["200"], + "POST /fine-tunes": ["200"], + "GET /fine-tunes": ["200"], + "GET /fine-tunes/{fine_tune_id}": ["200"], + "GET /fine-tunes/{fine_tune_id}/events": ["200"], + "POST /fine-tunes/{fine_tune_id}/cancel": ["200"], + "GET /models": ["200"], + "GET /models/{model}": ["200"], + "DELETE /models/{model}": ["200"], + "POST /images/generations": ["200"], + "POST /images/edits": ["200"], + "POST /images/variations": ["200"], + "POST /moderations": ["200"], +}; + +export function isUnexpected( + response: CreateTranscription200Response | CreateTranscriptionDefaultResponse +): response is CreateTranscriptionDefaultResponse; +export function isUnexpected( + response: CreateTranslation200Response | CreateTranslationDefaultResponse +): response is CreateTranslationDefaultResponse; +export function isUnexpected( + response: + | CreateChatCompletion200Response + | CreateChatCompletionDefaultResponse +): response is CreateChatCompletionDefaultResponse; +export function isUnexpected( + response: CreateFineTuningJob200Response | CreateFineTuningJobDefaultResponse +): response is CreateFineTuningJobDefaultResponse; +export function isUnexpected( + response: + | ListPaginatedFineTuningJobs200Response + | ListPaginatedFineTuningJobsDefaultResponse +): response is ListPaginatedFineTuningJobsDefaultResponse; +export function isUnexpected( + response: + | RetrieveFineTuningJob200Response + | RetrieveFineTuningJobDefaultResponse +): response is RetrieveFineTuningJobDefaultResponse; +export function isUnexpected( + response: + | ListFineTuningEvents200Response + | ListFineTuningEventsDefaultResponse +): response is ListFineTuningEventsDefaultResponse; +export function isUnexpected( + response: CancelFineTuningJob200Response | CancelFineTuningJobDefaultResponse +): response is CancelFineTuningJobDefaultResponse; +export function isUnexpected( + response: CreateCompletion200Response | CreateCompletionDefaultResponse +): response is CreateCompletionDefaultResponse; +export function isUnexpected( + response: CreateEdit200Response | CreateEditDefaultResponse +): response is CreateEditDefaultResponse; +export function isUnexpected( + response: CreateEmbedding200Response | CreateEmbeddingDefaultResponse +): response is CreateEmbeddingDefaultResponse; +export function isUnexpected( + response: ListFiles200Response | ListFilesDefaultResponse +): response is ListFilesDefaultResponse; +export function isUnexpected( + response: CreateFile200Response | CreateFileDefaultResponse +): response is CreateFileDefaultResponse; +export function isUnexpected( + response: RetrieveFile200Response | RetrieveFileDefaultResponse +): response is RetrieveFileDefaultResponse; +export function isUnexpected( + response: DeleteFile200Response | DeleteFileDefaultResponse +): response is DeleteFileDefaultResponse; +export function isUnexpected( + response: DownloadFile200Response | DownloadFileDefaultResponse +): response is DownloadFileDefaultResponse; +export function isUnexpected( + response: CreateFineTune200Response | CreateFineTuneDefaultResponse +): response is CreateFineTuneDefaultResponse; +export 
function isUnexpected( + response: ListFineTunes200Response | ListFineTunesDefaultResponse +): response is ListFineTunesDefaultResponse; +export function isUnexpected( + response: RetrieveFineTune200Response | RetrieveFineTuneDefaultResponse +): response is RetrieveFineTuneDefaultResponse; +export function isUnexpected( + response: ListFineTuneEvents200Response | ListFineTuneEventsDefaultResponse +): response is ListFineTuneEventsDefaultResponse; +export function isUnexpected( + response: CancelFineTune200Response | CancelFineTuneDefaultResponse +): response is CancelFineTuneDefaultResponse; +export function isUnexpected( + response: ListModels200Response | ListModelsDefaultResponse +): response is ListModelsDefaultResponse; +export function isUnexpected( + response: Retrieve200Response | RetrieveDefaultResponse +): response is RetrieveDefaultResponse; +export function isUnexpected( + response: DeleteOperation200Response | DeleteOperationDefaultResponse +): response is DeleteOperationDefaultResponse; +export function isUnexpected( + response: CreateImage200Response | CreateImageDefaultResponse +): response is CreateImageDefaultResponse; +export function isUnexpected( + response: CreateImageEdit200Response | CreateImageEditDefaultResponse +): response is CreateImageEditDefaultResponse; +export function isUnexpected( + response: + | CreateImageVariation200Response + | CreateImageVariationDefaultResponse +): response is CreateImageVariationDefaultResponse; +export function isUnexpected( + response: CreateModeration200Response | CreateModerationDefaultResponse +): response is CreateModerationDefaultResponse; +export function isUnexpected( + response: + | CreateTranscription200Response + | CreateTranscriptionDefaultResponse + | CreateTranslation200Response + | CreateTranslationDefaultResponse + | CreateChatCompletion200Response + | CreateChatCompletionDefaultResponse + | CreateFineTuningJob200Response + | CreateFineTuningJobDefaultResponse + | ListPaginatedFineTuningJobs200Response + | ListPaginatedFineTuningJobsDefaultResponse + | RetrieveFineTuningJob200Response + | RetrieveFineTuningJobDefaultResponse + | ListFineTuningEvents200Response + | ListFineTuningEventsDefaultResponse + | CancelFineTuningJob200Response + | CancelFineTuningJobDefaultResponse + | CreateCompletion200Response + | CreateCompletionDefaultResponse + | CreateEdit200Response + | CreateEditDefaultResponse + | CreateEmbedding200Response + | CreateEmbeddingDefaultResponse + | ListFiles200Response + | ListFilesDefaultResponse + | CreateFile200Response + | CreateFileDefaultResponse + | RetrieveFile200Response + | RetrieveFileDefaultResponse + | DeleteFile200Response + | DeleteFileDefaultResponse + | DownloadFile200Response + | DownloadFileDefaultResponse + | CreateFineTune200Response + | CreateFineTuneDefaultResponse + | ListFineTunes200Response + | ListFineTunesDefaultResponse + | RetrieveFineTune200Response + | RetrieveFineTuneDefaultResponse + | ListFineTuneEvents200Response + | ListFineTuneEventsDefaultResponse + | CancelFineTune200Response + | CancelFineTuneDefaultResponse + | ListModels200Response + | ListModelsDefaultResponse + | Retrieve200Response + | RetrieveDefaultResponse + | DeleteOperation200Response + | DeleteOperationDefaultResponse + | CreateImage200Response + | CreateImageDefaultResponse + | CreateImageEdit200Response + | CreateImageEditDefaultResponse + | CreateImageVariation200Response + | CreateImageVariationDefaultResponse + | CreateModeration200Response + | CreateModerationDefaultResponse +): response is + | 
CreateTranscriptionDefaultResponse + | CreateTranslationDefaultResponse + | CreateChatCompletionDefaultResponse + | CreateFineTuningJobDefaultResponse + | ListPaginatedFineTuningJobsDefaultResponse + | RetrieveFineTuningJobDefaultResponse + | ListFineTuningEventsDefaultResponse + | CancelFineTuningJobDefaultResponse + | CreateCompletionDefaultResponse + | CreateEditDefaultResponse + | CreateEmbeddingDefaultResponse + | ListFilesDefaultResponse + | CreateFileDefaultResponse + | RetrieveFileDefaultResponse + | DeleteFileDefaultResponse + | DownloadFileDefaultResponse + | CreateFineTuneDefaultResponse + | ListFineTunesDefaultResponse + | RetrieveFineTuneDefaultResponse + | ListFineTuneEventsDefaultResponse + | CancelFineTuneDefaultResponse + | ListModelsDefaultResponse + | RetrieveDefaultResponse + | DeleteOperationDefaultResponse + | CreateImageDefaultResponse + | CreateImageEditDefaultResponse + | CreateImageVariationDefaultResponse + | CreateModerationDefaultResponse { + const lroOriginal = response.headers["x-ms-original-url"]; + const url = new URL(lroOriginal ?? response.request.url); + const method = response.request.method; + let pathDetails = responseMap[`${method} ${url.pathname}`]; + if (!pathDetails) { + pathDetails = getParametrizedPathSuccess(method, url.pathname); + } + return !pathDetails.includes(response.status); +} + +function getParametrizedPathSuccess(method: string, path: string): string[] { + const pathParts = path.split("/"); + + // Traverse list to match the longest candidate + // matchedLen: the length of candidate path + // matchedValue: the matched status code array + let matchedLen = -1, + matchedValue: string[] = []; + + // Iterate the responseMap to find a match + for (const [key, value] of Object.entries(responseMap)) { + // Extracting the path from the map key which is in format + // GET /path/foo + if (!key.startsWith(method)) { + continue; + } + const candidatePath = getPathFromMapKey(key); + // Get each part of the url path + const candidateParts = candidatePath.split("/"); + + // track if we have found a match to return the values found. + let found = true; + for ( + let i = candidateParts.length - 1, j = pathParts.length - 1; + i >= 1 && j >= 1; + i--, j-- + ) { + if ( + candidateParts[i]?.startsWith("{") && + candidateParts[i]?.indexOf("}") !== -1 + ) { + const start = candidateParts[i]!.indexOf("}") + 1, + end = candidateParts[i]?.length; + // If the current part of the candidate is a "template" part + // Try to use the suffix of pattern to match the path + // {guid} ==> $ + // {guid}:export ==> :export$ + const isMatched = new RegExp( + `${candidateParts[i]?.slice(start, end)}` + ).test(pathParts[j] || ""); + + if (!isMatched) { + found = false; + break; + } + continue; + } + + // If the candidate part is not a template and + // the parts don't match mark the candidate as not found + // to move on with the next candidate path. 
+ if (candidateParts[i] !== pathParts[j]) { + found = false; + break; + } + } + + // We finished evaluating the current candidate parts + // Update the matched value if and only if we found the longer pattern + if (found && candidatePath.length > matchedLen) { + matchedLen = candidatePath.length; + matchedValue = value; + } + } + + return matchedValue; +} + +function getPathFromMapKey(mapKey: string): string { + const pathStart = mapKey.indexOf("/"); + return mapKey.slice(pathStart); +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/models.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/models.ts new file mode 100644 index 0000000000..9f608946b6 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/models.ts @@ -0,0 +1,604 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export interface CreateTranscriptionRequest { + /** + * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + file: string; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + temperature?: number; + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + * and latency. + */ + language?: string; +} + +export interface CreateTranslationRequest { + /** + * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + file: string; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + temperature?: number; +} + +export interface CreateChatCompletionRequest { + /** + * ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) + * table for details on which models work with the Chat API. + */ + model: + | string + | "gpt-4" + | "gpt-4-0314" + | "gpt-4-0613" + | "gpt-4-32k" + | "gpt-4-32k-0314" + | "gpt-4-32k-0613" + | "gpt-3.5-turbo" + | "gpt-3.5-turbo-16k" + | "gpt-3.5-turbo-0301" + | "gpt-3.5-turbo-0613" + | "gpt-3.5-turbo-16k-0613"; + /** + * A list of messages comprising the conversation so far. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + */ + messages: Array<ChatCompletionRequestMessage>; + /** A list of functions the model may generate JSON inputs for. */ + functions?: Array<ChatCompletionFunctions>; + /** + * Controls how the model responds to function calls. `none` means the model does not call a + * function, and responds to the end-user. `auto` means the model can pick between responding to + * the end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the + * model to call that function. `none` is the default when no functions are present. `auto` is the + * default if functions are present. + */ + function_call?: "none" | "auto" | ChatCompletionFunctionCallOption; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + /** + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + n?: number | null; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + max_tokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presence_penalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequency_penalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100.
Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logit_bias?: Record<string, number> | null; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: string; + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + */ + stream?: boolean | null; +} + +export interface ChatCompletionRequestMessage { + /** The role of the messages author. One of `system`, `user`, `assistant`, or `function`. */ + role: "system" | "user" | "assistant" | "function"; + /** + * The contents of the message. `content` is required for all messages, and may be null for + * assistant messages with function calls. + */ + content: string | null; + /** + * The name of the author of this message. `name` is required if role is `function`, and it + * should be the name of the function whose response is in the `content`. May contain a-z, + * A-Z, 0-9, and underscores, with a maximum length of 64 characters. + */ + name?: string; + /** The name and arguments of a function that should be called, as generated by the model. */ + function_call?: object; +} + +export interface ChatCompletionFunctions { + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string; + /** + * A description of what the function does, used by the model to choose when and how to call the + * function. + */ + description?: string; + /** + * The parameters the function accepts, described as a JSON Schema object. See the + * [guide](/docs/guides/gpt/function-calling) for examples, and the + * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation + * about the format. + * + * To describe a function that accepts no parameters, provide the value + * `{"type": "object", "properties": {}}`. + */ + parameters: ChatCompletionFunctionParameters; +} + +export interface ChatCompletionFunctionParameters + extends Record<string, any> {} + +export interface ChatCompletionFunctionCallOption { + /** The name of the function to call. */ + name: string; +} + +export interface CreateFineTuningJobRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + training_file: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file.
The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + validation_file?: string | null; + /** + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + */ + model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: object; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + */ + suffix?: string | null; +} + +export interface CreateCompletionRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + model: + | string + | "babbage-002" + | "davinci-002" + | "text-davinci-003" + | "text-davinci-002" + | "text-davinci-001" + | "code-davinci-002" + | "text-curie-001" + | "text-babbage-001" + | "text-ada-001"; + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. + * + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. + */ + prompt: Prompt; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + /** + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + n?: number | null; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + max_tokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. 
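+ * + * For example, `presence_penalty: 0.6` (an illustrative mid-range value) gives a gentle push + * toward new topics without strongly constraining sampling.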
+ * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presence_penalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequency_penalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logit_bias?: Record<string, number> | null; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ + user?: string; + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + */ + stream?: boolean | null; + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. + * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + * elements in the response. + * + * The maximum value for `logprobs` is 5. + */ + logprobs?: number | null; + /** Echo back the prompt in addition to the completion. */ + echo?: boolean | null; + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return; `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + best_of?: number | null; +} + +export interface CreateEditRequest { + /** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` + * model with this endpoint. + */ + model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; + /** The input text to use as a starting point for the edit. */ + input?: string | null; + /** The instruction that tells the model how to edit the prompt. */ + instruction: string; + /** How many edits to generate for the input and instruction. */ + n?: number | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic.
+ * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; +} + +export interface CreateEmbeddingRequest { + /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ + model: string | "text-embedding-ada-002"; + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + * single request, pass an array of strings or array of token arrays. Each input must not exceed + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + input: string | string[] | number[] | number[][]; + user?: string; +} + +export interface CreateFileRequest { + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + */ + file: string; + /** + * The intended purpose of the uploaded documents. Use "fine-tune" for + * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the + * uploaded file. + */ + purpose: string; +} + +export interface CreateFineTuneRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file, where each training example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + training_file: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the + * [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * Your train and validation data should be mutually exclusive. + * + * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + validation_file?: string | null; + /** + * The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + * "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + * about these models, see the [Models](/docs/models) documentation. + */ + model?: string | "ada" | "babbage" | "curie" | "davinci" | null; + /** + * The number of epochs to train the model for. 
An epoch refers to one full cycle through the + * training dataset. + */ + n_epochs?: number | null; + /** + * The batch size to use for training. The batch size is the number of training examples used to + * train a single forward and backward pass. + * + * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + * in the training set, capped at 256. In general, we've found that larger batch sizes tend to + * work better for larger datasets. + */ + batch_size?: number | null; + /** + * The learning rate multiplier to use for training. The fine-tuning learning rate is the original + * learning rate used for pretraining multiplied by this value. + * + * By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on the final + * `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + * recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + * results. + */ + learning_rate_multiplier?: number | null; + /** + * The weight to use for loss on the prompt tokens. This controls how much the model tries to + * learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + * and can add a stabilizing effect to training when completions are short. + * + * If prompts are extremely long (relative to completions), it may make sense to reduce this + * weight so as to avoid over-prioritizing learning the prompt. + */ + prompt_loss_rate?: number | null; + /** + * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the + * validation set at the end of every epoch. These metrics can be viewed in the + * [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * + * In order to compute classification metrics, you must provide a `validation_file`. Additionally, + * you must specify `classification_n_classes` for multiclass classification or + * `classification_positive_class` for binary classification. + */ + compute_classification_metrics?: boolean | null; + /** + * The number of classes in a classification task. + * + * This parameter is required for multiclass classification. + */ + classification_n_classes?: number | null; + /** + * The positive class in binary classification. + * + * This parameter is needed to generate precision, recall, and F-1 metrics when doing binary + * classification. + */ + classification_positive_class?: string | null; + /** + * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score + * is a generalization of the F-1 score. This is only used for binary classification. + * + * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger + * beta puts more weight on recall and less on precision. A smaller beta puts more + * weight on precision and less on recall. + */ + classification_betas?: number[] | null; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + */ + suffix?: string | null; +} + +export interface CreateImageRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images.
Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateImageEditRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + * provided, image must have transparency, which will be used as the mask. + */ + image: string; + /** + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + * as `image`. + */ + mask?: string; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateImageVariationRequest { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + * and square. + */ + image: string; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024" | null; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateModerationRequest { + /** The input text to classify. */ + input: string | string[]; + /** + * Two content moderation models are available: `text-moderation-stable` and + * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + * upgraded over time. This ensures you are always using our most accurate model. If you use + * `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy + * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + */ + model?: string | "text-moderation-latest" | "text-moderation-stable"; +} + +export type Stop = string | string[] | null; +export type Prompt = string | string[] | number[] | number[][] | null; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/openAIClient.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/openAIClient.ts new file mode 100644 index 0000000000..dbf159046a --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/openAIClient.ts @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
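Before the client factory below, a quick illustration of how the generated request models are meant to be consumed. This is a hedged sketch, not part of the generated output: the `/chat/completions` route string, the import paths, and the `OPENAI_API_KEY` variable name are assumptions, and `pathUnchecked` is the untyped escape hatch on `@azure-rest/core-client` clients (the typed routes live in `clientDefinitions.ts`, which this diff references but does not show).

```ts
// Usage sketch (illustrative, not generated): send a chat completion request
// using the CreateChatCompletionRequest model defined above.
import { AzureKeyCredential } from "@azure/core-auth";
import createClient from "./src/rest/openAIClient.js";
import { CreateChatCompletionRequest } from "./src/rest/models.js";

async function main(): Promise<void> {
  // OPENAI_API_KEY is an assumed environment variable name.
  const client = createClient(
    new AzureKeyCredential(process.env.OPENAI_API_KEY ?? "")
  );

  const body: CreateChatCompletionRequest = {
    model: "gpt-3.5-turbo",
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hello!" },
    ],
  };

  // pathUnchecked skips the compile-time route table; the typed overloads in
  // clientDefinitions.ts would be preferred in real code.
  const response = await client.pathUnchecked("/chat/completions").post({ body });
  if (response.status !== "200") {
    throw new Error(`Unexpected status: ${response.status}`);
  }
  console.log(response.body);
}

main().catch(console.error);
```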
+ +import { getClient, ClientOptions } from "@azure-rest/core-client"; +import { logger } from "../logger.js"; +import { KeyCredential } from "@azure/core-auth"; +import { OpenAIContext } from "./clientDefinitions.js"; + +/** + * Initialize a new instance of `OpenAIContext` + * @param credentials - uniquely identify client credential + * @param options - the parameter for all optional parameters + */ +export default function createClient( + credentials: KeyCredential, + options: ClientOptions = {} +): OpenAIContext { + const baseUrl = options.baseUrl ?? `https://api.openai.com/v1`; + options.apiVersion = options.apiVersion ?? "2.0.0"; + const userAgentInfo = `azsdk-js-openai-generic-rest/1.0.0-beta.1`; + const userAgentPrefix = + options.userAgentOptions && options.userAgentOptions.userAgentPrefix + ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}` + : `${userAgentInfo}`; + options = { + ...options, + userAgentOptions: { + userAgentPrefix, + }, + loggingOptions: { + logger: options.loggingOptions?.logger ?? logger.info, + }, + }; + + const client = getClient(baseUrl, options) as OpenAIContext; + + client.pipeline.addPolicy({ + name: "customKeyCredentialPolicy", + async sendRequest(request, next) { + request.headers.set("Authorization", "Bearer " + credentials.key); + return next(request); + }, + }); + return client; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/outputModels.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/outputModels.ts new file mode 100644 index 0000000000..6eb22f2db7 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/outputModels.ts @@ -0,0 +1,331 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export interface CreateTranscriptionResponseOutput { + text: string; +} + +export interface ErrorResponseOutput { + error: ErrorModelOutput; +} + +export interface ErrorModelOutput { + type: string; + message: string; + param: string | null; + code: string | null; +} + +export interface CreateTranslationResponseOutput { + text: string; +} + +/** Represents a chat completion response returned by the model, based on the provided input. */ +export interface CreateChatCompletionResponseOutput { + /** A unique identifier for the chat completion. */ + id: string; + /** The object type, which is always `chat.completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the chat completion was created. */ + created: number; + /** The model used for the chat completion. */ + model: string; + /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + choices: Record<string, any>[]; + usage?: CompletionUsageOutput; +} + +export interface ChatCompletionResponseMessageOutput { + /** The role of the author of this message. */ + role: "system" | "user" | "assistant" | "function"; + /** The contents of the message. */ + content: string | null; + /** The name and arguments of a function that should be called, as generated by the model. */ + function_call?: Record<string, any>; +} + +/** Usage statistics for the completion request. */ +export interface CompletionUsageOutput { + /** Number of tokens in the prompt. */ + prompt_tokens: number; + /** Number of tokens in the generated completion. */ + completion_tokens: number; + /** Total number of tokens used in the request (prompt + completion).
*/ + total_tokens: number; +} + +export interface FineTuningJobOutput { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine_tuning.job". */ + object: "fine_tuning.job"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + created_at: number; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. + */ + finished_at: string | null; + /** The base model that is being fine-tuned. */ + model: string; + /** + * The name of the fine-tuned model that is being created. The value will be null if the + * fine-tuning job is still running. + */ + fine_tuned_model: string | null; + /** The organization that owns the fine-tuning job. */ + organization_id: string; + /** + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: + | "created" + | "pending" + | "running" + | "succeeded" + | "failed" + | "cancelled"; + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + hyperparameters: Record<string, any>; + /** + * The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + training_file: string; + /** + * The file ID used for validation. You can retrieve the validation results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + validation_file: string | null; + /** + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + result_files: string[]; + /** + * The total number of billable tokens processed by this fine-tuning job. The value will be null + * if the fine-tuning job is still running. + */ + trained_tokens: number | null; + /** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. + */ + error: Record<string, any> | null; +} + +export interface ListPaginatedFineTuningJobsResponseOutput { + object: string; + data: Array<FineTuningJobOutput>; + has_more: boolean; +} + +export interface ListFineTuningJobEventsResponseOutput { + object: string; + data: Array<FineTuningJobEventOutput>; +} + +export interface FineTuningJobEventOutput { + id: string; + object: string; + created_at: number; + level: "info" | "warn" | "error"; + message: string; +} + +/** + * Represents a completion response from the API. Note: both the streamed and non-streamed response + * objects share the same shape (unlike the chat endpoint). + */ +export interface CreateCompletionResponseOutput { + /** A unique identifier for the completion. */ + id: string; + /** The object type, which is always `text_completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the completion was created. */ + created: number; + /** The model used for the completion. */ + model: string; + /** The list of completion choices the model generated for the input. */ + choices: Record<string, any>[]; + usage?: CompletionUsageOutput; +} + +export interface CreateEditResponseOutput { + /** The object type, which is always `edit`. */ + object: "edit"; + /** The Unix timestamp (in seconds) of when the edit was created. */ + created: number; + /** A list of edit choices. Can be more than one if `n` is greater than 1.
*/ + choices: Record<string, any>[]; + usage: CompletionUsageOutput; +} + +export interface CreateEmbeddingResponseOutput { + /** The object type, which is always "embedding". */ + object: "embedding"; + /** The name of the model used to generate the embedding. */ + model: string; + /** The list of embeddings generated by the model. */ + data: Array<EmbeddingOutput>; + /** The usage information for the request. */ + usage: Record<string, number>; +} + +/** Represents an embedding vector returned by the embedding endpoint. */ +export interface EmbeddingOutput { + /** The index of the embedding in the list of embeddings. */ + index: number; + /** The object type, which is always "embedding". */ + object: "embedding"; + /** + * The embedding vector, which is a list of floats. The length of the vector depends on the model, as + * listed in the [embedding guide](/docs/guides/embeddings). + */ + embedding: number[]; +} + +export interface ListFilesResponseOutput { + object: string; + data: Array<OpenAIFileOutput>; +} + +/** The `File` object represents a document that has been uploaded to OpenAI. */ +export interface OpenAIFileOutput { + /** The file identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "file". */ + object: "file"; + /** The size of the file in bytes. */ + bytes: number; + /** The Unix timestamp (in seconds) for when the file was created. */ + createdAt: number; + /** The name of the file. */ + filename: string; + /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ + purpose: string; + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, + * `error`, `deleting` or `deleted`. + */ + status: + | "uploaded" + | "processed" + | "pending" + | "error" + | "deleting" + | "deleted"; + /** + * Additional details about the status of the file. If the file is in the `error` state, this will + * include a message describing the error. + */ + status_details?: string | null; +} + +export interface DeleteFileResponseOutput { + id: string; + object: string; + deleted: boolean; +} + +/** The `FineTune` object represents a legacy fine-tune job that has been created through the API. */ +export interface FineTuneOutput { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine-tune". */ + object: "fine-tune"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + created_at: number; + /** The Unix timestamp (in seconds) for when the fine-tuning job was last updated. */ + updated_at: number; + /** The base model that is being fine-tuned. */ + model: string; + /** The name of the fine-tuned model that is being created. */ + fine_tuned_model: string | null; + /** The organization that owns the fine-tuning job. */ + organization_id: string; + /** + * The current status of the fine-tuning job, which can be either `created`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: "created" | "running" | "succeeded" | "failed" | "cancelled"; + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + */ + hyperparams: Record<string, any>; + /** The list of files used for training. */ + training_files: Array<OpenAIFileOutput>; + /** The list of files used for validation. */ + validation_files: Array<OpenAIFileOutput>; + /** The compiled results files for the fine-tuning job.
*/ + result_files: Array<OpenAIFileOutput>; + /** The list of events that have been observed in the lifecycle of the FineTune job. */ + events?: Array<FineTuneEventOutput>; +} + +export interface FineTuneEventOutput { + object: string; + created_at: number; + level: string; + message: string; +} + +export interface ListFineTunesResponseOutput { + object: string; + data: Array<FineTuneOutput>; +} + +export interface ListFineTuneEventsResponseOutput { + object: string; + data: Array<FineTuneEventOutput>; +} + +export interface ListModelsResponseOutput { + object: string; + data: Array<ModelOutput>; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +export interface ModelOutput { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "model". */ + object: "model"; + /** The Unix timestamp (in seconds) when the model was created. */ + created: number; + /** The organization that owns the model. */ + owned_by: string; +} + +export interface DeleteModelResponseOutput { + id: string; + object: string; + deleted: boolean; +} + +export interface ImagesResponseOutput { + created: number; + data: Array<ImageOutput>; +} + +/** Represents the URL or the content of an image generated by the OpenAI API. */ +export interface ImageOutput { + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: string; + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + b64_json?: string; +} + +export interface CreateModerationResponseOutput { + /** The unique identifier for the moderation request. */ + id: string; + /** The model used to generate the moderation results. */ + model: string; + /** A list of moderation objects. */ + results: Record<string, any>[]; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/parameters.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/parameters.ts new file mode 100644 index 0000000000..14b333dada --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/parameters.ts @@ -0,0 +1,229 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
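The output models above pair with the response interfaces defined later in this diff, which discriminate success from failure on the literal `status: "200"` string. A hedged sketch of that narrowing pattern follows; the helper name and the completion-specific response shape are illustrative, not generated code.

```ts
// Illustrative helper: narrow a raw REST-level response to its success body,
// or surface the typed error payload from ErrorResponseOutput.
import {
  CreateCompletionResponseOutput,
  ErrorResponseOutput,
} from "./src/rest/outputModels.js";

interface CompletionRestResponse {
  status: string; // "200" on success, any other status code string on failure
  body: CreateCompletionResponseOutput | ErrorResponseOutput;
}

function unwrapCompletion(
  response: CompletionRestResponse
): CreateCompletionResponseOutput {
  if (response.status === "200") {
    return response.body as CreateCompletionResponseOutput;
  }
  // Non-200 responses carry the ErrorResponseOutput shape defined above.
  const { error } = response.body as ErrorResponseOutput;
  throw new Error(`${error.type}: ${error.message}`);
}
```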
+ +import { RequestParameters } from "@azure-rest/core-client"; +import { + CreateChatCompletionRequest, + CreateFineTuningJobRequest, + CreateCompletionRequest, + CreateEditRequest, + CreateEmbeddingRequest, + CreateFineTuneRequest, + CreateImageRequest, + CreateModerationRequest, +} from "./models.js"; + +export interface CreateTranscriptionBodyParam { + body: CreateTranscriptionFormBody; +} + +export interface CreateTranscriptionFormBody { + file: string; + model: string | "whisper-1"; + prompt?: string; + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + temperature?: number; + language?: string; +} + +export interface CreateTranscriptionMediaTypesParam { + contentType: "multipart/form-data"; +} + +export type CreateTranscriptionParameters = CreateTranscriptionMediaTypesParam & + CreateTranscriptionBodyParam & + RequestParameters; + +export interface CreateTranslationBodyParam { + body: CreateTranslationFormBody; +} + +export interface CreateTranslationFormBody { + file: string; + model: string | "whisper-1"; + prompt?: string; + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + temperature?: number; +} + +export interface CreateTranslationMediaTypesParam { + contentType: "multipart/form-data"; +} + +export type CreateTranslationParameters = CreateTranslationMediaTypesParam & + CreateTranslationBodyParam & + RequestParameters; + +export interface CreateChatCompletionBodyParam { + body?: CreateChatCompletionRequest; +} + +export type CreateChatCompletionParameters = CreateChatCompletionBodyParam & + RequestParameters; + +export interface CreateFineTuningJobBodyParam { + body: CreateFineTuningJobRequest; +} + +export type CreateFineTuningJobParameters = CreateFineTuningJobBodyParam & + RequestParameters; + +export interface ListPaginatedFineTuningJobsQueryParamProperties { + /** Identifier for the last job from the previous pagination request. */ + after?: string; + /** Number of fine-tuning jobs to retrieve. */ + limit?: number; +} + +export interface ListPaginatedFineTuningJobsQueryParam { + queryParameters?: ListPaginatedFineTuningJobsQueryParamProperties; +} + +export type ListPaginatedFineTuningJobsParameters = + ListPaginatedFineTuningJobsQueryParam & RequestParameters; +export type RetrieveFineTuningJobParameters = RequestParameters; + +export interface ListFineTuningEventsQueryParamProperties { + /** Identifier for the last event from the previous pagination request. */ + after?: string; + /** Number of events to retrieve. 
*/ + limit?: number; +} + +export interface ListFineTuningEventsQueryParam { + queryParameters?: ListFineTuningEventsQueryParamProperties; +} + +export type ListFineTuningEventsParameters = ListFineTuningEventsQueryParam & + RequestParameters; +export type CancelFineTuningJobParameters = RequestParameters; + +export interface CreateCompletionBodyParam { + body?: CreateCompletionRequest; +} + +export type CreateCompletionParameters = CreateCompletionBodyParam & + RequestParameters; + +export interface CreateEditBodyParam { + body: CreateEditRequest; +} + +export type CreateEditParameters = CreateEditBodyParam & RequestParameters; + +export interface CreateEmbeddingBodyParam { + body: CreateEmbeddingRequest; +} + +export type CreateEmbeddingParameters = CreateEmbeddingBodyParam & + RequestParameters; +export type ListFilesParameters = RequestParameters; + +export interface CreateFileBodyParam { + body: CreateFileFormBody; +} + +export interface CreateFileFormBody { + file: string; + purpose: string; +} + +export interface CreateFileMediaTypesParam { + contentType: "multipart/form-data"; +} + +export type CreateFileParameters = CreateFileMediaTypesParam & + CreateFileBodyParam & + RequestParameters; +export type RetrieveFileParameters = RequestParameters; +export type DeleteFileParameters = RequestParameters; +export type DownloadFileParameters = RequestParameters; + +export interface CreateFineTuneBodyParam { + body: CreateFineTuneRequest; +} + +export type CreateFineTuneParameters = CreateFineTuneBodyParam & + RequestParameters; +export type ListFineTunesParameters = RequestParameters; +export type RetrieveFineTuneParameters = RequestParameters; + +export interface ListFineTuneEventsQueryParamProperties { + /** + * Whether to stream events for the fine-tune job. If set to true, events will be sent as + * data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available. The stream will terminate with a `data: [DONE]` message when the + * job is finished (succeeded, cancelled, or failed). + * + * If set to false, only events generated so far will be returned. 
+ */ + stream?: boolean; +} + +export interface ListFineTuneEventsQueryParam { + queryParameters?: ListFineTuneEventsQueryParamProperties; +} + +export type ListFineTuneEventsParameters = ListFineTuneEventsQueryParam & + RequestParameters; +export type CancelFineTuneParameters = RequestParameters; +export type ListModelsParameters = RequestParameters; +export type RetrieveParameters = RequestParameters; +export type DeleteParameters = RequestParameters; + +export interface CreateImageBodyParam { + body: CreateImageRequest; +} + +export type CreateImageParameters = CreateImageBodyParam & RequestParameters; + +export interface CreateImageEditBodyParam { + body: CreateImageEditFormBody; +} + +export interface CreateImageEditFormBody { + prompt: string; + image: string; + mask?: string; + n?: number | null; + size?: "256x256" | "512x512" | "1024x1024" | null; + response_format?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateImageEditMediaTypesParam { + contentType: "multipart/form-data"; +} + +export type CreateImageEditParameters = CreateImageEditMediaTypesParam & + CreateImageEditBodyParam & + RequestParameters; + +export interface CreateImageVariationBodyParam { + body: CreateImageVariationFormBody; +} + +export interface CreateImageVariationFormBody { + image: string; + n?: number | null; + size?: "256x256" | "512x512" | "1024x1024" | null; + response_format?: "url" | "b64_json" | null; + user?: string; +} + +export interface CreateImageVariationMediaTypesParam { + contentType: "multipart/form-data"; +} + +export type CreateImageVariationParameters = + CreateImageVariationMediaTypesParam & + CreateImageVariationBodyParam & + RequestParameters; + +export interface CreateModerationBodyParam { + body: CreateModerationRequest; +} + +export type CreateModerationParameters = CreateModerationBodyParam & + RequestParameters; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/responses.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/responses.ts new file mode 100644 index 0000000000..829265b555 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/rest/responses.ts @@ -0,0 +1,336 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { HttpResponse } from "@azure-rest/core-client"; +import { + CreateTranscriptionResponseOutput, + ErrorResponseOutput, + CreateTranslationResponseOutput, + CreateChatCompletionResponseOutput, + FineTuningJobOutput, + ListPaginatedFineTuningJobsResponseOutput, + ListFineTuningJobEventsResponseOutput, + CreateCompletionResponseOutput, + CreateEditResponseOutput, + CreateEmbeddingResponseOutput, + ListFilesResponseOutput, + OpenAIFileOutput, + DeleteFileResponseOutput, + FineTuneOutput, + ListFineTunesResponseOutput, + ListFineTuneEventsResponseOutput, + ListModelsResponseOutput, + ModelOutput, + DeleteModelResponseOutput, + ImagesResponseOutput, + CreateModerationResponseOutput, +} from "./outputModels.js"; + +/** The request has succeeded. */ +export interface CreateTranscription200Response extends HttpResponse { + status: "200"; + body: CreateTranscriptionResponseOutput; +} + +export interface CreateTranscriptionDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. 
*/ +export interface CreateTranslation200Response extends HttpResponse { + status: "200"; + body: CreateTranslationResponseOutput; +} + +export interface CreateTranslationDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateChatCompletion200Response extends HttpResponse { + status: "200"; + body: CreateChatCompletionResponseOutput; +} + +export interface CreateChatCompletionDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateFineTuningJob200Response extends HttpResponse { + status: "200"; + body: FineTuningJobOutput; +} + +export interface CreateFineTuningJobDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListPaginatedFineTuningJobs200Response extends HttpResponse { + status: "200"; + body: ListPaginatedFineTuningJobsResponseOutput; +} + +export interface ListPaginatedFineTuningJobsDefaultResponse + extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface RetrieveFineTuningJob200Response extends HttpResponse { + status: "200"; + body: FineTuningJobOutput; +} + +export interface RetrieveFineTuningJobDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListFineTuningEvents200Response extends HttpResponse { + status: "200"; + body: ListFineTuningJobEventsResponseOutput; +} + +export interface ListFineTuningEventsDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CancelFineTuningJob200Response extends HttpResponse { + status: "200"; + body: FineTuningJobOutput; +} + +export interface CancelFineTuningJobDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateCompletion200Response extends HttpResponse { + status: "200"; + body: CreateCompletionResponseOutput; +} + +export interface CreateCompletionDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateEdit200Response extends HttpResponse { + status: "200"; + body: CreateEditResponseOutput; +} + +export interface CreateEditDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateEmbedding200Response extends HttpResponse { + status: "200"; + body: CreateEmbeddingResponseOutput; +} + +export interface CreateEmbeddingDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListFiles200Response extends HttpResponse { + status: "200"; + body: ListFilesResponseOutput; +} + +export interface ListFilesDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateFile200Response extends HttpResponse { + status: "200"; + body: OpenAIFileOutput; +} + +export interface CreateFileDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. 
*/ +export interface RetrieveFile200Response extends HttpResponse { + status: "200"; + body: OpenAIFileOutput; +} + +export interface RetrieveFileDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface DeleteFile200Response extends HttpResponse { + status: "200"; + body: DeleteFileResponseOutput; +} + +export interface DeleteFileDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface DownloadFile200Response extends HttpResponse { + status: "200"; + body: string; +} + +export interface DownloadFileDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateFineTune200Response extends HttpResponse { + status: "200"; + body: FineTuneOutput; +} + +export interface CreateFineTuneDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListFineTunes200Response extends HttpResponse { + status: "200"; + body: ListFineTunesResponseOutput; +} + +export interface ListFineTunesDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface RetrieveFineTune200Response extends HttpResponse { + status: "200"; + body: FineTuneOutput; +} + +export interface RetrieveFineTuneDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListFineTuneEvents200Response extends HttpResponse { + status: "200"; + body: ListFineTuneEventsResponseOutput; +} + +export interface ListFineTuneEventsDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CancelFineTune200Response extends HttpResponse { + status: "200"; + body: FineTuneOutput; +} + +export interface CancelFineTuneDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface ListModels200Response extends HttpResponse { + status: "200"; + body: ListModelsResponseOutput; +} + +export interface ListModelsDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface Retrieve200Response extends HttpResponse { + status: "200"; + body: ModelOutput; +} + +export interface RetrieveDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface DeleteOperation200Response extends HttpResponse { + status: "200"; + body: DeleteModelResponseOutput; +} + +export interface DeleteOperationDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateImage200Response extends HttpResponse { + status: "200"; + body: ImagesResponseOutput; +} + +export interface CreateImageDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. 
*/ +export interface CreateImageEdit200Response extends HttpResponse { + status: "200"; + body: ImagesResponseOutput; +} + +export interface CreateImageEditDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateImageVariation200Response extends HttpResponse { + status: "200"; + body: ImagesResponseOutput; +} + +export interface CreateImageVariationDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} + +/** The request has succeeded. */ +export interface CreateModeration200Response extends HttpResponse { + status: "200"; + body: CreateModerationResponseOutput; +} + +export interface CreateModerationDefaultResponse extends HttpResponse { + status: string; + body: ErrorResponseOutput; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/sampleTest.spec.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/sampleTest.spec.ts new file mode 100644 index 0000000000..97b8e8a02b --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/sampleTest.spec.ts @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { Recorder } from "@azure-tools/test-recorder"; +import { assert } from "chai"; +import { createRecorder } from "./utils/recordedClient.js"; +import { Context } from "mocha"; + +describe("My test", () => { + let recorder: Recorder; + + beforeEach(async function (this: Context) { + recorder = await createRecorder(this); + }); + + afterEach(async function () { + await recorder.stop(); + }); + + it("sample test", async function () { + assert.equal(1, 1); + }); +}); diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.browser.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.browser.ts new file mode 100644 index 0000000000..fd2aca680c --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.browser.ts @@ -0,0 +1,2 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.ts new file mode 100644 index 0000000000..0e06855b73 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/env.ts @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import * as dotenv from "dotenv"; + +dotenv.config(); diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/recordedClient.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/recordedClient.ts new file mode 100644 index 0000000000..6cc58bc15e --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/test/public/utils/recordedClient.ts @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
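One behavior worth illustrating before the test utilities below: the `stream` options documented in the request models send data-only server-sent events that end with a `data: [DONE]` sentinel. A rough sketch of consuming such a stream follows; the line-based input framing is an assumption, and nothing here is generated code.

```ts
// Hedged sketch: yield parsed JSON payloads from data-only SSE lines,
// stopping at the `data: [DONE]` terminator described in the doc comments.
async function* readSseJson(
  lines: AsyncIterable<string>
): AsyncGenerator<unknown> {
  for await (const line of lines) {
    if (!line.startsWith("data: ")) continue; // skip blank lines and comments
    const payload = line.slice("data: ".length).trim();
    if (payload === "[DONE]") return; // end-of-stream sentinel
    yield JSON.parse(payload);
  }
}
```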
+ +import { Context } from "mocha"; +import { Recorder, RecorderStartOptions } from "@azure-tools/test-recorder"; +import "./env"; + +const envSetupForPlayback: Record<string, string> = { + ENDPOINT: "https://endpoint", + AZURE_CLIENT_ID: "azure_client_id", + AZURE_CLIENT_SECRET: "azure_client_secret", + AZURE_TENANT_ID: "88888888-8888-8888-8888-888888888888", + SUBSCRIPTION_ID: "azure_subscription_id", +}; + +const recorderEnvSetup: RecorderStartOptions = { + envSetupForPlayback, +}; + +/** + * Creates the recorder and reads the environment variables from the `.env` file. + * Should be called first in the test suite to make sure environment variables are + * read before they are being used. + */ +export async function createRecorder(context: Context): Promise<Recorder> { + const recorder = new Recorder(context.currentTest); + await recorder.start(recorderEnvSetup); + return recorder; +} diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/tsconfig.json b/packages/typespec-test/test/openai_generic/generated/typespec-ts/tsconfig.json new file mode 100644 index 0000000000..c1c30102d9 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2017", + "module": "NodeNext", + "lib": ["esnext", "dom"], + "declaration": true, + "declarationMap": true, + "inlineSources": true, + "sourceMap": true, + "importHelpers": true, + "strict": true, + "alwaysStrict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "forceConsistentCasingInFileNames": true, + "moduleResolution": "NodeNext", + "allowSyntheticDefaultImports": true, + "esModuleInterop": true, + "outDir": "./dist-esm", + "declarationDir": "./types", + "rootDir": "." + }, + "ts-node": { "esm": true }, + "include": ["./src/**/*.ts", "./test/**/*.ts"] +} diff --git a/packages/typespec-test/test/openai_generic/spec/audio/main.tsp b/packages/typespec-test/test/openai_generic/spec/audio/main.tsp new file mode 100644 index 0000000000..c6458821ff --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/audio/main.tsp @@ -0,0 +1,2 @@ +import "./operations.tsp"; +import "./models.tsp"; diff --git a/packages/typespec-test/test/openai_generic/spec/audio/models.tsp b/packages/typespec-test/test/openai_generic/spec/audio/models.tsp new file mode 100644 index 0000000000..a2a440a902 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/audio/models.tsp @@ -0,0 +1,92 @@ +namespace OpenAI; +using TypeSpec.OpenAPI; + +model CreateTranscriptionRequest { + /** + * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + @encode("binary") + @extension("x-oaiTypeLabel", "file") + file: bytes; + + /** ID of the model to use. Only `whisper-1` is currently available. */ + @extension("x-oaiTypeLabel", "string") + `model`: string | "whisper-1"; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt" = "json"; + + /** + * The sampling temperature, between 0 and 1.
Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + @minValue(0) + @maxValue(1) + temperature?: float64 = 0; + + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + * and latency. + */ + language?: string; +} + +// Note: This does not currently support the non-default response format types. +model CreateTranscriptionResponse { + text: string; +} + +model CreateTranslationRequest { + /** + * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. + */ + @encode("binary") + @extension("x-oaiTypeLabel", "file") + file: bytes; + + /** ID of the model to use. Only `whisper-1` is currently available. */ + @extension("x-oaiTypeLabel", "string") + `model`: string | "whisper-1"; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + */ + prompt?: string; + + // NOTE: this is just string in the actual API? + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. + */ + response_format?: "json" | "text" | "srt" | "verbose_json" | "vtt" = "json"; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. + */ + @minValue(0) + @maxValue(1) + temperature?: float64 = 0; +} + +// Note: This does not currently support the non-default response format types. 
+model CreateTranslationResponse { + text: string; +} diff --git a/packages/typespec-test/test/openai_generic/spec/audio/operations.tsp b/packages/typespec-test/test/openai_generic/spec/audio/operations.tsp new file mode 100644 index 0000000000..636fb941ac --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/audio/operations.tsp @@ -0,0 +1,35 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; +@route("/audio") +namespace Audio { + @route("transcriptions") + interface Transcriptions { + @post + @operationId("createTranscription") + @tag("OpenAI") + @summary("Transcribes audio into the input language.") + createTranscription( + @header contentType: "multipart/form-data", + @body audio: CreateTranscriptionRequest, + ): CreateTranscriptionResponse | ErrorResponse; + } + + @route("translations") + interface Translations { + @post + @operationId("createTranslation") + @tag("OpenAI") + @summary("Translates audio into English.") + createTranslation( + @header contentType: "multipart/form-data", + @body audio: CreateTranslationRequest, + ): CreateTranslationResponse | ErrorResponse; + } +} diff --git a/packages/typespec-test/test/openai_generic/spec/common/errors.tsp b/packages/typespec-test/test/openai_generic/spec/common/errors.tsp new file mode 100644 index 0000000000..cd4395a050 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/common/errors.tsp @@ -0,0 +1,13 @@ +namespace OpenAI; + +model Error { + type: string; + message: string; + param: string | null; + code: string | null; +} + +@error +model ErrorResponse { + error: Error; +} diff --git a/packages/typespec-test/test/openai_generic/spec/common/models.tsp b/packages/typespec-test/test/openai_generic/spec/common/models.tsp new file mode 100644 index 0000000000..d6d0d4f91d --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/common/models.tsp @@ -0,0 +1,39 @@ +namespace OpenAI; +using TypeSpec.OpenAPI; + +model ListModelsResponse { + object: string; + data: Model[]; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +model Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The object type, which is always "model". */ + object: "model"; + + /** The Unix timestamp (in seconds) when the model was created. */ + @encode("unixTimestamp", int32) + created: utcDateTime; + + /** The organization that owns the model. */ + owned_by: string; +} + +model DeleteModelResponse { + id: string; + object: string; + deleted: boolean; +} + +// this is using yaml refs instead of a def in the openapi, not sure if that's required?
+ +scalar User extends string; + +@minItems(1) +model TokenArray is safeint[]; + +@minItems(1) +model TokenArrayArray is TokenArray[]; diff --git a/packages/typespec-test/test/openai_generic/spec/completions/chat-meta.tsp b/packages/typespec-test/test/openai_generic/spec/completions/chat-meta.tsp new file mode 100644 index 0000000000..7823da41d5 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/completions/chat-meta.tsp @@ -0,0 +1,168 @@ +using TypeSpec.OpenAPI; + +@@extension(OpenAI.Completions.createCompletion, + "x-oaiMeta", + { + name: "Create chat completion", + group: "chat", + returns: """ + Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of + [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. + """, + path: "create", + examples: [ + { + title: "No streaming", + request: { + curl: """ + curl https://api.openai.com/v1/chat/completions \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer $OPENAI_API_KEY" \\ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ] + }' + """, + python: """ + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + """, + `node.js`: """ + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "string" }], + model: "VAR_model_id", + }); + + console.log(completion.choices[0]); + } + + main(); + """, + }, + response: """ + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?" + }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + """, + }, + { + title: "Streaming", + request: { + curl: """ + curl https://api.openai.com/v1/chat/completions \\ + -H "Content-Type: application/json" \\ + -H "Authorization: Bearer $OPENAI_API_KEY" \\ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!"
+ } + ], + "stream": true + }' + """, + python: """ + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + for chunk in completion: + print(chunk.choices[0].delta) + """, + `node.js`: """ + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + main(); + """, + }, + response: """ + { + "id": "chatcmpl-123", + "object": "chat.completion.chunk", + "created": 1677652288, + "model": "gpt-3.5-turbo", + "choices": [{ + "index": 0, + "delta": { + "content": "Hello" + }, + "finish_reason": "stop" + }] + } + """, + } + ], + } +); diff --git a/packages/typespec-test/test/openai_generic/spec/completions/main.tsp b/packages/typespec-test/test/openai_generic/spec/completions/main.tsp new file mode 100644 index 0000000000..144c4aeaff --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/completions/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; diff --git a/packages/typespec-test/test/openai_generic/spec/completions/models.tsp b/packages/typespec-test/test/openai_generic/spec/completions/models.tsp new file mode 100644 index 0000000000..5aa332b32a --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/completions/models.tsp @@ -0,0 +1,420 @@ +namespace OpenAI; +using TypeSpec.OpenAPI; + +alias CHAT_COMPLETION_MODELS = + | "gpt-4" + | "gpt-4-0314" + | "gpt-4-0613" + | "gpt-4-32k" + | "gpt-4-32k-0314" + | "gpt-4-32k-0613" + | "gpt-3.5-turbo" + | "gpt-3.5-turbo-16k" + | "gpt-3.5-turbo-0301" + | "gpt-3.5-turbo-0613" + | "gpt-3.5-turbo-16k-0613"; + +alias COMPLETION_MODELS = + | "babbage-002" + | "davinci-002" + | "text-davinci-003" + | "text-davinci-002" + | "text-davinci-001" + | "code-davinci-002" + | "text-curie-001" + | "text-babbage-001" + | "text-ada-001"; + +alias SharedCompletionProperties = { + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: Temperature | null = 1; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: TopP | null = 1; + + /** + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + n?: N | null = 1; + + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+   * for counting tokens.
+   */
+  max_tokens?: MaxTokens | null = 16;
+
+  // todo: consider inlining when https://github.com/microsoft/typespec/issues/2356 is resolved
+  // https://github.com/microsoft/typespec/issues/2355
+  /** Up to 4 sequences where the API will stop generating further tokens. */
+  stop?: Stop = null;
+
+  // needs default
+  // https://github.com/microsoft/typespec/issues/1646
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+   * in the text so far, increasing the model's likelihood to talk about new topics.
+   *
+   * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+   */
+  presence_penalty?: Penalty | null;
+
+  // needs default
+  /**
+   * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+   * frequency in the text so far, decreasing the model's likelihood to repeat the same line
+   * verbatim.
+   *
+   * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+   */
+  frequency_penalty?: Penalty | null;
+
+  // needs default of null
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an
+   * associated bias value from -100 to 100. Mathematically, the bias is added to the logits
+   * generated by the model prior to sampling. The exact effect will vary per model, but values
+   * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
+   * should result in a ban or exclusive selection of the relevant token.
+   */
+  @extension("x-oaiTypeLabel", "map")
+  logit_bias?: Record | null;
+
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+   * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+   */
+  user?: User;
+
+  /**
+   * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only
+   * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+   * as they become available, with the stream terminated by a `data: [DONE]` message.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
+   */
+  stream?: boolean | null = false;
+};
+
+@oneOf
+union Stop {
+  string,
+  StopSequences,
+  null,
+}
+
+@minValue(-2)
+@maxValue(2)
+scalar Penalty extends float64;
+
+@minItems(1)
+@maxItems(4)
+model StopSequences is string[];
+
+@minValue(0)
+@maxValue(2)
+scalar Temperature extends float64;
+
+@minValue(0)
+@maxValue(1)
+scalar TopP extends float64;
+
+@minValue(1)
+@maxValue(128)
+scalar N extends safeint;
+
+@minValue(0)
+scalar MaxTokens extends safeint;
+
+model CreateChatCompletionRequest {
+  /**
+   * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility)
+   * table for details on which models work with the Chat API.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | CHAT_COMPLETION_MODELS;
+
+  /**
+   * A list of messages comprising the conversation so far.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
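+   *
+   * For example, a minimal conversation, matching the examples in chat-meta.tsp:
+   * ```ts
+   * const messages = [
+   *   { role: "system", content: "You are a helpful assistant." },
+   *   { role: "user", content: "Hello!" },
+   * ];
+   * ```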
+   */
+  @minItems(1)
+  messages: ChatCompletionRequestMessage[];
+
+  /** A list of functions the model may generate JSON inputs for. */
+  @minItems(1)
+  @maxItems(128)
+  functions?: ChatCompletionFunctions[];
+
+  /**
+   * Controls how the model responds to function calls. `none` means the model does not call a
+   * function, and responds to the end-user. `auto` means the model can pick between an end-user or
+   * calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the
+   * model to call that function. `none` is the default when no functions are present. `auto` is the
+   * default if functions are present.
+   */
+  function_call?: "none" | "auto" | ChatCompletionFunctionCallOption;
+
+  ...SharedCompletionProperties;
+}
+
+model ChatCompletionFunctionCallOption {
+  /** The name of the function to call. */
+  name: string;
+}
+
+model ChatCompletionFunctions {
+  /**
+   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and
+   * dashes, with a maximum length of 64.
+   */
+  name: string;
+
+  /**
+   * A description of what the function does, used by the model to choose when and how to call the
+   * function.
+   */
+  description?: string;
+
+  /**
+   * The parameters the function accepts, described as a JSON Schema object. See the
+   * [guide](/docs/guides/gpt/function-calling) for examples, and the
+   * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation
+   * about the format.\n\nTo describe a function that accepts no parameters, provide the value
+   * `{\"type\": \"object\", \"properties\": {}}`.
+   */
+  parameters: ChatCompletionFunctionParameters;
+}
+
+model ChatCompletionFunctionParameters is Record;
+
+model ChatCompletionRequestMessage {
+  /** The role of the message's author. One of `system`, `user`, `assistant`, or `function`. */
+  role: "system" | "user" | "assistant" | "function";
+
+  /**
+   * The contents of the message. `content` is required for all messages, and may be null for
+   * assistant messages with function calls.
+   */
+  content: string | null;
+
+  // TODO: the constraints are not specified in the API
+  /**
+   * The name of the author of this message. `name` is required if role is `function`, and it
+   * should be the name of the function whose response is in the `content`. May contain a-z,
+   * A-Z, 0-9, and underscores, with a maximum length of 64 characters.
+   */
+  name?: string;
+
+  /** The name and arguments of a function that should be called, as generated by the model. */
+  function_call?: {
+    /** The name of the function to call. */
+    name: string;
+
+    /**
+     * The arguments to call the function with, as generated by the model in JSON format. Note that
+     * the model does not always generate valid JSON, and may hallucinate parameters not defined by
+     * your function schema. Validate the arguments in your code before calling your function.
+     */
+    arguments: string;
+  };
+}
+
+/** Represents a chat completion response returned by the model, based on the provided input. */
+// TODO: Fill in example here.
+@extension(
+  "x-oaiMeta",
+  {
+    name: "The chat completion object",
+    group: "chat",
+    example: "",
+  }
+)
+model CreateChatCompletionResponse {
+  /** A unique identifier for the chat completion. */
+  id: string;
+
+  /** The object type, which is always `chat.completion`. */
+  object: string;
+
+  /** The Unix timestamp (in seconds) of when the chat completion was created. */
+  @encode("unixTimestamp", int32)
+  created: utcDateTime;
+
+  /** The model used for the chat completion. 
*/
+  `model`: string;
+
+  /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */
+  choices: {
+    /** The index of the choice in the list of choices. */
+    index: safeint;
+
+    message: ChatCompletionResponseMessage;
+
+    /**
+     * The reason the model stopped generating tokens. This will be `stop` if the model hit a
+     * natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+     * specified in the request was reached, `content_filter` if the content was omitted due to
+     * a flag from our content filters, or `function_call` if the model called a function.
+     */
+    finish_reason: "stop" | "length" | "function_call" | "content_filter";
+  }[];
+
+  usage?: CompletionUsage;
+}
+
+/** Usage statistics for the completion request. */
+model CompletionUsage {
+  /** Number of tokens in the prompt. */
+  prompt_tokens: safeint;
+
+  /** Number of tokens in the generated completion. */
+  completion_tokens: safeint;
+
+  /** Total number of tokens used in the request (prompt + completion). */
+  total_tokens: safeint;
+}
+
+model ChatCompletionResponseMessage {
+  /** The role of the author of this message. */
+  role: "system" | "user" | "assistant" | "function";
+
+  /** The contents of the message. */
+  content: string | null;
+
+  /** The name and arguments of a function that should be called, as generated by the model. */
+  function_call?: {
+    /** The name of the function to call. */
+    name: string;
+
+    /**
+     * The arguments to call the function with, as generated by the model in JSON format. Note that
+     * the model does not always generate valid JSON, and may hallucinate parameters not defined by
+     * your function schema. Validate the arguments in your code before calling your function.
+     */
+    arguments: string;
+  };
+}
+
+model CreateCompletionRequest {
+  /**
+   * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to
+   * see all of your available models, or see our [Model overview](/docs/models/overview) for
+   * descriptions of them.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | COMPLETION_MODELS;
+
+  /**
+   * The prompt(s) to generate completions for, encoded as a string, array of strings, array of
+   * tokens, or array of token arrays.
+   *
+   * Note that <|endoftext|> is the document separator that the model sees during training, so if a
+   * prompt is not specified the model will generate as if from the beginning of a new document.
+   */
+  // TODO: consider inlining when https://github.com/microsoft/typespec/issues/2356 fixed
+  prompt: Prompt = "<|endoftext|>";
+
+  /** The suffix that comes after a completion of inserted text. */
+  suffix?: string | null = null;
+
+  ...SharedCompletionProperties;
+
+  /**
+   * Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen
+   * tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely
+   * tokens. The API will always return the `logprob` of the sampled token, so there may be up to
+   * `logprobs+1` elements in the response.
+   *
+   * The maximum value for `logprobs` is 5.
+   */
+  logprobs?: safeint | null = null;
+
+  /** Echo back the prompt in addition to the completion. */
+  echo?: boolean | null = false;
+
+  /**
+   * Generates `best_of` completions server-side and returns the "best" (the one with the highest
+   * log probability per token). Results cannot be streamed.
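+   * For example, `"best_of": 3` with `"n": 1` generates three candidate completions
+   * server-side and returns only the highest-scoring one.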
+   *
+   * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies
+   * how many to return – `best_of` must be greater than `n`.
+   *
+   * **Note:** Because this parameter generates many completions, it can quickly consume your token
+   * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+   */
+  best_of?: safeint | null = 1;
+}
+
+@oneOf
+union Prompt {
+  string,
+  string[],
+  TokenArray,
+  TokenArrayArray,
+  null,
+}
+/**
+ * Represents a completion response from the API. Note: both the streamed and non-streamed response
+ * objects share the same shape (unlike the chat endpoint).
+ */
+@extension(
+  "x-oaiMeta",
+  {
+    name: "The completion object",
+    legacy: true,
+    example: "", // fill in
+  }
+)
+model CreateCompletionResponse {
+  /** A unique identifier for the completion. */
+  id: string;
+
+  /** The object type, which is always `text_completion`. */
+  object: string;
+
+  /** The Unix timestamp (in seconds) of when the completion was created. */
+  @encode("unixTimestamp", int32)
+  created: utcDateTime;
+
+  /** The model used for the completion. */
+  `model`: string;
+
+  /** The list of completion choices the model generated for the input. */
+  choices: {
+    index: safeint;
+    text: string;
+    logprobs: null | {
+      tokens: string[];
+      token_logprobs: float64[];
+      top_logprobs: Record[];
+      text_offset: safeint[];
+    };
+
+    /**
+     * The reason the model stopped generating tokens. This will be `stop` if the model hit a
+     * natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+     * specified in the request was reached, or `content_filter` if content was omitted due to a
+     * flag from our content filters.
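+     *
+     * A handling sketch (assuming `choice` is one element of `choices`):
+     * ```ts
+     * switch (choice.finish_reason) {
+     *   case "stop": break;           // natural stop or stop sequence
+     *   case "length": break;         // token limit reached; consider raising max_tokens
+     *   case "content_filter": break; // content omitted by the content filters
+     * }
+     * ```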
+     */
+    finish_reason: "stop" | "length" | "content_filter";
+  }[];
+
+  usage?: CompletionUsage;
+}
diff --git a/packages/typespec-test/test/openai_generic/spec/completions/operations.tsp b/packages/typespec-test/test/openai_generic/spec/completions/operations.tsp
new file mode 100644
index 0000000000..d53245f7c7
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/completions/operations.tsp
@@ -0,0 +1,33 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+import "./chat-meta.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/chat")
+namespace Chat {
+  @route("/completions")
+  interface Completions {
+    @tag("OpenAI")
+    @post
+    @operationId("createChatCompletion")
+    createChatCompletion(
+      ...CreateChatCompletionRequest,
+    ): CreateChatCompletionResponse | ErrorResponse;
+  }
+}
+@route("/completions")
+interface Completions {
+  @tag("OpenAI")
+  @post
+  @operationId("createCompletion")
+  createCompletion(
+    ...CreateCompletionRequest,
+  ): CreateCompletionResponse | ErrorResponse;
+}
diff --git a/packages/typespec-test/test/openai_generic/spec/edits/main.tsp b/packages/typespec-test/test/openai_generic/spec/edits/main.tsp
new file mode 100644
index 0000000000..144c4aeaff
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/edits/main.tsp
@@ -0,0 +1 @@
+import "./operations.tsp";
diff --git a/packages/typespec-test/test/openai_generic/spec/edits/models.tsp b/packages/typespec-test/test/openai_generic/spec/edits/models.tsp
new file mode 100644
index 0000000000..d763726497
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/edits/models.tsp
@@ -0,0 +1,69 @@
+namespace OpenAI;
+using TypeSpec.OpenAPI;
+
+model CreateEditRequest {
+  /**
+   * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001`
+   * model with this endpoint.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | "text-davinci-edit-001" | "code-davinci-edit-001";
+
+  /** The input text to use as a starting point for the edit. */
+  input?: string | null = "";
+
+  /** The instruction that tells the model how to edit the prompt. */
+  instruction: string;
+
+  /** How many edits to generate for the input and instruction. */
+  n?: EditN | null = 1;
+
+  /**
+   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+   * more random, while lower values like 0.2 will make it more focused and deterministic.
+   *
+   * We generally recommend altering this or `top_p` but not both.
+   */
+  temperature?: Temperature | null = 1;
+
+  /**
+   * An alternative to sampling with temperature, called nucleus sampling, where the model considers
+   * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
+   * the top 10% probability mass are considered.
+   *
+   * We generally recommend altering this or `temperature` but not both.
+   */
+  top_p?: TopP | null = 1;
+}
+
+#deprecated "deprecated"
+model CreateEditResponse {
+  /** The object type, which is always `edit`. */
+  object: "edit";
+
+  /** The Unix timestamp (in seconds) of when the edit was created. */
+  @encode("unixTimestamp", int32)
+  created: utcDateTime;
+
+  /** A list of edit choices. Can be more than one if `n` is greater than 1. */
+  choices: {
+    /** The edited result. */
+    text: string;
+
+    /** The index of the choice in the list of choices. */
+    index: safeint;
+
+    /**
+     * The reason the model stopped generating tokens. 
This will be `stop` if the model hit a
+     * natural stop point or a provided stop sequence, or `length` if the maximum number of tokens
+     * specified in the request was reached.
+     */
+    finish_reason: "stop" | "length";
+  }[];
+
+  usage: CompletionUsage;
+}
+
+@minValue(0)
+@maxValue(20)
+scalar EditN extends safeint;
diff --git a/packages/typespec-test/test/openai_generic/spec/edits/operations.tsp b/packages/typespec-test/test/openai_generic/spec/edits/operations.tsp
new file mode 100644
index 0000000000..08497364e6
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/edits/operations.tsp
@@ -0,0 +1,19 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/edits")
+interface Edits {
+  #deprecated "deprecated"
+  @post
+  @tag("OpenAI")
+  @operationId("createEdit")
+  createEdit(@body edit: CreateEditRequest): CreateEditResponse | ErrorResponse;
+}
diff --git a/packages/typespec-test/test/openai_generic/spec/embeddings/main.tsp b/packages/typespec-test/test/openai_generic/spec/embeddings/main.tsp
new file mode 100644
index 0000000000..144c4aeaff
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/embeddings/main.tsp
@@ -0,0 +1 @@
+import "./operations.tsp";
diff --git a/packages/typespec-test/test/openai_generic/spec/embeddings/models.tsp b/packages/typespec-test/test/openai_generic/spec/embeddings/models.tsp
new file mode 100644
index 0000000000..ab46275b24
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/embeddings/models.tsp
@@ -0,0 +1,55 @@
+import "../common/models.tsp";
+
+namespace OpenAI;
+using TypeSpec.OpenAPI;
+
+model CreateEmbeddingRequest {
+  /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */
+  @extension("x-oaiTypeLabel", "string")
+  `model`: string | "text-embedding-ada-002";
+
+  /**
+   * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a
+   * single request, pass an array of strings or array of token arrays. Each input must not exceed
+   * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string.
+   * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+   * for counting tokens.
+   */
+  input: string | string[] | TokenArray | TokenArrayArray;
+
+  user?: User;
+}
+model CreateEmbeddingResponse {
+  /** The object type, which is always "embedding". */
+  object: "embedding";
+
+  /** The name of the model used to generate the embedding. */
+  `model`: string;
+
+  /** The list of embeddings generated by the model. */
+  data: Embedding[];
+
+  /** The usage information for the request. */
+  usage: {
+    /** The number of tokens used by the prompt. */
+    prompt_tokens: safeint;
+
+    /** The total number of tokens used by the request. */
+    total_tokens: safeint;
+  };
+}
+
+/** Represents an embedding vector returned by the embedding endpoint. */
+model Embedding {
+  /** The index of the embedding in the list of embeddings. */
+  index: safeint;
+
+  /** The object type, which is always "embedding". */
+  object: "embedding";
+
+  /**
+   * The embedding vector, which is a list of floats. The length of the vector depends on the
+   * model, as listed in the [embedding guide](/docs/guides/embeddings). 
+ */ + embedding: float64[]; +} diff --git a/packages/typespec-test/test/openai_generic/spec/embeddings/operations.tsp b/packages/typespec-test/test/openai_generic/spec/embeddings/operations.tsp new file mode 100644 index 0000000000..012d97c586 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/embeddings/operations.tsp @@ -0,0 +1,21 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/embeddings") +interface Embeddings { + @tag("OpenAI") + @summary("Creates an embedding vector representing the input text.") + @post + @operationId("createEmbedding") + createEmbedding( + @body embedding: CreateEmbeddingRequest, + ): CreateEmbeddingResponse | ErrorResponse; +} diff --git a/packages/typespec-test/test/openai_generic/spec/files/main.tsp b/packages/typespec-test/test/openai_generic/spec/files/main.tsp new file mode 100644 index 0000000000..144c4aeaff --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/files/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; diff --git a/packages/typespec-test/test/openai_generic/spec/files/models.tsp b/packages/typespec-test/test/openai_generic/spec/files/models.tsp new file mode 100644 index 0000000000..990c1ea11f --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/files/models.tsp @@ -0,0 +1,70 @@ +namespace OpenAI; +using TypeSpec.OpenAPI; + +model ListFilesResponse { + object: string; // presumably this is always some constant, but not defined. + data: OpenAIFile[]; +} + +model CreateFileRequest { + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + */ + @encode("binary") + file: bytes; + + /** + * The intended purpose of the uploaded documents. Use "fine-tune" for + * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the + * uploaded file. + */ + purpose: string; +} + +/** The `File` object represents a document that has been uploaded to OpenAI. */ +model OpenAIFile { + /** The file identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The object type, which is always "file". */ + object: "file"; + + /** The size of the file in bytes. */ + bytes: safeint; + + /** The Unix timestamp (in seconds) for when the file was created. */ + @encode("unixTimestamp", int32) + createdAt: utcDateTime; + + /** The name of the file. */ + filename: string; + + /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ + purpose: string; + + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, + * `error`, `deleting` or `deleted`. + */ + status: + | "uploaded" + | "processed" + | "pending" + | "error" + | "deleting" + | "deleted"; + + /** + * Additional details about the status of the file. If the file is in the `error` state, this will + * include a message describing the error. 
+ */ + status_details?: string | null; +} + +model DeleteFileResponse { + id: string; + object: string; + deleted: boolean; +} diff --git a/packages/typespec-test/test/openai_generic/spec/files/operations.tsp b/packages/typespec-test/test/openai_generic/spec/files/operations.tsp new file mode 100644 index 0000000000..2e601ae030 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/files/operations.tsp @@ -0,0 +1,58 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/files") +interface Files { + @tag("OpenAI") + @get + @summary("Returns a list of files that belong to the user's organization.") + @operationId("listFiles") + listFiles(): ListFilesResponse | ErrorResponse; + + @tag("OpenAI") + @post + @summary("Returns a list of files that belong to the user's organization.") + @operationId("createFile") + createFile( + @header contentType: "multipart/form-data", + @body file: CreateFileRequest, + ): OpenAIFile | ErrorResponse; + + @tag("OpenAI") + @post + @summary("Returns information about a specific file.") + @operationId("retrieveFile") + @route("/files/{file_id}") + retrieveFile( + /** The ID of the file to use for this request. */ + @path file_id: string, + ): OpenAIFile | ErrorResponse; + + @tag("OpenAI") + @delete + @summary("Delete a file") + @operationId("deleteFile") + @route("/files/{file_id}") + deleteFile( + /** The ID of the file to use for this request. */ + @path file_id: string, + ): DeleteFileResponse | ErrorResponse; + + @route("/files/{file_id}/content") + @tag("OpenAI") + @get + @summary("Returns the contents of the specified file.") + @operationId("downloadFile") + downloadFile( + /** The ID of the file to use for this request. */ + @path file_id: string, + ): string | ErrorResponse; +} diff --git a/packages/typespec-test/test/openai_generic/spec/fine-tuning/main.tsp b/packages/typespec-test/test/openai_generic/spec/fine-tuning/main.tsp new file mode 100644 index 0000000000..144c4aeaff --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/fine-tuning/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; diff --git a/packages/typespec-test/test/openai_generic/spec/fine-tuning/models.tsp b/packages/typespec-test/test/openai_generic/spec/fine-tuning/models.tsp new file mode 100644 index 0000000000..bf846072bf --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/fine-tuning/models.tsp @@ -0,0 +1,416 @@ +namespace OpenAI; +using TypeSpec.OpenAPI; + +model FineTuningJob { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The object type, which is always "fine_tuning.job". */ + object: "fine_tuning.job"; + + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. + */ + @encode("unixTimestamp", int32) + finished_at: utcDateTime | null; + + /** The base model that is being fine-tuned. */ + `model`: string; + + /** + * The name of the fine-tuned model that is being created. The value will be null if the + * fine-tuning job is still running. + */ + fine_tuned_model: string | null; + + /** The organization that owns the fine-tuning job. 
*/ + organization_id: string; + + /** + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: + | "created" + | "pending" + | "running" + | "succeeded" + | "failed" + | "cancelled"; + + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + hyperparameters: { + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the + * training dataset. + * + * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + * number manually, we support any number between 1 and 50 epochs. + */ + n_epochs?: "auto" | NEpochs = "auto"; + }; + + /** + * The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + training_file: string; + + /** + * The file ID used for validation. You can retrieve the validation results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + validation_file: string | null; + + /** + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + * [Files API](/docs/api-reference/files/retrieve-contents). + */ + result_files: string[]; + + /** + * The total number of billable tokens processed by this fine tuning job. The value will be null + * if the fine-tuning job is still running. + */ + trained_tokens: safeint | null; + + /** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. + */ + error: { + /** A human-readable error message. */ + message?: string; // likely should be required, but spec doesn't say so. + + /** A machine-readable error code. */ + code?: string; + + /** + * The parameter that was invalid, usually `training_file` or `validation_file`. This field + * will be null if the failure was not parameter-specific. + */ + param?: string | null; + } | null; +} + +model FineTuningEvent { + object: string; + + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + level: string; + message: string; + data?: Record | null; + type?: "message" | "metrics"; // "default is "none"? +} + +/** The `FineTune` object represents a legacy fine-tune job that has been created through the API. */ +#deprecated "deprecated" +model FineTune { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + + /** The object type, which is always "fine-tune". */ + object: "fine-tune"; + + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + /** The Unix timestamp (in seconds) for when the fine-tuning job was last updated. */ + @encode("unixTimestamp", int32) + updated_at: utcDateTime; + + /** The base model that is being fine-tuned. */ + `model`: string; + + /** The name of the fine-tuned model that is being created. */ + fine_tuned_model: string | null; + + /** The organization that owns the fine-tuning job. */ + organization_id: string; + + /** + * The current status of the fine-tuning job, which can be either `created`, `running`, + * `succeeded`, `failed`, or `cancelled`. + */ + status: "created" | "running" | "succeeded" | "failed" | "cancelled"; + + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. 
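+   * For example, `n_epochs: 4` means every example in the training file is seen four
+   * times over the course of training.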
+ */ + hyperparams: { + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the + * training dataset. + */ + n_epochs: safeint; + + /** + * The batch size to use for training. The batch size is the number of training examples used to + * train a single forward and backward pass. + */ + batch_size: safeint; + + /** The weight to use for loss on the prompt tokens. */ + prompt_loss_weight: float64; + + /** The learning rate multiplier to use for training. */ + learning_rate_multiplier: float64; + + /** The classification metrics to compute using the validation dataset at the end of every epoch. */ + compute_classification_metrics?: boolean; + + /** The positive class to use for computing classification metrics. */ + classification_positive_class?: string; + + /** The number of classes to use for computing classification metrics. */ + classification_n_classes?: safeint; + }; + + /** The list of files used for training. */ + training_files: OpenAIFile[]; + + /** The list of files used for validation. */ + validation_files: OpenAIFile[]; + + /** The compiled results files for the fine-tuning job. */ + result_files: OpenAIFile[]; + + /** The list of events that have been observed in the lifecycle of the FineTune job. */ + events?: FineTuneEvent[]; +} + +model FineTuningJobEvent { + id: string; + object: string; + + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + level: "info" | "warn" | "error"; + message: string; +} + +model FineTuneEvent { + object: string; + + @encode("unixTimestamp", int32) + created_at: utcDateTime; + + level: string; + message: string; +} + +model CreateFineTuningJobRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + training_file: string; + + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + validation_file?: string | null; + + /** + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + */ + @extension("x-oaiTypeLabel", "string") + `model`: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: { + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the + * training dataset. + */ + n_epochs?: "auto" | NEpochs = "auto"; + }; + + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. 
+ */ + suffix?: SuffixString | null = null; +} + +@minValue(1) +@maxValue(50) +scalar NEpochs extends safeint; + +model ListFineTuningJobEventsResponse { + object: string; + data: FineTuningJobEvent[]; +} + +model CreateFineTuneRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file, where each training example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + training_file: string; + + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the + * [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + * Your train and validation data should be mutually exclusive. + * + * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object + * with the keys "prompt" and "completion". Additionally, you must upload your file with the + * purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more + * details. + */ + validation_file?: string | null; + + /** + * The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", + * "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more + * about these models, see the [Models](/docs/models) documentation. + */ + @extension("x-oaiTypeLabel", "string") + `model`?: string | "ada" | "babbage" | "curie" | "davinci" | null; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the + * training dataset. + */ + n_epochs?: safeint | null = 4; + + /** + * The batch size to use for training. The batch size is the number of training examples used to + * train a single forward and backward pass. + * + * By default, the batch size will be dynamically configured to be ~0.2% of the number of examples + * in the training set, capped at 256 - in general, we've found that larger batch sizes tend to + * work better for larger datasets. + */ + batch_size?: safeint | null = null; + + /** + * The learning rate multiplier to use for training. The fine-tuning learning rate is the original + * learning rate used for pretraining multiplied by this value. + * + * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final + * `batch_size` (larger learning rates tend to perform better with larger batch sizes). We + * recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best + * results. + */ + learning_rate_multiplier?: float64 | null = null; + + /** + * The weight to use for loss on the prompt tokens. This controls how much the model tries to + * learn to generate the prompt (as compared to the completion which always has a weight of 1.0), + * and can add a stabilizing effect to training when completions are short. + * + * If prompts are extremely long (relative to completions), it may make sense to reduce this + * weight so as to avoid over-prioritizing learning the prompt. 
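+   *
+   * For example, at the default weight of 0.01, one hundred prompt tokens contribute
+   * roughly as much to the training loss as a single completion token.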
+   */
+  prompt_loss_weight?: float64 | null = 0.01;
+
+  /**
+   * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the
+   * validation set at the end of every epoch. These metrics can be viewed in the
+   * [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
+   *
+   * In order to compute classification metrics, you must provide a `validation_file`. Additionally,
+   * you must specify `classification_n_classes` for multiclass classification or
+   * `classification_positive_class` for binary classification.
+   */
+  compute_classification_metrics?: boolean | null = false;
+
+  /**
+   * The number of classes in a classification task.
+   *
+   * This parameter is required for multiclass classification.
+   */
+  classification_n_classes?: safeint | null = null;
+
+  /**
+   * The positive class in binary classification.
+   *
+   * This parameter is needed to generate precision, recall, and F1 metrics when doing binary
+   * classification.
+   */
+  classification_positive_class?: string | null = null;
+
+  /**
+   * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score
+   * is a generalization of F-1 score. This is only used for binary classification.
+   *
+   * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger
+   * beta score puts more weight on recall and less on precision. A smaller beta score puts more
+   * weight on precision and less on recall.
+   */
+  classification_betas?: float64[] | null = null;
+
+  /**
+   * A string of up to 18 characters that will be added to your fine-tuned model name.
+   *
+   * For example, a `suffix` of "custom-model-name" would produce a model name like
+   * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
+   */
+  suffix?: SuffixString | null = null;
+}
+
+@minLength(1)
+@maxLength(40)
+scalar SuffixString extends string;
+
+model ListFineTunesResponse {
+  object: string;
+  data: FineTune[];
+}
+
+model ListFineTuneEventsResponse {
+  object: string;
+  data: FineTuneEvent[];
+}
+
+model ListPaginatedFineTuningJobsResponse {
+  object: string;
+  data: FineTuningJob[];
+  has_more: boolean;
+}
diff --git a/packages/typespec-test/test/openai_generic/spec/fine-tuning/operations.tsp b/packages/typespec-test/test/openai_generic/spec/fine-tuning/operations.tsp
new file mode 100644
index 0000000000..15491f62e8
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/fine-tuning/operations.tsp
@@ -0,0 +1,191 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/fine_tuning")
+namespace FineTuning {
+  @route("jobs")
+  interface Jobs {
+    /**
+     * Creates a job that fine-tunes a specified model from a given dataset.
+     *
+     * Response includes details of the enqueued job including job status and the name of the
+     * fine-tuned models once complete.
+     *
+     * [Learn more about fine-tuning](/docs/guides/fine-tuning)
+     */
+    @post
+    @tag("OpenAI")
+    @operationId("createFineTuningJob")
+    createFineTuningJob(
+      @body job: CreateFineTuningJobRequest,
+    ): FineTuningJob | ErrorResponse;
+
+    @get
+    @tag("OpenAI")
+    @operationId("listPaginatedFineTuningJobs")
+    listPaginatedFineTuningJobs(
+      /** Identifier for the last job from the previous pagination request. */
+      @query after?: string,
+
+      /** Number of fine-tuning jobs to retrieve. 
*/ + @query limit?: safeint = 20, + ): ListPaginatedFineTuningJobsResponse | ErrorResponse; + + @summary(""" + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + """) + @route("{fine_tuning_job_id}") + @tag("OpenAI") + @get + @operationId("retrieveFineTuningJob") + retrieveFineTuningJob( + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; + + @summary("Get status updates for a fine-tuning job.") + @tag("OpenAI") + @route("{fine_tuning_job_id}/events") + @get + @operationId("listFineTuningEvents") + listFineTuningEvents( + /** The ID of the fine-tuning job to get events for. */ + @path fine_tuning_job_id: string, + + /** Identifier for the last event from the previous pagination request. */ + @query after?: string, + + /** Number of events to retrieve. */ + @query limit?: integer = 20, + ): ListFineTuningJobEventsResponse | ErrorResponse; + + @summary("Immediately cancel a fine-tune job.") + @tag("OpenAI") + @route("{fine_tuning_job_id}/cancel") + @post + @operationId("cancelFineTuningJob") + cancelFineTuningJob( + /** The ID of the fine-tuning job to cancel. */ + @path fine_tuning_job_id: string, + ): FineTuningJob | ErrorResponse; + } +} + +@route("/fine-tunes") +interface FineTunes { + #deprecated "deprecated" + @post + @tag("OpenAI") + @summary(""" + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + """) + @operationId("createFineTune") + createFineTune( + @body fine_tune: CreateFineTuneRequest, + ): FineTune | ErrorResponse; + + #deprecated "deprecated" + @get + @tag("OpenAI") + @summary("List your organization's fine-tuning jobs") + @operationId("listFineTunes") + listFineTunes(): ListFineTunesResponse | ErrorResponse; + + #deprecated "deprecated" + @get + @route("{fine_tune_id}") + @tag("OpenAI") + @summary(""" + Gets info about the fine-tune job. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + """) + @operationId("retrieveFineTune") + retrieveFineTune( + /** The ID of the fine-tune job */ + @path fine_tune_id: string, + ): FineTune | ErrorResponse; + + #deprecated "deprecated" + @route("{fine_tune_id}/events") + @get + @tag("OpenAI") + @summary("Get fine-grained status updates for a fine-tune job.") + @operationId("listFineTuneEvents") + listFineTuneEvents( + /** The ID of the fine-tune job to get events for. */ + @path fine_tune_id: string, + + /** + * Whether to stream events for the fine-tune job. If set to true, events will be sent as + * data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available. The stream will terminate with a `data: [DONE]` message when the + * job is finished (succeeded, cancelled, or failed). + * + * If set to false, only events generated so far will be returned. 
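+     *
+     * A non-streaming sketch (assuming a hypothetical `client` object that wraps this
+     * operation; the name is illustrative, not generated from this spec):
+     * ```ts
+     * const events = await client.listFineTuneEvents(fineTuneId, { stream: false });
+     * for (const e of events.data) console.log(e.level, e.message);
+     * ```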
+ */ + @query stream?: boolean = false, + ): ListFineTuneEventsResponse | ErrorResponse; + + #deprecated "deprecated" + @route("{fine_tune_id}/cancel") + @post + @tag("OpenAI") + @summary("Immediately cancel a fine-tune job.") + @operationId("cancelFineTune") + cancelFineTune( + /** The ID of the fine-tune job to cancel */ + @path fine_tune_id: string, + ): FineTune | ErrorResponse; +} + +@route("/models") +interface Models { + @get + @tag("OpenAI") + @summary(""" + Lists the currently available models, and provides basic information about each one such as the + owner and availability. + """) + @operationId("listModels") + listModels(): ListModelsResponse | ErrorResponse; + + @get + @route("{model}") + @operationId("retrieveModel") + @tag("OpenAI") + @summary(""" + Retrieves a model instance, providing basic information about the model such as the owner and + permissioning. + """) + retrieve( + /** The ID of the model to use for this request. */ + @path `model`: string, + ): Model | ErrorResponse; + + @delete + @route("{model}") + @operationId("deleteModel") + @tag("OpenAI") + @summary(""" + Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + """) + delete( + /** The model to delete */ + @path `model`: string, + ): DeleteModelResponse | ErrorResponse; +} diff --git a/packages/typespec-test/test/openai_generic/spec/images/main.tsp b/packages/typespec-test/test/openai_generic/spec/images/main.tsp new file mode 100644 index 0000000000..144c4aeaff --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/images/main.tsp @@ -0,0 +1 @@ +import "./operations.tsp"; diff --git a/packages/typespec-test/test/openai_generic/spec/images/models.tsp b/packages/typespec-test/test/openai_generic/spec/images/models.tsp new file mode 100644 index 0000000000..3d7020b51d --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/images/models.tsp @@ -0,0 +1,80 @@ +import "../common/models.tsp"; + +namespace OpenAI; +using TypeSpec.OpenAPI; + +alias SharedImageProperties = { + /** The number of images to generate. Must be between 1 and 10. */ + n?: ImagesN | null = 1; + + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: IMAGE_SIZES | null = "1024x1024"; + + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + response_format?: "url" | "b64_json" | null = "url"; + + user?: User; +}; + +model CreateImageRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + + ...SharedImageProperties; +} + +model ImagesResponse { + @encode("unixTimestamp", int32) + created: utcDateTime; + + data: Image[]; +} + +alias IMAGE_SIZES = "256x256" | "512x512" | "1024x1024"; + +/** Represents the url or the content of an image generated by the OpenAI API. */ +model Image { + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: url; + + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + @encode("base64", string) + b64_json?: bytes; +} + +model CreateImageEditRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + * provided, image must have transparency, which will be used as the mask. 
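+   *
+   * An illustrative call using the `openai` Node package (one possible client; not
+   * generated from this spec):
+   * ```ts
+   * import fs from "fs";
+   * import OpenAI from "openai";
+   *
+   * const openai = new OpenAI();
+   * const result = await openai.images.edit({
+   *   image: fs.createReadStream("input.png"),
+   *   mask: fs.createReadStream("mask.png"),
+   *   prompt: "Add a red hat",
+   * });
+   * ```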
+   */
+  @encode("binary")
+  image: bytes;
+
+  /**
+   * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where
+   * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions
+   * as `image`.
+   */
+  @encode("binary")
+  mask?: bytes;
+
+  ...SharedImageProperties;
+}
+
+model CreateImageVariationRequest {
+  /**
+   * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB,
+   * and square.
+   */
+  @encode("binary")
+  image: bytes;
+
+  ...SharedImageProperties;
+}
+
+@minValue(1)
+@maxValue(10)
+scalar ImagesN extends safeint;
diff --git a/packages/typespec-test/test/openai_generic/spec/images/operations.tsp b/packages/typespec-test/test/openai_generic/spec/images/operations.tsp
new file mode 100644
index 0000000000..09203262b1
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/images/operations.tsp
@@ -0,0 +1,40 @@
+import "@typespec/http";
+import "@typespec/openapi";
+
+import "../common/errors.tsp";
+import "./models.tsp";
+
+using TypeSpec.Http;
+using TypeSpec.OpenAPI;
+
+namespace OpenAI;
+
+@route("/images")
+interface Images {
+  @route("generations")
+  @post
+  @operationId("createImage")
+  @tag("OpenAI")
+  @summary("Creates an image given a prompt")
+  createImage(@body image: CreateImageRequest): ImagesResponse | ErrorResponse;
+
+  @route("edits")
+  @post
+  @operationId("createImageEdit")
+  @tag("OpenAI")
+  @summary("Creates an edited or extended image given an original image and a prompt.")
+  createImageEdit(
+    @header contentType: "multipart/form-data",
+    @body image: CreateImageEditRequest,
+  ): ImagesResponse | ErrorResponse;
+
+  @route("variations")
+  @post
+  @operationId("createImageVariation")
+  @tag("OpenAI")
+  @summary("Creates a variation of a given image.")
+  createImageVariation(
+    @header contentType: "multipart/form-data",
+    @body image: CreateImageVariationRequest,
+  ): ImagesResponse | ErrorResponse;
+}
diff --git a/packages/typespec-test/test/openai_generic/spec/main.tsp b/packages/typespec-test/test/openai_generic/spec/main.tsp
new file mode 100644
index 0000000000..2ea8cbbc35
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/main.tsp
@@ -0,0 +1,32 @@
+import "@typespec/http";
+import "@typespec/openapi3";
+import "@typespec/openapi";
+
+import "./audio";
+import "./completions";
+import "./edits";
+import "./embeddings";
+import "./files";
+import "./fine-tuning";
+import "./images";
+import "./moderation";
+
+using TypeSpec.Http;
+
+/** The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. 
*/
+@service({
+  title: "OpenAI API",
+  termsOfService: "https://openai.com/policies/terms-of-use",
+  contact: {
+    name: "OpenAI Support",
+    url: "https://help.openai.com",
+  },
+  license: {
+    name: "MIT",
+    url: "https://github.com/openai/openai-openapi/blob/master/LICENSE",
+  },
+  version: "2.0.0",
+})
+@server("https://api.openai.com/v1", "OpenAI Endpoint")
+@useAuth(BearerAuth)
+namespace OpenAI;
diff --git a/packages/typespec-test/test/openai_generic/spec/moderation/main.tsp b/packages/typespec-test/test/openai_generic/spec/moderation/main.tsp
new file mode 100644
index 0000000000..144c4aeaff
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/moderation/main.tsp
@@ -0,0 +1 @@
+import "./operations.tsp";
diff --git a/packages/typespec-test/test/openai_generic/spec/moderation/models.tsp b/packages/typespec-test/test/openai_generic/spec/moderation/models.tsp
new file mode 100644
index 0000000000..f47b21be1d
--- /dev/null
+++ b/packages/typespec-test/test/openai_generic/spec/moderation/models.tsp
@@ -0,0 +1,123 @@
+namespace OpenAI;
+using TypeSpec.OpenAPI;
+
+model CreateModerationRequest {
+  /** The input text to classify */
+  input: string | string[];
+
+  /**
+   * Two content moderation models are available: `text-moderation-stable` and
+   * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically
+   * upgraded over time. This ensures you are always using our most accurate model. If you use
+   * `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy
+   * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+   */
+  @extension("x-oaiTypeLabel", "string")
+  `model`?: string | "text-moderation-latest" | "text-moderation-stable" = "text-moderation-latest";
+}
+
+model CreateModerationResponse {
+  /** The unique identifier for the moderation request. */
+  id: string;
+
+  /** The model used to generate the moderation results. */
+  `model`: string;
+
+  /** A list of moderation objects. */
+  results: {
+    /** Whether the content violates [OpenAI's usage policies](/policies/usage-policies). */
+    flagged: boolean;
+
+    /** A list of the categories, and whether they are flagged or not. */
+    categories: {
+      /**
+       * Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
+       * religion, nationality, sexual orientation, disability status, or caste. Hateful content
+       * aimed at non-protected groups (e.g., chess players) is harassment.
+       */
+      hate: boolean;
+
+      /**
+       * Hateful content that also includes violence or serious harm towards the targeted group
+       * based on race, gender, ethnicity, religion, nationality, sexual orientation, disability
+       * status, or caste.
+       */
+      `hate/threatening`: boolean;
+
+      /** Content that expresses, incites, or promotes harassing language towards any target. */
+      harassment: boolean;
+
+      /** Harassment content that also includes violence or serious harm towards any target. */
+      `harassment/threatening`: boolean;
+
+      /**
+       * Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting,
+       * and eating disorders.
+       */
+      `self-harm`: boolean;
+
+      /**
+       * Content where the speaker expresses that they are engaging or intend to engage in acts of
+       * self-harm, such as suicide, cutting, and eating disorders. 
+ */ + `self-harm/intent`: boolean; + + /** + * Content that encourages performing acts of self-harm, such as suicide, cutting, and eating + * disorders, or that gives instructions or advice on how to commit such acts. + */ + `self-harm/instructive`: boolean; + + /** + * Content meant to arouse sexual excitement, such as the description of sexual activity, or + * that promotes sexual services (excluding sex education and wellness). + */ + sexual: boolean; + + /** Sexual content that includes an individual who is under 18 years old. */ + `sexual/minors`: boolean; + + /** Content that depicts death, violence, or physical injury. */ + violence: boolean; + + /** Content that depicts death, violence, or physical injury in graphic detail. */ + `violence/graphic`: boolean; + }; + + /** A list of the categories along with their scores as predicted by model. */ + category_scores: { + /** The score for the category 'hate'. */ + hate: float64; + + /** The score for the category 'hate/threatening'. */ + `hate/threatening`: float64; + + /** The score for the category 'harassment'. */ + harassment: float64; + + /** The score for the category 'harassment/threatening'. */ + `harassment/threatening`: float64; + + /** The score for the category 'self-harm'. */ + `self-harm`: float64; + + /** The score for the category 'self-harm/intent'. */ + `self-harm/intent`: float64; + + /** The score for the category 'self-harm/instructive'. */ + `self-harm/instructive`: float64; + + /** The score for the category 'sexual'. */ + sexual: float64; + + /** The score for the category 'sexual/minors'. */ + `sexual/minors`: float64; + + /** The score for the category 'violence'. */ + violence: float64; + + /** The score for the category 'violence/graphic'. */ + `violence/graphic`: float64; + }; + }[]; +} diff --git a/packages/typespec-test/test/openai_generic/spec/moderation/operations.tsp b/packages/typespec-test/test/openai_generic/spec/moderation/operations.tsp new file mode 100644 index 0000000000..5f29bc3be8 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/moderation/operations.tsp @@ -0,0 +1,20 @@ +import "@typespec/http"; +import "@typespec/openapi"; + +import "../common/errors.tsp"; +import "./models.tsp"; + +using TypeSpec.Http; +using TypeSpec.OpenAPI; + +namespace OpenAI; + +@route("/moderations") +interface Moderations { + @operationId("createModeration") + @tag("OpenAI") + @summary("Classifies if text violates OpenAI's Content Policy") + createModeration( + @body content: CreateModerationRequest, + ): CreateModerationResponse | ErrorResponse; +} diff --git a/packages/typespec-test/test/openai_generic/spec/tsp-output/@typespec/openapi3/openapi.yaml b/packages/typespec-test/test/openai_generic/spec/tsp-output/@typespec/openapi3/openapi.yaml new file mode 100644 index 0000000000..d374906803 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/tsp-output/@typespec/openapi3/openapi.yaml @@ -0,0 +1,2963 @@ +openapi: 3.0.0 +info: + title: OpenAI API + version: 2.0.0 + description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. +tags: + - name: OpenAI +paths: + /audio/transcriptions: + post: + tags: + - OpenAI + operationId: createTranscription + summary: Transcribes audio into the input language. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateTranscriptionResponse' + default: + description: An unexpected error response. 
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ErrorResponse'
+      requestBody:
+        required: true
+        content:
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/CreateTranscriptionRequest'
+  /audio/translations:
+    post:
+      tags:
+        - OpenAI
+      operationId: createTranslation
+      summary: Translates audio into English.
+      parameters: []
+      responses:
+        '200':
+          description: The request has succeeded.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/CreateTranslationResponse'
+        default:
+          description: An unexpected error response.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ErrorResponse'
+      requestBody:
+        required: true
+        content:
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/CreateTranslationRequest'
+  /chat/completions:
+    post:
+      tags:
+        - OpenAI
+      operationId: createChatCompletion
+      parameters: []
+      responses:
+        '200':
+          description: The request has succeeded.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/CreateChatCompletionResponse'
+        default:
+          description: An unexpected error response.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ErrorResponse'
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/CreateChatCompletionRequest'
+  /completions:
+    post:
+      tags:
+        - OpenAI
+      operationId: createCompletion
+      parameters: []
+      responses:
+        '200':
+          description: The request has succeeded.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/CreateCompletionResponse'
+        default:
+          description: An unexpected error response.
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ErrorResponse'
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/CreateCompletionRequest'
+      x-oaiMeta:
+        name: Create chat completion
+        group: chat
+        returns: |-
+          Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of
+          [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.
+        path: create
+        examples:
+          - title: No streaming
+            request:
+              curl: |-
+                curl https://api.openai.com/v1/chat/completions \
+                  -H "Content-Type: application/json" \
+                  -H "Authorization: Bearer $OPENAI_API_KEY" \
+                  -d '{
+                    "model": "VAR_model_id",
+                    "messages": [
+                      {
+                        "role": "system",
+                        "content": "You are a helpful assistant."
+                      },
+                      {
+                        "role": "user",
+                        "content": "Hello!"
+ } + ] + python: |- + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "string" }], + model: "VAR_model_id", + }); + + console.log(completion.choices[0]); + } + + main(); + response: |- + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": " + + Hello there, how may I assist you today?", + }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Streaming + request: + curl: |- + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ], + "stream": true + }' + python: |- + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + for chunk in completion: + print(chunk.choices[0].delta) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + main(); + response: |- + { + "id": "chatcmpl-123", + "object": "chat.completion.chunk", + "created": 1677652288, + "model": "gpt-3.5-turbo", + "choices": [{ + "index": 0, + "delta": { + "content": "Hello", + }, + "finish_reason": "stop" + }] + } + /edits: + post: + tags: + - OpenAI + operationId: createEdit + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEditResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEditRequest' + deprecated: true + /embeddings: + post: + tags: + - OpenAI + operationId: createEmbedding + summary: Creates an embedding vector representing the input text. + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + default: + description: An unexpected error response. 
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreateEmbeddingRequest'
+ /files:
+ get:
+ tags:
+ - OpenAI
+ operationId: listFiles
+ summary: Returns a list of files that belong to the user's organization.
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListFilesResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ post:
+ tags:
+ - OpenAI
+ operationId: createFile
+ summary: Upload a file that can be used across various endpoints/features.
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenAIFile'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ multipart/form-data:
+ schema:
+ $ref: '#/components/schemas/CreateFileRequest'
+ /files/{file_id}:
+ get:
+ tags:
+ - OpenAI
+ operationId: retrieveFile
+ summary: Returns information about a specific file.
+ parameters:
+ - name: file_id
+ in: path
+ required: true
+ description: The ID of the file to use for this request.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/OpenAIFile'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ delete:
+ tags:
+ - OpenAI
+ operationId: deleteFile
+ summary: Delete a file
+ parameters:
+ - name: file_id
+ in: path
+ required: true
+ description: The ID of the file to use for this request.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteFileResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ /files/{file_id}/content:
+ get:
+ tags:
+ - OpenAI
+ operationId: downloadFile
+ summary: Returns the contents of the specified file.
+ parameters:
+ - name: file_id
+ in: path
+ required: true
+ description: The ID of the file to use for this request.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ type: string
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ /fine-tunes:
+ post:
+ tags:
+ - OpenAI
+ operationId: createFineTune
+ summary: |-
+ Creates a job that fine-tunes a specified model from a given dataset.
+
+ Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
+
+ [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning)
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/FineTune'
+ default:
+ description: An unexpected error response.
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuneRequest' + deprecated: true + get: + tags: + - OpenAI + operationId: listFineTunes + summary: List your organization's fine-tuning jobs + parameters: [] + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTunesResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}: + get: + tags: + - OpenAI + operationId: retrieveFineTune + summary: |- + Gets info about the fine-tune job. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}/cancel: + post: + tags: + - OpenAI + operationId: cancelFineTune + summary: Immediately cancel a fine-tune job. + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job to cancel + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTune' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine-tunes/{fine_tune_id}/events: + get: + tags: + - OpenAI + operationId: listFineTuneEvents + summary: Get fine-grained status updates for a fine-tune job. + parameters: + - name: fine_tune_id + in: path + required: true + description: The ID of the fine-tune job to get events for. + schema: + type: string + - name: stream + in: query + required: false + description: |- + Whether to stream events for the fine-tune job. If set to true, events will be sent as + data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available. The stream will terminate with a `data: [DONE]` message when the + job is finished (succeeded, cancelled, or failed). + + If set to false, only events generated so far will be returned. + schema: + type: boolean + default: false + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuneEventsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + deprecated: true + /fine_tuning/jobs: + post: + tags: + - OpenAI + operationId: createFineTuningJob + description: |- + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the + fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: [] + responses: + '200': + description: The request has succeeded. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateFineTuningJobRequest' + get: + tags: + - OpenAI + operationId: listPaginatedFineTuningJobs + parameters: + - name: after + in: query + required: false + description: Identifier for the last job from the previous pagination request. + schema: + type: string + - name: limit + in: query + required: false + description: Number of fine-tuning jobs to retrieve. + schema: + type: integer + format: int64 + default: 20 + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPaginatedFineTuningJobsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + tags: + - OpenAI + operationId: retrieveFineTuningJob + summary: |- + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - name: fine_tuning_job_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}/cancel: + post: + tags: + - OpenAI + operationId: cancelFineTuningJob + summary: Immediately cancel a fine-tune job. + parameters: + - name: fine_tuning_job_id + in: path + required: true + description: The ID of the fine-tuning job to cancel. + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/FineTuningJob' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + tags: + - OpenAI + operationId: listFineTuningEvents + summary: Get status updates for a fine-tuning job. + parameters: + - name: fine_tuning_job_id + in: path + required: true + description: The ID of the fine-tuning job to get events for. + schema: + type: string + - name: after + in: query + required: false + description: Identifier for the last event from the previous pagination request. + schema: + type: string + - name: limit + in: query + required: false + description: Number of events to retrieve. + schema: + type: integer + default: 20 + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuningJobEventsResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /images/edits: + post: + tags: + - OpenAI + operationId: createImageEdit + summary: Creates an edited or extended image given an original image and a prompt. + parameters: [] + responses: + '200': + description: The request has succeeded. 
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ImagesResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ multipart/form-data:
+ schema:
+ $ref: '#/components/schemas/CreateImageEditRequest'
+ /images/generations:
+ post:
+ tags:
+ - OpenAI
+ operationId: createImage
+ summary: Creates an image given a prompt.
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ImagesResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreateImageRequest'
+ /images/variations:
+ post:
+ tags:
+ - OpenAI
+ operationId: createImageVariation
+ summary: Creates a variation of a given image.
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ImagesResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ multipart/form-data:
+ schema:
+ $ref: '#/components/schemas/CreateImageVariationRequest'
+ /models:
+ get:
+ tags:
+ - OpenAI
+ operationId: listModels
+ summary: |-
+ Lists the currently available models, and provides basic information about each one such as the
+ owner and availability.
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ListModelsResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ /models/{model}:
+ get:
+ tags:
+ - OpenAI
+ operationId: retrieveModel
+ summary: |-
+ Retrieves a model instance, providing basic information about the model such as the owner and
+ permissioning.
+ parameters:
+ - name: model
+ in: path
+ required: true
+ description: The ID of the model to use for this request.
+ schema:
+ type: string
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Model'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ delete:
+ tags:
+ - OpenAI
+ operationId: deleteModel
+ summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model.
+ parameters:
+ - name: model
+ in: path
+ required: true
+ description: The model to delete
+ schema:
+ type: string
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DeleteModelResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ /moderations:
+ post:
+ tags:
+ - OpenAI
+ operationId: createModeration
+ summary: Classifies if text violates OpenAI's Content Policy
+ parameters: []
+ responses:
+ '200':
+ description: The request has succeeded.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreateModerationResponse'
+ default:
+ description: An unexpected error response.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/CreateModerationRequest'
+security:
+ - BearerAuth: []
+components:
+ schemas:
+ ChatCompletionFunctionCallOption:
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ type: string
+ description: The name of the function to call.
+ ChatCompletionFunctionParameters:
+ type: object
+ additionalProperties: {}
+ ChatCompletionFunctions:
+ type: object
+ required:
+ - name
+ - parameters
+ properties:
+ name:
+ type: string
+ description: |-
+ The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and
+ dashes, with a maximum length of 64.
+ description:
+ type: string
+ description: |-
+ A description of what the function does, used by the model to choose when and how to call the
+ function.
+ parameters:
+ allOf:
+ - $ref: '#/components/schemas/ChatCompletionFunctionParameters'
+ description: |-
+ The parameters the function accepts, described as a JSON Schema object. See the
+ [guide](/docs/guides/gpt/function-calling) for examples, and the
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation
+ about the format. To describe a function that accepts no parameters, provide the value
+ `{"type": "object", "properties": {}}`.
+ ChatCompletionRequestMessage:
+ type: object
+ required:
+ - role
+ - content
+ properties:
+ role:
+ type: string
+ enum:
+ - system
+ - user
+ - assistant
+ - function
+ description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
+ content:
+ type: string
+ nullable: true
+ description: |-
+ The contents of the message. `content` is required for all messages, and may be null for
+ assistant messages with function calls.
+ name:
+ type: string
+ description: |-
+ The name of the author of this message. `name` is required if role is `function`, and it
+ should be the name of the function whose response is in the `content`. May contain a-z,
+ A-Z, 0-9, and underscores, with a maximum length of 64 characters.
+ function_call:
+ type: object
+ description: The name and arguments of a function that should be called, as generated by the model.
+ required:
+ - name
+ - arguments
+ properties:
+ name:
+ type: string
+ description: The name of the function to call.
+ arguments:
+ type: string
+ description: |-
+ The arguments to call the function with, as generated by the model in JSON format. Note that
+ the model does not always generate valid JSON, and may hallucinate parameters not defined by
+ your function schema. Validate the arguments in your code before calling your function.
+ ChatCompletionResponseMessage:
+ type: object
+ required:
+ - role
+ - content
+ properties:
+ role:
+ type: string
+ enum:
+ - system
+ - user
+ - assistant
+ - function
+ description: The role of the author of this message.
+ content:
+ type: string
+ nullable: true
+ description: The contents of the message.
+ function_call:
+ type: object
+ description: The name and arguments of a function that should be called, as generated by the model.
+ required:
+ - name
+ - arguments
+ properties:
+ name:
+ type: string
+ description: The name of the function to call.
+ arguments:
+ type: string
+ description: |-
+ The arguments to call the function with, as generated by the model in JSON format. Note that
+ the model does not always generate valid JSON, and may hallucinate parameters not defined by
+ your function schema. Validate the arguments in your code before calling your function.
+ CompletionUsage:
+ type: object
+ description: Usage statistics for the completion request.
+ required:
+ - prompt_tokens
+ - completion_tokens
+ - total_tokens
+ properties:
+ prompt_tokens:
+ type: integer
+ format: int64
+ description: Number of tokens in the prompt.
+ completion_tokens:
+ type: integer
+ format: int64
+ description: Number of tokens in the generated completion.
+ total_tokens:
+ type: integer
+ format: int64
+ description: Total number of tokens used in the request (prompt + completion).
+ CreateChatCompletionRequest:
+ type: object
+ required:
+ - model
+ - messages
+ properties:
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - gpt-4
+ - gpt-4-0314
+ - gpt-4-0613
+ - gpt-4-32k
+ - gpt-4-32k-0314
+ - gpt-4-32k-0613
+ - gpt-3.5-turbo
+ - gpt-3.5-turbo-16k
+ - gpt-3.5-turbo-0301
+ - gpt-3.5-turbo-0613
+ - gpt-3.5-turbo-16k-0613
+ description: |-
+ ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility)
+ table for details on which models work with the Chat API.
+ x-oaiTypeLabel: string
+ messages:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionRequestMessage'
+ description: |-
+ A list of messages comprising the conversation so far.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
+ minItems: 1
+ functions:
+ type: array
+ items:
+ $ref: '#/components/schemas/ChatCompletionFunctions'
+ description: A list of functions the model may generate JSON inputs for.
+ minItems: 1
+ maxItems: 128
+ function_call:
+ anyOf:
+ - type: string
+ enum:
+ - none
+ - auto
+ - $ref: '#/components/schemas/ChatCompletionFunctionCallOption'
+ description: |-
+ Controls how the model responds to function calls. `none` means the model does not call a
+ function, and responds to the end-user. `auto` means the model can pick between an end-user or
+ calling a function. Specifying a particular function via `{"name": "my_function"}` forces the
+ model to call that function. `none` is the default when no functions are present. `auto` is the
+ default if functions are present.
+ temperature:
+ oneOf:
+ - $ref: '#/components/schemas/Temperature'
+ nullable: true
+ description: |-
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic.
+
+ We generally recommend altering this or `top_p` but not both.
+ default: 1
+ top_p:
+ oneOf:
+ - $ref: '#/components/schemas/TopP'
+ nullable: true
+ description: |-
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers
+ the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
+ the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ default: 1
+ n:
+ oneOf:
+ - $ref: '#/components/schemas/N'
+ nullable: true
+ description: |-
+ How many completions to generate for each prompt.
+ **Note:** Because this parameter generates many completions, it can quickly consume your token
+ quota.
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+ default: 1
+ max_tokens:
+ oneOf:
+ - $ref: '#/components/schemas/MaxTokens'
+ nullable: true
+ description: |-
+ The maximum number of [tokens](/tokenizer) to generate in the completion.
+
+ The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+ for counting tokens.
+ default: 16
+ stop:
+ allOf:
+ - $ref: '#/components/schemas/Stop'
+ description: Up to 4 sequences where the API will stop generating further tokens.
+ default: null
+ presence_penalty:
+ oneOf:
+ - $ref: '#/components/schemas/Penalty'
+ nullable: true
+ description: |-
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+ in the text so far, increasing the model's likelihood to talk about new topics.
+
+ [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+ frequency_penalty:
+ oneOf:
+ - $ref: '#/components/schemas/Penalty'
+ nullable: true
+ description: |-
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+ frequency in the text so far, decreasing the model's likelihood to repeat the same line
+ verbatim.
+
+ [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+ logit_bias:
+ type: object
+ description: |-
+ Modify the likelihood of specified tokens appearing in the completion.
+ Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an
+ associated bias value from -100 to 100. Mathematically, the bias is added to the logits
+ generated by the model prior to sampling. The exact effect will vary per model, but values
+ between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+ additionalProperties:
+ type: integer
+ format: int64
+ nullable: true
+ x-oaiTypeLabel: map
+ user:
+ allOf:
+ - $ref: '#/components/schemas/User'
+ description: |-
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+ abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ stream:
+ type: boolean
+ nullable: true
+ description: |-
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ as they become available, with the stream terminated by a `data: [DONE]` message.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
+ default: false
+ CreateChatCompletionResponse:
+ type: object
+ description: Represents a chat completion response returned by the model, based on the provided input.
+ required:
+ - id
+ - object
+ - created
+ - model
+ - choices
+ properties:
+ id:
+ type: string
+ description: A unique identifier for the chat completion.
+ object:
+ type: string
+ description: The object type, which is always `chat.completion`.
+ created:
+ type: integer
+ format: unixtime
+ description: The Unix timestamp (in seconds) of when the chat completion was created.
+ model:
+ type: string
+ description: The model used for the chat completion.
+ choices: + type: array + items: + type: object + required: + - index + - message + - finish_reason + properties: + index: + type: integer + format: int64 + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + finish_reason: + type: string + enum: + - stop + - length + - function_call + - content_filter + description: |- + The reason the model stopped generating tokens. This will be `stop` if the model hit a + natural stop point or a provided stop sequence, `length` if the maximum number of tokens + specified in the request was reached, `content_filter` if the content was omitted due to + a flag from our content filters, or `function_call` if the model called a function. + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + usage: + $ref: '#/components/schemas/CompletionUsage' + x-oaiMeta: + name: The chat completion object + group: chat + example: '' + CreateCompletionRequest: + type: object + required: + - model + - prompt + properties: + model: + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - text-davinci-003 + - text-davinci-002 + - text-davinci-001 + - code-davinci-002 + - text-curie-001 + - text-babbage-001 + - text-ada-001 + description: |- + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + see all of your available models, or see our [Model overview](/docs/models/overview) for + descriptions of them. + x-oaiTypeLabel: string + prompt: + allOf: + - $ref: '#/components/schemas/Prompt' + description: |- + The prompt(s) to generate completions for, encoded as a string, array of strings, array of + tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a + prompt is not specified the model will generate as if from the beginning of a new document. + default: <|endoftext|> + suffix: + type: string + nullable: true + description: The suffix that comes after a completion of inserted text. + default: null + temperature: + oneOf: + - $ref: '#/components/schemas/Temperature' + nullable: true + description: |- + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + default: 1 + top_p: + oneOf: + - $ref: '#/components/schemas/TopP' + nullable: true + description: |- + An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + default: 1 + n: + oneOf: + - $ref: '#/components/schemas/N' + nullable: true + description: |- + How many completions to generate for each prompt. + **Note:** Because this parameter generates many completions, it can quickly consume your token + quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + default: 1 + max_tokens: + oneOf: + - $ref: '#/components/schemas/MaxTokens' + nullable: true + description: |- + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+ for counting tokens.
+ default: 16
+ stop:
+ allOf:
+ - $ref: '#/components/schemas/Stop'
+ description: Up to 4 sequences where the API will stop generating further tokens.
+ default: null
+ presence_penalty:
+ oneOf:
+ - $ref: '#/components/schemas/Penalty'
+ nullable: true
+ description: |-
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
+ in the text so far, increasing the model's likelihood to talk about new topics.
+
+ [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+ frequency_penalty:
+ oneOf:
+ - $ref: '#/components/schemas/Penalty'
+ nullable: true
+ description: |-
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+ frequency in the text so far, decreasing the model's likelihood to repeat the same line
+ verbatim.
+
+ [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
+ logit_bias:
+ type: object
+ description: |-
+ Modify the likelihood of specified tokens appearing in the completion.
+ Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an
+ associated bias value from -100 to 100. Mathematically, the bias is added to the logits
+ generated by the model prior to sampling. The exact effect will vary per model, but values
+ between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+ additionalProperties:
+ type: integer
+ format: int64
+ nullable: true
+ x-oaiTypeLabel: map
+ user:
+ allOf:
+ - $ref: '#/components/schemas/User'
+ description: |-
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect
+ abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ stream:
+ type: boolean
+ nullable: true
+ description: |-
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+ as they become available, with the stream terminated by a `data: [DONE]` message.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
+ default: false
+ logprobs:
+ type: integer
+ format: int64
+ nullable: true
+ description: |-
+ Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens.
+ For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The
+ API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1`
+ elements in the response.
+
+ The maximum value for `logprobs` is 5.
+ default: null
+ echo:
+ type: boolean
+ nullable: true
+ description: Echo back the prompt in addition to the completion.
+ default: false
+ best_of:
+ type: integer
+ format: int64
+ nullable: true
+ description: |-
+ Generates `best_of` completions server-side and returns the "best" (the one with the highest
+ log probability per token). Results cannot be streamed.
+
+ When used with `n`, `best_of` controls the number of candidate completions and `n` specifies
+ how many to return – `best_of` must be greater than `n`.
+
+ **Note:** Because this parameter generates many completions, it can quickly consume your token
+ quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+ default: 1
+ CreateCompletionResponse:
+ type: object
+ description: |-
+ Represents a completion response from the API. Note: both the streamed and non-streamed response
+ objects share the same shape (unlike the chat endpoint).
+ required:
+ - id
+ - object
+ - created
+ - model
+ - choices
+ properties:
+ id:
+ type: string
+ description: A unique identifier for the completion.
+ object:
+ type: string
+ description: The object type, which is always `text_completion`.
+ created:
+ type: integer
+ format: unixtime
+ description: The Unix timestamp (in seconds) of when the completion was created.
+ model:
+ type: string
+ description: The model used for the completion.
+ choices:
+ type: array
+ items:
+ type: object
+ required:
+ - index
+ - text
+ - logprobs
+ - finish_reason
+ properties:
+ index:
+ type: integer
+ format: int64
+ text:
+ type: string
+ logprobs:
+ type: object
+ required:
+ - tokens
+ - token_logprobs
+ - top_logprobs
+ - text_offset
+ properties:
+ tokens:
+ type: array
+ items:
+ type: string
+ token_logprobs:
+ type: array
+ items:
+ type: number
+ format: double
+ top_logprobs:
+ type: array
+ items:
+ type: object
+ additionalProperties:
+ type: integer
+ format: int64
+ text_offset:
+ type: array
+ items:
+ type: integer
+ format: int64
+ nullable: true
+ finish_reason:
+ type: string
+ enum:
+ - stop
+ - length
+ - content_filter
+ description: |-
+ The reason the model stopped generating tokens. This will be `stop` if the model hit a
+ natural stop point or a provided stop sequence, `length` if the maximum number of tokens
+ specified in the request was reached, or `content_filter` if content was omitted due to a
+ flag from our content filters.
+ description: The list of completion choices the model generated for the input.
+ usage:
+ $ref: '#/components/schemas/CompletionUsage'
+ x-oaiMeta:
+ name: The completion object
+ legacy: true
+ example: ''
+ CreateEditRequest:
+ type: object
+ required:
+ - model
+ - instruction
+ properties:
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - text-davinci-edit-001
+ - code-davinci-edit-001
+ description: |-
+ ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001`
+ model with this endpoint.
+ x-oaiTypeLabel: string
+ input:
+ type: string
+ nullable: true
+ description: The input text to use as a starting point for the edit.
+ default: ''
+ instruction:
+ type: string
+ description: The instruction that tells the model how to edit the prompt.
+ n:
+ oneOf:
+ - $ref: '#/components/schemas/EditN'
+ nullable: true
+ description: How many edits to generate for the input and instruction.
+ default: 1
+ temperature:
+ oneOf:
+ - $ref: '#/components/schemas/Temperature'
+ nullable: true
+ description: |-
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic.
+
+ We generally recommend altering this or `top_p` but not both.
+ default: 1
+ top_p:
+ oneOf:
+ - $ref: '#/components/schemas/TopP'
+ nullable: true
+ description: |-
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers
+ the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
+ the top 10% probability mass are considered.
+
+ We generally recommend altering this or `temperature` but not both.
+ default: 1
+ CreateEditResponse:
+ type: object
+ required:
+ - object
+ - created
+ - choices
+ - usage
+ properties:
+ object:
+ type: string
+ enum:
+ - edit
+ description: The object type, which is always `edit`.
+ created:
+ type: integer
+ format: unixtime
+ description: The Unix timestamp (in seconds) of when the edit was created.
+ choices:
+ type: array
+ items:
+ type: object
+ required:
+ - text
+ - index
+ - finish_reason
+ properties:
+ text:
+ type: string
+ description: The edited result.
+ index:
+ type: integer
+ format: int64
+ description: The index of the choice in the list of choices.
+ finish_reason:
+ type: string
+ enum:
+ - stop
+ - length
+ description: |-
+ The reason the model stopped generating tokens. This will be `stop` if the model hit a
+ natural stop point or a provided stop sequence, or `length` if the maximum number of tokens
+ specified in the request was reached.
+ description: A list of edit choices. Can be more than one if `n` is greater than 1.
+ usage:
+ $ref: '#/components/schemas/CompletionUsage'
+ CreateEmbeddingRequest:
+ type: object
+ required:
+ - model
+ - input
+ properties:
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - text-embedding-ada-002
+ description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ x-oaiTypeLabel: string
+ input:
+ anyOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ - $ref: '#/components/schemas/TokenArray'
+ - $ref: '#/components/schemas/TokenArrayArray'
+ description: |-
+ Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a
+ single request, pass an array of strings or array of token arrays. Each input must not exceed
+ the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string.
+ [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
+ for counting tokens.
+ user:
+ $ref: '#/components/schemas/User'
+ CreateEmbeddingResponse:
+ type: object
+ required:
+ - object
+ - model
+ - data
+ - usage
+ properties:
+ object:
+ type: string
+ enum:
+ - embedding
+ description: The object type, which is always "embedding".
+ model:
+ type: string
+ description: The name of the model used to generate the embedding.
+ data:
+ type: array
+ items:
+ $ref: '#/components/schemas/Embedding'
+ description: The list of embeddings generated by the model.
+ usage:
+ type: object
+ description: The usage information for the request.
+ required:
+ - prompt_tokens
+ - total_tokens
+ properties:
+ prompt_tokens:
+ type: integer
+ format: int64
+ description: The number of tokens used by the prompt.
+ total_tokens:
+ type: integer
+ format: int64
+ description: The total number of tokens used by the request.
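The `CreateEmbeddingRequest`/`CreateEmbeddingResponse` pair above is compact enough to exercise end to end. As a rough, illustrative sketch only (not part of the generated client), the following TypeScript assumes a Node 18+ runtime with global `fetch`, an `OPENAI_API_KEY` environment variable, and the `/embeddings` path and bearer-token scheme shown in this document:

```typescript
// Hypothetical caller shaped by the CreateEmbeddingRequest and
// CreateEmbeddingResponse schemas above; the union on `input` mirrors
// the schema's anyOf over a string, string array, token array, or
// array of token arrays.
interface CreateEmbeddingRequest {
  model: string; // e.g. "text-embedding-ada-002"
  input: string | string[] | number[] | number[][];
  user?: string;
}

interface EmbeddingItem {
  index: number;
  object: "embedding";
  embedding: number[];
}

interface CreateEmbeddingResponse {
  object: "embedding";
  model: string;
  data: EmbeddingItem[];
  usage: { prompt_tokens: number; total_tokens: number };
}

// POSTs to the /embeddings operation; OPENAI_API_KEY is an assumed
// environment variable, per the BearerAuth security scheme.
async function createEmbedding(
  request: CreateEmbeddingRequest
): Promise<CreateEmbeddingResponse> {
  const response = await fetch("https://api.openai.com/v1/embeddings", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    },
    body: JSON.stringify(request),
  });
  if (!response.ok) {
    // The `default` response carries an ErrorResponse body.
    throw new Error(`Embeddings request failed with status ${response.status}`);
  }
  return (await response.json()) as CreateEmbeddingResponse;
}
```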
+ CreateFileRequest:
+ type: object
+ required:
+ - file
+ - purpose
+ properties:
+ file:
+ type: string
+ format: binary
+ description: |-
+ Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.
+
+ If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
+ purpose:
+ type: string
+ description: |-
+ The intended purpose of the uploaded documents. Use "fine-tune" for
+ [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the
+ uploaded file.
+ CreateFineTuneRequest:
+ type: object
+ required:
+ - training_file
+ properties:
+ training_file:
+ type: string
+ description: |-
+ The ID of an uploaded file that contains training data.
+
+ See [upload file](/docs/api-reference/files/upload) for how to upload a file.
+
+ Your dataset must be formatted as a JSONL file, where each training example is a JSON object
+ with the keys "prompt" and "completion". Additionally, you must upload your file with the
+ purpose `fine-tune`.
+
+ See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more
+ details.
+ validation_file:
+ type: string
+ nullable: true
+ description: |-
+ The ID of an uploaded file that contains validation data.
+
+ If you provide this file, the data is used to generate validation metrics periodically during
+ fine-tuning. These metrics can be viewed in the
+ [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
+ Your train and validation data should be mutually exclusive.
+
+ Your dataset must be formatted as a JSONL file, where each validation example is a JSON object
+ with the keys "prompt" and "completion". Additionally, you must upload your file with the
+ purpose `fine-tune`.
+
+ See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more
+ details.
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - ada
+ - babbage
+ - curie
+ - davinci
+ nullable: true
+ description: |-
+ The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie",
+ "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more
+ about these models, see the [Models](/docs/models) documentation.
+ x-oaiTypeLabel: string
+ n_epochs:
+ type: integer
+ format: int64
+ nullable: true
+ description: |-
+ The number of epochs to train the model for. An epoch refers to one full cycle through the
+ training dataset.
+ default: 4
+ batch_size:
+ type: integer
+ format: int64
+ nullable: true
+ description: |-
+ The batch size to use for training. The batch size is the number of training examples used to
+ train a single forward and backward pass.
+
+ By default, the batch size will be dynamically configured to be ~0.2% of the number of examples
+ in the training set, capped at 256 - in general, we've found that larger batch sizes tend to
+ work better for larger datasets.
+ default: null
+ learning_rate_multiplier:
+ type: number
+ format: double
+ nullable: true
+ description: |-
+ The learning rate multiplier to use for training. The fine-tuning learning rate is the original
+ learning rate used for pretraining multiplied by this value.
+
+ By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on final
+ `batch_size` (larger learning rates tend to perform better with larger batch sizes). We
+ recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best
+ results.
+ default: null
+ prompt_loss_weight:
+ type: number
+ format: double
+ nullable: true
+ description: |-
+ The weight to use for loss on the prompt tokens. This controls how much the model tries to
+ learn to generate the prompt (as compared to the completion which always has a weight of 1.0),
+ and can add a stabilizing effect to training when completions are short.
+
+ If prompts are extremely long (relative to completions), it may make sense to reduce this
+ weight so as to avoid over-prioritizing learning the prompt.
+ default: 0.01
+ compute_classification_metrics:
+ type: boolean
+ nullable: true
+ description: |-
+ If set, we calculate classification-specific metrics such as accuracy and F-1 score using the
+ validation set at the end of every epoch. These metrics can be viewed in the
+ [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
+
+ In order to compute classification metrics, you must provide a `validation_file`. Additionally,
+ you must specify `classification_n_classes` for multiclass classification or
+ `classification_positive_class` for binary classification.
+ default: false
+ classification_n_classes:
+ type: integer
+ format: int64
+ nullable: true
+ description: |-
+ The number of classes in a classification task.
+
+ This parameter is required for multiclass classification.
+ default: null
+ classification_positive_class:
+ type: string
+ nullable: true
+ description: |-
+ The positive class in binary classification.
+
+ This parameter is needed to generate precision, recall, and F1 metrics when doing binary
+ classification.
+ default: null
+ classification_betas:
+ type: array
+ items:
+ type: number
+ format: double
+ nullable: true
+ description: |-
+ If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score
+ is a generalization of F-1 score. This is only used for binary classification.
+
+ With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger
+ beta score puts more weight on recall and less on precision. A smaller beta score puts more
+ weight on precision and less on recall.
+ default: null
+ suffix:
+ oneOf:
+ - $ref: '#/components/schemas/SuffixString'
+ nullable: true
+ description: |-
+ A string of up to 18 characters that will be added to your fine-tuned model name.
+
+ For example, a `suffix` of "custom-model-name" would produce a model name like
+ `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
+ default: null
+ CreateFineTuningJobRequest:
+ type: object
+ required:
+ - training_file
+ - model
+ properties:
+ training_file:
+ type: string
+ description: |-
+ The ID of an uploaded file that contains training data.
+
+ See [upload file](/docs/api-reference/files/upload) for how to upload a file.
+
+ Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with
+ the purpose `fine-tune`.
+
+ See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+ validation_file:
+ type: string
+ nullable: true
+ description: |-
+ The ID of an uploaded file that contains validation data.
+
+ If you provide this file, the data is used to generate validation metrics periodically during
+ fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should
+ not be present in both train and validation files.
+
+ Your dataset must be formatted as a JSONL file. You must upload your file with the purpose
+ `fine-tune`.
+ + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + model: + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + description: |- + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + x-oaiTypeLabel: string + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + n_epochs: + anyOf: + - type: string + enum: + - auto + - $ref: '#/components/schemas/NEpochs' + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + default: auto + suffix: + oneOf: + - $ref: '#/components/schemas/SuffixString' + nullable: true + description: |- + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like + `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + default: null + CreateImageEditRequest: + type: object + required: + - prompt + - image + properties: + prompt: + type: string + description: A text description of the desired image(s). The maximum length is 1000 characters. + image: + type: string + format: binary + description: |- + The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + provided, image must have transparency, which will be used as the mask. + mask: + type: string + format: binary + description: |- + An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + as `image`. + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + user: + $ref: '#/components/schemas/User' + CreateImageRequest: + type: object + required: + - prompt + properties: + prompt: + type: string + description: A text description of the desired image(s). The maximum length is 1000 characters. + n: + oneOf: + - $ref: '#/components/schemas/ImagesN' + nullable: true + description: The number of images to generate. Must be between 1 and 10. + default: 1 + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + default: 1024x1024 + response_format: + type: string + enum: + - url + - b64_json + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + default: url + user: + $ref: '#/components/schemas/User' + CreateImageVariationRequest: + type: object + required: + - image + properties: + image: + type: string + format: binary + description: |- + The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + and square. 
+ n:
+ oneOf:
+ - $ref: '#/components/schemas/ImagesN'
+ nullable: true
+ description: The number of images to generate. Must be between 1 and 10.
+ default: 1
+ size:
+ type: string
+ enum:
+ - 256x256
+ - 512x512
+ - 1024x1024
+ nullable: true
+ description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
+ default: 1024x1024
+ response_format:
+ type: string
+ enum:
+ - url
+ - b64_json
+ nullable: true
+ description: The format in which the generated images are returned. Must be one of `url` or `b64_json`.
+ default: url
+ user:
+ $ref: '#/components/schemas/User'
+ CreateModerationRequest:
+ type: object
+ required:
+ - input
+ properties:
+ input:
+ anyOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ description: The input text to classify
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - text-moderation-latest
+ - text-moderation-stable
+ description: |-
+ Two content moderation models are available: `text-moderation-stable` and
+ `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically
+ upgraded over time. This ensures you are always using our most accurate model. If you use
+ `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy
+ of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+ x-oaiTypeLabel: string
+ default: text-moderation-latest
+ CreateModerationResponse:
+ type: object
+ required:
+ - id
+ - model
+ - results
+ properties:
+ id:
+ type: string
+ description: The unique identifier for the moderation request.
+ model:
+ type: string
+ description: The model used to generate the moderation results.
+ results:
+ type: array
+ items:
+ type: object
+ required:
+ - flagged
+ - categories
+ - category_scores
+ properties:
+ flagged:
+ type: boolean
+ description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies).
+ categories:
+ type: object
+ description: A list of the categories, and whether they are flagged or not.
+ required:
+ - hate
+ - hate/threatening
+ - harassment
+ - harassment/threatening
+ - self-harm
+ - self-harm/intent
+ - self-harm/instructive
+ - sexual
+ - sexual/minors
+ - violence
+ - violence/graphic
+ properties:
+ hate:
+ type: boolean
+ description: |-
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
+ religion, nationality, sexual orientation, disability status, or caste. Hateful content
+ aimed at non-protected groups (e.g., chess players) is harassment.
+ hate/threatening:
+ type: boolean
+ description: |-
+ Hateful content that also includes violence or serious harm towards the targeted group
+ based on race, gender, ethnicity, religion, nationality, sexual orientation, disability
+ status, or caste.
+ harassment:
+ type: boolean
+ description: Content that expresses, incites, or promotes harassing language towards any target.
+ harassment/threatening:
+ type: boolean
+ description: Harassment content that also includes violence or serious harm towards any target.
+ self-harm:
+ type: boolean
+ description: |-
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting,
+ and eating disorders.
+ self-harm/intent:
+ type: boolean
+ description: |-
+ Content where the speaker expresses that they are engaging or intend to engage in acts of
+ self-harm, such as suicide, cutting, and eating disorders.
+ self-harm/instructive:
+ type: boolean
+ description: |-
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating
+ disorders, or that gives instructions or advice on how to commit such acts.
+ sexual:
+ type: boolean
+ description: |-
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or
+ that promotes sexual services (excluding sex education and wellness).
+ sexual/minors:
+ type: boolean
+ description: Sexual content that includes an individual who is under 18 years old.
+ violence:
+ type: boolean
+ description: Content that depicts death, violence, or physical injury.
+ violence/graphic:
+ type: boolean
+ description: Content that depicts death, violence, or physical injury in graphic detail.
+ category_scores:
+ type: object
+ description: A list of the categories along with their scores as predicted by the model.
+ required:
+ - hate
+ - hate/threatening
+ - harassment
+ - harassment/threatening
+ - self-harm
+ - self-harm/intent
+ - self-harm/instructive
+ - sexual
+ - sexual/minors
+ - violence
+ - violence/graphic
+ properties:
+ hate:
+ type: number
+ format: double
+ description: The score for the category 'hate'.
+ hate/threatening:
+ type: number
+ format: double
+ description: The score for the category 'hate/threatening'.
+ harassment:
+ type: number
+ format: double
+ description: The score for the category 'harassment'.
+ harassment/threatening:
+ type: number
+ format: double
+ description: The score for the category 'harassment/threatening'.
+ self-harm:
+ type: number
+ format: double
+ description: The score for the category 'self-harm'.
+ self-harm/intent:
+ type: number
+ format: double
+ description: The score for the category 'self-harm/intent'.
+ self-harm/instructive:
+ type: number
+ format: double
+ description: The score for the category 'self-harm/instructive'.
+ sexual:
+ type: number
+ format: double
+ description: The score for the category 'sexual'.
+ sexual/minors:
+ type: number
+ format: double
+ description: The score for the category 'sexual/minors'.
+ violence:
+ type: number
+ format: double
+ description: The score for the category 'violence'.
+ violence/graphic:
+ type: number
+ format: double
+ description: The score for the category 'violence/graphic'.
+ description: A list of moderation objects.
+ CreateTranscriptionRequest:
+ type: object
+ required:
+ - file
+ - model
+ properties:
+ file:
+ type: string
+ format: binary
+ description: |-
+ The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4,
+ mpeg, mpga, m4a, ogg, wav, or webm.
+ x-oaiTypeLabel: file
+ model:
+ anyOf:
+ - type: string
+ - type: string
+ enum:
+ - whisper-1
+ description: ID of the model to use. Only `whisper-1` is currently available.
+ x-oaiTypeLabel: string
+ prompt:
+ type: string
+ description: |-
+ An optional text to guide the model's style or continue a previous audio segment. The
+ [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+ response_format:
+ type: string
+ enum:
+ - json
+ - text
+ - srt
+ - verbose_json
+ - vtt
+ description: |-
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+ vtt.
+ default: json
+ temperature:
+ type: number
+ format: double
+ description: |-
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+ random, while lower values like 0.2 will make it more focused and deterministic.
+            the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+            automatically increase the temperature until certain thresholds are hit.
+          minimum: 0
+          maximum: 1
+          default: 0
+        language:
+          type: string
+          description: |-
+            The language of the input audio. Supplying the input language in
+            [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
+            and latency.
+    CreateTranscriptionResponse:
+      type: object
+      required:
+        - text
+      properties:
+        text:
+          type: string
+    CreateTranslationRequest:
+      type: object
+      required:
+        - file
+        - model
+      properties:
+        file:
+          type: string
+          format: binary
+          description: |-
+            The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4,
+            mpeg, mpga, m4a, ogg, wav, or webm.
+          x-oaiTypeLabel: file
+        model:
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - whisper-1
+          description: ID of the model to use. Only `whisper-1` is currently available.
+          x-oaiTypeLabel: string
+        prompt:
+          type: string
+          description: |-
+            Optional text to guide the model's style or continue a previous audio segment. The
+            [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+        response_format:
+          type: string
+          enum:
+            - json
+            - text
+            - srt
+            - verbose_json
+            - vtt
+          description: |-
+            The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+            vtt.
+          default: json
+        temperature:
+          type: number
+          format: double
+          description: |-
+            The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+            random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
+            the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+            automatically increase the temperature until certain thresholds are hit.
+          minimum: 0
+          maximum: 1
+          default: 0
+    CreateTranslationResponse:
+      type: object
+      required:
+        - text
+      properties:
+        text:
+          type: string
+    DeleteFileResponse:
+      type: object
+      required:
+        - id
+        - object
+        - deleted
+      properties:
+        id:
+          type: string
+        object:
+          type: string
+        deleted:
+          type: boolean
+    DeleteModelResponse:
+      type: object
+      required:
+        - id
+        - object
+        - deleted
+      properties:
+        id:
+          type: string
+        object:
+          type: string
+        deleted:
+          type: boolean
+    EditN:
+      type: integer
+      format: int64
+      minimum: 0
+      maximum: 20
+    Embedding:
+      type: object
+      description: Represents an embedding vector returned by the embedding endpoint.
+      required:
+        - index
+        - object
+        - embedding
+      properties:
+        index:
+          type: integer
+          format: int64
+          description: The index of the embedding in the list of embeddings.
+        object:
+          type: string
+          enum:
+            - embedding
+          description: The object type, which is always "embedding".
+        embedding:
+          type: array
+          items:
+            type: number
+            format: double
+          description: |-
+            The embedding vector, which is a list of floats. The length of the vector depends on the model,
+            as listed in the [embedding guide](/docs/guides/embeddings).
+    Error:
+      type: object
+      required:
+        - type
+        - message
+        - param
+        - code
+      properties:
+        type:
+          type: string
+        message:
+          type: string
+        param:
+          type: string
+          nullable: true
+        code:
+          type: string
+          nullable: true
+    ErrorResponse:
+      type: object
+      required:
+        - error
+      properties:
+        error:
+          $ref: '#/components/schemas/Error'
+    FineTune:
+      type: object
+      description: The `FineTune` object represents a legacy fine-tune job that has been created through the API.
+ required: + - id + - object + - created_at + - updated_at + - model + - fine_tuned_model + - organization_id + - status + - hyperparams + - training_files + - validation_files + - result_files + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + object: + type: string + enum: + - fine-tune + description: The object type, which is always "fine-tune". + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + updated_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + model: + type: string + description: The base model that is being fine-tuned. + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. + organization_id: + type: string + description: The organization that owns the fine-tuning job. + status: + type: string + enum: + - created + - running + - succeeded + - failed + - cancelled + description: |- + The current status of the fine-tuning job, which can be either `created`, `running`, + `succeeded`, `failed`, or `cancelled`. + hyperparams: + type: object + description: |- + The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + required: + - n_epochs + - batch_size + - prompt_loss_weight + - learning_rate_multiplier + properties: + n_epochs: + type: integer + format: int64 + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + batch_size: + type: integer + format: int64 + description: |- + The batch size to use for training. The batch size is the number of training examples used to + train a single forward and backward pass. + prompt_loss_weight: + type: number + format: double + description: The weight to use for loss on the prompt tokens. + learning_rate_multiplier: + type: number + format: double + description: The learning rate multiplier to use for training. + compute_classification_metrics: + type: boolean + description: The classification metrics to compute using the validation dataset at the end of every epoch. + classification_positive_class: + type: string + description: The positive class to use for computing classification metrics. + classification_n_classes: + type: integer + format: int64 + description: The number of classes to use for computing classification metrics. + training_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for training. + validation_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The list of files used for validation. + result_files: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + description: The compiled results files for the fine-tuning job. + events: + type: array + items: + $ref: '#/components/schemas/FineTuneEvent' + description: The list of events that have been observed in the lifecycle of the FineTune job. 
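As an editorial aid, the sketch below shows one plausible client-side shape for the `FineTune` schema just defined. It is a hedged illustration only: the interface name, the camelCase renames, and the `unixtime`-to-`Date` mapping are assumptions about emitter conventions, not confirmed output of this generator.

```typescript
// Hypothetical client-side shape for the FineTune schema above.
// Names and the unixtime -> Date mapping are assumptions for illustration.
type FineTuneStatus =
  | "created"
  | "running"
  | "succeeded"
  | "failed"
  | "cancelled";

interface FineTuneSketch {
  /** The object identifier, referenced in the API endpoints. */
  id: string;
  /** The object type, always "fine-tune". */
  object: "fine-tune";
  /** created_at / updated_at carry format: unixtime in the spec. */
  createdAt: Date;
  updatedAt: Date;
  /** The base model being fine-tuned. */
  model: string;
  /** Nullable in the spec while the job is still running. */
  fineTunedModel: string | null;
  organizationId: string;
  status: FineTuneStatus;
  hyperparams: {
    nEpochs: number;
    batchSize: number;
    promptLossWeight: number;
    learningRateMultiplier: number;
    // These three are not listed as required in the schema.
    computeClassificationMetrics?: boolean;
    classificationPositiveClass?: string;
    classificationNClasses?: number;
  };
  /** Placeholders for #/components/schemas/OpenAIFile. */
  trainingFiles: unknown[];
  validationFiles: unknown[];
  resultFiles: unknown[];
  /** Optional: `events` is not in the schema's required list. */
  events?: unknown[];
}
```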
+ FineTuneEvent: + type: object + required: + - object + - created_at + - level + - message + properties: + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + message: + type: string + FineTuningEvent: + type: object + required: + - object + - created_at + - level + - message + properties: + object: + type: string + created_at: + type: integer + format: unixtime + level: + type: string + message: + type: string + data: + type: object + additionalProperties: {} + nullable: true + type: + type: string + enum: + - message + - metrics + FineTuningJob: + type: object + required: + - id + - object + - created_at + - finished_at + - model + - fine_tuned_model + - organization_id + - status + - hyperparameters + - training_file + - validation_file + - result_files + - trained_tokens + - error + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + object: + type: string + enum: + - fine_tuning.job + description: The object type, which is always "fine_tuning.job". + created_at: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + finished_at: + type: string + format: date-time + nullable: true + description: |- + The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + null if the fine-tuning job is still running. + model: + type: string + description: The base model that is being fine-tuned. + fine_tuned_model: + type: string + nullable: true + description: |- + The name of the fine-tuned model that is being created. The value will be null if the + fine-tuning job is still running. + organization_id: + type: string + description: The organization that owns the fine-tuning job. + status: + type: string + enum: + - created + - pending + - running + - succeeded + - failed + - cancelled + description: |- + The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + `succeeded`, `failed`, or `cancelled`. + hyperparameters: + type: object + description: |- + The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + anyOf: + - type: string + enum: + - auto + - $ref: '#/components/schemas/NEpochs' + description: |- + The number of epochs to train the model for. An epoch refers to one full cycle through the + training dataset. + + "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the + number manually, we support any number between 1 and 50 epochs. + default: auto + training_file: + type: string + description: |- + The file ID used for training. You can retrieve the training data with the + [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: |- + The file ID used for validation. You can retrieve the validation results with the + [Files API](/docs/api-reference/files/retrieve-contents). + result_files: + type: array + items: + type: string + description: |- + The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + [Files API](/docs/api-reference/files/retrieve-contents). + trained_tokens: + type: integer + format: int64 + nullable: true + description: |- + The total number of billable tokens processed by this fine tuning job. The value will be null + if the fine-tuning job is still running. 
+        error:
+          type: object
+          description: |-
+            For fine-tuning jobs that have `failed`, this will contain more information on the cause of the
+            failure.
+          properties:
+            message:
+              type: string
+              description: A human-readable error message.
+            code:
+              type: string
+              description: A machine-readable error code.
+            param:
+              type: string
+              nullable: true
+              description: |-
+                The parameter that was invalid, usually `training_file` or `validation_file`. This field
+                will be null if the failure was not parameter-specific.
+          nullable: true
+    FineTuningJobEvent:
+      type: object
+      required:
+        - id
+        - object
+        - created_at
+        - level
+        - message
+      properties:
+        id:
+          type: string
+        object:
+          type: string
+        created_at:
+          type: integer
+          format: unixtime
+        level:
+          type: string
+          enum:
+            - info
+            - warn
+            - error
+        message:
+          type: string
+    Image:
+      type: object
+      description: Represents the URL or the content of an image generated by the OpenAI API.
+      properties:
+        url:
+          type: string
+          format: uri
+          description: The URL of the generated image, if `response_format` is `url` (default).
+        b64_json:
+          type: string
+          format: base64
+          description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.
+    ImagesN:
+      type: integer
+      format: int64
+      minimum: 1
+      maximum: 10
+    ImagesResponse:
+      type: object
+      required:
+        - created
+        - data
+      properties:
+        created:
+          type: integer
+          format: unixtime
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Image'
+    ListFilesResponse:
+      type: object
+      required:
+        - object
+        - data
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/OpenAIFile'
+    ListFineTuneEventsResponse:
+      type: object
+      required:
+        - object
+        - data
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/FineTuneEvent'
+    ListFineTunesResponse:
+      type: object
+      required:
+        - object
+        - data
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/FineTune'
+    ListFineTuningJobEventsResponse:
+      type: object
+      required:
+        - object
+        - data
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/FineTuningJobEvent'
+    ListModelsResponse:
+      type: object
+      required:
+        - object
+        - data
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Model'
+    ListPaginatedFineTuningJobsResponse:
+      type: object
+      required:
+        - object
+        - data
+        - has_more
+      properties:
+        object:
+          type: string
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/FineTuningJob'
+        has_more:
+          type: boolean
+    MaxTokens:
+      type: integer
+      format: int64
+      minimum: 0
+    Model:
+      type: object
+      description: Describes an OpenAI model offering that can be used with the API.
+      required:
+        - id
+        - object
+        - created
+        - owned_by
+      properties:
+        id:
+          type: string
+          description: The model identifier, which can be referenced in the API endpoints.
+        object:
+          type: string
+          enum:
+            - model
+          description: The object type, which is always "model".
+        created:
+          type: integer
+          format: unixtime
+          description: The Unix timestamp (in seconds) when the model was created.
+        owned_by:
+          type: string
+          description: The organization that owns the model.
+    N:
+      type: integer
+      format: int64
+      minimum: 1
+      maximum: 128
+    NEpochs:
+      type: integer
+      format: int64
+      minimum: 1
+      maximum: 50
+    OpenAIFile:
+      type: object
+      description: The `File` object represents a document that has been uploaded to OpenAI.
+ required: + - id + - object + - bytes + - createdAt + - filename + - purpose + - status + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + object: + type: string + enum: + - file + description: The object type, which is always "file". + bytes: + type: integer + format: int64 + description: The size of the file in bytes. + createdAt: + type: integer + format: unixtime + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + purpose: + type: string + description: The intended purpose of the file. Currently, only "fine-tune" is supported. + status: + type: string + enum: + - uploaded + - processed + - pending + - error + - deleting + - deleted + description: |- + The current status of the file, which can be either `uploaded`, `processed`, `pending`, + `error`, `deleting` or `deleted`. + status_details: + type: string + nullable: true + description: |- + Additional details about the status of the file. If the file is in the `error` state, this will + include a message describing the error. + Penalty: + type: number + format: double + minimum: -2 + maximum: 2 + Prompt: + oneOf: + - type: string + - type: array + items: + type: string + - $ref: '#/components/schemas/TokenArray' + - $ref: '#/components/schemas/TokenArrayArray' + nullable: true + Stop: + oneOf: + - type: string + - $ref: '#/components/schemas/StopSequences' + nullable: true + StopSequences: + type: array + items: + type: string + minItems: 1 + maxItems: 4 + SuffixString: + type: string + minLength: 1 + maxLength: 40 + Temperature: + type: number + format: double + minimum: 0 + maximum: 2 + TokenArray: + type: array + items: + type: integer + format: int64 + minItems: 1 + TokenArrayArray: + type: array + items: + $ref: '#/components/schemas/TokenArray' + minItems: 1 + TopP: + type: number + format: double + minimum: 0 + maximum: 1 + User: + type: string + securitySchemes: + BearerAuth: + type: http + scheme: bearer +servers: + - url: https://api.openai.com/v1 + description: OpenAI Endpoint + variables: {} diff --git a/packages/typespec-test/test/openai_generic/spec/tspconfig.yaml b/packages/typespec-test/test/openai_generic/spec/tspconfig.yaml new file mode 100644 index 0000000000..25ee3860f7 --- /dev/null +++ b/packages/typespec-test/test/openai_generic/spec/tspconfig.yaml @@ -0,0 +1,10 @@ +emit: + - "@azure-tools/typespec-ts" +options: + "@azure-tools/typespec-ts": + azureSdkForJs: false + isModularLibrary: true + "emitter-output-dir": "{project-root}/../generated/typespec-ts" + packageDetails: + name: "@msinternal/openai-generic" + description: "OpenAI" diff --git a/packages/typespec-test/test/openai_modular/generated/typespec-ts/review/openai_modular.api.md b/packages/typespec-test/test/openai_modular/generated/typespec-ts/review/openai_modular.api.md index 2261d01972..8c3240fc33 100644 --- a/packages/typespec-test/test/openai_modular/generated/typespec-ts/review/openai_modular.api.md +++ b/packages/typespec-test/test/openai_modular/generated/typespec-ts/review/openai_modular.api.md @@ -115,7 +115,7 @@ export type CompletionsFinishReason = string; // @public export interface CompletionsLogProbabilityModel { textOffset: number[]; - tokenLogprobs: (number | null)[]; + tokenLogprobs: (number | null)[] | null; tokens: string[]; topLogprobs: Record[]; } @@ -123,7 +123,7 @@ export interface CompletionsLogProbabilityModel { // @public export interface 
CompletionsLogProbabilityModel { textOffset: number[]; - tokenLogprobs: (number | null)[]; + tokenLogprobs: (number | null)[] | null; tokens: string[]; topLogprobs: Record[]; } diff --git a/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/models/models.ts index b21a24de5f..3c9eaf5f0e 100644 --- a/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/models/models.ts +++ b/packages/typespec-test/test/openai_modular/generated/typespec-ts/src/models/models.ts @@ -257,7 +257,7 @@ export interface CompletionsLogProbabilityModel { /** The textual forms of tokens evaluated in this probability model. */ tokens: string[]; /** A collection of log probability values for the tokens in this completions data. */ - tokenLogprobs: (number | null)[]; + tokenLogprobs: (number | null)[] | null; /** A mapping of tokens to maximum log probability values in this completions data. */ topLogprobs: Record[]; /** The text offsets associated with tokens in this completions data. */ @@ -269,7 +269,7 @@ export interface CompletionsLogProbabilityModel { /** The textual forms of tokens evaluated in this probability model. */ tokens: string[]; /** A collection of log probability values for the tokens in this completions data. */ - tokenLogprobs: (number | null)[]; + tokenLogprobs: (number | null)[] | null; /** A mapping of tokens to maximum log probability values in this completions data. */ topLogprobs: Record[]; /** The text offsets associated with tokens in this completions data. */ diff --git a/packages/typespec-ts/src/modular/helpers/operationHelpers.ts b/packages/typespec-ts/src/modular/helpers/operationHelpers.ts index 8d60ae1d91..da68b22a09 100644 --- a/packages/typespec-ts/src/modular/helpers/operationHelpers.ts +++ b/packages/typespec-ts/src/modular/helpers/operationHelpers.ts @@ -396,18 +396,28 @@ function buildBodyParameter( } return bodyParameter.optional ? `body: typeof ${bodyParameter.clientName} === 'string' - ? uint8ArrayToString(${bodyParameter.clientName}, "${ - bodyParameter.type.format ?? "base64" - }") + ? uint8ArrayToString(${bodyParameter.clientName}, "${getEncodingFormat( + bodyParameter.type + )}") : ${bodyParameter.clientName}` - : `body: uint8ArrayToString(${bodyParameter.clientName}, "${ - bodyParameter.type.format ?? "base64" - }")`; + : `body: uint8ArrayToString(${ + bodyParameter.clientName + }, "${getEncodingFormat(bodyParameter.type)}")`; } return ""; } +function getEncodingFormat(type: Type) { + const supportedFormats = ["base64url", "base64", "byte"]; + + if (!supportedFormats.includes(type.format ?? "")) { + return "base64"; + } + + return type.format; +} + /** * This function helps with renames, translating client names to rest api names */ @@ -812,7 +822,9 @@ function deserializeResponseValue( switch (type.type) { case "datetime": return required - ? `new Date(${restValue})` + ? type.nullable + ? `${restValue} === null ? null : new Date(${restValue})` + : `new Date(${restValue})` : `${restValue} !== undefined? new Date(${restValue}): undefined`; case "combined": return `${restValue} as any`; @@ -908,9 +920,11 @@ function serializeRequestValue( coreUtilSet.add("uint8ArrayToString"); } return required - ? `uint8ArrayToString(${clientValue}, "${format ?? "base64"}")` + ? `uint8ArrayToString(${clientValue}, "${ + getEncodingFormat(type) ?? "base64" + }")` : `${clientValue} !== undefined ? uint8ArrayToString(${clientValue}, "${ - format ?? 
"base64" + getEncodingFormat(type) ?? "base64" }"): undefined`; default: return clientValue; diff --git a/packages/typespec-ts/src/modular/helpers/typeHelpers.ts b/packages/typespec-ts/src/modular/helpers/typeHelpers.ts index 6c22b95608..1dbd365c80 100644 --- a/packages/typespec-ts/src/modular/helpers/typeHelpers.ts +++ b/packages/typespec-ts/src/modular/helpers/typeHelpers.ts @@ -123,10 +123,11 @@ function handleListType(type: Type): TypeMetadata { const typeMetadata = getType(type.elementType, type.elementType.format); const nestedName = getTypeName(typeMetadata); + const name = type.nullable ? `(${nestedName}[] | null)` : `${nestedName}[]`; + return { - name: nestedName, - nullable: type.nullable, - modifier: type.nullable ? "NullableArray" : "Array", + name: name, + nullable: type.elementType.nullable, originModule: type.elementType?.type === "model" ? "models.js" : undefined }; } @@ -135,6 +136,12 @@ function handleListType(type: Type): TypeMetadata { * Handles the conversion of model types to TypeScript representation metadata. */ function handleModelType(type: Type): TypeMetadata { + // Temporarily handling the case of anonymous models + if (!type.name) { + return { + name: "any" + }; + } return { name: type.name!, nullable: type.nullable, @@ -189,11 +196,7 @@ function handleDictType(type: Type): TypeMetadata { function getTypeName(typeMetadata: TypeMetadata): string { let typeName = typeMetadata.name; - if ( - typeMetadata.nullable && - typeMetadata.modifier !== "Array" && - typeMetadata.modifier !== "NullableArray" - ) { + if (typeMetadata.nullable) { typeName = `(${typeName} | null)`; } diff --git a/packages/typespec-ts/test/modularUnit/typeHelpers.spec.ts b/packages/typespec-ts/test/modularUnit/typeHelpers.spec.ts index ed41ba6b8c..f3cb35afcf 100644 --- a/packages/typespec-ts/test/modularUnit/typeHelpers.spec.ts +++ b/packages/typespec-ts/test/modularUnit/typeHelpers.spec.ts @@ -48,8 +48,7 @@ describe.only("typeHelpers", () => { nullable: false }; const result = getType(type); - expect(result.name).to.equal("number"); - expect(result.modifier).to.equal("Array"); + expect(result.name).to.equal("number[]"); expect(Boolean(result.nullable)).to.be.false; }); }); @@ -144,7 +143,7 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("((number[])[])"); + expect(result.type).to.equal("number[][]"); }); it("should handle a nested list of nullable elements", () => { @@ -160,7 +159,7 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("(((number | null)[])[])"); + expect(result.type).to.equal("((number | null)[] | null)[]"); }); it("should handle a nested list of nullable list of elements", () => { @@ -176,7 +175,7 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("((number[] | null)[])"); + expect(result.type).to.equal("((number[] | null)[] | null)"); }); it("should handle a nested nullable list of nullable list of non-nullable elements", () => { @@ -192,7 +191,20 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("((number[] | null)[] | null)"); + expect(result.type).to.equal("(((number[] | null)[] | null) | null)"); + }); + + it("should handle a nullable list of floats", () => { + const type: Type = { + type: "list", + nullable: true, + elementType: { + type: "float", + nullable: false + } + }; + const result = buildType("foo", type); + 
expect(result.type).to.equal("(number[] | null)"); }); it("should handle a nested nullable list of nullable list of nullable elements", () => { @@ -209,7 +221,9 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("(((number | null)[] | null)[] | null)"); + expect(result.type).to.equal( + "(((((number | null)[] | null) | null)[] | null) | null)" + ); }); }); @@ -225,7 +239,7 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("Record"); + expect(result.type).to.equal("Record"); }); it("should handle dictionary type with list nullable values", () => { @@ -240,7 +254,9 @@ describe.only("typeHelpers", () => { } }; const result = buildType("foo", type); - expect(result.type).to.equal("Record"); + expect(result.type).to.equal( + "Record" + ); }); }); @@ -257,8 +273,7 @@ describe.only("typeHelpers", () => { } }; const result = getType(type); - expect(result.name).to.equal("Record"); - expect(result.modifier).to.equal("Array"); + expect(result.name).to.equal("Record[]"); }); }); @@ -445,7 +460,7 @@ describe.only("typeHelpers", () => { nullable: false }; const result = buildType("ClientList", type); - expect(result.type).to.equal("(number[])"); + expect(result.type).to.equal("number[]"); }); it("should build type for nullable list", () => { @@ -470,7 +485,7 @@ describe.only("typeHelpers", () => { nullable: false }; const result = buildType("ClientList", type); - expect(result.type).to.equal("((number | null)[])"); + expect(result.type).to.equal("((number | null)[] | null)"); }); it("should build type for nullable list and nullable element", () => { @@ -483,7 +498,7 @@ describe.only("typeHelpers", () => { nullable: true }; const result = buildType("ClientList", type); - expect(result.type).to.equal("((number | null)[] | null)"); + expect(result.type).to.equal("(((number | null)[] | null) | null)"); }); });
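As an editorial aid for the expectations above, here is a self-contained TypeScript sketch of the nullable-list naming this change produces. It condenses the visible `handleListType`/`getTypeName` logic in simplified form; `SketchType` is a stand-in for the emitter's real `Type`, and only floats and lists are modeled.

```typescript
// Simplified stand-in for the emitter's Type; only floats and lists here.
interface SketchType {
  type: "float" | "list";
  nullable: boolean;
  elementType?: SketchType;
}

// Condensed post-change naming logic: handleListType wraps a nullable list
// once while building the name, and the generic nullable handling in
// getTypeName wraps again based on the *element's* nullability, which is
// what produces the doubled "| null" in the updated expectations.
function sketchTypeName(t: SketchType): string {
  if (t.type === "list" && t.elementType) {
    const inner = sketchTypeName(t.elementType);
    const listName = t.nullable ? `(${inner}[] | null)` : `${inner}[]`;
    return t.elementType.nullable ? `(${listName} | null)` : listName;
  }
  const base = "number"; // floats render as number
  return t.nullable ? `(${base} | null)` : base;
}

// Matches "should build type for nullable list and nullable element":
// prints "(((number | null)[] | null) | null)".
console.log(
  sketchTypeName({
    type: "list",
    nullable: true,
    elementType: { type: "float", nullable: true },
  })
);
```

The design point the sketch surfaces is that nullability is applied in two places, so a nullable list of nullable elements accumulates two `| null` wrappers rather than one.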