diff --git a/specification/ai/Face/models.common.tsp b/specification/ai/Face/models.common.tsp index 4eb0c1e70ae0..e99fe295e601 100644 --- a/specification/ai/Face/models.common.tsp +++ b/specification/ai/Face/models.common.tsp @@ -294,3 +294,14 @@ model FaceError { } model FaceErrorResponse is Azure.Core.Foundations.ErrorResponseBase; + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/ai/Face/routes.session.tsp b/specification/ai/Face/routes.session.tsp index 70b492cdb31b..4c5c59085f75 100644 --- a/specification/ai/Face/routes.session.tsp +++ b/specification/ai/Face/routes.session.tsp @@ -69,7 +69,7 @@ interface LivenessSessionOperations { @returnsDoc(SessionCreationSuccess) createLivenessSession is FaceResourceCreateWithServiceProvidedName< LivenessSession, - CreateLivenessSessionContent, + BodyParameter, CreateLivenessSessionResult >; @@ -118,7 +118,7 @@ interface LivenessSessionOperations { @sharedRoute createLivenessWithVerifySession is FaceResourceCreateWithServiceProvidedName< LivenessWithVerifySession, - CreateLivenessSessionContent, + BodyParameter, CreateLivenessWithVerifySessionResult >; diff --git a/specification/ai/HealthInsights/HealthInsights.Common/model.common.shared.tsp b/specification/ai/HealthInsights/HealthInsights.Common/model.common.shared.tsp index a316c356a25f..e7829bde8c95 100644 --- a/specification/ai/HealthInsights/HealthInsights.Common/model.common.shared.tsp +++ b/specification/ai/HealthInsights/HealthInsights.Common/model.common.shared.tsp @@ -229,3 +229,14 @@ model ClinicalCodedElement { @doc("A value associated with the code within the given clinical coding system.") value?: string; } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." 
+> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/ai/HealthInsights/HealthInsights.OncoPhenotype/route.oncophenotype.tsp b/specification/ai/HealthInsights/HealthInsights.OncoPhenotype/route.oncophenotype.tsp index f60da4e7484a..3bcfdf2454b3 100644 --- a/specification/ai/HealthInsights/HealthInsights.OncoPhenotype/route.oncophenotype.tsp +++ b/specification/ai/HealthInsights/HealthInsights.OncoPhenotype/route.oncophenotype.tsp @@ -19,7 +19,7 @@ interface OncoPhenotype { @doc("Gets the status and details of the Onco Phenotype job.") @get @route("/onco-phenotype/jobs/{id}") - getJob is HealthInsightsLongRunningPollOperation; + getJob is HealthInsightsLongRunningPollOperation>; #suppress "@azure-tools/typespec-azure-core/long-running-polling-operation-required" "Polling through operation-location" #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "There is no long-running RPC template in Azure.Core" diff --git a/specification/ai/HealthInsights/HealthInsights.PatientTimeline/route.patienttimeline.tsp b/specification/ai/HealthInsights/HealthInsights.PatientTimeline/route.patienttimeline.tsp index 1514a76dbe88..bbb9c165dd4e 100644 --- a/specification/ai/HealthInsights/HealthInsights.PatientTimeline/route.patienttimeline.tsp +++ b/specification/ai/HealthInsights/HealthInsights.PatientTimeline/route.patienttimeline.tsp @@ -19,7 +19,7 @@ interface PatientTimeline { @doc("Gets the status and details of the Patient Timeline job.") @get @route("/patient-timeline/jobs/{id}") - getJob is HealthInsightsLongRunningPollOperation; + getJob is HealthInsightsLongRunningPollOperation>; #suppress "@azure-tools/typespec-azure-core/long-running-polling-operation-required" "Polling through operation-location" #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "There is no long-running RPC template in Azure.Core" diff --git a/specification/ai/HealthInsights/HealthInsights.TrialMatcher/route.trialmatcher.tsp 
b/specification/ai/HealthInsights/HealthInsights.TrialMatcher/route.trialmatcher.tsp index 7f31371b01c9..4e627e65e67c 100644 --- a/specification/ai/HealthInsights/HealthInsights.TrialMatcher/route.trialmatcher.tsp +++ b/specification/ai/HealthInsights/HealthInsights.TrialMatcher/route.trialmatcher.tsp @@ -18,7 +18,7 @@ interface TrialMatcher { @doc("Gets the status and details of the Trial Matcher job.") @get @route("/trial-matcher/jobs/{id}") - getJob is HealthInsightsLongRunningPollOperation; + getJob is HealthInsightsLongRunningPollOperation>; #suppress "@azure-tools/typespec-azure-core/long-running-polling-operation-required" "Polling through operation-location" #suppress "@azure-tools/typespec-azure-core/use-standard-operations" "There is no long-running RPC template in Azure.Core" diff --git a/specification/ai/OpenAI.Assistants/assistants/routes.tsp b/specification/ai/OpenAI.Assistants/assistants/routes.tsp index cdefb9fd6aa5..e8746c12abf6 100644 --- a/specification/ai/OpenAI.Assistants/assistants/routes.tsp +++ b/specification/ai/OpenAI.Assistants/assistants/routes.tsp @@ -20,7 +20,7 @@ namespace Azure.AI.OpenAI.Assistants; @post @added(ServiceApiVersions.v2024_02_15_preview) @route("/assistants") -op createAssistant(...AssistantCreationOptions): Assistant; +op createAssistant(...BodyParameter): Assistant; /** * Gets a list of assistants that were previously created. @@ -63,7 +63,7 @@ op getAssistant(@path assistantId: string): Assistant; @post @route("/assistants/{assistantId}") @added(ServiceApiVersions.v2024_02_15_preview) -op updateAssistant(...UpdateAssistantOptions): Assistant; +op updateAssistant(...BodyParameter): Assistant; /** * Deletes an assistant. 
diff --git a/specification/ai/OpenAI.Assistants/common/models.tsp b/specification/ai/OpenAI.Assistants/common/models.tsp index 5c446a1d1d42..85fb2d648611 100644 --- a/specification/ai/OpenAI.Assistants/common/models.tsp +++ b/specification/ai/OpenAI.Assistants/common/models.tsp @@ -148,3 +148,14 @@ union ApiResponseFormat { /** Using `json_object` format will limit the usage of ToolCall to only functions. */ jsonObject: "json_object", } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/ai/OpenAI.Assistants/runs/routes.tsp b/specification/ai/OpenAI.Assistants/runs/routes.tsp index c65c2d07fa06..45847257a2e7 100644 --- a/specification/ai/OpenAI.Assistants/runs/routes.tsp +++ b/specification/ai/OpenAI.Assistants/runs/routes.tsp @@ -139,4 +139,4 @@ op cancelRun(@path threadId: string, @path runId: string): ThreadRun; @route("/threads/runs") @doc("Creates a new assistant thread and immediately starts a run using that new thread.") @added(ServiceApiVersions.v2024_02_15_preview) -op createThreadAndRun(...CreateAndRunThreadOptions): ThreadRun; +op createThreadAndRun(...BodyParameter): ThreadRun; diff --git a/specification/ai/OpenAI.Assistants/threads/routes.tsp b/specification/ai/OpenAI.Assistants/threads/routes.tsp index 53dbc7fa16de..2f06aacfb89d 100644 --- a/specification/ai/OpenAI.Assistants/threads/routes.tsp +++ b/specification/ai/OpenAI.Assistants/threads/routes.tsp @@ -20,7 +20,9 @@ namespace Azure.AI.OpenAI.Assistants; @post @added(ServiceApiVersions.v2024_02_15_preview) @route("/threads") -op createThread(...AssistantThreadCreationOptions): AssistantThread; +op createThread( + ...BodyParameter, +): AssistantThread; // list threads? 
@@ -52,7 +54,9 @@ op getThread(@path threadId: string): AssistantThread; @post @route("/threads/{threadId}") @added(ServiceApiVersions.v2024_02_15_preview) -op updateThread(...UpdateAssistantThreadOptions): AssistantThread; +op updateThread( + ...BodyParameter, +): AssistantThread; /** * Deletes an existing thread. diff --git a/specification/ai/OpenAI.Assistants/vector_stores/routes.tsp b/specification/ai/OpenAI.Assistants/vector_stores/routes.tsp index a01dae88426f..da0395283ad6 100644 --- a/specification/ai/OpenAI.Assistants/vector_stores/routes.tsp +++ b/specification/ai/OpenAI.Assistants/vector_stores/routes.tsp @@ -34,7 +34,7 @@ op listVectorStores( @post @route("/vector_stores") @added(ServiceApiVersions.v2024_05_01_preview) -op createVectorStore(...VectorStoreOptions): VectorStore; +op createVectorStore(...BodyParameter): VectorStore; /** * Returns the vector store object matching the specified ID. diff --git a/specification/ai/data-plane/Face/preview/v1.1-preview.1/Face.json b/specification/ai/data-plane/Face/preview/v1.1-preview.1/Face.json index 7f17cd879a5f..f5f77b5d0d1b 100644 --- a/specification/ai/data-plane/Face/preview/v1.1-preview.1/Face.json +++ b/specification/ai/data-plane/Face/preview/v1.1-preview.1/Face.json @@ -408,6 +408,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CreateLivenessSessionContent" @@ -5846,6 +5847,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CreateLivenessSessionContent" diff --git a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-02-15-preview/assistants_generated.json b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-02-15-preview/assistants_generated.json index e1a20fe445e2..6e871cf81b9c 100644 --- a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-02-15-preview/assistants_generated.json +++ 
b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-02-15-preview/assistants_generated.json @@ -174,6 +174,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssistantCreationOptions" @@ -217,11 +218,16 @@ "description": "Modifies an existing assistant.", "parameters": [ { - "$ref": "#/parameters/UpdateAssistantOptions.assistantId" + "name": "assistantId", + "in": "path", + "description": "The ID of the assistant to modify.", + "required": true, + "type": "string" }, { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/UpdateAssistantOptions" @@ -460,6 +466,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssistantThreadCreationOptions" @@ -503,11 +510,16 @@ "description": "Modifies an existing thread.", "parameters": [ { - "$ref": "#/parameters/UpdateAssistantThreadOptions.threadId" + "name": "threadId", + "in": "path", + "description": "The ID of the thread to modify.", + "required": true, + "type": "string" }, { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/UpdateAssistantThreadOptions" @@ -1235,6 +1247,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CreateAndRunThreadOptions" @@ -3181,22 +3194,5 @@ } } }, - "parameters": { - "UpdateAssistantOptions.assistantId": { - "name": "assistantId", - "in": "path", - "description": "The ID of the assistant to modify.", - "required": true, - "type": "string", - "x-ms-parameter-location": "method" - }, - "UpdateAssistantThreadOptions.threadId": { - "name": "threadId", - "in": "path", - "description": "The ID of the thread to modify.", - "required": true, - "type": "string", - "x-ms-parameter-location": "method" - } - } + "parameters": {} } 
diff --git a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-05-01-preview/assistants_generated.json b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-05-01-preview/assistants_generated.json index b0fe2a911183..5fcda9a56c31 100644 --- a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-05-01-preview/assistants_generated.json +++ b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV2/preview/2024-05-01-preview/assistants_generated.json @@ -174,6 +174,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssistantCreationOptions" @@ -217,11 +218,16 @@ "description": "Modifies an existing assistant.", "parameters": [ { - "$ref": "#/parameters/UpdateAssistantOptions.assistantId" + "name": "assistantId", + "in": "path", + "description": "The ID of the assistant to modify.", + "required": true, + "type": "string" }, { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/UpdateAssistantOptions" @@ -496,6 +502,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssistantThreadCreationOptions" @@ -539,11 +546,16 @@ "description": "Modifies an existing thread.", "parameters": [ { - "$ref": "#/parameters/UpdateAssistantThreadOptions.threadId" + "name": "threadId", + "in": "path", + "description": "The ID of the thread to modify.", + "required": true, + "type": "string" }, { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/UpdateAssistantThreadOptions" @@ -1287,6 +1299,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CreateAndRunThreadOptions" @@ -1418,6 +1431,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { 
"$ref": "#/definitions/VectorStoreOptions" @@ -5172,22 +5186,5 @@ } } }, - "parameters": { - "UpdateAssistantOptions.assistantId": { - "name": "assistantId", - "in": "path", - "description": "The ID of the assistant to modify.", - "required": true, - "type": "string", - "x-ms-parameter-location": "method" - }, - "UpdateAssistantThreadOptions.threadId": { - "name": "threadId", - "in": "path", - "description": "The ID of the thread to modify.", - "required": true, - "type": "string", - "x-ms-parameter-location": "method" - } - } + "parameters": {} } diff --git a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-02-15-preview/assistants_generated.yaml b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-02-15-preview/assistants_generated.yaml index 5d101eb1cb50..50ff2a97ab68 100644 --- a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-02-15-preview/assistants_generated.yaml +++ b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-02-15-preview/assistants_generated.yaml @@ -18,6 +18,7 @@ paths: schema: $ref: '#/components/schemas/Assistant' requestBody: + description: Body parameter. required: true content: application/json: @@ -110,7 +111,12 @@ paths: operationId: updateAssistant description: Modifies an existing assistant. parameters: - - $ref: '#/components/parameters/UpdateAssistantOptions.assistantId' + - name: assistantId + in: path + required: true + description: The ID of the assistant to modify. + schema: + type: string responses: '200': description: The updated assistant instance. @@ -119,6 +125,7 @@ paths: schema: $ref: '#/components/schemas/Assistant' requestBody: + description: Body parameter. required: true content: application/json: @@ -257,6 +264,7 @@ paths: schema: $ref: '#/components/schemas/AssistantThread' requestBody: + description: Body parameter. required: true content: application/json: @@ -275,6 +283,7 @@ paths: schema: $ref: '#/components/schemas/ThreadRun' requestBody: + description: Body parameter. 
required: true content: application/json: @@ -302,7 +311,12 @@ paths: operationId: updateThread description: Modifies an existing thread. parameters: - - $ref: '#/components/parameters/UpdateAssistantThreadOptions.threadId' + - name: threadId + in: path + required: true + description: The ID of the thread to modify. + schema: + type: string responses: '200': description: Information about the modified thread. @@ -311,6 +325,7 @@ paths: schema: $ref: '#/components/schemas/AssistantThread' requestBody: + description: Body parameter. required: true content: application/json: @@ -817,21 +832,6 @@ security: - OAuth2Auth: - https://cognitiveservices.azure.com/.default components: - parameters: - UpdateAssistantOptions.assistantId: - name: assistantId - in: path - required: true - description: The ID of the assistant to modify. - schema: - type: string - UpdateAssistantThreadOptions.threadId: - name: threadId - in: path - required: true - description: The ID of the thread to modify. - schema: - type: string schemas: Assistant: type: object diff --git a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-05-01-preview/assistants_generated.yaml b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-05-01-preview/assistants_generated.yaml index bb72844194ac..e7a6f9691f97 100644 --- a/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-05-01-preview/assistants_generated.yaml +++ b/specification/ai/data-plane/OpenAI.Assistants/OpenApiV3/2024-05-01-preview/assistants_generated.yaml @@ -18,6 +18,7 @@ paths: schema: $ref: '#/components/schemas/Assistant' requestBody: + description: Body parameter. required: true content: application/json: @@ -110,7 +111,12 @@ paths: operationId: updateAssistant description: Modifies an existing assistant. parameters: - - $ref: '#/components/parameters/UpdateAssistantOptions.assistantId' + - name: assistantId + in: path + required: true + description: The ID of the assistant to modify. 
+ schema: + type: string responses: '200': description: The updated assistant instance. @@ -119,6 +125,7 @@ paths: schema: $ref: '#/components/schemas/Assistant' requestBody: + description: Body parameter. required: true content: application/json: @@ -257,6 +264,7 @@ paths: schema: $ref: '#/components/schemas/AssistantThread' requestBody: + description: Body parameter. required: true content: application/json: @@ -275,6 +283,7 @@ paths: schema: $ref: '#/components/schemas/ThreadRun' requestBody: + description: Body parameter. required: true content: application/json: @@ -302,7 +311,12 @@ paths: operationId: updateThread description: Modifies an existing thread. parameters: - - $ref: '#/components/parameters/UpdateAssistantThreadOptions.threadId' + - name: threadId + in: path + required: true + description: The ID of the thread to modify. + schema: + type: string responses: '200': description: Information about the modified thread. @@ -311,6 +325,7 @@ paths: schema: $ref: '#/components/schemas/AssistantThread' requestBody: + description: Body parameter. required: true content: application/json: @@ -903,6 +918,7 @@ paths: schema: $ref: '#/components/schemas/VectorStore' requestBody: + description: Body parameter. required: true content: application/json: @@ -1293,21 +1309,6 @@ security: - OAuth2Auth: - https://cognitiveservices.azure.com/.default components: - parameters: - UpdateAssistantOptions.assistantId: - name: assistantId - in: path - required: true - description: The ID of the assistant to modify. - schema: - type: string - UpdateAssistantThreadOptions.threadId: - name: threadId - in: path - required: true - description: The ID of the thread to modify. 
- schema: - type: string schemas: ApiResponseFormat: anyOf: diff --git a/specification/ai/data-plane/readme.md b/specification/ai/data-plane/readme.md new file mode 100644 index 000000000000..7abc736f3437 --- /dev/null +++ b/specification/ai/data-plane/readme.md @@ -0,0 +1,78 @@ +# OpenAI + +> see https://aka.ms/autorest + +This is the AutoRest configuration file for OpenAI. + +## Getting Started + +To build the SDKs for My API, simply install AutoRest via `npm` (`npm install -g autorest`) and then run: + +> `autorest readme.md` + +To see additional help and options, run: + +> `autorest --help` + +For other options on installation see [Installing AutoRest](https://aka.ms/autorest/install) on the AutoRest github page. + +--- + +## Configuration + +### Basic Information + +These are the global settings for the [OpenAI]. + +```yaml +openapi-type: data-plane +tag: package-2023-11-06-beta +``` + +### Tag: package-2023-11-06-beta + +These settings apply only when `--tag=package-2023-11-06-beta` is specified on the command line. + +```yaml $(tag) == 'package-2023-11-06-beta' +input-file: + - OpenAI.Assistants/OpenApiV2/stable/2023-11-06-beta/assistants_generated.json + - OpenAI.Assistants/OpenApiV2/stable/2023-11-06-beta/examples/create_assistant.json +``` + +### Tag: package-2023-02-15-preview + +These settings apply only when `--tag=package-2023-02-15-preview` is specified on the command line. + +```yaml $(tag) == 'package-2023-02-15-preview' +input-file: + - OpenAI.Assistants/OpenApiV2/preview/2023-02-15-preview/assistants_generated.json +``` + +### Tag: package-2024-02-15-preview + +These settings apply only when `--tag=package-2024-02-15-preview` is specified on the command line. 
+ +```yaml $(tag) == 'package-2024-02-15-preview' +input-file: + - OpenAI.Assistants/OpenApiV2/preview/2024-02-15-preview/assistants_generated.json +``` + +### Tag: package-2024-05-01-preview + +These settings apply only when `--tag=package-2024-05-01-preview` is specified on the command line. + +```yaml $(tag) == 'package-2024-05-01-preview' +input-file: + - OpenAI.Assistants/OpenApiV2/preview/2024-05-01-preview/assistants_generated.json +``` + +### Suppression + +``` yaml +directive: + - suppress: XMS_EXAMPLE_NOTFOUND_ERROR + from: assistants_generated.json + reason: No existing examples. + - suppress: UNREFERENCED_JSON_FILE + from: +``` diff --git a/specification/cognitiveservices/OpenAI.Inference/models/completions/common.tsp b/specification/cognitiveservices/OpenAI.Inference/models/completions/common.tsp index 3afebd183971..219388cff428 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/completions/common.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/completions/common.tsp @@ -1,7 +1,9 @@ import "@typespec/versioning"; +import "@typespec/http"; import "@azure-tools/typespec-azure-core"; using TypeSpec.Versioning; +using Http; namespace Azure.OpenAI; diff --git a/specification/cognitiveservices/OpenAI.Inference/models/embeddings_create.tsp b/specification/cognitiveservices/OpenAI.Inference/models/embeddings_create.tsp index eddaa1cb4b5a..e698a2f1d70f 100644 --- a/specification/cognitiveservices/OpenAI.Inference/models/embeddings_create.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/models/embeddings_create.tsp @@ -102,3 +102,14 @@ model EmbeddingsUsage { @encodedName("application/json", "total_tokens") totalTokens: int32; } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." 
+> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes/audio_transcription.tsp b/specification/cognitiveservices/OpenAI.Inference/routes/audio_transcription.tsp index 6a3fab3d6e9c..265ba7cedba0 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes/audio_transcription.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes/audio_transcription.tsp @@ -28,7 +28,7 @@ namespace Azure.OpenAI; @sharedRoute op getAudioTranscriptionAsPlainText is Azure.Core.ResourceAction< Deployment, - AudioTranscriptionOptions, // response_format must be one of: text, srt, vtt + BodyParameter, AudioTextResponse, MultipartFormDataRequestHeadersTraits & TextPlainResponseHeadersTraits >; @@ -44,7 +44,7 @@ op getAudioTranscriptionAsPlainText is Azure.Core.ResourceAction< @sharedRoute op getAudioTranscriptionAsResponseObject is Azure.Core.ResourceAction< Deployment, - AudioTranscriptionOptions, // response_format must be unspecified (json) or one of: json, verbose_json + BodyParameter, // response_format must be unspecified (json) or one of: json, verbose_json AudioTranscription, MultipartFormDataRequestHeadersTraits >; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes/audio_translation.tsp b/specification/cognitiveservices/OpenAI.Inference/routes/audio_translation.tsp index 0f36637e5964..18cf4a1d1c11 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes/audio_translation.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes/audio_translation.tsp @@ -27,7 +27,7 @@ namespace Azure.OpenAI; @sharedRoute op getAudioTranslationAsPlainText is Azure.Core.ResourceAction< Deployment, - AudioTranslationOptions, // response_format must be one of: text, srt, vtt + BodyParameter, AudioTextResponse, MultipartFormDataRequestHeadersTraits & TextPlainResponseHeadersTraits >; @@ -42,7 +42,7 @@ op getAudioTranslationAsPlainText is Azure.Core.ResourceAction< 
@sharedRoute op getAudioTranslationAsResponseObject is Azure.Core.ResourceAction< Deployment, - AudioTranslationOptions, // response_format must be unspecified (json) or one of: json, verbose_json + BodyParameter, // response_format must be unspecified (json) or one of: json, verbose_json AudioTranslation, MultipartFormDataRequestHeadersTraits >; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes/completions.tsp b/specification/cognitiveservices/OpenAI.Inference/routes/completions.tsp index 284b0842ca5d..3b138d588587 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes/completions.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes/completions.tsp @@ -23,7 +23,7 @@ namespace Azure.OpenAI; @action("completions") //@convenientAPI(true) op getCompletions is Azure.Core.ResourceAction< Deployment, - CompletionsOptions, + BodyParameter, Completions >; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes/embeddings.tsp b/specification/cognitiveservices/OpenAI.Inference/routes/embeddings.tsp index 20afaa954d04..ecb49b09e4b1 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes/embeddings.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes/embeddings.tsp @@ -18,6 +18,6 @@ namespace Azure.OpenAI; @action("embeddings") //@convenientAPI(true) op getEmbeddings is Azure.Core.ResourceAction< Deployment, - EmbeddingsOptions, + BodyParameter, Embeddings >; diff --git a/specification/cognitiveservices/OpenAI.Inference/routes/images.tsp b/specification/cognitiveservices/OpenAI.Inference/routes/images.tsp index 0f09b3f185bc..b0fcec522f40 100644 --- a/specification/cognitiveservices/OpenAI.Inference/routes/images.tsp +++ b/specification/cognitiveservices/OpenAI.Inference/routes/images.tsp @@ -20,7 +20,7 @@ namespace Azure.OpenAI; @action("images/generations") op getImageGenerations is Azure.Core.ResourceAction< Deployment, - ImageGenerationOptions, + BodyParameter, ImageGenerations >; @@ -54,7 
+54,7 @@ op getAzureBatchImageGenerationOperationStatus is RpcOperation< } ) op beginAzureBatchImageGeneration is OaiLongRunningRpcOperation< - ImageGenerationOptions, + BodyParameter, BatchImageGenerationOperationResponse, BatchImageGenerationOperationResponse >; diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json index cb97b483ca84..08007691cc3a 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-06-01-preview/generated.json @@ -128,6 +128,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -179,6 +180,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -223,6 +225,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-07-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-07-01-preview/generated.json index 9230cd3ea16a..dfd0dcdce0b0 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-07-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-07-01-preview/generated.json @@ -128,6 +128,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -179,6 +180,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", 
"required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -223,6 +225,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-02-15-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-02-15-preview/generated.json index 43de460be08c..0fb1618e5c07 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-02-15-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-02-15-preview/generated.json @@ -137,25 +137,93 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." 
+ }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -208,22 +276,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -343,6 +475,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -394,6 +527,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -445,6 +579,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" @@ -499,25 +634,93 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -566,22 +769,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -4014,190 +4281,6 @@ } }, "parameters": { - "AudioTranscriptionOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.language": { - "name": "language", - "in": "formData", - "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this transcription request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranscriptionFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated transcription." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranscriptionOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to translate. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this translation request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the translation response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranslationFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated translation." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranslationOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, "Azure.Core.Foundations.ApiVersionParameter": { "name": "api-version", "in": "query", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-03-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-03-01-preview/generated.json index 3586f7590bf8..50544e3dc1fc 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-03-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-03-01-preview/generated.json @@ -137,25 +137,93 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -208,22 +276,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -343,6 +475,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -394,6 +527,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -445,6 +579,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" @@ -499,25 +634,93 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -566,22 +769,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -4045,190 +4312,6 @@ } }, "parameters": { - "AudioTranscriptionOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.language": { - "name": "language", - "in": "formData", - "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this transcription request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranscriptionFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated transcription." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranscriptionOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to translate. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this translation request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the translation response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranslationFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated translation." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranslationOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, "Azure.Core.Foundations.ApiVersionParameter": { "name": "api-version", "in": "query", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-04-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-04-01-preview/generated.json index 7a140362995b..1e2249218c04 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-04-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-04-01-preview/generated.json @@ -137,28 +137,127 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.timestampGranularities" + "name": "timestamp_granularities", + "in": "formData", + "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "word", + "segment" + ], + "x-ms-enum": { + "name": "AudioTranscriptionTimestampGranularity", + "modelAsString": true, + 
"values": [ + { + "name": "word", + "value": "word", + "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." + }, + { + "name": "segment", + "value": "segment", + "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." + } + ] + } + }, + "default": [ + "segment" + ], + "x-ms-client-name": "timestampGranularities" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -211,22 +310,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -346,6 +509,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -397,6 +561,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -448,6 +613,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" @@ -502,28 +668,127 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.timestampGranularities" + "name": "timestamp_granularities", + "in": "formData", + "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "word", + "segment" + ], + "x-ms-enum": { + "name": "AudioTranscriptionTimestampGranularity", + "modelAsString": true, + 
"values": [ + { + "name": "word", + "value": "word", + "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." + }, + { + "name": "segment", + "value": "segment", + "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." + } + ] + } + }, + "default": [ + "segment" + ], + "x-ms-client-name": "timestampGranularities" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -572,22 +837,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -4151,225 +4480,6 @@ } }, "parameters": { - "AudioTranscriptionOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.language": { - "name": "language", - "in": "formData", - "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this transcription request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranscriptionFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated transcription." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranscriptionOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.timestampGranularities": { - "name": "timestamp_granularities", - "in": "formData", - "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", - "required": false, - "type": "array", - "items": { - "type": "string", - "enum": [ - "word", - "segment" - ], - "x-ms-enum": { - "name": "AudioTranscriptionTimestampGranularity", - "modelAsString": true, - "values": [ - { - "name": "word", - "value": "word", - "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." - }, - { - "name": "segment", - "value": "segment", - "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." 
- } - ] - } - }, - "default": [ - "segment" - ], - "x-ms-parameter-location": "method", - "x-ms-client-name": "timestampGranularities" - }, - "AudioTranslationOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to translate. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this translation request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the translation response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranslationFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." 
- }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated translation." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." - } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranslationOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, "Azure.Core.Foundations.ApiVersionParameter": { "name": "api-version", "in": "query", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-05-01-preview/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-05-01-preview/generated.json index 24ed1b370093..8557c34830a2 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-05-01-preview/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2024-05-01-preview/generated.json @@ -137,28 +137,127 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": 
"file", + "in": "formData", + "description": "The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.timestampGranularities" + "name": "timestamp_granularities", + "in": "formData", + "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "word", + "segment" + ], + "x-ms-enum": { + "name": "AudioTranscriptionTimestampGranularity", + "modelAsString": true, + 
"values": [ + { + "name": "word", + "value": "word", + "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." + }, + { + "name": "segment", + "value": "segment", + "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." + } + ] + } + }, + "default": [ + "segment" + ], + "x-ms-client-name": "timestampGranularities" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -211,22 +310,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -346,6 +509,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -397,6 +561,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" @@ -448,6 +613,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ImageGenerationOptions" @@ -502,28 +668,127 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranscriptionFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated transcription." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
+ } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.language" + "name": "language", + "in": "formData", + "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.timestampGranularities" + "name": "timestamp_granularities", + "in": "formData", + "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "word", + "segment" + ], + "x-ms-enum": { + "name": "AudioTranscriptionTimestampGranularity", + "modelAsString": true, + 
"values": [ + { + "name": "word", + "value": "word", + "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." + }, + { + "name": "segment", + "value": "segment", + "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." + } + ] + } + }, + "default": [ + "segment" + ], + "x-ms-client-name": "timestampGranularities" }, { - "$ref": "#/parameters/AudioTranscriptionOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this transcription request.", + "required": false, + "type": "string" } ], "responses": { @@ -572,22 +837,86 @@ "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.file" + "name": "file", + "in": "formData", + "description": "The audio data to translate. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", + "required": true, + "type": "file" }, { - "$ref": "#/parameters/AudioTranslationOptions.filename" + "name": "filename", + "in": "formData", + "description": "The optional filename or descriptive identifier to associate with with the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.responseFormat" + "name": "response_format", + "in": "formData", + "description": "The requested format of the translation response data, which will influence the content and detail of the result.", + "required": false, + "type": "string", + "enum": [ + "json", + "verbose_json", + "text", + "srt", + "vtt" + ], + "x-ms-enum": { + "name": "AudioTranslationFormat", + "modelAsString": true, + "values": [ + { + "name": "json", + "value": "json", + "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." + }, + { + "name": "verbose_json", + "value": "verbose_json", + "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." + }, + { + "name": "text", + "value": "text", + "description": "Use a response body that is plain text containing the raw, unannotated translation." + }, + { + "name": "srt", + "value": "srt", + "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." + }, + { + "name": "vtt", + "value": "vtt", + "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." + } + ] + }, + "x-ms-client-name": "responseFormat" }, { - "$ref": "#/parameters/AudioTranslationOptions.prompt" + "name": "prompt", + "in": "formData", + "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", + "required": false, + "type": "string" }, { - "$ref": "#/parameters/AudioTranslationOptions.temperature" + "name": "temperature", + "in": "formData", + "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", + "required": false, + "type": "number", + "format": "float" }, { - "$ref": "#/parameters/AudioTranslationOptions.model" + "name": "model", + "in": "formData", + "description": "The model to use for this translation request.", + "required": false, + "type": "string" } ], "responses": { @@ -4468,225 +4797,6 @@ } }, "parameters": { - "AudioTranscriptionOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to transcribe. 
This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.language": { - "name": "language", - "in": "formData", - "description": "The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code\nsuch as 'en' or 'fr'.\nProviding this known input language is optional but may improve the accuracy and/or latency of transcription.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this transcription request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. 
The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the transcription response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranscriptionFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the transcription." - }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing transcription text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated transcription." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." 
- } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranscriptionOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, - "AudioTranscriptionOptions.timestampGranularities": { - "name": "timestamp_granularities", - "in": "formData", - "description": "The timestamp granularities to populate for this transcription.\n`response_format` must be set `verbose_json` to use timestamp granularities.\nEither or both of these options are supported: `word`, or `segment`.\nNote: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.", - "required": false, - "type": "array", - "items": { - "type": "string", - "enum": [ - "word", - "segment" - ], - "x-ms-enum": { - "name": "AudioTranscriptionTimestampGranularity", - "modelAsString": true, - "values": [ - { - "name": "word", - "value": "word", - "description": "Indicates that responses should include timing information about each transcribed word. Note that generating word\ntimestamp information will incur additional response latency." - }, - { - "name": "segment", - "value": "segment", - "description": "Indicates that responses should include timing and other information about each transcribed audio segment. Audio\nsegment timestamp information does not incur any additional latency." 
- } - ] - } - }, - "default": [ - "segment" - ], - "x-ms-parameter-location": "method", - "x-ms-client-name": "timestampGranularities" - }, - "AudioTranslationOptions.file": { - "name": "file", - "in": "formData", - "description": "The audio data to translate. This must be the binary content of a file in one of the supported media formats:\n flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.", - "required": true, - "type": "file", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.filename": { - "name": "filename", - "in": "formData", - "description": "The optional filename or descriptive identifier to associate with with the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.model": { - "name": "model", - "in": "formData", - "description": "The model to use for this translation request.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.prompt": { - "name": "prompt", - "in": "formData", - "description": "An optional hint to guide the model's style or continue from a prior audio segment. The written language of the\nprompt should match the primary spoken language of the audio data.", - "required": false, - "type": "string", - "x-ms-parameter-location": "method" - }, - "AudioTranslationOptions.responseFormat": { - "name": "response_format", - "in": "formData", - "description": "The requested format of the translation response data, which will influence the content and detail of the result.", - "required": false, - "type": "string", - "enum": [ - "json", - "verbose_json", - "text", - "srt", - "vtt" - ], - "x-ms-enum": { - "name": "AudioTranslationFormat", - "modelAsString": true, - "values": [ - { - "name": "json", - "value": "json", - "description": "Use a response body that is a JSON object containing a single 'text' field for the translation." 
- }, - { - "name": "verbose_json", - "value": "verbose_json", - "description": "Use a response body that is a JSON object containing translation text along with timing, segments, and other\nmetadata." - }, - { - "name": "text", - "value": "text", - "description": "Use a response body that is plain text containing the raw, unannotated translation." - }, - { - "name": "srt", - "value": "srt", - "description": "Use a response body that is plain text in SubRip (SRT) format that also includes timing information." - }, - { - "name": "vtt", - "value": "vtt", - "description": "Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information." - } - ] - }, - "x-ms-parameter-location": "method", - "x-ms-client-name": "responseFormat" - }, - "AudioTranslationOptions.temperature": { - "name": "temperature", - "in": "formData", - "description": "The sampling temperature, between 0 and 1.\nHigher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nIf set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.", - "required": false, - "type": "number", - "format": "float", - "x-ms-parameter-location": "method" - }, "Azure.Core.Foundations.ApiVersionParameter": { "name": "api-version", "in": "query", diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json index b55dcf7f754c..39a5f276c4a8 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2022-12-01/generated.json @@ -77,6 +77,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": 
"#/definitions/CompletionsOptions" @@ -128,6 +129,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" diff --git a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json index 4025b89cae3a..bf3f9152ea8b 100644 --- a/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json +++ b/specification/cognitiveservices/data-plane/AzureOpenAI/inference/stable/2023-05-15/generated.json @@ -128,6 +128,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/CompletionsOptions" @@ -179,6 +180,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/EmbeddingsOptions" diff --git a/specification/communication/Communication.Messages/models.tsp b/specification/communication/Communication.Messages/models.tsp index deb3876a0910..4949f3b23047 100644 --- a/specification/communication/Communication.Messages/models.tsp +++ b/specification/communication/Communication.Messages/models.tsp @@ -374,3 +374,14 @@ enum Versions { @useDependency(Azure.Core.Versions.v1_0_Preview_2) c2024_02_01: "2024-02-01", } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." 
+> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/communication/Communication.Messages/routes.tsp b/specification/communication/Communication.Messages/routes.tsp index 95dede298d0a..a2c5d85987a4 100644 --- a/specification/communication/Communication.Messages/routes.tsp +++ b/specification/communication/Communication.Messages/routes.tsp @@ -35,7 +35,7 @@ interface NotificationMessagesOperations { @doc("Sends a notification message from Business to User.") send is Operations.ResourceCollectionAction< Notifications, - NotificationContent, + BodyParameter, AcceptedResponse & SendMessageResult >; } diff --git a/specification/communication/data-plane/Messages/stable/2024-02-01/communicationservicesmessages.json b/specification/communication/data-plane/Messages/stable/2024-02-01/communicationservicesmessages.json index 10f7cd1b04da..ec91ed70c2ed 100644 --- a/specification/communication/data-plane/Messages/stable/2024-02-01/communicationservicesmessages.json +++ b/specification/communication/data-plane/Messages/stable/2024-02-01/communicationservicesmessages.json @@ -164,6 +164,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/NotificationContent" diff --git a/specification/developersigning/DeveloperSigning/main.tsp b/specification/developersigning/DeveloperSigning/main.tsp index 89c0df419a51..167bd84ccdc7 100644 --- a/specification/developersigning/DeveloperSigning/main.tsp +++ b/specification/developersigning/DeveloperSigning/main.tsp @@ -68,7 +68,7 @@ interface CertificateProfileOperations { @pollingOperation(CertificateProfileOperations.getSigningStatus) sign is StandardResourceOperations.LongRunningResourceAction< CertificateProfileName, - SigningPayloadOptions, + BodyParameter, SignResult, Foundations.ErrorResponse, SigningRequestHeadersTraits @@ -188,6 +188,17 @@ model BytesBody { contentType: ContentType; } +alias BodyParameter< + T, + TName 
extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; + alias SigningRequestHeadersTraits = Azure.Core.Traits.RequestHeadersTrait<{ @doc("An optional client version.") @header diff --git a/specification/developersigning/data-plane/Azure.Developer.Signing/preview/2023-06-15-preview/azure.developer.signing.json b/specification/developersigning/data-plane/Azure.Developer.Signing/preview/2023-06-15-preview/azure.developer.signing.json index 167577681423..3ad206e9823f 100644 --- a/specification/developersigning/data-plane/Azure.Developer.Signing/preview/2023-06-15-preview/azure.developer.signing.json +++ b/specification/developersigning/data-plane/Azure.Developer.Signing/preview/2023-06-15-preview/azure.developer.signing.json @@ -93,6 +93,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/SigningPayloadOptions" diff --git a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/common.tsp b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/common.tsp index daf2d047610f..3393e203af29 100644 --- a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/common.tsp +++ b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/common.tsp @@ -104,3 +104,14 @@ model Ipv6Address { @doc("User equipment port.") port: int32; } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." 
+> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/location.tsp b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/location.tsp index 2fe865cf1574..c90f5b3e63f9 100644 --- a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/location.tsp +++ b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/location.tsp @@ -13,7 +13,7 @@ interface DeviceLocation { @doc("Verifies whether a device is within a specified location area, defined as an accuracy (radius) around a point, specified by longitude and latitude.") verify is Operations.ResourceAction< DeviceLocationEndpoint, - DeviceLocationVerificationContent, + BodyParameter, DeviceLocationVerificationResult >; } diff --git a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/network.tsp b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/network.tsp index 48faea42e171..47bb8931bd9d 100644 --- a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/network.tsp +++ b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/network.tsp @@ -14,7 +14,7 @@ interface DeviceNetwork { @doc("Retrieves the network a given device is on. 
Returns network in a networkCode format that can be used for other APIs.") retrieve is Operations.ResourceAction< DeviceNetworkRetrievalEndpoint, - NetworkIdentifier, + BodyParameter, NetworkRetrievalResult >; } diff --git a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/number.tsp b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/number.tsp index 550e6d3d4477..c781676a7113 100644 --- a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/number.tsp +++ b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/number.tsp @@ -29,7 +29,7 @@ interface NumberVerification { @action("verify") verifyWithoutCode is Operations.ResourceAction< NumberVerificationEndpoint, - NumberVerificationWithoutCodeContent, + BodyParameter, TypeSpec.Http.Response<302> & {}, TraitOverride, NumberVerificationResult >; } diff --git a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/simswap.tsp b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/simswap.tsp index c6c717e9b1b1..c1b76c601aad 100644 --- a/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/simswap.tsp +++ b/specification/programmableconnectivity/Azure.ProgrammableConnectivity/apis/simswap.tsp @@ -14,14 +14,14 @@ interface SimSwap { @doc("Provides timestamp of latest SIM swap") retrieve is Operations.ResourceAction< SimSwapEndpoint, - SimSwapRetrievalContent, + BodyParameter, SimSwapRetrievalResult >; @doc("Verifies if a SIM swap has been performed during a past period (defined in the request with 'maxAgeHours' attribute). 
Returns 'True' if a SIM swap has occured.") verify is Operations.ResourceAction< SimSwapEndpoint, - SimSwapVerificationContent, + BodyParameter, SimSwapVerificationResult >; } diff --git a/specification/programmableconnectivity/data-plane/Azure.ProgrammableConnectivity/preview/2024-02-09-preview/openapi.json b/specification/programmableconnectivity/data-plane/Azure.ProgrammableConnectivity/preview/2024-02-09-preview/openapi.json index 1f674e941505..4c09ff5a23a7 100644 --- a/specification/programmableconnectivity/data-plane/Azure.ProgrammableConnectivity/preview/2024-02-09-preview/openapi.json +++ b/specification/programmableconnectivity/data-plane/Azure.ProgrammableConnectivity/preview/2024-02-09-preview/openapi.json @@ -68,6 +68,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DeviceLocationVerificationContent" @@ -130,6 +131,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/NetworkIdentifier" @@ -192,6 +194,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/NumberVerificationWithoutCodeContent" @@ -256,6 +259,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/SimSwapRetrievalContent" @@ -318,6 +322,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/SimSwapVerificationContent" @@ -382,6 +387,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/NumberVerificationWithCodeContent" diff --git a/specification/purview/Azure.Analytics.Purview.DataMap/models.tsp b/specification/purview/Azure.Analytics.Purview.DataMap/models.tsp index dc511d8f89e4..854809ea17d1 100644 --- 
a/specification/purview/Azure.Analytics.Purview.DataMap/models.tsp +++ b/specification/purview/Azure.Analytics.Purview.DataMap/models.tsp @@ -1983,3 +1983,14 @@ model AtlasTypeDefHeader { @doc("The name of the type definition.") name?: string; } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/purview/Azure.Analytics.Purview.DataMap/routes.tsp b/specification/purview/Azure.Analytics.Purview.DataMap/routes.tsp index e7ddc9558184..7dd2a7fb5645 100644 --- a/specification/purview/Azure.Analytics.Purview.DataMap/routes.tsp +++ b/specification/purview/Azure.Analytics.Purview.DataMap/routes.tsp @@ -1619,7 +1619,7 @@ interface Discovery { @route("/search/query") @post query is Azure.Core.Foundations.Operation< - QueryOptions, + BodyParameter, QueryResult, {}, AtlasErrorResponse @@ -1630,7 +1630,7 @@ interface Discovery { @route("/search/suggest") @post suggest is Azure.Core.Foundations.Operation< - SuggestOptions, + BodyParameter, SuggestResult, {}, AtlasErrorResponse @@ -1641,7 +1641,7 @@ interface Discovery { @route("/search/autocomplete") @post autoComplete is Azure.Core.Foundations.Operation< - AutoCompleteOptions, + BodyParameter, AutoCompleteResult, {}, AtlasErrorResponse diff --git a/specification/purview/data-plane/Azure.Analytics.Purview.DataMap/stable/2023-09-01/purviewdatamap.json b/specification/purview/data-plane/Azure.Analytics.Purview.DataMap/stable/2023-09-01/purviewdatamap.json index f2e6eb6c6382..2d8ab19b2b35 100644 --- a/specification/purview/data-plane/Azure.Analytics.Purview.DataMap/stable/2023-09-01/purviewdatamap.json +++ b/specification/purview/data-plane/Azure.Analytics.Purview.DataMap/stable/2023-09-01/purviewdatamap.json @@ -4266,6 +4266,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AutoCompleteOptions" 
@@ -4304,6 +4305,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/QueryOptions" @@ -4402,6 +4404,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/SuggestOptions" diff --git a/specification/riskiq/Easm/models.tsp b/specification/riskiq/Easm/models.tsp index 74f37c48bd0b..8550fb724269 100644 --- a/specification/riskiq/Easm/models.tsp +++ b/specification/riskiq/Easm/models.tsp @@ -2155,3 +2155,14 @@ model DiscoGroupSummaryResult { @doc("The name that can be used for display purposes.") displayName: string; } + +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; diff --git a/specification/riskiq/Easm/routes.tsp b/specification/riskiq/Easm/routes.tsp index c787851abd12..f8220b541bfb 100644 --- a/specification/riskiq/Easm/routes.tsp +++ b/specification/riskiq/Easm/routes.tsp @@ -41,7 +41,7 @@ interface Assets { @Versioning.added(Versions.v2024_03_01_preview) getAssetsExport is Operations.ResourceCollectionAction< AssetResource, - AssetsExportRequest, + BodyParameter<AssetsExportRequest>, Task, AssetsExportTrait >; @@ -61,7 +61,7 @@ interface Assets { @Versioning.added(Versions.v2024_03_01_preview) getDeltaDetails is Operations.ResourceCollectionAction< AssetResource, - DeltaDetailsRequest, + BodyParameter<DeltaDetailsRequest>, DeltaPageResult, DeltaDetailsTrait >; @@ -71,7 +71,7 @@ interface Assets { @Versioning.added(Versions.v2024_03_01_preview) getDeltaSummary is Operations.ResourceCollectionAction< AssetResource, - DeltaSummaryRequest, + BodyParameter<DeltaSummaryRequest>, DeltaSummaryResult, {} >; @@ -90,7 +90,7 @@ interface DataConnections { @action("validate") validateDataConnection is Operations.ResourceCollectionAction< DataConnection, - DataConnectionData, + BodyParameter<DataConnectionData>, ValidateResult >; @doc("Retrieve a data connection with a given
dataConnectionName.") @@ -102,7 +102,7 @@ interface DataConnections { @put createOrReplaceDataConnection is Foundations.ResourceOperation< DataConnection, - DataConnectionData, + BodyParameter<DataConnectionData>, DataConnection >; @@ -122,7 +122,7 @@ interface DiscoveryGroups { @clientName("ValidateDiscoveryGroup", "csharp") validateDiscoGroup is Operations.ResourceCollectionAction< DiscoGroup, - DiscoGroupData, + BodyParameter<DiscoGroupData>, ValidateResult >; @doc("Retrieve a discovery group with a given groupName.") @@ -162,7 +162,7 @@ interface DiscoveryGroups { @Versioning.added(Versions.v2024_03_01_preview) getAssetChainSummary is Operations.ResourceCollectionAction< DiscoGroup, - AssetChainRequest, + BodyParameter<AssetChainRequest>, AssetChainSummaryResult >; @@ -171,7 +171,7 @@ @Versioning.added(Versions.v2024_03_01_preview) dismissAssetChain is Operations.ResourceCollectionAction< DiscoGroup, - AssetChainRequest, + BodyParameter<AssetChainRequest>, Task >; } @@ -203,7 +203,7 @@ interface Reports { @route("/reports/assets:getSnapshot") @post getSnapshot is Foundations.Operation< - ReportAssetSnapshotRequest, + BodyParameter<ReportAssetSnapshotRequest>, ReportAssetSnapshotResult >; #suppress "@azure-tools/typespec-azure-core/use-standard-operations" @@ -211,7 +211,7 @@ @route("/reports/assets:getSummary") @post getSummary is Foundations.Operation< - ReportAssetSummaryRequest, + BodyParameter<ReportAssetSummaryRequest>, ReportAssetSummaryResult >; @@ -221,7 +221,7 @@ @route("/reports/assets:getSnapshotExport") @post getSnapshotExport is Foundations.Operation< - ReportAssetSnapshotExportRequest, + BodyParameter<ReportAssetSnapshotExportRequest>, Task >; } diff --git a/specification/riskiq/data-plane/Microsoft.Easm/preview/2023-03-01-preview/easm.json b/specification/riskiq/data-plane/Microsoft.Easm/preview/2023-03-01-preview/easm.json index 80583013d4dc..dd82cf71c089 100644 --- a/specification/riskiq/data-plane/Microsoft.Easm/preview/2023-03-01-preview/easm.json +++ b/specification/riskiq/data-plane/Microsoft.Easm/preview/2023-03-01-preview/easm.json
@@ -337,6 +337,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DataConnectionData" @@ -425,6 +426,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DataConnectionData" @@ -718,6 +720,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DiscoGroupData" @@ -901,6 +904,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ReportAssetSnapshotRequest" @@ -948,6 +952,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ReportAssetSummaryRequest" diff --git a/specification/riskiq/data-plane/Microsoft.Easm/preview/2024-03-01-preview/easm.json b/specification/riskiq/data-plane/Microsoft.Easm/preview/2024-03-01-preview/easm.json index 7414e732a170..d6f807a38604 100644 --- a/specification/riskiq/data-plane/Microsoft.Easm/preview/2024-03-01-preview/easm.json +++ b/specification/riskiq/data-plane/Microsoft.Easm/preview/2024-03-01-preview/easm.json @@ -314,6 +314,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssetsExportRequest" @@ -367,6 +368,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DeltaDetailsRequest" @@ -417,6 +419,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DeltaSummaryRequest" @@ -651,6 +654,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DataConnectionData" @@ -739,6 +743,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": 
"#/definitions/DataConnectionData" @@ -1073,6 +1078,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssetChainRequest" @@ -1120,6 +1126,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/AssetChainRequest" @@ -1167,6 +1174,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/DiscoGroupData" @@ -1350,6 +1358,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ReportAssetSnapshotRequest" @@ -1397,6 +1406,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ReportAssetSnapshotExportRequest" @@ -1444,6 +1454,7 @@ { "name": "body", "in": "body", + "description": "Body parameter.", "required": true, "schema": { "$ref": "#/definitions/ReportAssetSummaryRequest" diff --git a/specification/voiceservices/VoiceServices.Provisioning/main.tsp b/specification/voiceservices/VoiceServices.Provisioning/main.tsp index caf465c869bc..02a41d6528cc 100644 --- a/specification/voiceservices/VoiceServices.Provisioning/main.tsp +++ b/specification/voiceservices/VoiceServices.Provisioning/main.tsp @@ -781,6 +781,17 @@ model CountOfRecordsHeader { countOfRecords?: int32; } +alias BodyParameter< + T, + TName extends valueof string = "body", + TDoc extends valueof string = "Body parameter." +> = { + @doc(TDoc) + @friendlyName(TName) + @bodyRoot + body: T; +}; + alias FilterQueryParameters = QueryParametersTrait<{ ...FilterQueryParameter; }>; @@ -843,7 +854,11 @@ interface Accounts { @doc("Create or update up to 100 Numbers on the specified account.") createOrUpdateNumbers is Operations.ResourceAction< AccountResource, - BatchNumbers, + BodyParameter< + BatchNumbers, + "body", + "Batch of numbers to create or replace." 
+ >, BatchNumbers >; @@ -852,7 +867,7 @@ interface Accounts { @doc("Delete up to 100 Numbers on the specified account.") deleteNumbers is Operations.ResourceAction< AccountResource, - BatchNumbersDelete, + BodyParameter<BatchNumbersDelete, "body", "Batch numbers to delete.">, NoContentResponse >; @@ -893,7 +908,11 @@ interface Accounts { @doc("Get a list of Teams Available Capabilities on the specified account.") getTeamsAvailableCapabilities is Operations.ResourceAction< AccountResource, - NumberIdentifiers, + BodyParameter< + NumberIdentifiers, + "body", + "List identifiers for a Number resource" + >, TeamsAvailableCapabilities >; diff --git a/specification/voiceservices/data-plane/Microsoft.VoiceServices/preview/2024-02-29-preview/swagger.json b/specification/voiceservices/data-plane/Microsoft.VoiceServices/preview/2024-02-29-preview/swagger.json index 87695257d9f6..25cd2ef5dae0 100644 --- a/specification/voiceservices/data-plane/Microsoft.VoiceServices/preview/2024-02-29-preview/swagger.json +++ b/specification/voiceservices/data-plane/Microsoft.VoiceServices/preview/2024-02-29-preview/swagger.json @@ -499,6 +499,7 @@ { "name": "body", "in": "body", + "description": "Batch of numbers to create or replace.", "required": true, "schema": { "$ref": "#/definitions/BatchNumbers" @@ -597,6 +598,7 @@ { "name": "body", "in": "body", + "description": "Batch numbers to delete.", "required": true, "schema": { "$ref": "#/definitions/BatchNumbersDelete" @@ -782,6 +784,7 @@ { "name": "body", "in": "body", + "description": "List identifiers for a Number resource", "required": true, "schema": { "$ref": "#/definitions/NumberIdentifiers"