diff --git a/clients/client-rekognition/README.md b/clients/client-rekognition/README.md index b446dbf923e5..ebf55d8b6d04 100644 --- a/clients/client-rekognition/README.md +++ b/clients/client-rekognition/README.md @@ -864,6 +864,14 @@ GetLabelDetection [Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/getlabeldetectioncommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/getlabeldetectioncommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/getlabeldetectioncommandoutput.html) + +
+ +GetMediaAnalysisJob + + +[Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/getmediaanalysisjobcommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/getmediaanalysisjobcommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/getmediaanalysisjobcommandoutput.html) +
@@ -928,6 +936,14 @@ ListFaces [Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/listfacescommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/listfacescommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/listfacescommandoutput.html) +
+
+ +ListMediaAnalysisJobs + + +[Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/listmediaanalysisjobscommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/listmediaanalysisjobscommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/listmediaanalysisjobscommandoutput.html) +
@@ -1048,6 +1064,14 @@ StartLabelDetection [Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/startlabeldetectioncommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/startlabeldetectioncommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/startlabeldetectioncommandoutput.html) +
+
+ +StartMediaAnalysisJob + + +[Command API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/classes/startmediaanalysisjobcommand.html) / [Input](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/startmediaanalysisjobcommandinput.html) / [Output](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-rekognition/interfaces/startmediaanalysisjobcommandoutput.html) +
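Taken together, the three new operations form a start / poll / list workflow. Below is a minimal sketch of that flow in TypeScript, based only on the input and output shapes shown in the generated command docs further down in this diff; the region, bucket names, manifest key, confidence threshold, and polling interval are illustrative placeholders, not values from this change.

```typescript
import {
  GetMediaAnalysisJobCommand,
  ListMediaAnalysisJobsCommand,
  RekognitionClient,
  StartMediaAnalysisJobCommand,
} from "@aws-sdk/client-rekognition";

async function runModerationSweep(): Promise<void> {
  const client = new RekognitionClient({ region: "us-east-1" }); // placeholder region

  // Start a bulk moderation job over a manifest stored in S3 (placeholder buckets and keys).
  const { JobId } = await client.send(
    new StartMediaAnalysisJobCommand({
      JobName: "moderation-sweep",
      OperationsConfig: { DetectModerationLabels: { MinConfidence: 50 } },
      Input: { S3Object: { Bucket: "my-input-bucket", Name: "manifest.jsonl" } },
      OutputConfig: { S3Bucket: "my-output-bucket", S3KeyPrefix: "results/" },
    })
  );

  // Poll GetMediaAnalysisJob until the job leaves its non-terminal states.
  let job = await client.send(new GetMediaAnalysisJobCommand({ JobId: JobId! }));
  while (job.Status === "CREATED" || job.Status === "QUEUED" || job.Status === "IN_PROGRESS") {
    await new Promise((resolve) => setTimeout(resolve, 30_000)); // arbitrary 30s interval
    job = await client.send(new GetMediaAnalysisJobCommand({ JobId: JobId! }));
  }
  console.log(job.Status, job.Results?.S3Object ?? job.FailureDetails);

  // Enumerate recent jobs; pass the returned NextToken back in to page past MaxResults.
  const { MediaAnalysisJobs } = await client.send(new ListMediaAnalysisJobsCommand({ MaxResults: 100 }));
  for (const description of MediaAnalysisJobs ?? []) {
    console.log(description.JobId, description.Status);
  }
}
```

StartMediaAnalysisJob also accepts an optional ClientRequestToken for idempotent retries; reusing a token with different parameters raises the IdempotentParameterMismatchException documented on the command below.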
diff --git a/clients/client-rekognition/src/Rekognition.ts b/clients/client-rekognition/src/Rekognition.ts index 252883998538..780afacc7796 100644 --- a/clients/client-rekognition/src/Rekognition.ts +++ b/clients/client-rekognition/src/Rekognition.ts @@ -172,6 +172,11 @@ import { GetLabelDetectionCommandInput, GetLabelDetectionCommandOutput, } from "./commands/GetLabelDetectionCommand"; +import { + GetMediaAnalysisJobCommand, + GetMediaAnalysisJobCommandInput, + GetMediaAnalysisJobCommandOutput, +} from "./commands/GetMediaAnalysisJobCommand"; import { GetPersonTrackingCommand, GetPersonTrackingCommandInput, @@ -204,6 +209,11 @@ import { ListDatasetLabelsCommandOutput, } from "./commands/ListDatasetLabelsCommand"; import { ListFacesCommand, ListFacesCommandInput, ListFacesCommandOutput } from "./commands/ListFacesCommand"; +import { + ListMediaAnalysisJobsCommand, + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, +} from "./commands/ListMediaAnalysisJobsCommand"; import { ListProjectPoliciesCommand, ListProjectPoliciesCommandInput, @@ -267,6 +277,11 @@ import { StartLabelDetectionCommandInput, StartLabelDetectionCommandOutput, } from "./commands/StartLabelDetectionCommand"; +import { + StartMediaAnalysisJobCommand, + StartMediaAnalysisJobCommandInput, + StartMediaAnalysisJobCommandOutput, +} from "./commands/StartMediaAnalysisJobCommand"; import { StartPersonTrackingCommand, StartPersonTrackingCommandInput, @@ -359,6 +374,7 @@ const commands = { GetFaceLivenessSessionResultsCommand, GetFaceSearchCommand, GetLabelDetectionCommand, + GetMediaAnalysisJobCommand, GetPersonTrackingCommand, GetSegmentDetectionCommand, GetTextDetectionCommand, @@ -367,6 +383,7 @@ const commands = { ListDatasetEntriesCommand, ListDatasetLabelsCommand, ListFacesCommand, + ListMediaAnalysisJobsCommand, ListProjectPoliciesCommand, ListStreamProcessorsCommand, ListTagsForResourceCommand, @@ -382,6 +399,7 @@ const commands = { StartFaceDetectionCommand, StartFaceSearchCommand, StartLabelDetectionCommand, + StartMediaAnalysisJobCommand, StartPersonTrackingCommand, StartProjectVersionCommand, StartSegmentDetectionCommand, @@ -964,6 +982,23 @@ export interface Rekognition { cb: (err: any, data?: GetLabelDetectionCommandOutput) => void ): void; + /** + * @see {@link GetMediaAnalysisJobCommand} + */ + getMediaAnalysisJob( + args: GetMediaAnalysisJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + getMediaAnalysisJob( + args: GetMediaAnalysisJobCommandInput, + cb: (err: any, data?: GetMediaAnalysisJobCommandOutput) => void + ): void; + getMediaAnalysisJob( + args: GetMediaAnalysisJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetMediaAnalysisJobCommandOutput) => void + ): void; + /** * @see {@link GetPersonTrackingCommand} */ @@ -1085,6 +1120,23 @@ export interface Rekognition { cb: (err: any, data?: ListFacesCommandOutput) => void ): void; + /** + * @see {@link ListMediaAnalysisJobsCommand} + */ + listMediaAnalysisJobs( + args: ListMediaAnalysisJobsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + listMediaAnalysisJobs( + args: ListMediaAnalysisJobsCommandInput, + cb: (err: any, data?: ListMediaAnalysisJobsCommandOutput) => void + ): void; + listMediaAnalysisJobs( + args: ListMediaAnalysisJobsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListMediaAnalysisJobsCommandOutput) => void + ): void; + /** * @see {@link ListProjectPoliciesCommand} */ @@ -1319,6 +1371,23 @@ export interface Rekognition { cb: (err: any, data?: 
StartLabelDetectionCommandOutput) => void ): void; + /** + * @see {@link StartMediaAnalysisJobCommand} + */ + startMediaAnalysisJob( + args: StartMediaAnalysisJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + startMediaAnalysisJob( + args: StartMediaAnalysisJobCommandInput, + cb: (err: any, data?: StartMediaAnalysisJobCommandOutput) => void + ): void; + startMediaAnalysisJob( + args: StartMediaAnalysisJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartMediaAnalysisJobCommandOutput) => void + ): void; + /** * @see {@link StartPersonTrackingCommand} */ diff --git a/clients/client-rekognition/src/RekognitionClient.ts b/clients/client-rekognition/src/RekognitionClient.ts index 1489fb087f0f..28799f68f79f 100644 --- a/clients/client-rekognition/src/RekognitionClient.ts +++ b/clients/client-rekognition/src/RekognitionClient.ts @@ -130,6 +130,10 @@ import { } from "./commands/GetFaceLivenessSessionResultsCommand"; import { GetFaceSearchCommandInput, GetFaceSearchCommandOutput } from "./commands/GetFaceSearchCommand"; import { GetLabelDetectionCommandInput, GetLabelDetectionCommandOutput } from "./commands/GetLabelDetectionCommand"; +import { + GetMediaAnalysisJobCommandInput, + GetMediaAnalysisJobCommandOutput, +} from "./commands/GetMediaAnalysisJobCommand"; import { GetPersonTrackingCommandInput, GetPersonTrackingCommandOutput } from "./commands/GetPersonTrackingCommand"; import { GetSegmentDetectionCommandInput, @@ -141,6 +145,10 @@ import { ListCollectionsCommandInput, ListCollectionsCommandOutput } from "./com import { ListDatasetEntriesCommandInput, ListDatasetEntriesCommandOutput } from "./commands/ListDatasetEntriesCommand"; import { ListDatasetLabelsCommandInput, ListDatasetLabelsCommandOutput } from "./commands/ListDatasetLabelsCommand"; import { ListFacesCommandInput, ListFacesCommandOutput } from "./commands/ListFacesCommand"; +import { + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, +} from "./commands/ListMediaAnalysisJobsCommand"; import { ListProjectPoliciesCommandInput, ListProjectPoliciesCommandOutput, @@ -177,6 +185,10 @@ import { StartLabelDetectionCommandInput, StartLabelDetectionCommandOutput, } from "./commands/StartLabelDetectionCommand"; +import { + StartMediaAnalysisJobCommandInput, + StartMediaAnalysisJobCommandOutput, +} from "./commands/StartMediaAnalysisJobCommand"; import { StartPersonTrackingCommandInput, StartPersonTrackingCommandOutput, @@ -262,6 +274,7 @@ export type ServiceInputTypes = | GetFaceLivenessSessionResultsCommandInput | GetFaceSearchCommandInput | GetLabelDetectionCommandInput + | GetMediaAnalysisJobCommandInput | GetPersonTrackingCommandInput | GetSegmentDetectionCommandInput | GetTextDetectionCommandInput @@ -270,6 +283,7 @@ export type ServiceInputTypes = | ListDatasetEntriesCommandInput | ListDatasetLabelsCommandInput | ListFacesCommandInput + | ListMediaAnalysisJobsCommandInput | ListProjectPoliciesCommandInput | ListStreamProcessorsCommandInput | ListTagsForResourceCommandInput @@ -285,6 +299,7 @@ export type ServiceInputTypes = | StartFaceDetectionCommandInput | StartFaceSearchCommandInput | StartLabelDetectionCommandInput + | StartMediaAnalysisJobCommandInput | StartPersonTrackingCommandInput | StartProjectVersionCommandInput | StartSegmentDetectionCommandInput @@ -339,6 +354,7 @@ export type ServiceOutputTypes = | GetFaceLivenessSessionResultsCommandOutput | GetFaceSearchCommandOutput | GetLabelDetectionCommandOutput + | GetMediaAnalysisJobCommandOutput | 
GetPersonTrackingCommandOutput | GetSegmentDetectionCommandOutput | GetTextDetectionCommandOutput @@ -347,6 +363,7 @@ export type ServiceOutputTypes = | ListDatasetEntriesCommandOutput | ListDatasetLabelsCommandOutput | ListFacesCommandOutput + | ListMediaAnalysisJobsCommandOutput | ListProjectPoliciesCommandOutput | ListStreamProcessorsCommandOutput | ListTagsForResourceCommandOutput @@ -362,6 +379,7 @@ export type ServiceOutputTypes = | StartFaceDetectionCommandOutput | StartFaceSearchCommandOutput | StartLabelDetectionCommandOutput + | StartMediaAnalysisJobCommandOutput | StartPersonTrackingCommandOutput | StartProjectVersionCommandOutput | StartSegmentDetectionCommandOutput diff --git a/clients/client-rekognition/src/commands/GetMediaAnalysisJobCommand.ts b/clients/client-rekognition/src/commands/GetMediaAnalysisJobCommand.ts new file mode 100644 index 000000000000..69c3597c34a1 --- /dev/null +++ b/clients/client-rekognition/src/commands/GetMediaAnalysisJobCommand.ts @@ -0,0 +1,208 @@ +// smithy-typescript generated code +import { EndpointParameterInstructions, getEndpointPlugin } from "@smithy/middleware-endpoint"; +import { getSerdePlugin } from "@smithy/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@smithy/protocol-http"; +import { Command as $Command } from "@smithy/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, + SMITHY_CONTEXT_KEY, +} from "@smithy/types"; + +import { GetMediaAnalysisJobRequest, GetMediaAnalysisJobResponse } from "../models/models_0"; +import { de_GetMediaAnalysisJobCommand, se_GetMediaAnalysisJobCommand } from "../protocols/Aws_json1_1"; +import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; + +/** + * @public + */ +export { __MetadataBearer, $Command }; +/** + * @public + * + * The input for {@link GetMediaAnalysisJobCommand}. + */ +export interface GetMediaAnalysisJobCommandInput extends GetMediaAnalysisJobRequest {} +/** + * @public + * + * The output of {@link GetMediaAnalysisJobCommand}. + */ +export interface GetMediaAnalysisJobCommandOutput extends GetMediaAnalysisJobResponse, __MetadataBearer {} + +/** + * @public + *

Retrieves the results for a given media analysis job. + * Takes a JobId returned by StartMediaAnalysisJob.
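+ * Poll this operation until the returned Status is SUCCEEDED or FAILED; CREATED, QUEUED, and IN_PROGRESS indicate the job has not yet finished.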

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RekognitionClient, GetMediaAnalysisJobCommand } from "@aws-sdk/client-rekognition"; // ES Modules import + * // const { RekognitionClient, GetMediaAnalysisJobCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import + * const client = new RekognitionClient(config); + * const input = { // GetMediaAnalysisJobRequest + * JobId: "STRING_VALUE", // required + * }; + * const command = new GetMediaAnalysisJobCommand(input); + * const response = await client.send(command); + * // { // GetMediaAnalysisJobResponse + * // JobId: "STRING_VALUE", // required + * // JobName: "STRING_VALUE", + * // OperationsConfig: { // MediaAnalysisOperationsConfig + * // DetectModerationLabels: { // MediaAnalysisDetectModerationLabelsConfig + * // MinConfidence: Number("float"), + * // ProjectVersion: "STRING_VALUE", + * // }, + * // }, + * // Status: "CREATED" || "QUEUED" || "IN_PROGRESS" || "SUCCEEDED" || "FAILED", // required + * // FailureDetails: { // MediaAnalysisJobFailureDetails + * // Code: "INTERNAL_ERROR" || "INVALID_S3_OBJECT" || "INVALID_MANIFEST" || "INVALID_OUTPUT_CONFIG" || "INVALID_KMS_KEY" || "ACCESS_DENIED" || "RESOURCE_NOT_FOUND" || "RESOURCE_NOT_READY" || "THROTTLED", + * // Message: "STRING_VALUE", + * // }, + * // CreationTimestamp: new Date("TIMESTAMP"), // required + * // CompletionTimestamp: new Date("TIMESTAMP"), + * // Input: { // MediaAnalysisInput + * // S3Object: { // S3Object + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // OutputConfig: { // MediaAnalysisOutputConfig + * // S3Bucket: "STRING_VALUE", // required + * // S3KeyPrefix: "STRING_VALUE", + * // }, + * // KmsKeyId: "STRING_VALUE", + * // Results: { // MediaAnalysisResults + * // S3Object: { + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // ManifestSummary: { // MediaAnalysisManifestSummary + * // S3Object: { + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // }; + * + * ``` + * + * @param GetMediaAnalysisJobCommandInput - {@link GetMediaAnalysisJobCommandInput} + * @returns {@link GetMediaAnalysisJobCommandOutput} + * @see {@link GetMediaAnalysisJobCommandInput} for command's `input` shape. + * @see {@link GetMediaAnalysisJobCommandOutput} for command's `response` shape. + * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape. + * + * @throws {@link AccessDeniedException} (client fault) + *

You are not authorized to perform the action.

+ * + * @throws {@link InternalServerError} (server fault) + *

Amazon Rekognition experienced a service issue. Try your call again.

+ * + * @throws {@link InvalidParameterException} (client fault) + *

Input parameter violated a constraint. Validate your parameter before calling the API + * operation again.

+ * + * @throws {@link ProvisionedThroughputExceededException} (client fault) + *

The number of requests exceeded your throughput limit. If you want to increase this + * limit, contact Amazon Rekognition.

+ * + * @throws {@link ResourceNotFoundException} (client fault) + *

The resource specified in the request cannot be found.

+ * + * @throws {@link ThrottlingException} (server fault) + *

Amazon Rekognition is temporarily unable to process the request. Try your call again.

+ * + * @throws {@link RekognitionServiceException} + *

Base exception class for all service exceptions from Rekognition service.

+ * + */ +export class GetMediaAnalysisJobCommand extends $Command< + GetMediaAnalysisJobCommandInput, + GetMediaAnalysisJobCommandOutput, + RekognitionClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + public static getEndpointParameterInstructions(): EndpointParameterInstructions { + return { + UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" }, + Endpoint: { type: "builtInParams", name: "endpoint" }, + Region: { type: "builtInParams", name: "region" }, + UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" }, + }; + } + + /** + * @public + */ + constructor(readonly input: GetMediaAnalysisJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RekognitionClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + this.middlewareStack.use( + getEndpointPlugin(configuration, GetMediaAnalysisJobCommand.getEndpointParameterInstructions()) + ); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RekognitionClient"; + const commandName = "GetMediaAnalysisJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: (_: any) => _, + outputFilterSensitiveLog: (_: any) => _, + [SMITHY_CONTEXT_KEY]: { + service: "RekognitionService", + operation: "GetMediaAnalysisJob", + }, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + /** + * @internal + */ + private serialize(input: GetMediaAnalysisJobCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return se_GetMediaAnalysisJobCommand(input, context); + } + + /** + * @internal + */ + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return de_GetMediaAnalysisJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rekognition/src/commands/ListMediaAnalysisJobsCommand.ts b/clients/client-rekognition/src/commands/ListMediaAnalysisJobsCommand.ts new file mode 100644 index 000000000000..a54b3392e789 --- /dev/null +++ b/clients/client-rekognition/src/commands/ListMediaAnalysisJobsCommand.ts @@ -0,0 +1,213 @@ +// smithy-typescript generated code +import { EndpointParameterInstructions, getEndpointPlugin } from "@smithy/middleware-endpoint"; +import { getSerdePlugin } from "@smithy/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@smithy/protocol-http"; +import { Command as $Command } from "@smithy/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, + SMITHY_CONTEXT_KEY, +} from "@smithy/types"; + +import { ListMediaAnalysisJobsRequest, ListMediaAnalysisJobsResponse } from "../models/models_0"; +import { de_ListMediaAnalysisJobsCommand, se_ListMediaAnalysisJobsCommand } from "../protocols/Aws_json1_1"; +import { RekognitionClientResolvedConfig, ServiceInputTypes, 
ServiceOutputTypes } from "../RekognitionClient"; + +/** + * @public + */ +export { __MetadataBearer, $Command }; +/** + * @public + * + * The input for {@link ListMediaAnalysisJobsCommand}. + */ +export interface ListMediaAnalysisJobsCommandInput extends ListMediaAnalysisJobsRequest {} +/** + * @public + * + * The output of {@link ListMediaAnalysisJobsCommand}. + */ +export interface ListMediaAnalysisJobsCommandOutput extends ListMediaAnalysisJobsResponse, __MetadataBearer {} + +/** + * @public + *

Returns a list of media analysis jobs. Results are sorted by CreationTimestamp in descending order.
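+ * If the response is truncated, pass the returned NextToken into a subsequent call to retrieve the next page of jobs.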

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RekognitionClient, ListMediaAnalysisJobsCommand } from "@aws-sdk/client-rekognition"; // ES Modules import + * // const { RekognitionClient, ListMediaAnalysisJobsCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import + * const client = new RekognitionClient(config); + * const input = { // ListMediaAnalysisJobsRequest + * NextToken: "STRING_VALUE", + * MaxResults: Number("int"), + * }; + * const command = new ListMediaAnalysisJobsCommand(input); + * const response = await client.send(command); + * // { // ListMediaAnalysisJobsResponse + * // NextToken: "STRING_VALUE", + * // MediaAnalysisJobs: [ // MediaAnalysisJobDescriptions // required + * // { // MediaAnalysisJobDescription + * // JobId: "STRING_VALUE", // required + * // JobName: "STRING_VALUE", + * // OperationsConfig: { // MediaAnalysisOperationsConfig + * // DetectModerationLabels: { // MediaAnalysisDetectModerationLabelsConfig + * // MinConfidence: Number("float"), + * // ProjectVersion: "STRING_VALUE", + * // }, + * // }, + * // Status: "CREATED" || "QUEUED" || "IN_PROGRESS" || "SUCCEEDED" || "FAILED", // required + * // FailureDetails: { // MediaAnalysisJobFailureDetails + * // Code: "INTERNAL_ERROR" || "INVALID_S3_OBJECT" || "INVALID_MANIFEST" || "INVALID_OUTPUT_CONFIG" || "INVALID_KMS_KEY" || "ACCESS_DENIED" || "RESOURCE_NOT_FOUND" || "RESOURCE_NOT_READY" || "THROTTLED", + * // Message: "STRING_VALUE", + * // }, + * // CreationTimestamp: new Date("TIMESTAMP"), // required + * // CompletionTimestamp: new Date("TIMESTAMP"), + * // Input: { // MediaAnalysisInput + * // S3Object: { // S3Object + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // OutputConfig: { // MediaAnalysisOutputConfig + * // S3Bucket: "STRING_VALUE", // required + * // S3KeyPrefix: "STRING_VALUE", + * // }, + * // KmsKeyId: "STRING_VALUE", + * // Results: { // MediaAnalysisResults + * // S3Object: { + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // ManifestSummary: { // MediaAnalysisManifestSummary + * // S3Object: { + * // Bucket: "STRING_VALUE", + * // Name: "STRING_VALUE", + * // Version: "STRING_VALUE", + * // }, + * // }, + * // }, + * // ], + * // }; + * + * ``` + * + * @param ListMediaAnalysisJobsCommandInput - {@link ListMediaAnalysisJobsCommandInput} + * @returns {@link ListMediaAnalysisJobsCommandOutput} + * @see {@link ListMediaAnalysisJobsCommandInput} for command's `input` shape. + * @see {@link ListMediaAnalysisJobsCommandOutput} for command's `response` shape. + * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape. + * + * @throws {@link AccessDeniedException} (client fault) + *

You are not authorized to perform the action.

+ * + * @throws {@link InternalServerError} (server fault) + *

Amazon Rekognition experienced a service issue. Try your call again.

+ * + * @throws {@link InvalidPaginationTokenException} (client fault) + *

Pagination token in the request is not valid.

+ * + * @throws {@link InvalidParameterException} (client fault) + *

Input parameter violated a constraint. Validate your parameter before calling the API + * operation again.

+ * + * @throws {@link ProvisionedThroughputExceededException} (client fault) + *

The number of requests exceeded your throughput limit. If you want to increase this + * limit, contact Amazon Rekognition.

+ * + * @throws {@link ThrottlingException} (server fault) + *

Amazon Rekognition is temporarily unable to process the request. Try your call again.

+ * + * @throws {@link RekognitionServiceException} + *

Base exception class for all service exceptions from Rekognition service.

+ * + */ +export class ListMediaAnalysisJobsCommand extends $Command< + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, + RekognitionClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + public static getEndpointParameterInstructions(): EndpointParameterInstructions { + return { + UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" }, + Endpoint: { type: "builtInParams", name: "endpoint" }, + Region: { type: "builtInParams", name: "region" }, + UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" }, + }; + } + + /** + * @public + */ + constructor(readonly input: ListMediaAnalysisJobsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RekognitionClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + this.middlewareStack.use( + getEndpointPlugin(configuration, ListMediaAnalysisJobsCommand.getEndpointParameterInstructions()) + ); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RekognitionClient"; + const commandName = "ListMediaAnalysisJobsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: (_: any) => _, + outputFilterSensitiveLog: (_: any) => _, + [SMITHY_CONTEXT_KEY]: { + service: "RekognitionService", + operation: "ListMediaAnalysisJobs", + }, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + /** + * @internal + */ + private serialize(input: ListMediaAnalysisJobsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return se_ListMediaAnalysisJobsCommand(input, context); + } + + /** + * @internal + */ + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return de_ListMediaAnalysisJobsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rekognition/src/commands/PutProjectPolicyCommand.ts b/clients/client-rekognition/src/commands/PutProjectPolicyCommand.ts index 8077404e6c2f..7ea120a272b7 100644 --- a/clients/client-rekognition/src/commands/PutProjectPolicyCommand.ts +++ b/clients/client-rekognition/src/commands/PutProjectPolicyCommand.ts @@ -14,7 +14,7 @@ import { SMITHY_CONTEXT_KEY, } from "@smithy/types"; -import { PutProjectPolicyRequest, PutProjectPolicyResponse } from "../models/models_0"; +import { PutProjectPolicyRequest, PutProjectPolicyResponse } from "../models/models_1"; import { de_PutProjectPolicyCommand, se_PutProjectPolicyCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/RecognizeCelebritiesCommand.ts b/clients/client-rekognition/src/commands/RecognizeCelebritiesCommand.ts index 3d15b73874d8..1d448c411f23 100644 --- a/clients/client-rekognition/src/commands/RecognizeCelebritiesCommand.ts +++ b/clients/client-rekognition/src/commands/RecognizeCelebritiesCommand.ts @@ -14,7 +14,7 @@ import { SMITHY_CONTEXT_KEY, } 
from "@smithy/types"; -import { RecognizeCelebritiesRequest, RecognizeCelebritiesResponse } from "../models/models_0"; +import { RecognizeCelebritiesRequest, RecognizeCelebritiesResponse } from "../models/models_1"; import { de_RecognizeCelebritiesCommand, se_RecognizeCelebritiesCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/SearchFacesByImageCommand.ts b/clients/client-rekognition/src/commands/SearchFacesByImageCommand.ts index a1745d0de805..43d941d67c49 100644 --- a/clients/client-rekognition/src/commands/SearchFacesByImageCommand.ts +++ b/clients/client-rekognition/src/commands/SearchFacesByImageCommand.ts @@ -14,7 +14,7 @@ import { SMITHY_CONTEXT_KEY, } from "@smithy/types"; -import { SearchFacesByImageRequest, SearchFacesByImageResponse } from "../models/models_0"; +import { SearchFacesByImageRequest, SearchFacesByImageResponse } from "../models/models_1"; import { de_SearchFacesByImageCommand, se_SearchFacesByImageCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/SearchFacesCommand.ts b/clients/client-rekognition/src/commands/SearchFacesCommand.ts index 4695b4630350..eb7f1d46a069 100644 --- a/clients/client-rekognition/src/commands/SearchFacesCommand.ts +++ b/clients/client-rekognition/src/commands/SearchFacesCommand.ts @@ -14,7 +14,7 @@ import { SMITHY_CONTEXT_KEY, } from "@smithy/types"; -import { SearchFacesRequest, SearchFacesResponse } from "../models/models_0"; +import { SearchFacesRequest, SearchFacesResponse } from "../models/models_1"; import { de_SearchFacesCommand, se_SearchFacesCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/SearchUsersByImageCommand.ts b/clients/client-rekognition/src/commands/SearchUsersByImageCommand.ts index b230fb03f19b..5b76b514f1e0 100644 --- a/clients/client-rekognition/src/commands/SearchUsersByImageCommand.ts +++ b/clients/client-rekognition/src/commands/SearchUsersByImageCommand.ts @@ -14,8 +14,7 @@ import { SMITHY_CONTEXT_KEY, } from "@smithy/types"; -import { SearchUsersByImageRequest } from "../models/models_0"; -import { SearchUsersByImageResponse } from "../models/models_1"; +import { SearchUsersByImageRequest, SearchUsersByImageResponse } from "../models/models_1"; import { de_SearchUsersByImageCommand, se_SearchUsersByImageCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/SearchUsersCommand.ts b/clients/client-rekognition/src/commands/SearchUsersCommand.ts index 9bd0a2292966..67486f4e5360 100644 --- a/clients/client-rekognition/src/commands/SearchUsersCommand.ts +++ b/clients/client-rekognition/src/commands/SearchUsersCommand.ts @@ -14,7 +14,7 @@ import { SMITHY_CONTEXT_KEY, } from "@smithy/types"; -import { SearchUsersRequest, SearchUsersResponse } from "../models/models_0"; +import { SearchUsersRequest, SearchUsersResponse } from "../models/models_1"; import { de_SearchUsersCommand, se_SearchUsersCommand } from "../protocols/Aws_json1_1"; import { RekognitionClientResolvedConfig, ServiceInputTypes, 
ServiceOutputTypes } from "../RekognitionClient"; diff --git a/clients/client-rekognition/src/commands/StartMediaAnalysisJobCommand.ts b/clients/client-rekognition/src/commands/StartMediaAnalysisJobCommand.ts new file mode 100644 index 000000000000..ff11f8e2c357 --- /dev/null +++ b/clients/client-rekognition/src/commands/StartMediaAnalysisJobCommand.ts @@ -0,0 +1,209 @@ +// smithy-typescript generated code +import { EndpointParameterInstructions, getEndpointPlugin } from "@smithy/middleware-endpoint"; +import { getSerdePlugin } from "@smithy/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@smithy/protocol-http"; +import { Command as $Command } from "@smithy/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, + SMITHY_CONTEXT_KEY, +} from "@smithy/types"; + +import { StartMediaAnalysisJobRequest, StartMediaAnalysisJobResponse } from "../models/models_1"; +import { de_StartMediaAnalysisJobCommand, se_StartMediaAnalysisJobCommand } from "../protocols/Aws_json1_1"; +import { RekognitionClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RekognitionClient"; + +/** + * @public + */ +export { __MetadataBearer, $Command }; +/** + * @public + * + * The input for {@link StartMediaAnalysisJobCommand}. + */ +export interface StartMediaAnalysisJobCommandInput extends StartMediaAnalysisJobRequest {} +/** + * @public + * + * The output of {@link StartMediaAnalysisJobCommand}. + */ +export interface StartMediaAnalysisJobCommandOutput extends StartMediaAnalysisJobResponse, __MetadataBearer {} + +/** + * @public + *

Initiates a new media analysis job. Accepts a manifest file in an Amazon S3 bucket. The + * output is a manifest file and a summary of the manifest stored in the Amazon S3 bucket.
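+ * The returned JobId can be passed to GetMediaAnalysisJob to track the job's progress and retrieve its results.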

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RekognitionClient, StartMediaAnalysisJobCommand } from "@aws-sdk/client-rekognition"; // ES Modules import + * // const { RekognitionClient, StartMediaAnalysisJobCommand } = require("@aws-sdk/client-rekognition"); // CommonJS import + * const client = new RekognitionClient(config); + * const input = { // StartMediaAnalysisJobRequest + * ClientRequestToken: "STRING_VALUE", + * JobName: "STRING_VALUE", + * OperationsConfig: { // MediaAnalysisOperationsConfig + * DetectModerationLabels: { // MediaAnalysisDetectModerationLabelsConfig + * MinConfidence: Number("float"), + * ProjectVersion: "STRING_VALUE", + * }, + * }, + * Input: { // MediaAnalysisInput + * S3Object: { // S3Object + * Bucket: "STRING_VALUE", + * Name: "STRING_VALUE", + * Version: "STRING_VALUE", + * }, + * }, + * OutputConfig: { // MediaAnalysisOutputConfig + * S3Bucket: "STRING_VALUE", // required + * S3KeyPrefix: "STRING_VALUE", + * }, + * KmsKeyId: "STRING_VALUE", + * }; + * const command = new StartMediaAnalysisJobCommand(input); + * const response = await client.send(command); + * // { // StartMediaAnalysisJobResponse + * // JobId: "STRING_VALUE", // required + * // }; + * + * ``` + * + * @param StartMediaAnalysisJobCommandInput - {@link StartMediaAnalysisJobCommandInput} + * @returns {@link StartMediaAnalysisJobCommandOutput} + * @see {@link StartMediaAnalysisJobCommandInput} for command's `input` shape. + * @see {@link StartMediaAnalysisJobCommandOutput} for command's `response` shape. + * @see {@link RekognitionClientResolvedConfig | config} for RekognitionClient's `config` shape. + * + * @throws {@link AccessDeniedException} (client fault) + *

You are not authorized to perform the action.

+ * + * @throws {@link IdempotentParameterMismatchException} (client fault) + *

A ClientRequestToken input parameter was reused with an operation, but at least one of the other input + * parameters is different from the previous call to the operation.

+ * + * @throws {@link InternalServerError} (server fault) + *

Amazon Rekognition experienced a service issue. Try your call again.

+ * + * @throws {@link InvalidManifestException} (client fault) + *

Indicates that a provided manifest file is empty or larger than the allowed limit.

+ * + * @throws {@link InvalidParameterException} (client fault) + *

Input parameter violated a constraint. Validate your parameter before calling the API + * operation again.

+ * + * @throws {@link InvalidS3ObjectException} (client fault) + *

Amazon Rekognition is unable to access the S3 object specified in the request.

+ * + * @throws {@link LimitExceededException} (client fault) + *

An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs + * concurrently, subsequent calls to start operations (for example, + * StartLabelDetection) will raise a LimitExceededException + * (HTTP status code: 400) until the number of concurrently running jobs is below + * the Amazon Rekognition service limit.

+ * + * @throws {@link ProvisionedThroughputExceededException} (client fault) + *

The number of requests exceeded your throughput limit. If you want to increase this + * limit, contact Amazon Rekognition.

+ * + * @throws {@link ResourceNotFoundException} (client fault) + *

The resource specified in the request cannot be found.

+ * + * @throws {@link ResourceNotReadyException} (client fault) + *

The requested resource isn't ready. For example, + * this exception occurs when you call DetectCustomLabels with a + * model version that isn't deployed.

+ * + * @throws {@link ThrottlingException} (server fault) + *

Amazon Rekognition is temporarily unable to process the request. Try your call again.

+ * + * @throws {@link RekognitionServiceException} + *

Base exception class for all service exceptions from Rekognition service.

+ * + */ +export class StartMediaAnalysisJobCommand extends $Command< + StartMediaAnalysisJobCommandInput, + StartMediaAnalysisJobCommandOutput, + RekognitionClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + public static getEndpointParameterInstructions(): EndpointParameterInstructions { + return { + UseFIPS: { type: "builtInParams", name: "useFipsEndpoint" }, + Endpoint: { type: "builtInParams", name: "endpoint" }, + Region: { type: "builtInParams", name: "region" }, + UseDualStack: { type: "builtInParams", name: "useDualstackEndpoint" }, + }; + } + + /** + * @public + */ + constructor(readonly input: StartMediaAnalysisJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RekognitionClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + this.middlewareStack.use( + getEndpointPlugin(configuration, StartMediaAnalysisJobCommand.getEndpointParameterInstructions()) + ); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RekognitionClient"; + const commandName = "StartMediaAnalysisJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: (_: any) => _, + outputFilterSensitiveLog: (_: any) => _, + [SMITHY_CONTEXT_KEY]: { + service: "RekognitionService", + operation: "StartMediaAnalysisJob", + }, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + /** + * @internal + */ + private serialize(input: StartMediaAnalysisJobCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return se_StartMediaAnalysisJobCommand(input, context); + } + + /** + * @internal + */ + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return de_StartMediaAnalysisJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rekognition/src/commands/index.ts b/clients/client-rekognition/src/commands/index.ts index 2e0e817da3af..921e7de46efb 100644 --- a/clients/client-rekognition/src/commands/index.ts +++ b/clients/client-rekognition/src/commands/index.ts @@ -37,6 +37,7 @@ export * from "./GetFaceDetectionCommand"; export * from "./GetFaceLivenessSessionResultsCommand"; export * from "./GetFaceSearchCommand"; export * from "./GetLabelDetectionCommand"; +export * from "./GetMediaAnalysisJobCommand"; export * from "./GetPersonTrackingCommand"; export * from "./GetSegmentDetectionCommand"; export * from "./GetTextDetectionCommand"; @@ -45,6 +46,7 @@ export * from "./ListCollectionsCommand"; export * from "./ListDatasetEntriesCommand"; export * from "./ListDatasetLabelsCommand"; export * from "./ListFacesCommand"; +export * from "./ListMediaAnalysisJobsCommand"; export * from "./ListProjectPoliciesCommand"; export * from "./ListStreamProcessorsCommand"; export * from "./ListTagsForResourceCommand"; @@ -60,6 +62,7 @@ export * from "./StartContentModerationCommand"; export * from "./StartFaceDetectionCommand"; export * from "./StartFaceSearchCommand"; export * from 
"./StartLabelDetectionCommand"; +export * from "./StartMediaAnalysisJobCommand"; export * from "./StartPersonTrackingCommand"; export * from "./StartProjectVersionCommand"; export * from "./StartSegmentDetectionCommand"; diff --git a/clients/client-rekognition/src/endpoint/ruleset.ts b/clients/client-rekognition/src/endpoint/ruleset.ts index 915c8a2b608e..fd826a9b978f 100644 --- a/clients/client-rekognition/src/endpoint/ruleset.ts +++ b/clients/client-rekognition/src/endpoint/ruleset.ts @@ -6,25 +6,27 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/rekognition.json */ -const q="required", -r="fn", -s="argv", -t="ref"; -const a="isSet", -b="tree", -c="error", -d="endpoint", -e="PartitionResult", -f={[q]:false,"type":"String"}, -g={[q]:true,"default":false,"type":"Boolean"}, -h={[t]:"Endpoint"}, -i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, -j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, -k={}, -l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, -m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, -n=[i], -o=[j], -p=[{[t]:"Region"}]; -const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://rekognition.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; +const s="required", +t="fn", +u="argv", +v="ref"; +const a=true, +b="isSet", +c="booleanEquals", +d="error", +e="endpoint", +f="tree", +g="PartitionResult", +h={[s]:false,"type":"String"}, +i={[s]:true,"default":false,"type":"Boolean"}, +j={[v]:"Endpoint"}, +k={[t]:c,[u]:[{[v]:"UseFIPS"},true]}, +l={[t]:c,[u]:[{[v]:"UseDualStack"},true]}, +m={}, +n={[t]:"getAttr",[u]:[{[v]:g},"supportsFIPS"]}, +o={[t]:c,[u]:[true,{[t]:"getAttr",[u]:[{[v]:g},"supportsDualStack"]}]}, +p=[k], +q=[l], +r=[{[v]:"Region"}]; +const _data={version:"1.0",parameters:{Region:h,UseDualStack:i,UseFIPS:i,Endpoint:h},rules:[{conditions:[{[t]:b,[u]:[j]}],rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:d},{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:d},{endpoint:{url:j,properties:m,headers:m},type:e}],type:f},{conditions:[{[t]:b,[u]:r}],rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:g}],rules:[{conditions:[k,l],rules:[{conditions:[{[t]:c,[u]:[a,n]},o],rules:[{endpoint:{url:"https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:m,headers:m},type:e}],type:f},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:d}],type:f},{conditions:p,rules:[{conditions:[{[t]:c,[u]:[n,a]}],rules:[{endpoint:{url:"https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}",properties:m,headers:m},type:e}],type:f},{error:"FIPS is enabled but this partition does not support FIPS",type:d}],type:f},{conditions:q,rules:[{conditions:[o],rules:[{endpoint:{url:"https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:m,headers:m},type:e}],type:f},{error:"DualStack is enabled but this partition does not support DualStack",type:d}],type:f},{endpoint:{url:"https://rekognition.{Region}.{PartitionResult#dnsSuffix}",properties:m,headers:m},type:e}],type:f}],type:f},{error:"Invalid Configuration: Missing Region",type:d}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-rekognition/src/models/models_0.ts b/clients/client-rekognition/src/models/models_0.ts index 61431115c6c8..649480d1b40d 100644 --- a/clients/client-rekognition/src/models/models_0.ts +++ b/clients/client-rekognition/src/models/models_0.ts @@ -6517,6 +6517,254 @@ export interface GetLabelDetectionResponse { GetRequestMetadata?: GetLabelDetectionRequestMetadata; } +/** + * @public + */ +export interface GetMediaAnalysisJobRequest { + /** + * @public + *

Unique identifier for the media analysis job for which you want to retrieve results.

+ */ + JobId: string | undefined; +} + +/** + * @public + * @enum + */ +export const MediaAnalysisJobFailureCode = { + ACCESS_DENIED: "ACCESS_DENIED", + INTERNAL_ERROR: "INTERNAL_ERROR", + INVALID_KMS_KEY: "INVALID_KMS_KEY", + INVALID_MANIFEST: "INVALID_MANIFEST", + INVALID_OUTPUT_CONFIG: "INVALID_OUTPUT_CONFIG", + INVALID_S3_OBJECT: "INVALID_S3_OBJECT", + RESOURCE_NOT_FOUND: "RESOURCE_NOT_FOUND", + RESOURCE_NOT_READY: "RESOURCE_NOT_READY", + THROTTLED: "THROTTLED", +} as const; + +/** + * @public + */ +export type MediaAnalysisJobFailureCode = + (typeof MediaAnalysisJobFailureCode)[keyof typeof MediaAnalysisJobFailureCode]; + +/** + * @public + *

Details about the error that resulted in failure of the job.

+ */ +export interface MediaAnalysisJobFailureDetails { + /** + * @public + *

Error code for the failed job.

+ */ + Code?: MediaAnalysisJobFailureCode; + + /** + * @public + *

Human-readable error message.

+ */ + Message?: string; +} + +/** + * @public + *

Contains input information for a media analysis job.

+ */ +export interface MediaAnalysisInput { + /** + * @public + *

Provides the S3 bucket name and object name.

+ *

The region for the S3 bucket containing the S3 object must match the region you use for + * Amazon Rekognition operations.

+ *

For Amazon Rekognition to process an S3 object, the user must have permission to + * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the + * Amazon Rekognition Developer Guide.

+ */ + S3Object: S3Object | undefined; +} + +/** + * @public + *

Summary that provides statistics on the input manifest and errors identified in the input manifest.

+ */ +export interface MediaAnalysisManifestSummary { + /** + * @public + *

Provides the S3 bucket name and object name.

+ *

The region for the S3 bucket containing the S3 object must match the region you use for + * Amazon Rekognition operations.

+ *

For Amazon Rekognition to process an S3 object, the user must have permission to + * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the + * Amazon Rekognition Developer Guide.

+ */ + S3Object?: S3Object; +} + +/** + * @public + *

Configuration for Moderation Labels Detection.

+ */ +export interface MediaAnalysisDetectModerationLabelsConfig { + /** + * @public + *

Specifies the minimum confidence level for the moderation labels to return. Amazon Rekognition + * doesn't return any labels with a confidence level lower than this specified value. + *

+ */ + MinConfidence?: number; + + /** + * @public + *

Specifies the custom moderation model to be used during the label detection job. + * If not provided, the pre-trained model is used.

+ */ + ProjectVersion?: string; +} + +/** + * @public + *

Configuration options for a media analysis job. Configuration is operation-specific.

+ */ +export interface MediaAnalysisOperationsConfig { + /** + * @public + *

Contains configuration options for a DetectModerationLabels job.

+ */ + DetectModerationLabels?: MediaAnalysisDetectModerationLabelsConfig; +} + +/** + * @public + *

Output configuration provided in the job creation request.

+ */ +export interface MediaAnalysisOutputConfig { + /** + * @public + *

Specifies the Amazon S3 bucket to contain the output of the media analysis job.

+ */ + S3Bucket: string | undefined; + + /** + * @public + *

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have + * designated for storage.

+ */ + S3KeyPrefix?: string; +} + +/** + * @public + *

Contains the results for a media analysis job created with StartMediaAnalysisJob.

+ */ +export interface MediaAnalysisResults { + /** + * @public + *

Provides the S3 bucket name and object name.

+ *

The region for the S3 bucket containing the S3 object must match the region you use for + * Amazon Rekognition operations.

+ *

For Amazon Rekognition to process an S3 object, the user must have permission to + * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the + * Amazon Rekognition Developer Guide.

+ */ + S3Object?: S3Object; +} + +/** + * @public + * @enum + */ +export const MediaAnalysisJobStatus = { + CREATED: "CREATED", + FAILED: "FAILED", + IN_PROGRESS: "IN_PROGRESS", + QUEUED: "QUEUED", + SUCCEEDED: "SUCCEEDED", +} as const; + +/** + * @public + */ +export type MediaAnalysisJobStatus = (typeof MediaAnalysisJobStatus)[keyof typeof MediaAnalysisJobStatus]; + +/** + * @public + */ +export interface GetMediaAnalysisJobResponse { + /** + * @public + *

The identifier for the media analysis job.

+ */ + JobId: string | undefined; + + /** + * @public + *

The name of the media analysis job.

+ */ + JobName?: string; + + /** + * @public + *

Operation configurations that were provided during job creation.

+ */ + OperationsConfig: MediaAnalysisOperationsConfig | undefined; + + /** + * @public + *

The current status of the media analysis job.

+ */ + Status: MediaAnalysisJobStatus | undefined; + + /** + * @public + *

Details about the error that resulted in failure of the job.

+ */ + FailureDetails?: MediaAnalysisJobFailureDetails; + + /** + * @public + *

The Unix date and time when the job was started.

+ */ + CreationTimestamp: Date | undefined; + + /** + * @public + *

The Unix date and time when the job finished.

+ */ + CompletionTimestamp?: Date; + + /** + * @public + *

Reference to the input manifest that was provided in the job creation request.

+ */ + Input: MediaAnalysisInput | undefined; + + /** + * @public + *

Output configuration that was provided in the creation request.

+ */ + OutputConfig: MediaAnalysisOutputConfig | undefined; + + /** + * @public + *

KMS Key that was provided in the creation request.

+ */ + KmsKeyId?: string; + + /** + * @public + *

Output manifest that contains prediction results.

+ */ + Results?: MediaAnalysisResults; + + /** + * @public + *

The summary manifest provides statistics on the input manifest and errors identified in the input manifest.

+ */ + ManifestSummary?: MediaAnalysisManifestSummary; +} + /** * @public * @enum @@ -7241,6 +7489,36 @@ export interface IndexFacesResponse { UnindexedFaces?: UnindexedFace[]; } +/** + * @public + *

Indicates that a provided manifest file is empty or larger than the allowed limit.

+ */ +export class InvalidManifestException extends __BaseException { + readonly name: "InvalidManifestException" = "InvalidManifestException"; + readonly $fault: "client" = "client"; + Message?: string; + Code?: string; + /** + * @public + *

A universally unique identifier (UUID) for the request.

+ */ + Logref?: string; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "InvalidManifestException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, InvalidManifestException.prototype); + this.Message = opts.Message; + this.Code = opts.Code; + this.Logref = opts.Logref; + } +} + /** * @public *

Specifies the starting point in a Kinesis stream to start processing. You can use the @@ -7534,748 +7812,411 @@ export interface ListFacesResponse { /** * @public */ -export interface ListProjectPoliciesRequest { - /** - * @public - *

The ARN of the project for which you want to list the project policies.

- */ - ProjectArn: string | undefined; - +export interface ListMediaAnalysisJobsRequest { /** * @public - *

If the previous response was incomplete (because there is more results to retrieve), - * Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token - * to retrieve the next set of results.

+ *

Pagination token, if the previous response was incomplete.

*/ NextToken?: string; /** * @public - *

The maximum number of results to return per paginated call. The largest value you can - * specify is 5. If you specify a value greater than 5, a ValidationException error - * occurs. The default value is 5.

+ *

The maximum number of results to return per paginated call. The largest value you can specify is 100. + * If you specify a value greater than 100, an InvalidParameterException error occurs. The default value is 100.

*/ MaxResults?: number; } /** * @public - *

Describes a project policy in the response from ListProjectPolicies.

- *

+ *

Description for a media analysis job.

*/ -export interface ProjectPolicy { +export interface MediaAnalysisJobDescription { /** * @public - *

The Amazon Resource Name (ARN) of the project to which the project policy is attached.

+ *

The identifier for a media analysis job.

*/ - ProjectArn?: string; + JobId: string | undefined; /** * @public - *

The name of the project policy.

+ *

The name of a media analysis job.

*/ - PolicyName?: string; + JobName?: string; /** * @public - *

The revision ID of the project policy.

+ *

Operation configurations that were provided during job creation.

*/ - PolicyRevisionId?: string; + OperationsConfig: MediaAnalysisOperationsConfig | undefined; /** * @public - *

The JSON document for the project policy.

+ *

The status of the media analysis job being retrieved.

*/ - PolicyDocument?: string; + Status: MediaAnalysisJobStatus | undefined; /** * @public - *

The Unix datetime for the creation of the project policy.

+ *

Details about the error that resulted in failure of the job.

*/ - CreationTimestamp?: Date; + FailureDetails?: MediaAnalysisJobFailureDetails; /** * @public - *

The Unix datetime for when the project policy was last updated.

+ *

The Unix date and time when the job was started.

*/ - LastUpdatedTimestamp?: Date; -} + CreationTimestamp: Date | undefined; -/** - * @public - */ -export interface ListProjectPoliciesResponse { /** * @public - *

A list of project policies attached to the project.

+ *

The Unix date and time when the job finished.

*/ - ProjectPolicies?: ProjectPolicy[]; + CompletionTimestamp?: Date; /** * @public - *

If the response is truncated, Amazon Rekognition returns this token that you can use in the - * subsequent request to retrieve the next set of project policies.

+ *

Reference to the input manifest that was provided in the job creation request.

*/ - NextToken?: string; -} + Input: MediaAnalysisInput | undefined; -/** - * @public - */ -export interface ListStreamProcessorsRequest { /** * @public - *

If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video - * returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.

+ *

Output configuration that was provided in the creation request.

*/ - NextToken?: string; + OutputConfig: MediaAnalysisOutputConfig | undefined; /** * @public - *

Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.

+ *

KMS Key that was provided in the creation request.

*/ - MaxResults?: number; -} + KmsKeyId?: string; -/** - * @public - *

An object that recognizes faces or labels in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request - * parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis resullts. - * - *

- */ -export interface StreamProcessor { /** * @public - *

Name of the Amazon Rekognition stream processor.

+ *

Output manifest that contains prediction results.

*/ - Name?: string; + Results?: MediaAnalysisResults; /** * @public - *

Current status of the Amazon Rekognition stream processor.

+ *

Provides statistics on input manifest and errors identified in the input manifest.

*/ - Status?: StreamProcessorStatus; + ManifestSummary?: MediaAnalysisManifestSummary; } /** * @public */ -export interface ListStreamProcessorsResponse { +export interface ListMediaAnalysisJobsResponse { /** * @public - *
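// Usage sketch (illustrative): polling GetMediaAnalysisJob until the job
// description above reaches a terminal state. The SUCCEEDED/FAILED literals are
// assumed terminal values of MediaAnalysisJobStatus, and the 5-second delay is
// arbitrary; treat both as assumptions, not prescribed behavior.
import { GetMediaAnalysisJobCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

async function waitForMediaAnalysisJob(client: RekognitionClient, jobId: string) {
  for (;;) {
    const job = await client.send(new GetMediaAnalysisJobCommand({ JobId: jobId }));
    if (job.Status === "SUCCEEDED" || job.Status === "FAILED") {
      // On completion, Results or FailureDetails describe the outcome.
      return job;
    }
    await new Promise((resolve) => setTimeout(resolve, 5_000));
  }
}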

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent - * request to retrieve the next set of stream processors.

+ *

Pagination token, if the previous response was incomplete.

*/ NextToken?: string; /** * @public - *

List of stream processors that you have created.

+ *

Contains a list of all media analysis jobs.

*/ - StreamProcessors?: StreamProcessor[]; + MediaAnalysisJobs: MediaAnalysisJobDescription[] | undefined; } /** * @public */ -export interface ListTagsForResourceRequest { - /** - * @public - *

Amazon Resource Name (ARN) of the model, collection, or stream processor that contains - * the tags that you want a list of.

- */ - ResourceArn: string | undefined; -} - -/** - * @public - */ -export interface ListTagsForResourceResponse { +export interface ListProjectPoliciesRequest { /** * @public - *

A list of key-value tags assigned to the resource.

+ *

The ARN of the project for which you want to list the project policies.

*/ - Tags?: Record; -} + ProjectArn: string | undefined; -/** - * @public - */ -export interface ListUsersRequest { /** * @public - *

The ID of an existing collection.

+ *

If the previous response was incomplete (because there are more results to retrieve), + * Amazon Rekognition Custom Labels returns a pagination token in the response. You can use this pagination token + * to retrieve the next set of results.

*/ - CollectionId: string | undefined; + NextToken?: string; /** * @public - *

Maximum number of UsersID to return.

+ *

The maximum number of results to return per paginated call. The largest value you can + * specify is 5. If you specify a value greater than 5, a ValidationException error + * occurs. The default value is 5.

*/ MaxResults?: number; - - /** - * @public - *

Pagingation token to receive the next set of UsersID.

- */ - NextToken?: string; } /** * @public - *

Metadata of the user stored in a collection.

+ *

Describes a project policy in the response from ListProjectPolicies.

+ *

*/ -export interface User { - /** - * @public - *

A provided ID for the User. Unique within the collection.

- */ - UserId?: string; - +export interface ProjectPolicy { /** * @public - *

Communicates if the UserID has been updated with latest set of faces to be associated - * with the UserID.

+ *

The Amazon Resource Name (ARN) of the project to which the project policy is attached.

*/ - UserStatus?: UserStatus; -} + ProjectArn?: string; -/** - * @public - */ -export interface ListUsersResponse { /** * @public - *

List of UsersID associated with the specified collection.

+ *

The name of the project policy.

*/ - Users?: User[]; + PolicyName?: string; /** * @public - *

A pagination token to be used with the subsequent request if the response is - * truncated.

+ *

The revision ID of the project policy.

*/ - NextToken?: string; -} + PolicyRevisionId?: string; -/** - * @public - *

The format of the project policy document that you supplied to - * PutProjectPolicy is incorrect.

- */ -export class MalformedPolicyDocumentException extends __BaseException { - readonly name: "MalformedPolicyDocumentException" = "MalformedPolicyDocumentException"; - readonly $fault: "client" = "client"; - Message?: string; - Code?: string; /** * @public - *

A universally unique identifier (UUID) for the request.

- */ - Logref?: string; - /** - * @internal + *

The JSON document for the project policy.

*/ - constructor(opts: __ExceptionOptionType) { - super({ - name: "MalformedPolicyDocumentException", - $fault: "client", - ...opts, - }); - Object.setPrototypeOf(this, MalformedPolicyDocumentException.prototype); - this.Message = opts.Message; - this.Code = opts.Code; - this.Logref = opts.Logref; - } -} + PolicyDocument?: string; -/** - * @public - *

Contains metadata for a UserID matched with a given face.

- */ -export interface MatchedUser { /** * @public - *

A provided ID for the UserID. Unique within the collection.

+ *

The Unix datetime for the creation of the project policy.

*/ - UserId?: string; + CreationTimestamp?: Date; /** * @public - *

The status of the user matched to a provided FaceID.

+ *

The Unix datetime for when the project policy was last updated.

*/ - UserStatus?: UserStatus; + LastUpdatedTimestamp?: Date; } /** * @public - *

The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see - * Calling Amazon Rekognition Video operations. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. - * For more information, see Giving access to multiple Amazon SNS topics.

*/ -export interface NotificationChannel { +export interface ListProjectPoliciesResponse { /** * @public - *

The Amazon SNS topic to which Amazon Rekognition posts the completion status.

+ *

A list of project policies attached to the project.

*/ - SNSTopicArn: string | undefined; + ProjectPolicies?: ProjectPolicy[]; /** * @public - *

The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.

+ *

If the response is truncated, Amazon Rekognition returns this token that you can use in the + * subsequent request to retrieve the next set of project policies.

*/ - RoleArn: string | undefined; + NextToken?: string; } /** * @public */ -export interface PutProjectPolicyRequest { - /** - * @public - *

The Amazon Resource Name (ARN) of the project that the project policy is attached to.

- */ - ProjectArn: string | undefined; - - /** - * @public - *

A name for the policy.

- */ - PolicyName: string | undefined; - +export interface ListStreamProcessorsRequest { /** * @public - *

The revision ID for the Project Policy. Each time you modify a policy, Amazon Rekognition Custom Labels - * generates and assigns a new PolicyRevisionId and then deletes the previous version of the - * policy.

+ *

If the previous response was incomplete (because there are more stream processors to retrieve), Amazon Rekognition Video + * returns a pagination token in the response. You can use this pagination token to retrieve the next set of stream processors.

*/ - PolicyRevisionId?: string; + NextToken?: string; /** * @public - *

A resource policy to add to the model. The policy is a JSON structure that contains - * one or more statements that define the policy. - * The policy must follow the IAM syntax. For - * more information about the contents of a JSON policy document, see - * IAM JSON policy reference.

+ *

Maximum number of stream processors you want Amazon Rekognition Video to return in the response. The default is 1000.

*/ - PolicyDocument: string | undefined; + MaxResults?: number; } /** * @public + *

An object that recognizes faces or labels in a streaming video. An Amazon Rekognition stream processor is created by a call to CreateStreamProcessor. The request + * parameters for CreateStreamProcessor describe the Kinesis video stream source for the streaming video, face recognition parameters, and where to stream the analysis results. + * + *

*/ -export interface PutProjectPolicyResponse { +export interface StreamProcessor { /** * @public - *

The ID of the project policy.

+ *

Name of the Amazon Rekognition stream processor.

*/ - PolicyRevisionId?: string; -} + Name?: string; -/** - * @public - */ -export interface RecognizeCelebritiesRequest { /** * @public - *

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to - * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

- *

If you are using an AWS SDK to call Amazon Rekognition, you might not need to - * base64-encode image bytes passed using the Bytes field. For more information, see - * Images in the Amazon Rekognition developer guide.

+ *

Current status of the Amazon Rekognition stream processor.

*/ - Image: Image | undefined; + Status?: StreamProcessorStatus; } /** * @public */ -export interface RecognizeCelebritiesResponse { - /** - * @public - *

Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64 - * celebrities in an image. Each celebrity object includes the following attributes: - * Face, Confidence, Emotions, Landmarks, - * Pose, Quality, Smile, Id, - * KnownGender, MatchConfidence, Name, - * Urls.

- */ - CelebrityFaces?: Celebrity[]; - +export interface ListStreamProcessorsResponse { /** * @public - *

Details about each unrecognized face in the image.

+ *

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent + * request to retrieve the next set of stream processors.

*/ - UnrecognizedFaces?: ComparedFace[]; + NextToken?: string; /** * @public - * - *

Support for estimating image orientation using the the OrientationCorrection field - * has ceased as of August 2021. Any returned values for this field included in an API response - * will always be NULL.

- *
- *

The orientation of the input image (counterclockwise direction). If your application - * displays the image, you can use this value to correct the orientation. The bounding box - * coordinates returned in CelebrityFaces and UnrecognizedFaces - * represent face locations before the image orientation is corrected.

- * - *

If the input image is in .jpeg format, it might contain exchangeable image (Exif) - * metadata that includes the image's orientation. If so, and the Exif metadata for the input - * image populates the orientation field, the value of OrientationCorrection is - * null. The CelebrityFaces and UnrecognizedFaces bounding box - * coordinates represent face locations after Exif metadata is used to correct the image - * orientation. Images in .png format don't contain Exif metadata.

- *
+ *

List of stream processors that you have created.

*/ - OrientationCorrection?: OrientationCorrection; + StreamProcessors?: StreamProcessor[]; } /** * @public */ -export interface SearchFacesRequest { - /** - * @public - *

ID of the collection the face belongs to.

- */ - CollectionId: string | undefined; - - /** - * @public - *

ID of a face to find matches for in the collection.

- */ - FaceId: string | undefined; - - /** - * @public - *

Maximum number of faces to return. The operation returns the maximum number of faces - * with the highest confidence in the match.

- */ - MaxFaces?: number; - +export interface ListTagsForResourceRequest { /** * @public - *

Optional value specifying the minimum confidence in the face match to return. For - * example, don't return any matches where confidence in matches is less than 70%. The default - * value is 80%.

+ *

Amazon Resource Name (ARN) of the model, collection, or stream processor that contains + * the tags that you want a list of.

*/ - FaceMatchThreshold?: number; + ResourceArn: string | undefined; } /** * @public */ -export interface SearchFacesResponse { - /** - * @public - *

ID of the face that was searched for matches in a collection.

- */ - SearchedFaceId?: string; - - /** - * @public - *

An array of faces that matched the input face, along with the confidence in the - * match.

- */ - FaceMatches?: FaceMatch[]; - +export interface ListTagsForResourceResponse { /** * @public - *

Version number of the face detection model associated with the input collection - * (CollectionId).

+ *

A list of key-value tags assigned to the resource.

*/ - FaceModelVersion?: string; + Tags?: Record; } /** * @public */ -export interface SearchFacesByImageRequest { +export interface ListUsersRequest { /** * @public - *

ID of the collection to search.

+ *

The ID of an existing collection.

*/ CollectionId: string | undefined; /** * @public - *

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to - * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

- *

If you are using an AWS SDK to call Amazon Rekognition, you might not need to - * base64-encode image bytes passed using the Bytes field. For more information, see - * Images in the Amazon Rekognition developer guide.

- */ - Image: Image | undefined; - - /** - * @public - *

Maximum number of faces to return. The operation returns the maximum number of faces - * with the highest confidence in the match.

- */ - MaxFaces?: number; - - /** - * @public - *

(Optional) Specifies the minimum confidence in the face match to return. For example, - * don't return any matches where confidence in matches is less than 70%. The default value is - * 80%.

- */ - FaceMatchThreshold?: number; - - /** - * @public - *

A filter that specifies a quality bar for how much filtering is done to identify faces. - * Filtered faces aren't searched for in the collection. If you specify AUTO, - * Amazon Rekognition chooses the quality bar. If you specify LOW, MEDIUM, or - * HIGH, filtering removes all faces that don’t meet the chosen quality bar. - * The quality bar is - * based on a variety of common use cases. Low-quality detections can occur for a number of - * reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, - * or a face with a pose that's too extreme to use. If you specify NONE, no - * filtering is performed. The default value is NONE.

- *

To use quality filtering, the collection you are using must be associated with version 3 - * of the face model or higher.

- */ - QualityFilter?: QualityFilter; -} - -/** - * @public - */ -export interface SearchFacesByImageResponse { - /** - * @public - *

The bounding box around the face in the input image that Amazon Rekognition used for the - * search.

- */ - SearchedFaceBoundingBox?: BoundingBox; - - /** - * @public - *

The level of confidence that the searchedFaceBoundingBox, contains a - * face.

- */ - SearchedFaceConfidence?: number; - - /** - * @public - *

An array of faces that match the input face, along with the confidence in the - * match.

+ *

Maximum number of UserIDs to return.

*/ - FaceMatches?: FaceMatch[]; + MaxResults?: number; /** * @public - *

Version number of the face detection model associated with the input collection - * (CollectionId).

+ *

Pagination token to receive the next set of UserIDs.

*/ - FaceModelVersion?: string; + NextToken?: string; } /** * @public + *

Metadata of the user stored in a collection.

*/ -export interface SearchUsersRequest { - /** - * @public - *

The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a - * FaceId is provided, UserId isn’t required to be present in the Collection.

- */ - CollectionId: string | undefined; - +export interface User { /** * @public - *

ID for the existing User.

+ *

A provided ID for the User. Unique within the collection.

*/ UserId?: string; /** * @public - *

ID for the existing face.

- */ - FaceId?: string; - - /** - * @public - *

Optional value that specifies the minimum confidence in the matched UserID to return. - * Default value of 80.

- */ - UserMatchThreshold?: number; - - /** - * @public - *

Maximum number of identities to return.

+ *

Indicates whether the UserID has been updated with the latest set of faces to be associated + * with the UserID.

*/ - MaxUsers?: number; + UserStatus?: UserStatus; } /** * @public - *

Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for - * search.

*/ -export interface SearchedFace { +export interface ListUsersResponse { /** * @public - *

Unique identifier assigned to the face.

+ *

List of UserIDs associated with the specified collection.

*/ - FaceId?: string; -} + Users?: User[]; -/** - * @public - *

Contains metadata about a User searched for within a collection.

- */ -export interface SearchedUser { /** * @public - *

A provided ID for the UserID. Unique within the collection.

+ *

A pagination token to be used with the subsequent request if the response is + * truncated.

*/ - UserId?: string; + NextToken?: string; } /** * @public - *

Provides UserID metadata along with the confidence in the match of this UserID with the - * input face.

+ *

The format of the project policy document that you supplied to + * PutProjectPolicy is incorrect.

*/ -export interface UserMatch { +export class MalformedPolicyDocumentException extends __BaseException { + readonly name: "MalformedPolicyDocumentException" = "MalformedPolicyDocumentException"; + readonly $fault: "client" = "client"; + Message?: string; + Code?: string; /** * @public - *

Describes the UserID metadata.

+ *

A universally unique identifier (UUID) for the request.

*/ - Similarity?: number; - + Logref?: string; /** - * @public - *

Confidence in the match of this UserID with the input face.

+ * @internal */ - User?: MatchedUser; + constructor(opts: __ExceptionOptionType) { + super({ + name: "MalformedPolicyDocumentException", + $fault: "client", + ...opts, + }); + Object.setPrototypeOf(this, MalformedPolicyDocumentException.prototype); + this.Message = opts.Message; + this.Code = opts.Code; + this.Logref = opts.Logref; + } } /** * @public + *

Contains metadata for a UserID matched with a given face.

*/ -export interface SearchUsersResponse { - /** - * @public - *

An array of UserMatch objects that matched the input face along with the confidence in the - * match. Array will be empty if there are no matches.

- */ - UserMatches?: UserMatch[]; - - /** - * @public - *

Version number of the face detection model associated with the input CollectionId.

- */ - FaceModelVersion?: string; - +export interface MatchedUser { /** * @public - *

Contains the ID of a face that was used to search for matches in a collection.

+ *

A provided ID for the UserID. Unique within the collection.

*/ - SearchedFace?: SearchedFace; + UserId?: string; /** * @public - *

Contains the ID of the UserID that was used to search for matches in a collection.

+ *

The status of the user matched to a provided FaceID.

*/ - SearchedUser?: SearchedUser; + UserStatus?: UserStatus; } /** * @public + *

The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see + * Calling Amazon Rekognition Video operations. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. + * For more information, see Giving access to multiple Amazon SNS topics.

*/ -export interface SearchUsersByImageRequest { - /** - * @public - *

The ID of an existing collection containing the UserID.

- */ - CollectionId: string | undefined; - - /** - * @public - *

Provides the input image either as bytes or an S3 object.

- *

You pass image bytes to an Amazon Rekognition API operation by using the Bytes - * property. For example, you would use the Bytes property to pass an image loaded - * from a local file system. Image bytes passed by using the Bytes property must be - * base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to - * call Amazon Rekognition API operations.

- *

For more information, see Analyzing an Image Loaded from a Local File System - * in the Amazon Rekognition Developer Guide.

- *

You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the - * S3Object property. Images stored in an S3 bucket do not need to be - * base64-encoded.

- *

The region for the S3 bucket containing the S3 object must match the region you use for - * Amazon Rekognition operations.

- *

If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the - * Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and - * then call the operation using the S3Object property.

- *

For Amazon Rekognition to process an S3 object, the user must have permission to - * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the - * Amazon Rekognition Developer Guide.

- */ - Image: Image | undefined; - - /** - * @public - *

Specifies the minimum confidence in the UserID match to return. Default value is - * 80.

- */ - UserMatchThreshold?: number; - - /** - * @public - *

Maximum number of UserIDs to return.

- */ - MaxUsers?: number; - +export interface NotificationChannel { /** * @public - *

A filter that specifies a quality bar for how much filtering is done to identify faces. - * Filtered faces aren't searched for in the collection. The default value is NONE.

+ *

The Amazon SNS topic to which Amazon Rekognition posts the completion status.

*/ - QualityFilter?: QualityFilter; -} + SNSTopicArn: string | undefined; -/** - * @public - *

Contains data regarding the input face used for a search.

- */ -export interface SearchedFaceDetails { /** * @public - *

Structure containing attributes of the face that the algorithm detected.

- *

A FaceDetail object contains either the default facial attributes or all - * facial attributes. The default attributes are BoundingBox, - * Confidence, Landmarks, Pose, and - * Quality.

- *

- * GetFaceDetection is the only Amazon Rekognition Video stored video operation that can - * return a FaceDetail object with all attributes. To specify which attributes to - * return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default - * attributes. The corresponding Start operations don't have a FaceAttributes input - * parameter:

- *
    - *   • GetCelebrityRecognition
    - *   • GetPersonTracking
    - *   • GetFaceSearch
    - *

The Amazon Rekognition Image DetectFaces and IndexFaces operations - * can return all facial attributes. To specify which attributes to return, use the - * Attributes input parameter for DetectFaces. For - * IndexFaces, use the DetectAttributes input parameter.

+ *

The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.

*/ - FaceDetail?: FaceDetail; + RoleArn: string | undefined; } /** diff --git a/clients/client-rekognition/src/models/models_1.ts b/clients/client-rekognition/src/models/models_1.ts index e09e2e64ffaf..98de87d64322 100644 --- a/clients/client-rekognition/src/models/models_1.ts +++ b/clients/client-rekognition/src/models/models_1.ts @@ -3,25 +3,484 @@ import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-cli import { BlackFrame, + BoundingBox, + Celebrity, + ComparedFace, ConnectedHomeSettingsForUpdate, DatasetChanges, DetectionFilter, FaceAttributes, FaceDetail, + FaceMatch, + Image, KinesisVideoStreamStartSelector, LabelDetectionFeatureName, LabelDetectionSettings, + MatchedUser, + MediaAnalysisInput, + MediaAnalysisOperationsConfig, + MediaAnalysisOutputConfig, NotificationChannel, + OrientationCorrection, ProjectVersionStatus, + QualityFilter, RegionOfInterest, - SearchedFaceDetails, SegmentType, StreamProcessorDataSharingPreference, - UserMatch, Video, } from "./models_0"; import { RekognitionServiceException as __BaseException } from "./RekognitionServiceException"; +/** + * @public + */ +export interface PutProjectPolicyRequest { + /** + * @public + *

The Amazon Resource Name (ARN) of the project that the project policy is attached to.

+ */ + ProjectArn: string | undefined; + + /** + * @public + *

A name for the policy.

+ */ + PolicyName: string | undefined; + + /** + * @public + *

The revision ID for the Project Policy. Each time you modify a policy, Amazon Rekognition Custom Labels + * generates and assigns a new PolicyRevisionId and then deletes the previous version of the + * policy.

+ */ + PolicyRevisionId?: string; + + /** + * @public + *

A resource policy to add to the model. The policy is a JSON structure that contains + * one or more statements that define the policy. + * The policy must follow the IAM syntax. For + * more information about the contents of a JSON policy document, see + * IAM JSON policy reference.

+ */ + PolicyDocument: string | undefined; +} + +/** + * @public + */ +export interface PutProjectPolicyResponse { + /** + * @public + *
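// Usage sketch (illustrative): attaching a resource policy to a Custom Labels
// project with PutProjectPolicy. The project ARN, principal, and Resource value
// are placeholders; the statement follows IAM JSON syntax but is not a
// recommended policy.
import { PutProjectPolicyCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({});
const policyDocument = JSON.stringify({
  Version: "2012-10-17",
  Statement: [
    {
      Effect: "Allow",
      Principal: { AWS: "arn:aws:iam::111122223333:root" },
      Action: "rekognition:CopyProjectVersion",
      Resource: "*", // placeholder; typically scoped to a project version ARN
    },
  ],
});
const { PolicyRevisionId } = await client.send(
  new PutProjectPolicyCommand({
    ProjectArn: "arn:aws:rekognition:us-east-1:111122223333:project/my-project/1234567890123",
    PolicyName: "SamplePolicy",
    PolicyDocument: policyDocument,
  })
);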

The revision ID of the project policy.

+ */ + PolicyRevisionId?: string; +} + +/** + * @public + */ +export interface RecognizeCelebritiesRequest { + /** + * @public + *

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to + * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

+ *

If you are using an AWS SDK to call Amazon Rekognition, you might not need to + * base64-encode image bytes passed using the Bytes field. For more information, see + * Images in the Amazon Rekognition developer guide.

+ */ + Image: Image | undefined; +} + +/** + * @public + */ +export interface RecognizeCelebritiesResponse { + /** + * @public + *

Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64 + * celebrities in an image. Each celebrity object includes the following attributes: + * Face, Confidence, Emotions, Landmarks, + * Pose, Quality, Smile, Id, + * KnownGender, MatchConfidence, Name, + * Urls.

+ */ + CelebrityFaces?: Celebrity[]; + + /** + * @public + *

Details about each unrecognized face in the image.

+ */ + UnrecognizedFaces?: ComparedFace[]; + + /** + * @public + * + *

Support for estimating image orientation using the OrientationCorrection field + * has ceased as of August 2021. Any returned values for this field included in an API response + * will always be NULL.

+ *
+ *

The orientation of the input image (counterclockwise direction). If your application + * displays the image, you can use this value to correct the orientation. The bounding box + * coordinates returned in CelebrityFaces and UnrecognizedFaces + * represent face locations before the image orientation is corrected.

+ * + *

If the input image is in .jpeg format, it might contain exchangeable image (Exif) + * metadata that includes the image's orientation. If so, and the Exif metadata for the input + * image populates the orientation field, the value of OrientationCorrection is + * null. The CelebrityFaces and UnrecognizedFaces bounding box + * coordinates represent face locations after Exif metadata is used to correct the image + * orientation. Images in .png format don't contain Exif metadata.

+ *
+ */ + OrientationCorrection?: OrientationCorrection; +} + +/** + * @public + */ +export interface SearchFacesRequest { + /** + * @public + *
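// Usage sketch (illustrative): RecognizeCelebrities with an S3-hosted image,
// which avoids base64-encoding as described above. Bucket and object names are
// placeholders.
import { RecognizeCelebritiesCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({});
const { CelebrityFaces = [], UnrecognizedFaces = [] } = await client.send(
  new RecognizeCelebritiesCommand({
    Image: { S3Object: { Bucket: "my-bucket", Name: "group-photo.jpg" } },
  })
);
console.log(`${CelebrityFaces.length} recognized, ${UnrecognizedFaces.length} unrecognized`);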

ID of the collection the face belongs to.

+ */ + CollectionId: string | undefined; + + /** + * @public + *

ID of a face to find matches for in the collection.

+ */ + FaceId: string | undefined; + + /** + * @public + *

Maximum number of faces to return. The operation returns the maximum number of faces + * with the highest confidence in the match.

+ */ + MaxFaces?: number; + + /** + * @public + *

Optional value specifying the minimum confidence in the face match to return. For + * example, don't return any matches where confidence in matches is less than 70%. The default + * value is 80%.

+ */ + FaceMatchThreshold?: number; +} + +/** + * @public + */ +export interface SearchFacesResponse { + /** + * @public + *

ID of the face that was searched for matches in a collection.

+ */ + SearchedFaceId?: string; + + /** + * @public + *

An array of faces that matched the input face, along with the confidence in the + * match.

+ */ + FaceMatches?: FaceMatch[]; + + /** + * @public + *

Version number of the face detection model associated with the input collection + * (CollectionId).

+ */ + FaceModelVersion?: string; +} + +/** + * @public + */ +export interface SearchFacesByImageRequest { + /** + * @public + *

ID of the collection to search.

+ */ + CollectionId: string | undefined; + + /** + * @public + *

The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to + * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported.

+ *

If you are using an AWS SDK to call Amazon Rekognition, you might not need to + * base64-encode image bytes passed using the Bytes field. For more information, see + * Images in the Amazon Rekognition developer guide.

+ */ + Image: Image | undefined; + + /** + * @public + *

Maximum number of faces to return. The operation returns the maximum number of faces + * with the highest confidence in the match.

+ */ + MaxFaces?: number; + + /** + * @public + *

(Optional) Specifies the minimum confidence in the face match to return. For example, + * don't return any matches where confidence in matches is less than 70%. The default value is + * 80%.

+ */ + FaceMatchThreshold?: number; + + /** + * @public + *

A filter that specifies a quality bar for how much filtering is done to identify faces. + * Filtered faces aren't searched for in the collection. If you specify AUTO, + * Amazon Rekognition chooses the quality bar. If you specify LOW, MEDIUM, or + * HIGH, filtering removes all faces that don’t meet the chosen quality bar. + * The quality bar is + * based on a variety of common use cases. Low-quality detections can occur for a number of + * reasons. Some examples are an object that's misidentified as a face, a face that's too blurry, + * or a face with a pose that's too extreme to use. If you specify NONE, no + * filtering is performed. The default value is NONE.

+ *

To use quality filtering, the collection you are using must be associated with version 3 + * of the face model or higher.

+ */ + QualityFilter?: QualityFilter; +} + +/** + * @public + */ +export interface SearchFacesByImageResponse { + /** + * @public + *

The bounding box around the face in the input image that Amazon Rekognition used for the + * search.

+ */ + SearchedFaceBoundingBox?: BoundingBox; + + /** + * @public + *

The level of confidence that the searchedFaceBoundingBox + * contains a face.

+ */ + SearchedFaceConfidence?: number; + + /** + * @public + *

An array of faces that match the input face, along with the confidence in the + * match.

+ */ + FaceMatches?: FaceMatch[]; + + /** + * @public + *

Version number of the face detection model associated with the input collection + * (CollectionId).

+ */ + FaceModelVersion?: string; +} + +/** + * @public + */ +export interface SearchUsersRequest { + /** + * @public + *
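// Usage sketch (illustrative): SearchFacesByImage with an explicit quality bar
// and match threshold. The collection ID and bucket/object names are
// placeholders.
import { SearchFacesByImageCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({});
const res = await client.send(
  new SearchFacesByImageCommand({
    CollectionId: "my-collection",
    Image: { S3Object: { Bucket: "my-bucket", Name: "face.jpg" } },
    MaxFaces: 5,
    FaceMatchThreshold: 90,
    QualityFilter: "AUTO", // let Rekognition choose the quality bar
  })
);
console.log(res.FaceMatches?.length ?? 0, "matches");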

The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a + * FaceId is provided, UserId isn’t required to be present in the Collection.

+ */ + CollectionId: string | undefined; + + /** + * @public + *

ID for the existing User.

+ */ + UserId?: string; + + /** + * @public + *

ID for the existing face.

+ */ + FaceId?: string; + + /** + * @public + *

Optional value that specifies the minimum confidence in the matched UserID to return. + * Default value of 80.

+ */ + UserMatchThreshold?: number; + + /** + * @public + *

Maximum number of identities to return.

+ */ + MaxUsers?: number; +} + +/** + * @public + *

Provides face metadata such as FaceId, BoundingBox, and Confidence of the input face used for + * search.

+ */ +export interface SearchedFace { + /** + * @public + *

Unique identifier assigned to the face.

+ */ + FaceId?: string; +} + +/** + * @public + *

Contains metadata about a User searched for within a collection.

+ */ +export interface SearchedUser { + /** + * @public + *

A provided ID for the UserID. Unique within the collection.

+ */ + UserId?: string; +} + +/** + * @public + *

Provides UserID metadata along with the confidence in the match of this UserID with the + * input face.

+ */ +export interface UserMatch { + /** + * @public + *

Confidence in the match of this UserID with the input face.

+ */ + Similarity?: number; + + /** + * @public + *

Describes the UserID metadata.

+ */ + User?: MatchedUser; +} + +/** + * @public + */ +export interface SearchUsersResponse { + /** + * @public + *

An array of UserMatch objects that matched the input face along with the confidence in the + * match. The array will be empty if there are no matches.

+ */ + UserMatches?: UserMatch[]; + + /** + * @public + *

Version number of the face detection model associated with the input CollectionId.

+ */ + FaceModelVersion?: string; + + /** + * @public + *

Contains the ID of a face that was used to search for matches in a collection.

+ */ + SearchedFace?: SearchedFace; + + /** + * @public + *

Contains the ID of the UserID that was used to search for matches in a collection.

+ */ + SearchedUser?: SearchedUser; +} + +/** + * @public + */ +export interface SearchUsersByImageRequest { + /** + * @public + *
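// Usage sketch (illustrative): SearchUsers by FaceId. Per the request shape
// above, either UserId or FaceId may be supplied; the IDs here are placeholders.
import { SearchUsersCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({});
const { UserMatches = [] } = await client.send(
  new SearchUsersCommand({
    CollectionId: "my-collection",
    FaceId: "11111111-2222-3333-4444-555555555555",
    UserMatchThreshold: 80,
    MaxUsers: 10,
  })
);
for (const match of UserMatches) {
  console.log(match.User?.UserId, match.Similarity);
}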

The ID of an existing collection containing the UserID.

+ */ + CollectionId: string | undefined; + + /** + * @public + *

Provides the input image either as bytes or an S3 object.

+ *

You pass image bytes to an Amazon Rekognition API operation by using the Bytes + * property. For example, you would use the Bytes property to pass an image loaded + * from a local file system. Image bytes passed by using the Bytes property must be + * base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to + * call Amazon Rekognition API operations.

+ *

For more information, see Analyzing an Image Loaded from a Local File System + * in the Amazon Rekognition Developer Guide.

+ *

You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the + * S3Object property. Images stored in an S3 bucket do not need to be + * base64-encoded.

+ *

The region for the S3 bucket containing the S3 object must match the region you use for + * Amazon Rekognition operations.

+ *

If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the + * Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and + * then call the operation using the S3Object property.

+ *

For Amazon Rekognition to process an S3 object, the user must have permission to + * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the + * Amazon Rekognition Developer Guide.

+ */ + Image: Image | undefined; + + /** + * @public + *

Specifies the minimum confidence in the UserID match to return. Default value is + * 80.

+ */ + UserMatchThreshold?: number; + + /** + * @public + *

Maximum number of UserIDs to return.

+ */ + MaxUsers?: number; + + /** + * @public + *

A filter that specifies a quality bar for how much filtering is done to identify faces. + * Filtered faces aren't searched for in the collection. The default value is NONE.

+ */ + QualityFilter?: QualityFilter; +} + +/** + * @public + *

Contains data regarding the input face used for a search.

+ */ +export interface SearchedFaceDetails { + /** + * @public + *

Structure containing attributes of the face that the algorithm detected.

+ *

A FaceDetail object contains either the default facial attributes or all + * facial attributes. The default attributes are BoundingBox, + * Confidence, Landmarks, Pose, and + * Quality.

+ *

+ * GetFaceDetection is the only Amazon Rekognition Video stored video operation that can + * return a FaceDetail object with all attributes. To specify which attributes to + * return, use the FaceAttributes input parameter for StartFaceDetection. The following Amazon Rekognition Video operations return only the default + * attributes. The corresponding Start operations don't have a FaceAttributes input + * parameter:

+ *
    + *   • GetCelebrityRecognition
    + *   • GetPersonTracking
    + *   • GetFaceSearch
    + *

The Amazon Rekognition Image DetectFaces and IndexFaces operations + * can return all facial attributes. To specify which attributes to return, use the + * Attributes input parameter for DetectFaces. For + * IndexFaces, use the DetectAttributes input parameter.

+ */ + FaceDetail?: FaceDetail; +} + /** * @public * @enum @@ -438,6 +897,63 @@ export interface StartLabelDetectionResponse { JobId?: string; } +/** + * @public + */ +export interface StartMediaAnalysisJobRequest { + /** + * @public + *

Idempotency token used to prevent the accidental creation of duplicate jobs. If + * you use the same token with multiple StartMediaAnalysisJobRequest requests, the same + * response is returned. Use ClientRequestToken to prevent the same request from being + * processed more than once.

+ */ + ClientRequestToken?: string; + + /** + * @public + *

The name of the job. Does not have to be unique.

+ */ + JobName?: string; + + /** + * @public + *

Configuration options for the media analysis job to be created.

+ */ + OperationsConfig: MediaAnalysisOperationsConfig | undefined; + + /** + * @public + *

Input data to be analyzed by the job.

+ */ + Input: MediaAnalysisInput | undefined; + + /** + * @public + *

The Amazon S3 bucket location to store the results.

+ */ + OutputConfig: MediaAnalysisOutputConfig | undefined; + + /** + * @public + *

The identifier of a customer managed AWS KMS key (name or ARN). The key + * is used to encrypt images copied into the service. The key is also used + * to encrypt results and manifest files written to the output Amazon S3 bucket.

+ */ + KmsKeyId?: string; +} + +/** + * @public + */ +export interface StartMediaAnalysisJobResponse { + /** + * @public + *

Identifier for the created job.

+ */ + JobId: string | undefined; +} + /** * @public */ diff --git a/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts b/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts new file mode 100644 index 000000000000..2400c3d680f8 --- /dev/null +++ b/clients/client-rekognition/src/pagination/ListMediaAnalysisJobsPaginator.ts @@ -0,0 +1,50 @@ +// smithy-typescript generated code +import { Paginator } from "@smithy/types"; + +import { + ListMediaAnalysisJobsCommand, + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, +} from "../commands/ListMediaAnalysisJobsCommand"; +import { RekognitionClient } from "../RekognitionClient"; +import { RekognitionPaginationConfiguration } from "./Interfaces"; + +/** + * @internal + */ +const makePagedClientRequest = async ( + client: RekognitionClient, + input: ListMediaAnalysisJobsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListMediaAnalysisJobsCommand(input), ...args); +}; +/** + * @public + */ +export async function* paginateListMediaAnalysisJobs( + config: RekognitionPaginationConfiguration, + input: ListMediaAnalysisJobsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListMediaAnalysisJobsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof RekognitionClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Rekognition | RekognitionClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-rekognition/src/pagination/index.ts b/clients/client-rekognition/src/pagination/index.ts index 0fdb6acf8f87..5dd987118f1d 100644 --- a/clients/client-rekognition/src/pagination/index.ts +++ b/clients/client-rekognition/src/pagination/index.ts @@ -23,6 +23,7 @@ export * from "./ListCollectionsPaginator"; export * from "./ListDatasetEntriesPaginator"; export * from "./ListDatasetLabelsPaginator"; export * from "./ListFacesPaginator"; +export * from "./ListMediaAnalysisJobsPaginator"; export * from "./ListProjectPoliciesPaginator"; export * from "./ListStreamProcessorsPaginator"; export * from "./ListUsersPaginator"; diff --git a/clients/client-rekognition/src/protocols/Aws_json1_1.ts b/clients/client-rekognition/src/protocols/Aws_json1_1.ts index bb536d3ecf45..bc25cea7dade 100644 --- a/clients/client-rekognition/src/protocols/Aws_json1_1.ts +++ b/clients/client-rekognition/src/protocols/Aws_json1_1.ts @@ -105,6 +105,10 @@ import { } from "../commands/GetFaceLivenessSessionResultsCommand"; import { GetFaceSearchCommandInput, GetFaceSearchCommandOutput } from "../commands/GetFaceSearchCommand"; import { GetLabelDetectionCommandInput, GetLabelDetectionCommandOutput } from "../commands/GetLabelDetectionCommand"; +import { + GetMediaAnalysisJobCommandInput, + GetMediaAnalysisJobCommandOutput, +} from "../commands/GetMediaAnalysisJobCommand"; import { GetPersonTrackingCommandInput, GetPersonTrackingCommandOutput } from "../commands/GetPersonTrackingCommand"; import { GetSegmentDetectionCommandInput, @@ -116,6 +120,10 @@ import { 
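// Usage sketch (illustrative): starting a media analysis job with the new
// StartMediaAnalysisJobCommand. Bucket names, the manifest key, and the output
// prefix are placeholders; the Input/OutputConfig field names follow the
// MediaAnalysis* shapes in this change, and MinConfidence mirrors the float
// field handled by the serializer below. ClientRequestToken is omitted because
// the serializer generates an idempotency token when one is not supplied.
import { StartMediaAnalysisJobCommand, RekognitionClient } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({});
const { JobId } = await client.send(
  new StartMediaAnalysisJobCommand({
    JobName: "moderation-batch-1",
    OperationsConfig: { DetectModerationLabels: { MinConfidence: 50 } },
    Input: { S3Object: { Bucket: "input-bucket", Name: "manifest.jsonl" } },
    OutputConfig: { S3Bucket: "output-bucket", S3KeyPrefix: "results/" },
  })
);
console.log("Started media analysis job", JobId);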
ListCollectionsCommandInput, ListCollectionsCommandOutput } from "../co import { ListDatasetEntriesCommandInput, ListDatasetEntriesCommandOutput } from "../commands/ListDatasetEntriesCommand"; import { ListDatasetLabelsCommandInput, ListDatasetLabelsCommandOutput } from "../commands/ListDatasetLabelsCommand"; import { ListFacesCommandInput, ListFacesCommandOutput } from "../commands/ListFacesCommand"; +import { + ListMediaAnalysisJobsCommandInput, + ListMediaAnalysisJobsCommandOutput, +} from "../commands/ListMediaAnalysisJobsCommand"; import { ListProjectPoliciesCommandInput, ListProjectPoliciesCommandOutput, @@ -152,6 +160,10 @@ import { StartLabelDetectionCommandInput, StartLabelDetectionCommandOutput, } from "../commands/StartLabelDetectionCommand"; +import { + StartMediaAnalysisJobCommandInput, + StartMediaAnalysisJobCommandOutput, +} from "../commands/StartMediaAnalysisJobCommand"; import { StartPersonTrackingCommandInput, StartPersonTrackingCommandOutput, @@ -297,6 +309,8 @@ import { GetFaceSearchResponse, GetLabelDetectionRequest, GetLabelDetectionResponse, + GetMediaAnalysisJobRequest, + GetMediaAnalysisJobResponse, GetPersonTrackingRequest, GetPersonTrackingResponse, GetSegmentDetectionRequest, @@ -317,6 +331,7 @@ import { Instance, InternalServerError, InvalidImageFormatException, + InvalidManifestException, InvalidPaginationTokenException, InvalidParameterException, InvalidPolicyRevisionIdException, @@ -335,6 +350,8 @@ import { ListDatasetLabelsRequest, ListFacesRequest, ListFacesResponse, + ListMediaAnalysisJobsRequest, + ListMediaAnalysisJobsResponse, ListProjectPoliciesRequest, ListProjectPoliciesResponse, ListStreamProcessorsRequest, @@ -342,6 +359,11 @@ import { ListUsersRequest, LivenessOutputConfig, MalformedPolicyDocumentException, + MediaAnalysisDetectModerationLabelsConfig, + MediaAnalysisInput, + MediaAnalysisJobDescription, + MediaAnalysisOperationsConfig, + MediaAnalysisOutputConfig, ModerationLabel, MouthOpen, Mustache, @@ -360,9 +382,6 @@ import { ProtectiveEquipmentSummarizationAttributes, ProtectiveEquipmentType, ProvisionedThroughputExceededException, - PutProjectPolicyRequest, - RecognizeCelebritiesRequest, - RecognizeCelebritiesResponse, RegionOfInterest, ResourceAlreadyExistsException, ResourceInUseException, @@ -370,14 +389,6 @@ import { ResourceNotReadyException, S3Destination, S3Object, - SearchedFaceDetails, - SearchFacesByImageRequest, - SearchFacesByImageResponse, - SearchFacesRequest, - SearchFacesResponse, - SearchUsersByImageRequest, - SearchUsersRequest, - SearchUsersResponse, SegmentDetection, SegmentType, ServiceQuotaExceededException, @@ -398,17 +409,28 @@ import { TrainingData, UnindexedFace, UnsuccessfulFaceAssociation, - UserMatch, Video, VideoMetadata, } from "../models/models_0"; import { + PutProjectPolicyRequest, + RecognizeCelebritiesRequest, + RecognizeCelebritiesResponse, + SearchedFaceDetails, + SearchFacesByImageRequest, + SearchFacesByImageResponse, + SearchFacesRequest, + SearchFacesResponse, + SearchUsersByImageRequest, SearchUsersByImageResponse, + SearchUsersRequest, + SearchUsersResponse, StartCelebrityRecognitionRequest, StartContentModerationRequest, StartFaceDetectionRequest, StartFaceSearchRequest, StartLabelDetectionRequest, + StartMediaAnalysisJobRequest, StartPersonTrackingRequest, StartProjectVersionRequest, StartSegmentDetectionFilters, @@ -429,6 +451,7 @@ import { UntagResourceRequest, UpdateDatasetEntriesRequest, UpdateStreamProcessorRequest, + UserMatch, VideoTooLargeException, } from "../models/models_1"; import { 
RekognitionServiceException as __BaseException } from "../models/RekognitionServiceException"; @@ -927,6 +950,19 @@ export const se_GetLabelDetectionCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +/** + * serializeAws_json1_1GetMediaAnalysisJobCommand + */ +export const se_GetMediaAnalysisJobCommand = async ( + input: GetMediaAnalysisJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = sharedHeaders("GetMediaAnalysisJob"); + let body: any; + body = JSON.stringify(_json(input)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + /** * serializeAws_json1_1GetPersonTrackingCommand */ @@ -1031,6 +1067,19 @@ export const se_ListFacesCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +/** + * serializeAws_json1_1ListMediaAnalysisJobsCommand + */ +export const se_ListMediaAnalysisJobsCommand = async ( + input: ListMediaAnalysisJobsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = sharedHeaders("ListMediaAnalysisJobs"); + let body: any; + body = JSON.stringify(_json(input)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + /** * serializeAws_json1_1ListProjectPoliciesCommand */ @@ -1226,6 +1275,19 @@ export const se_StartLabelDetectionCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +/** + * serializeAws_json1_1StartMediaAnalysisJobCommand + */ +export const se_StartMediaAnalysisJobCommand = async ( + input: StartMediaAnalysisJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = sharedHeaders("StartMediaAnalysisJob"); + let body: any; + body = JSON.stringify(se_StartMediaAnalysisJobRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + /** * serializeAws_json1_1StartPersonTrackingCommand */ @@ -3852,6 +3914,67 @@ const de_GetLabelDetectionCommandError = async ( } }; +/** + * deserializeAws_json1_1GetMediaAnalysisJobCommand + */ +export const de_GetMediaAnalysisJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return de_GetMediaAnalysisJobCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = de_GetMediaAnalysisJobResponse(data, context); + const response: GetMediaAnalysisJobCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return response; +}; + +/** + * deserializeAws_json1_1GetMediaAnalysisJobCommandError + */ +const de_GetMediaAnalysisJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseErrorBody(output.body, context), + }; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rekognition#AccessDeniedException": + throw await de_AccessDeniedExceptionRes(parsedOutput, context); + case "InternalServerError": + case "com.amazonaws.rekognition#InternalServerError": + throw await de_InternalServerErrorRes(parsedOutput, context); + case "InvalidParameterException": + case "com.amazonaws.rekognition#InvalidParameterException": + throw await de_InvalidParameterExceptionRes(parsedOutput, context); + case "ProvisionedThroughputExceededException": + case 
"com.amazonaws.rekognition#ProvisionedThroughputExceededException": + throw await de_ProvisionedThroughputExceededExceptionRes(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.rekognition#ResourceNotFoundException": + throw await de_ResourceNotFoundExceptionRes(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.rekognition#ThrottlingException": + throw await de_ThrottlingExceptionRes(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + return throwDefaultError({ + output, + parsedBody, + errorCode, + }); + } +}; + /** * deserializeAws_json1_1GetPersonTrackingCommand */ @@ -4385,6 +4508,67 @@ const de_ListFacesCommandError = async ( } }; +/** + * deserializeAws_json1_1ListMediaAnalysisJobsCommand + */ +export const de_ListMediaAnalysisJobsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return de_ListMediaAnalysisJobsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = de_ListMediaAnalysisJobsResponse(data, context); + const response: ListMediaAnalysisJobsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return response; +}; + +/** + * deserializeAws_json1_1ListMediaAnalysisJobsCommandError + */ +const de_ListMediaAnalysisJobsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseErrorBody(output.body, context), + }; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rekognition#AccessDeniedException": + throw await de_AccessDeniedExceptionRes(parsedOutput, context); + case "InternalServerError": + case "com.amazonaws.rekognition#InternalServerError": + throw await de_InternalServerErrorRes(parsedOutput, context); + case "InvalidPaginationTokenException": + case "com.amazonaws.rekognition#InvalidPaginationTokenException": + throw await de_InvalidPaginationTokenExceptionRes(parsedOutput, context); + case "InvalidParameterException": + case "com.amazonaws.rekognition#InvalidParameterException": + throw await de_InvalidParameterExceptionRes(parsedOutput, context); + case "ProvisionedThroughputExceededException": + case "com.amazonaws.rekognition#ProvisionedThroughputExceededException": + throw await de_ProvisionedThroughputExceededExceptionRes(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.rekognition#ThrottlingException": + throw await de_ThrottlingExceptionRes(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + return throwDefaultError({ + output, + parsedBody, + errorCode, + }); + } +}; + /** * deserializeAws_json1_1ListProjectPoliciesCommand */ @@ -5393,6 +5577,82 @@ const de_StartLabelDetectionCommandError = async ( } }; +/** + * deserializeAws_json1_1StartMediaAnalysisJobCommand + */ +export const de_StartMediaAnalysisJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return de_StartMediaAnalysisJobCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = _json(data); + const response: StartMediaAnalysisJobCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return response; +}; + +/** + * 
deserializeAws_json1_1StartMediaAnalysisJobCommandError + */ +const de_StartMediaAnalysisJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseErrorBody(output.body, context), + }; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rekognition#AccessDeniedException": + throw await de_AccessDeniedExceptionRes(parsedOutput, context); + case "IdempotentParameterMismatchException": + case "com.amazonaws.rekognition#IdempotentParameterMismatchException": + throw await de_IdempotentParameterMismatchExceptionRes(parsedOutput, context); + case "InternalServerError": + case "com.amazonaws.rekognition#InternalServerError": + throw await de_InternalServerErrorRes(parsedOutput, context); + case "InvalidManifestException": + case "com.amazonaws.rekognition#InvalidManifestException": + throw await de_InvalidManifestExceptionRes(parsedOutput, context); + case "InvalidParameterException": + case "com.amazonaws.rekognition#InvalidParameterException": + throw await de_InvalidParameterExceptionRes(parsedOutput, context); + case "InvalidS3ObjectException": + case "com.amazonaws.rekognition#InvalidS3ObjectException": + throw await de_InvalidS3ObjectExceptionRes(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.rekognition#LimitExceededException": + throw await de_LimitExceededExceptionRes(parsedOutput, context); + case "ProvisionedThroughputExceededException": + case "com.amazonaws.rekognition#ProvisionedThroughputExceededException": + throw await de_ProvisionedThroughputExceededExceptionRes(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.rekognition#ResourceNotFoundException": + throw await de_ResourceNotFoundExceptionRes(parsedOutput, context); + case "ResourceNotReadyException": + case "com.amazonaws.rekognition#ResourceNotReadyException": + throw await de_ResourceNotReadyExceptionRes(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.rekognition#ThrottlingException": + throw await de_ThrottlingExceptionRes(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + return throwDefaultError({ + output, + parsedBody, + errorCode, + }); + } +}; + /** * deserializeAws_json1_1StartPersonTrackingCommand */ @@ -6224,6 +6484,22 @@ const de_InvalidImageFormatExceptionRes = async ( return __decorateServiceException(exception, body); }; +/** + * deserializeAws_json1_1InvalidManifestExceptionRes + */ +const de_InvalidManifestExceptionRes = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = _json(body); + const exception = new InvalidManifestException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + /** * deserializeAws_json1_1InvalidPaginationTokenExceptionRes */ @@ -6809,6 +7085,8 @@ const se_FaceSearchSettings = (input: FaceSearchSettings, context: __SerdeContex // se_GetLabelDetectionRequest omitted. +// se_GetMediaAnalysisJobRequest omitted. + // se_GetPersonTrackingRequest omitted. // se_GetSegmentDetectionRequest omitted. @@ -6863,6 +7141,8 @@ const se_IndexFacesRequest = (input: IndexFacesRequest, context: __SerdeContext) // se_ListFacesRequest omitted. +// se_ListMediaAnalysisJobsRequest omitted. + // se_ListProjectPoliciesRequest omitted. 
// se_ListStreamProcessorsRequest omitted. @@ -6873,6 +7153,32 @@ const se_IndexFacesRequest = (input: IndexFacesRequest, context: __SerdeContext) // se_LivenessOutputConfig omitted. +/** + * serializeAws_json1_1MediaAnalysisDetectModerationLabelsConfig + */ +const se_MediaAnalysisDetectModerationLabelsConfig = ( + input: MediaAnalysisDetectModerationLabelsConfig, + context: __SerdeContext +): any => { + return take(input, { + MinConfidence: __serializeFloat, + ProjectVersion: [], + }); +}; + +// se_MediaAnalysisInput omitted. + +/** + * serializeAws_json1_1MediaAnalysisOperationsConfig + */ +const se_MediaAnalysisOperationsConfig = (input: MediaAnalysisOperationsConfig, context: __SerdeContext): any => { + return take(input, { + DetectModerationLabels: (_) => se_MediaAnalysisDetectModerationLabelsConfig(_, context), + }); +}; + +// se_MediaAnalysisOutputConfig omitted. + // se_NotificationChannel omitted. // se_OutputConfig omitted. @@ -7050,6 +7356,20 @@ const se_StartLabelDetectionRequest = (input: StartLabelDetectionRequest, contex }); }; +/** + * serializeAws_json1_1StartMediaAnalysisJobRequest + */ +const se_StartMediaAnalysisJobRequest = (input: StartMediaAnalysisJobRequest, context: __SerdeContext): any => { + return take(input, { + ClientRequestToken: [true, (_) => _ ?? generateIdempotencyToken()], + Input: _json, + JobName: [], + KmsKeyId: [], + OperationsConfig: (_) => se_MediaAnalysisOperationsConfig(_, context), + OutputConfig: _json, + }); +}; + // se_StartPersonTrackingRequest omitted. // se_StartProjectVersionRequest omitted. @@ -8184,6 +8504,26 @@ const de_GetLabelDetectionResponse = (output: any, context: __SerdeContext): Get }) as any; }; +/** + * deserializeAws_json1_1GetMediaAnalysisJobResponse + */ +const de_GetMediaAnalysisJobResponse = (output: any, context: __SerdeContext): GetMediaAnalysisJobResponse => { + return take(output, { + CompletionTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + CreationTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + FailureDetails: _json, + Input: _json, + JobId: __expectString, + JobName: __expectString, + KmsKeyId: __expectString, + ManifestSummary: _json, + OperationsConfig: (_: any) => de_MediaAnalysisOperationsConfig(_, context), + OutputConfig: _json, + Results: _json, + Status: __expectString, + }) as any; +}; + /** * deserializeAws_json1_1GetPersonTrackingResponse */ @@ -8305,6 +8645,8 @@ const de_Instances = (output: any, context: __SerdeContext): Instance[] => { // de_InvalidImageFormatException omitted. +// de_InvalidManifestException omitted. + // de_InvalidPaginationTokenException omitted. // de_InvalidParameterException omitted. @@ -8420,6 +8762,16 @@ const de_ListFacesResponse = (output: any, context: __SerdeContext): ListFacesRe }) as any; }; +/** + * deserializeAws_json1_1ListMediaAnalysisJobsResponse + */ +const de_ListMediaAnalysisJobsResponse = (output: any, context: __SerdeContext): ListMediaAnalysisJobsResponse => { + return take(output, { + MediaAnalysisJobs: (_: any) => de_MediaAnalysisJobDescriptions(_, context), + NextToken: __expectString, + }) as any; +}; + /** * deserializeAws_json1_1ListProjectPoliciesResponse */ @@ -8440,6 +8792,70 @@ const de_ListProjectPoliciesResponse = (output: any, context: __SerdeContext): L // de_MatchedUser omitted. 
+/** + * deserializeAws_json1_1MediaAnalysisDetectModerationLabelsConfig + */ +const de_MediaAnalysisDetectModerationLabelsConfig = ( + output: any, + context: __SerdeContext +): MediaAnalysisDetectModerationLabelsConfig => { + return take(output, { + MinConfidence: __limitedParseFloat32, + ProjectVersion: __expectString, + }) as any; +}; + +// de_MediaAnalysisInput omitted. + +/** + * deserializeAws_json1_1MediaAnalysisJobDescription + */ +const de_MediaAnalysisJobDescription = (output: any, context: __SerdeContext): MediaAnalysisJobDescription => { + return take(output, { + CompletionTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + CreationTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + FailureDetails: _json, + Input: _json, + JobId: __expectString, + JobName: __expectString, + KmsKeyId: __expectString, + ManifestSummary: _json, + OperationsConfig: (_: any) => de_MediaAnalysisOperationsConfig(_, context), + OutputConfig: _json, + Results: _json, + Status: __expectString, + }) as any; +}; + +/** + * deserializeAws_json1_1MediaAnalysisJobDescriptions + */ +const de_MediaAnalysisJobDescriptions = (output: any, context: __SerdeContext): MediaAnalysisJobDescription[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + return de_MediaAnalysisJobDescription(entry, context); + }); + return retVal; +}; + +// de_MediaAnalysisJobFailureDetails omitted. + +// de_MediaAnalysisManifestSummary omitted. + +/** + * deserializeAws_json1_1MediaAnalysisOperationsConfig + */ +const de_MediaAnalysisOperationsConfig = (output: any, context: __SerdeContext): MediaAnalysisOperationsConfig => { + return take(output, { + DetectModerationLabels: (_: any) => de_MediaAnalysisDetectModerationLabelsConfig(_, context), + }) as any; +}; + +// de_MediaAnalysisOutputConfig omitted. + +// de_MediaAnalysisResults omitted. + /** * deserializeAws_json1_1ModerationLabel */ @@ -8889,6 +9305,8 @@ const de_Smile = (output: any, context: __SerdeContext): Smile => { // de_StartLabelDetectionResponse omitted. +// de_StartMediaAnalysisJobResponse omitted. + // de_StartPersonTrackingResponse omitted. // de_StartProjectVersionResponse omitted. diff --git a/codegen/sdk-codegen/aws-models/rekognition.json b/codegen/sdk-codegen/aws-models/rekognition.json index 80c82dfc6d15..52e05c833b32 100644 --- a/codegen/sdk-codegen/aws-models/rekognition.json +++ b/codegen/sdk-codegen/aws-models/rekognition.json @@ -6386,6 +6386,139 @@ "smithy.api#output": {} } }, + "com.amazonaws.rekognition#GetMediaAnalysisJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.rekognition#GetMediaAnalysisJobRequest" + }, + "output": { + "target": "com.amazonaws.rekognition#GetMediaAnalysisJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rekognition#AccessDeniedException" + }, + { + "target": "com.amazonaws.rekognition#InternalServerError" + }, + { + "target": "com.amazonaws.rekognition#InvalidParameterException" + }, + { + "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rekognition#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "
<p>Retrieves the results for a given media analysis job. \n Takes a JobId returned by StartMediaAnalysisJob.</p>
" + } + }, + "com.amazonaws.rekognition#GetMediaAnalysisJobRequest": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "
<p>Unique identifier for the media analysis job for which you want to retrieve results.</p>
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.rekognition#GetMediaAnalysisJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "
<p>The identifier for the media analysis job.</p>
", + "smithy.api#required": {} + } + }, + "JobName": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobName", + "traits": { + "smithy.api#documentation": "
<p>The name of the media analysis job.</p>
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "
<p>Operation configurations that were provided during job creation.</p>
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobStatus", + "traits": { + "smithy.api#documentation": "
<p>The current status of the media analysis job.</p>
", + "smithy.api#required": {} + } + }, + "FailureDetails": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails", + "traits": { + "smithy.api#documentation": "
<p>Details about the error that resulted in failure of the job.</p>
" + } + }, + "CreationTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "
<p>The Unix date and time when the job was started.</p>
", + "smithy.api#required": {} + } + }, + "CompletionTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "
<p>The Unix date and time when the job finished.</p>
" + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "
<p>Reference to the input manifest that was provided in the job creation request.</p>
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "
<p>Output configuration that was provided in the creation request.</p>
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "
<p>The KMS key that was provided in the creation request.</p>
" + } + }, + "Results": { + "target": "com.amazonaws.rekognition#MediaAnalysisResults", + "traits": { + "smithy.api#documentation": "
<p>Output manifest that contains prediction results.</p>
" + } + }, + "ManifestSummary": { + "target": "com.amazonaws.rekognition#MediaAnalysisManifestSummary", + "traits": { + "smithy.api#documentation": "
<p>The summary manifest provides statistics on the input manifest and on errors identified in the input manifest.</p>
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#GetPersonTracking": { "type": "operation", "input": { @@ -7376,6 +7509,27 @@ "smithy.api#error": "client" } }, + "com.amazonaws.rekognition#InvalidManifestException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rekognition#String" + }, + "Code": { + "target": "com.amazonaws.rekognition#String" + }, + "Logref": { + "target": "com.amazonaws.rekognition#String", + "traits": { + "smithy.api#documentation": "
<p>A universally unique identifier (UUID) for the request.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Indicates that a provided manifest file is empty or larger than the allowed limit.</p>
", + "smithy.api#error": "client" + } + }, "com.amazonaws.rekognition#InvalidPaginationTokenException": { "type": "structure", "members": { @@ -8588,6 +8742,93 @@ "smithy.api#output": {} } }, + "com.amazonaws.rekognition#ListMediaAnalysisJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsRequest" + }, + "output": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rekognition#AccessDeniedException" + }, + { + "target": "com.amazonaws.rekognition#InternalServerError" + }, + { + "target": "com.amazonaws.rekognition#InvalidPaginationTokenException" + }, + { + "target": "com.amazonaws.rekognition#InvalidParameterException" + }, + { + "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.rekognition#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "
<p>Returns a list of media analysis jobs. Results are sorted by CreationTimestamp in descending order.</p>
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.rekognition#ListMediaAnalysisJobsPageSize": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.rekognition#ListMediaAnalysisJobsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.rekognition#ExtendedPaginationToken", + "traits": { + "smithy.api#documentation": "
<p>Pagination token, if the previous response was incomplete.</p>
" + } + }, + "MaxResults": { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobsPageSize", + "traits": { + "smithy.api#documentation": "
<p>The maximum number of results to return per paginated call. The largest value a user can specify is 100. \n If a user specifies a value greater than 100, an InvalidParameterException error occurs. The default value is 100.</p>
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.rekognition#ListMediaAnalysisJobsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.rekognition#ExtendedPaginationToken", + "traits": { + "smithy.api#documentation": "
<p>Pagination token, if the previous response was incomplete.</p>
" + } + }, + "MediaAnalysisJobs": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobDescriptions", + "traits": { + "smithy.api#documentation": "
<p>Contains a list of all media analysis jobs.</p>
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#ListProjectPolicies": { "type": "operation", "input": { @@ -8943,48 +9184,401 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.rekognition#LivenessOutputConfig": { + "com.amazonaws.rekognition#LivenessOutputConfig": { + "type": "structure", + "members": { + "S3Bucket": { + "target": "com.amazonaws.rekognition#S3Bucket", + "traits": { + "smithy.api#documentation": "
<p>The path to an Amazon S3 bucket used to store Face Liveness session results.</p>
", + "smithy.api#required": {} + } + }, + "S3KeyPrefix": { + "target": "com.amazonaws.rekognition#LivenessS3KeyPrefix", + "traits": { + "smithy.api#documentation": "
<p>The prefix prepended to the output files for the Face Liveness session results.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Contains settings that specify the location of an Amazon S3 bucket used to store the output of\n a Face Liveness session. Note that the S3 bucket must be located in the caller's AWS account\n and in the same region as the Face Liveness endpoint. Additionally, the Amazon S3 object keys are\n auto-generated by the Face Liveness system.</p>
" + } + }, + "com.amazonaws.rekognition#LivenessS3KeyPrefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 950 + }, + "smithy.api#pattern": "^\\S*$" + } + }, + "com.amazonaws.rekognition#LivenessSessionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.rekognition#LivenessSessionStatus": { + "type": "enum", + "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } + } + } + }, + "com.amazonaws.rekognition#MalformedPolicyDocumentException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rekognition#String" + }, + "Code": { + "target": "com.amazonaws.rekognition#String" + }, + "Logref": { + "target": "com.amazonaws.rekognition#String", + "traits": { + "smithy.api#documentation": "
<p>A universally unique identifier (UUID) for the request.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>The format of the project policy document that you supplied to \n PutProjectPolicy is incorrect.</p>
", + "smithy.api#error": "client" + } + }, + "com.amazonaws.rekognition#MatchedUser": { + "type": "structure", + "members": { + "UserId": { + "target": "com.amazonaws.rekognition#UserId", + "traits": { + "smithy.api#documentation": "
<p>A provided ID for the UserID. Unique within the collection.</p>
" + } + }, + "UserStatus": { + "target": "com.amazonaws.rekognition#UserStatus", + "traits": { + "smithy.api#documentation": "
<p>The status of the user matched to a provided FaceID.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Contains metadata for a UserID matched with a given face.</p>
" + } + }, + "com.amazonaws.rekognition#MaxDurationInSecondsULong": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 120 + } + } + }, + "com.amazonaws.rekognition#MaxFaces": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 4096 + } + } + }, + "com.amazonaws.rekognition#MaxFacesToIndex": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.rekognition#MaxPixelThreshold": { + "type": "float", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.rekognition#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.rekognition#MaxUserResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.rekognition#MediaAnalysisDetectModerationLabelsConfig": { + "type": "structure", + "members": { + "MinConfidence": { + "target": "com.amazonaws.rekognition#Percent", + "traits": { + "smithy.api#documentation": "
<p>Specifies the minimum confidence level for the moderation labels to return. Amazon Rekognition\n doesn't return any labels with a confidence level lower than this specified value.\n</p>
" + } + }, + "ProjectVersion": { + "target": "com.amazonaws.rekognition#ProjectVersionId", + "traits": { + "smithy.api#documentation": "
<p>Specifies the custom moderation model to be used during the label detection job. \n If not provided, the pre-trained model is used.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Configuration for Moderation Labels Detection.</p>
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisInput": { + "type": "structure", + "members": { + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Contains input information for a media analysis job.</p>
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobDescription": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "
<p>The identifier for a media analysis job.</p>
", + "smithy.api#required": {} + } + }, + "JobName": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobName", + "traits": { + "smithy.api#documentation": "
<p>The name of a media analysis job.</p>
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "
<p>Operation configurations that were provided during job creation.</p>
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobStatus", + "traits": { + "smithy.api#documentation": "
<p>The status of the media analysis job being retrieved.</p>
", + "smithy.api#required": {} + } + }, + "FailureDetails": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails", + "traits": { + "smithy.api#documentation": "
<p>Details about the error that resulted in failure of the job.</p>
" + } + }, + "CreationTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "
<p>The Unix date and time when the job was started.</p>
", + "smithy.api#required": {} + } + }, + "CompletionTimestamp": { + "target": "com.amazonaws.rekognition#DateTime", + "traits": { + "smithy.api#documentation": "
<p>The Unix date and time when the job finished.</p>
" + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "
<p>Reference to the input manifest that was provided in the job creation request.</p>
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "
<p>Output configuration that was provided in the creation request.</p>
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "
<p>The KMS key that was provided in the creation request.</p>
" + } + }, + "Results": { + "target": "com.amazonaws.rekognition#MediaAnalysisResults", + "traits": { + "smithy.api#documentation": "
<p>Output manifest that contains prediction results.</p>
" + } + }, + "ManifestSummary": { + "target": "com.amazonaws.rekognition#MediaAnalysisManifestSummary", + "traits": { + "smithy.api#documentation": "
<p>Provides statistics on the input manifest and on errors identified in the input manifest.</p>
" + } + } + }, + "traits": { + "smithy.api#documentation": "
<p>Description for a media analysis job.</p>
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobDescriptions": { + "type": "list", + "member": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobDescription" + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobFailureCode": { + "type": "enum", + "members": { + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + }, + "INVALID_S3_OBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_S3_OBJECT" + } + }, + "INVALID_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_MANIFEST" + } + }, + "INVALID_OUTPUT_CONFIG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_OUTPUT_CONFIG" + } + }, + "INVALID_KMS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_KMS_KEY" + } + }, + "ACCESS_DENIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCESS_DENIED" + } + }, + "RESOURCE_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE_NOT_FOUND" + } + }, + "RESOURCE_NOT_READY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE_NOT_READY" + } + }, + "THROTTLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THROTTLED" + } + } + } + }, + "com.amazonaws.rekognition#MediaAnalysisJobFailureDetails": { "type": "structure", "members": { - "S3Bucket": { - "target": "com.amazonaws.rekognition#S3Bucket", + "Code": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobFailureCode", "traits": { - "smithy.api#documentation": "
<p>The path to an AWS Amazon S3 bucket used to store Face Liveness session results.</p>
", - "smithy.api#required": {} + "smithy.api#documentation": "
<p>Error code for the failed job.</p>
" } }, - "S3KeyPrefix": { - "target": "com.amazonaws.rekognition#LivenessS3KeyPrefix", + "Message": { + "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "
<p>The prefix prepended to the output files for the Face Liveness session results.</p>
" + "smithy.api#documentation": "
<p>Human-readable error message.</p>
" } } }, "traits": { - "smithy.api#documentation": "
<p>Contains settings that specify the location of an Amazon S3 bucket used to store the output of\n a Face Liveness session. Note that the S3 bucket must be located in the caller's AWS account\n and in the same region as the Face Liveness end-point. Additionally, the Amazon S3 object keys are\n auto-generated by the Face Liveness system.</p>
" + "smithy.api#documentation": "
<p>Details about the error that resulted in failure of the job.</p>
" } }, - "com.amazonaws.rekognition#LivenessS3KeyPrefix": { + "com.amazonaws.rekognition#MediaAnalysisJobId": { "type": "string", "traits": { "smithy.api#length": { - "min": 0, - "max": 950 + "min": 1, + "max": 64 }, - "smithy.api#pattern": "^\\S*$" + "smithy.api#pattern": "^[a-zA-Z0-9-_]+$" } }, - "com.amazonaws.rekognition#LivenessSessionId": { + "com.amazonaws.rekognition#MediaAnalysisJobName": { "type": "string", "traits": { "smithy.api#length": { - "min": 36, - "max": 36 + "min": 1, + "max": 64 }, - "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + "smithy.api#pattern": "^[a-zA-Z0-9_.\\-]+$" } }, - "com.amazonaws.rekognition#LivenessSessionStatus": { + "com.amazonaws.rekognition#MediaAnalysisJobStatus": { "type": "enum", "members": { "CREATED": { @@ -8993,6 +9587,12 @@ "smithy.api#enumValue": "CREATED" } }, + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, "IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -9010,106 +9610,74 @@ "traits": { "smithy.api#enumValue": "FAILED" } - }, - "EXPIRED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EXPIRED" - } } } }, - "com.amazonaws.rekognition#MalformedPolicyDocumentException": { + "com.amazonaws.rekognition#MediaAnalysisManifestSummary": { "type": "structure", "members": { - "Message": { - "target": "com.amazonaws.rekognition#String" - }, - "Code": { - "target": "com.amazonaws.rekognition#String" - }, - "Logref": { - "target": "com.amazonaws.rekognition#String", + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object" + } + }, + "traits": { + "smithy.api#documentation": "
<p>Summary that provides statistics on the input manifest and on errors identified in the input manifest.</p>
" + } + }, + "com.amazonaws.rekognition#MediaAnalysisOperationsConfig": { + "type": "structure", + "members": { + "DetectModerationLabels": { + "target": "com.amazonaws.rekognition#MediaAnalysisDetectModerationLabelsConfig", "traits": { - "smithy.api#documentation": "
<p>A universally unique identifier (UUID) for the request.</p>
" + "smithy.api#documentation": "
<p>Contains configuration options for a DetectModerationLabels job.</p>
" } } }, "traits": { - "smithy.api#documentation": "
<p>The format of the project policy document that you supplied to \n PutProjectPolicy is incorrect.</p>
", - "smithy.api#error": "client" + "smithy.api#documentation": "
<p>Configuration options for a media analysis job. Configuration is operation-specific.</p>
" } }, - "com.amazonaws.rekognition#MatchedUser": { + "com.amazonaws.rekognition#MediaAnalysisOutputConfig": { "type": "structure", "members": { - "UserId": { - "target": "com.amazonaws.rekognition#UserId", + "S3Bucket": { + "target": "com.amazonaws.rekognition#S3Bucket", "traits": { - "smithy.api#documentation": "
<p>A provided ID for the UserID. Unique within the collection.</p>
" + "smithy.api#documentation": "
<p>Specifies the Amazon S3 bucket to contain the output of the media analysis job.</p>
", + "smithy.api#required": {} } }, - "UserStatus": { - "target": "com.amazonaws.rekognition#UserStatus", + "S3KeyPrefix": { + "target": "com.amazonaws.rekognition#MediaAnalysisS3KeyPrefix", "traits": { - "smithy.api#documentation": "
<p>The status of the user matched to a provided FaceID.</p>
" + "smithy.api#documentation": "
<p>Specifies the Amazon S3 key prefix that comes after the name of the bucket you have\n designated for storage.</p>
" } } }, "traits": { - "smithy.api#documentation": "
<p>Contains metadata for a UserID matched with a given face.</p>
" - } - }, - "com.amazonaws.rekognition#MaxDurationInSecondsULong": { - "type": "long", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 120 - } + "smithy.api#documentation": "
<p>Output configuration provided in the job creation request.</p>
" } }, - "com.amazonaws.rekognition#MaxFaces": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 4096 + "com.amazonaws.rekognition#MediaAnalysisResults": { + "type": "structure", + "members": { + "S3Object": { + "target": "com.amazonaws.rekognition#S3Object" } - } - }, - "com.amazonaws.rekognition#MaxFacesToIndex": { - "type": "integer", + }, "traits": { - "smithy.api#range": { - "min": 1 - } + "smithy.api#documentation": "
<p>Contains the results for a media analysis job created with StartMediaAnalysisJob.</p>
" } }, - "com.amazonaws.rekognition#MaxPixelThreshold": { - "type": "float", + "com.amazonaws.rekognition#MediaAnalysisS3KeyPrefix": { + "type": "string", "traits": { - "smithy.api#range": { + "smithy.api#length": { "min": 0, - "max": 1 - } - } - }, - "com.amazonaws.rekognition#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1 - } - } - }, - "com.amazonaws.rekognition#MaxUserResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 500 - } + "max": 800 + }, + "smithy.api#pattern": "^\\S*$" } }, "com.amazonaws.rekognition#MinCoveragePercentage": { @@ -10538,6 +11106,9 @@ { "target": "com.amazonaws.rekognition#GetLabelDetection" }, + { + "target": "com.amazonaws.rekognition#GetMediaAnalysisJob" + }, { "target": "com.amazonaws.rekognition#GetPersonTracking" }, @@ -10562,6 +11133,9 @@ { "target": "com.amazonaws.rekognition#ListFaces" }, + { + "target": "com.amazonaws.rekognition#ListMediaAnalysisJobs" + }, { "target": "com.amazonaws.rekognition#ListProjectPolicies" }, @@ -10607,6 +11181,9 @@ { "target": "com.amazonaws.rekognition#StartLabelDetection" }, + { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJob" + }, { "target": "com.amazonaws.rekognition#StartPersonTracking" }, @@ -10697,7 +11274,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10740,7 +11316,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10753,7 +11330,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10767,7 +11343,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10790,7 +11365,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10825,7 +11399,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10836,14 +11409,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10857,14 +11432,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -10873,11 +11446,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10888,14 +11461,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -10909,7 +11484,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -10929,7 +11503,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -10940,14 +11513,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -10958,9 +11533,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -13089,6 +13666,145 @@ "smithy.api#output": {} } }, + "com.amazonaws.rekognition#StartMediaAnalysisJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJobRequest" + }, + "output": { + "target": "com.amazonaws.rekognition#StartMediaAnalysisJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rekognition#AccessDeniedException" + }, + { + "target": "com.amazonaws.rekognition#IdempotentParameterMismatchException" + }, + { 
+ "target": "com.amazonaws.rekognition#InternalServerError" + }, + { + "target": "com.amazonaws.rekognition#InvalidManifestException" + }, + { + "target": "com.amazonaws.rekognition#InvalidParameterException" + }, + { + "target": "com.amazonaws.rekognition#InvalidS3ObjectException" + }, + { + "target": "com.amazonaws.rekognition#LimitExceededException" + }, + { + "target": "com.amazonaws.rekognition#ProvisionedThroughputExceededException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rekognition#ResourceNotReadyException" + }, + { + "target": "com.amazonaws.rekognition#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "
<p>Initiates a new media analysis job. Accepts a manifest file in an Amazon S3 bucket. The\n output is a manifest file and a summary of the manifest stored in the Amazon S3 bucket.</p>
", + "smithy.api#examples": [ + { + "title": "StartMediaAnalysisJob", + "documentation": "Initiates a new media analysis job.", + "input": { + "JobName": "job-name", + "OperationsConfig": { + "DetectModerationLabels": { + "MinConfidence": 50, + "ProjectVersion": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958" + } + }, + "Input": { + "S3Object": { + "Bucket": "input-bucket", + "Name": "input-manifest.json" + } + }, + "OutputConfig": { + "S3Bucket": "output-bucket", + "S3KeyPrefix": "output-location" + } + }, + "output": { + "JobId": "861a0645d98ef88efb75477628c011c04942d9d5f58faf2703c393c8cf8c1537" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rekognition#StartMediaAnalysisJobRequest": { + "type": "structure", + "members": { + "ClientRequestToken": { + "target": "com.amazonaws.rekognition#ClientRequestToken", + "traits": { + "smithy.api#documentation": "
<p>Idempotency token used to prevent the accidental creation of duplicate jobs. If\n you use the same token with multiple StartMediaAnalysisJobRequest requests, the same\n response is returned. Use ClientRequestToken to prevent the same request from being\n processed more than once.</p>
", + "smithy.api#idempotencyToken": {} + } + }, + "JobName": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobName", + "traits": { + "smithy.api#documentation": "
<p>The name of the job. Does not have to be unique.</p>
" + } + }, + "OperationsConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOperationsConfig", + "traits": { + "smithy.api#documentation": "
<p>Configuration options for the media analysis job to be created.</p>
", + "smithy.api#required": {} + } + }, + "Input": { + "target": "com.amazonaws.rekognition#MediaAnalysisInput", + "traits": { + "smithy.api#documentation": "
<p>Input data to be analyzed by the job.</p>
", + "smithy.api#required": {} + } + }, + "OutputConfig": { + "target": "com.amazonaws.rekognition#MediaAnalysisOutputConfig", + "traits": { + "smithy.api#documentation": "
<p>The Amazon S3 bucket location to store the results.</p>
", + "smithy.api#required": {} + } + }, + "KmsKeyId": { + "target": "com.amazonaws.rekognition#KmsKeyId", + "traits": { + "smithy.api#documentation": "
<p>The identifier of a customer managed AWS KMS key (name or ARN). The key \n is used to encrypt images copied into the service. The key is also used \n to encrypt results and manifest files written to the output Amazon S3 bucket.</p>
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.rekognition#StartMediaAnalysisJobResponse": { + "type": "structure", + "members": { + "JobId": { + "target": "com.amazonaws.rekognition#MediaAnalysisJobId", + "traits": { + "smithy.api#documentation": "
<p>Identifier for the created job.</p>
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.rekognition#StartPersonTracking": { "type": "operation", "input": {