Skip to content

Commit

Permalink
feat(client-rekognition): This release adds support for tagging proje…
Browse files Browse the repository at this point in the history
…cts and datasets with the CreateProject and CreateDataset APIs.
  • Loading branch information
awstools committed Jul 3, 2024
1 parent 0ecd09f commit f795a25
Show file tree
Hide file tree
Showing 5 changed files with 36 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ export interface CreateDatasetCommandOutput extends CreateDatasetResponse, __Met
* },
* DatasetType: "TRAIN" || "TEST", // required
* ProjectArn: "STRING_VALUE", // required
* Tags: { // TagMap
* "<keys>": "STRING_VALUE",
* },
* };
* const command = new CreateDatasetCommand(input);
* const response = await client.send(command);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ export interface CreateProjectCommandOutput extends CreateProjectResponse, __Met
* ProjectName: "STRING_VALUE", // required
* Feature: "CONTENT_MODERATION" || "CUSTOM_LABELS",
* AutoUpdate: "ENABLED" || "DISABLED",
* Tags: { // TagMap
* "<keys>": "STRING_VALUE",
* },
* };
* const command = new CreateProjectCommand(input);
* const response = await client.send(command);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,8 @@ export interface GetLabelDetectionCommandOutput extends GetLabelDetectionRespons
 * next set of results. To get the next page of results, call <code>GetLabelDetection</code> and
* populate the <code>NextToken</code> request parameter with the token value returned from the
* previous call to <code>GetLabelDetection</code>.</p>
* <p>If you are retrieving results while using the Amazon Simple Notification Service, note that you will receive an
* "ERROR" notification if the job encounters an issue.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
Expand Down
16 changes: 14 additions & 2 deletions clients/client-rekognition/src/models/models_0.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2370,6 +2370,12 @@ export interface CreateDatasetRequest {
* @public
*/
ProjectArn: string | undefined;

/**
* <p>A set of tags (key-value pairs) that you want to attach to the dataset.</p>
* @public
*/
Tags?: Record<string, string>;
}

/**
Expand Down Expand Up @@ -2526,6 +2532,12 @@ export interface CreateProjectRequest {
* @public
*/
AutoUpdate?: ProjectAutoUpdate;

/**
* <p>A set of tags (key-value pairs) that you want to attach to the project.</p>
* @public
*/
Tags?: Record<string, string>;
}

/**
Expand Down Expand Up @@ -5015,8 +5027,8 @@ export interface HumanLoopActivationOutput {
*/
export interface DetectModerationLabelsResponse {
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the start of the
* video, they were detected.</p>
* <p>Array of detected Moderation labels. For video operations, this includes the time,
* in milliseconds from the start of the video, they were detected.</p>
* @public
*/
ModerationLabels?: ModerationLabel[];
Expand Down
16 changes: 14 additions & 2 deletions codegen/sdk-codegen/aws-models/rekognition.json
Original file line number Diff line number Diff line change
Expand Up @@ -1546,6 +1546,12 @@
                    "smithy.api#documentation": "<p>\nThe ARN of the Amazon Rekognition Custom Labels project to which you want to assign the dataset.\n</p>",
"smithy.api#required": {}
}
},
"Tags": {
"target": "com.amazonaws.rekognition#TagMap",
"traits": {
"smithy.api#documentation": "<p>A set of tags (key-value pairs) that you want to attach to the dataset.</p>"
}
}
},
"traits": {
Expand Down Expand Up @@ -1725,6 +1731,12 @@
"traits": {
"smithy.api#documentation": "<p>Specifies whether automatic retraining should be attempted for the versions of the\n project. Automatic retraining is done as a best effort. Required argument for Content\n Moderation. Applicable only to adapters.</p>"
}
},
"Tags": {
"target": "com.amazonaws.rekognition#TagMap",
"traits": {
"smithy.api#documentation": "<p>A set of tags (key-value pairs) that you want to attach to the project.</p>"
}
}
},
"traits": {
Expand Down Expand Up @@ -4318,7 +4330,7 @@
"ModerationLabels": {
"target": "com.amazonaws.rekognition#ModerationLabels",
"traits": {
"smithy.api#documentation": "<p>Array of detected Moderation labels and the time, in milliseconds from the start of the\n video, they were detected.</p>"
"smithy.api#documentation": "<p>Array of detected Moderation labels. For video operations, this includes the time, \n in milliseconds from the start of the video, they were detected.</p>"
}
},
"ModerationModelVersion": {
Expand Down Expand Up @@ -6298,7 +6310,7 @@
}
],
"traits": {
"smithy.api#documentation": "<p>Gets the label detection results of a Amazon Rekognition Video analysis started by <a>StartLabelDetection</a>. </p>\n <p>The label detection operation is started by a call to <a>StartLabelDetection</a> which returns a job identifier (<code>JobId</code>). When\n the label detection operation finishes, Amazon Rekognition publishes a completion status to the\n Amazon Simple Notification Service topic registered in the initial call to <code>StartlabelDetection</code>. </p>\n <p>To get the results of the label detection operation, first check that the status value\n published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <a>GetLabelDetection</a> and pass the job identifier (<code>JobId</code>) from the\n initial call to <code>StartLabelDetection</code>.</p>\n <p>\n <code>GetLabelDetection</code> returns an array of detected labels\n (<code>Labels</code>) sorted by the time the labels were detected. You can also sort by the\n label name by specifying <code>NAME</code> for the <code>SortBy</code> input parameter. If\n there is no <code>NAME</code> specified, the default sort is by\n timestamp.</p>\n <p>You can select how results are aggregated by using the <code>AggregateBy</code> input\n parameter. The default aggregation method is <code>TIMESTAMPS</code>. You can also aggregate\n by <code>SEGMENTS</code>, which aggregates all instances of labels detected in a given\n segment. </p>\n <p>The returned Labels array may include the following attributes:</p>\n <ul>\n <li>\n <p>Name - The name of the detected label.</p>\n </li>\n <li>\n <p>Confidence - The level of confidence in the label assigned to a detected object. </p>\n </li>\n <li>\n <p>Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical\n taxonomy of detected labels. For example, a detected car might be assigned the label car.\n The label car has two parent labels: Vehicle (its parent) and Transportation (its\n grandparent). 
The response includes the all ancestors for a label, where every ancestor is\n a unique label. In the previous example, Car, Vehicle, and Transportation are returned as\n unique labels in the response. </p>\n </li>\n <li>\n <p> Aliases - Possible Aliases for the label. </p>\n </li>\n <li>\n <p>Categories - The label categories that the detected label belongs to.</p>\n </li>\n <li>\n <p>BoundingBox — Bounding boxes are described for all instances of detected common object labels, \n returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing \n the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.</p>\n </li>\n <li>\n <p>Timestamp - Time, in milliseconds from the start of the video, that the label was detected.\n For aggregation by <code>SEGMENTS</code>, the <code>StartTimestampMillis</code>,\n <code>EndTimestampMillis</code>, and <code>DurationMillis</code> structures are what\n define a segment. Although the “Timestamp” structure is still returned with each label,\n its value is set to be the same as <code>StartTimestampMillis</code>.</p>\n </li>\n </ul>\n <p>Timestamp and Bounding box information are returned for detected Instances, only if\n aggregation is done by <code>TIMESTAMPS</code>. If aggregating by <code>SEGMENTS</code>,\n information about detected instances isn’t returned. </p>\n <p>The version of the label model used for the detection is also returned.</p>\n <p>\n <b>Note <code>DominantColors</code> isn't returned for <code>Instances</code>,\n although it is shown as part of the response in the sample seen below.</b>\n </p>\n <p>Use <code>MaxResults</code> parameter to limit the number of labels returned. If\n there are more results than specified in <code>MaxResults</code>, the value of\n <code>NextToken</code> in the operation response contains a pagination token for getting the\n next set of results. 
To get the next page of results, call <code>GetlabelDetection</code> and\n populate the <code>NextToken</code> request parameter with the token value returned from the\n previous call to <code>GetLabelDetection</code>.</p>",
"smithy.api#documentation": "<p>Gets the label detection results of a Amazon Rekognition Video analysis started by <a>StartLabelDetection</a>. </p>\n <p>The label detection operation is started by a call to <a>StartLabelDetection</a> which returns a job identifier (<code>JobId</code>). When\n the label detection operation finishes, Amazon Rekognition publishes a completion status to the\n Amazon Simple Notification Service topic registered in the initial call to <code>StartlabelDetection</code>. </p>\n <p>To get the results of the label detection operation, first check that the status value\n published to the Amazon SNS topic is <code>SUCCEEDED</code>. If so, call <a>GetLabelDetection</a> and pass the job identifier (<code>JobId</code>) from the\n initial call to <code>StartLabelDetection</code>.</p>\n <p>\n <code>GetLabelDetection</code> returns an array of detected labels\n (<code>Labels</code>) sorted by the time the labels were detected. You can also sort by the\n label name by specifying <code>NAME</code> for the <code>SortBy</code> input parameter. If\n there is no <code>NAME</code> specified, the default sort is by\n timestamp.</p>\n <p>You can select how results are aggregated by using the <code>AggregateBy</code> input\n parameter. The default aggregation method is <code>TIMESTAMPS</code>. You can also aggregate\n by <code>SEGMENTS</code>, which aggregates all instances of labels detected in a given\n segment. </p>\n <p>The returned Labels array may include the following attributes:</p>\n <ul>\n <li>\n <p>Name - The name of the detected label.</p>\n </li>\n <li>\n <p>Confidence - The level of confidence in the label assigned to a detected object. </p>\n </li>\n <li>\n <p>Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical\n taxonomy of detected labels. For example, a detected car might be assigned the label car.\n The label car has two parent labels: Vehicle (its parent) and Transportation (its\n grandparent). 
The response includes the all ancestors for a label, where every ancestor is\n a unique label. In the previous example, Car, Vehicle, and Transportation are returned as\n unique labels in the response. </p>\n </li>\n <li>\n <p> Aliases - Possible Aliases for the label. </p>\n </li>\n <li>\n <p>Categories - The label categories that the detected label belongs to.</p>\n </li>\n <li>\n <p>BoundingBox — Bounding boxes are described for all instances of detected common object labels, \n returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing \n the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.</p>\n </li>\n <li>\n <p>Timestamp - Time, in milliseconds from the start of the video, that the label was detected.\n For aggregation by <code>SEGMENTS</code>, the <code>StartTimestampMillis</code>,\n <code>EndTimestampMillis</code>, and <code>DurationMillis</code> structures are what\n define a segment. Although the “Timestamp” structure is still returned with each label,\n its value is set to be the same as <code>StartTimestampMillis</code>.</p>\n </li>\n </ul>\n <p>Timestamp and Bounding box information are returned for detected Instances, only if\n aggregation is done by <code>TIMESTAMPS</code>. If aggregating by <code>SEGMENTS</code>,\n information about detected instances isn’t returned. </p>\n <p>The version of the label model used for the detection is also returned.</p>\n <p>\n <b>Note <code>DominantColors</code> isn't returned for <code>Instances</code>,\n although it is shown as part of the response in the sample seen below.</b>\n </p>\n <p>Use <code>MaxResults</code> parameter to limit the number of labels returned. If\n there are more results than specified in <code>MaxResults</code>, the value of\n <code>NextToken</code> in the operation response contains a pagination token for getting the\n next set of results. 
To get the next page of results, call <code>GetlabelDetection</code> and\n populate the <code>NextToken</code> request parameter with the token value returned from the\n previous call to <code>GetLabelDetection</code>.</p>\n <p>If you are retrieving results while using the Amazon Simple Notification Service, note that you will receive an\n \"ERROR\" notification if the job encounters an issue.</p>",
"smithy.api#paginated": {
"inputToken": "NextToken",
"outputToken": "NextToken",
Expand Down

0 comments on commit f795a25

Please sign in to comment.