From 8a5726bb043766e7171692a92c1fd7d50e661c08 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 28 Jan 2020 16:01:42 -0800 Subject: [PATCH] fix: new proto annotations; .d.ts enums support strings (#358) * [CHANGE ME] Re-generated to pick up changes in the API or client library generator. * fix: regenerate library Co-authored-by: Alexander Fenster --- .../v1beta2/video_intelligence.proto | 30 +- .../v1p1beta1/video_intelligence.proto | 86 +- .../v1p2beta1/video_intelligence.proto | 39 +- .../v1p3beta1/video_intelligence.proto | 85 +- .../protos/protos.d.ts | 666 ++- .../protos/protos.js | 4987 +++++++++++------ .../protos/protos.json | 212 +- .../v1beta2/doc_video_intelligence.js | 10 +- .../video_intelligence_service_client.js | 6 +- .../v1p1beta1/doc_video_intelligence.js | 48 +- .../video_intelligence_service_client.js | 18 +- .../v1p2beta1/doc_video_intelligence.js | 18 +- .../video_intelligence_service_client.js | 18 +- .../v1p3beta1/doc_video_intelligence.js | 144 +- .../synth.metadata | 115 +- 15 files changed, 4342 insertions(+), 2140 deletions(-) diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto index a69c25791e1..8e80640e05c 100644 --- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto +++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; package google.cloud.videointelligence.v1beta2; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -32,6 +35,10 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1beta2"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). @@ -42,6 +49,11 @@ service VideoIntelligenceService { post: "/v1beta2/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -64,31 +76,31 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. 
+ repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For // more information, see [Request URIs](/storage/docs/reference-uris). - string output_uri = 4; + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. message VideoContext { // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video - // is treated as a single segment. + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. repeated VideoSegment segments = 1; // Config for LABEL_DETECTION. diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto index 115f362beb6..44d3ca64162 100644 --- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto +++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p1beta1/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; package google.cloud.videointelligence.v1p1beta1; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -32,16 +35,23 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1p1beta1"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). 
- rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1p1beta1/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -51,10 +61,10 @@ message AnnotateVideoRequest { // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. string input_uri = 1; @@ -64,24 +74,24 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). - string output_uri = 4; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. @@ -285,60 +295,60 @@ message AnnotateVideoProgress { // Config for SPEECH_TRANSCRIPTION. message SpeechTranscriptionConfig { - // *Required* The language of the supplied audio as a + // Required. *Required* The language of the supplied audio as a // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. // Example: "en-US". 
// See [Language Support](https://cloud.google.com/speech/docs/languages) // for a list of the currently supported language codes. - string language_code = 1; + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - // *Optional* Maximum number of recognition hypotheses to be returned. + // Optional. Maximum number of recognition hypotheses to be returned. // Specifically, the maximum number of `SpeechRecognitionAlternative` messages - // within each `SpeechRecognitionResult`. The server may return fewer than + // within each `SpeechTranscription`. The server may return fewer than // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will // return a maximum of one. If omitted, will return a maximum of one. - int32 max_alternatives = 2; + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* If set to `true`, the server will attempt to filter out + // Optional. If set to `true`, the server will attempt to filter out // profanities, replacing all but the initial character in each filtered word // with asterisks, e.g. "f***". If set to `false` or omitted, profanities // won't be filtered out. - bool filter_profanity = 3; + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4; + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* If 'true', adds punctuation to recognition result hypotheses. + // Optional. If 'true', adds punctuation to recognition result hypotheses. // This feature is only available in select languages. Setting this for // requests in other languages has no effect at all. The default 'false' value // does not add punctuation to result hypotheses. NOTE: "This is currently // offered as an experimental service, complimentary to all users. In the // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5; + bool enable_automatic_punctuation = 5 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* For file formats, such as MXF or MKV, supporting multiple audio + // Optional. For file formats, such as MXF or MKV, supporting multiple audio // tracks, specify up to two tracks. Default: track 0. - repeated int32 audio_tracks = 6; + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; } // Provides "hints" to the speech recognizer to favor specific words and phrases // in the results. message SpeechContext { - // *Optional* A list of strings containing words and phrases "hints" so that + // Optional. A list of strings containing words and phrases "hints" so that // the speech recognition is more likely to recognize them. This can be used // to improve the accuracy for specific words and phrases, for example, if // specific commands are typically spoken by the user. This can also be used // to add additional words to the vocabulary of the recognizer. See // [usage limits](https://cloud.google.com/speech/limits#content). - repeated string phrases = 1; + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; } // A speech recognition result corresponding to a portion of the audio. message SpeechTranscription { - // Output only. May contain one or more recognition hypotheses (up to the - // maximum specified in `max_alternatives`). 
- // These alternatives are ordered in terms of accuracy, with the top (first) - // alternative being the most probable, as ranked by the recognizer. + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. repeated SpeechRecognitionAlternative alternatives = 1; } @@ -349,11 +359,11 @@ message SpeechRecognitionAlternative { // Output only. The confidence estimate between 0.0 and 1.0. A higher number // indicates an estimated greater likelihood that the recognized words are - // correct. This field is typically provided only for the top hypothesis, and - // only for `is_final=true` results. Clients should not rely on the - // `confidence` field as it is not guaranteed to be accurate or consistent. + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 2; + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A list of word-specific information for each recognized word. repeated WordInfo words = 3; diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto index 0a16e7afd15..044233b09d2 100644 --- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto +++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p2beta1/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ syntax = "proto3"; package google.cloud.videointelligence.v1p2beta1; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -33,16 +35,23 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). 
- rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1p2beta1/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -52,10 +61,10 @@ message AnnotateVideoRequest { // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. string input_uri = 1; @@ -65,24 +74,24 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). - string output_uri = 4; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. 
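
The v1beta2, v1p1beta1 and v1p2beta1 services above all pick up the same annotations: `default_host`, `oauth_scopes`, `field_behavior`, `method_signature` and `operation_info`. A minimal usage sketch of what those last two mean for callers, assuming the package's published Node.js client surface in the style of its doc samples (the bucket path and feature choice are illustrative):

import * as videoIntelligence from '@google-cloud/video-intelligence';

async function annotate(): Promise<void> {
  const client = new videoIntelligence.v1p2beta1.VideoIntelligenceServiceClient();

  // method_signature = "input_uri,features": input_uri and features are the
  // canonical arguments for AnnotateVideo.
  const [operation] = await client.annotateVideo({
    inputUri: 'gs://my-bucket/my-video.mp4',
    features: ['LABEL_DETECTION'],
  });

  // operation_info: the long-running operation resolves to an
  // AnnotateVideoResponse, with AnnotateVideoProgress as its metadata.
  const [response] = await operation.promise();
  console.log(response.annotationResults);
}

annotate().catch(console.error);
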
diff --git a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto index 1203b315283..942f63be8a9 100644 --- a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto +++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -124,12 +124,18 @@ message VideoContext { // Config for EXPLICIT_CONTENT_DETECTION. ExplicitContentDetectionConfig explicit_content_detection_config = 4; + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + // Config for SPEECH_TRANSCRIPTION. SpeechTranscriptionConfig speech_transcription_config = 6; // Config for TEXT_DETECTION. TextDetectionConfig text_detection_config = 8; + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + // Config for OBJECT_TRACKING. ObjectTrackingConfig object_tracking_config = 13; } @@ -192,6 +198,38 @@ message ExplicitContentDetectionConfig { string model = 1; } +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes be included in the face annotation output. + bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open etc. Ignored if 'include_bounding_boxes' is false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes be included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as cloth color (black, + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair + // color (black, blonde, etc), hair length (long, short, bald), etc. + // Ignored if 'include_bounding_boxes' is false. + bool include_attributes = 3; +} + // Config for TEXT_DETECTION. message TextDetectionConfig { // Language hint can be specified if the language to be detected is known a @@ -318,6 +356,10 @@ message TimestampedObject { // Optional. The attributes of the object in the bounding box. repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The detected landmarks. + repeated DetectedLandmark landmarks = 4 + [(google.api.field_behavior) = OPTIONAL]; } // A track of an object instance. @@ -392,6 +434,35 @@ message CelebrityRecognitionAnnotation { repeated CelebrityTrack celebrity_tracks = 1; } +// A generic detected landmark represented by name in string format and a 2D +// location. +message DetectedLandmark { + // The name of this landmark, i.e. left_hand, right_shoulder. + string name = 1; + + // The 2D point of the detected landmark using the normalized image + // coordindate system. The normalized coordinates have the range from 0 to 1. 
+ NormalizedVertex point = 2; + + // The confidence score of the detected landmark. Range [0, 1]. + float confidence = 3; +} + +// Face detection annotation. +message FaceDetectionAnnotation { + // The face tracks with attributes. + repeated Track tracks = 3; + + // The thumbnail of a person's face. + bytes thumbnail = 4; +} + +// Person detection annotation per video. +message PersonDetectionAnnotation { + // The trackes that a person is detected. + repeated Track tracks = 1; +} + // Annotation results for a single video. message VideoAnnotationResults { // Video file location in @@ -428,6 +499,9 @@ message VideoAnnotationResults { // There is exactly one element for each unique label. repeated LabelAnnotation frame_label_annotations = 4; + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; + // Shot annotations. Each shot is represented as a video segment. repeated VideoSegment shot_annotations = 6; @@ -448,6 +522,9 @@ message VideoAnnotationResults { // Annotations for list of logos detected, tracked and recognized in video. repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + // Celebrity recognition annotations. CelebrityRecognitionAnnotation celebrity_recognition_annotations = 21; @@ -912,6 +989,9 @@ enum Feature { // Explicit content detection. EXPLICIT_CONTENT_DETECTION = 3; + // Human face detection. + FACE_DETECTION = 4; + // Speech transcription. SPEECH_TRANSCRIPTION = 6; @@ -926,6 +1006,9 @@ enum Feature { // Celebrity recognition. CELEBRITY_RECOGNITION = 13; + + // Person detection. + PERSON_DETECTION = 14; } // Label detection mode. diff --git a/packages/google-cloud-videointelligence/protos/protos.d.ts b/packages/google-cloud-videointelligence/protos/protos.d.ts index ef1fce2a5ea..7be3137049e 100644 --- a/packages/google-cloud-videointelligence/protos/protos.d.ts +++ b/packages/google-cloud-videointelligence/protos/protos.d.ts @@ -78,7 +78,7 @@ export namespace google { inputUri?: (string|null); /** AnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); /** AnnotateVideoRequest features */ features?: (google.cloud.videointelligence.v1.Feature[]|null); @@ -106,7 +106,7 @@ export namespace google { public inputUri: string; /** AnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** AnnotateVideoRequest features. */ public features: google.cloud.videointelligence.v1.Feature[]; @@ -357,7 +357,7 @@ export namespace google { interface ILabelDetectionConfig { /** LabelDetectionConfig labelDetectionMode */ - labelDetectionMode?: (google.cloud.videointelligence.v1.LabelDetectionMode|null); + labelDetectionMode?: (google.cloud.videointelligence.v1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1.LabelDetectionMode|null); /** LabelDetectionConfig stationaryCamera */ stationaryCamera?: (boolean|null); @@ -382,7 +382,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1.ILabelDetectionConfig); /** LabelDetectionConfig labelDetectionMode. */ - public labelDetectionMode: google.cloud.videointelligence.v1.LabelDetectionMode; + public labelDetectionMode: (google.cloud.videointelligence.v1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1.LabelDetectionMode); /** LabelDetectionConfig stationaryCamera. 
*/ public stationaryCamera: boolean; @@ -1434,7 +1434,7 @@ export namespace google { timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood */ - pornographyLikelihood?: (google.cloud.videointelligence.v1.Likelihood|null); + pornographyLikelihood?: (google.cloud.videointelligence.v1.Likelihood|keyof typeof google.cloud.videointelligence.v1.Likelihood|null); } /** Represents an ExplicitContentFrame. */ @@ -1450,7 +1450,7 @@ export namespace google { public timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood. */ - public pornographyLikelihood: google.cloud.videointelligence.v1.Likelihood; + public pornographyLikelihood: (google.cloud.videointelligence.v1.Likelihood|keyof typeof google.cloud.videointelligence.v1.Likelihood); /** * Creates a new ExplicitContentFrame instance using the specified properties. @@ -1911,7 +1911,7 @@ export namespace google { interface IFaceAnnotation { /** FaceAnnotation thumbnail */ - thumbnail?: (Uint8Array|null); + thumbnail?: (Uint8Array|string|null); /** FaceAnnotation segments */ segments?: (google.cloud.videointelligence.v1.IFaceSegment[]|null); @@ -1930,7 +1930,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1.IFaceAnnotation); /** FaceAnnotation thumbnail. */ - public thumbnail: Uint8Array; + public thumbnail: (Uint8Array|string); /** FaceAnnotation segments. */ public segments: google.cloud.videointelligence.v1.IFaceSegment[]; @@ -2283,7 +2283,7 @@ export namespace google { updateTime?: (google.protobuf.ITimestamp|null); /** VideoAnnotationProgress feature */ - feature?: (google.cloud.videointelligence.v1.Feature|null); + feature?: (google.cloud.videointelligence.v1.Feature|keyof typeof google.cloud.videointelligence.v1.Feature|null); /** VideoAnnotationProgress segment */ segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); @@ -2311,7 +2311,7 @@ export namespace google { public updateTime?: (google.protobuf.ITimestamp|null); /** VideoAnnotationProgress feature. */ - public feature: google.cloud.videointelligence.v1.Feature; + public feature: (google.cloud.videointelligence.v1.Feature|keyof typeof google.cloud.videointelligence.v1.Feature); /** VideoAnnotationProgress segment. */ public segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); @@ -3600,7 +3600,7 @@ export namespace google { segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); /** ObjectTrackingAnnotation trackId */ - trackId?: (number|Long|null); + trackId?: (number|Long|string|null); /** ObjectTrackingAnnotation entity */ entity?: (google.cloud.videointelligence.v1.IEntity|null); @@ -3625,7 +3625,7 @@ export namespace google { public segment?: (google.cloud.videointelligence.v1.IVideoSegment|null); /** ObjectTrackingAnnotation trackId. */ - public trackId: (number|Long); + public trackId: (number|Long|string); /** ObjectTrackingAnnotation entity. */ public entity?: (google.cloud.videointelligence.v1.IEntity|null); @@ -3766,7 +3766,7 @@ export namespace google { inputUri?: (string|null); /** AnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); /** AnnotateVideoRequest features */ features?: (google.cloud.videointelligence.v1beta2.Feature[]|null); @@ -3794,7 +3794,7 @@ export namespace google { public inputUri: string; /** AnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** AnnotateVideoRequest features. 
*/ public features: google.cloud.videointelligence.v1beta2.Feature[]; @@ -3997,7 +3997,7 @@ export namespace google { interface ILabelDetectionConfig { /** LabelDetectionConfig labelDetectionMode */ - labelDetectionMode?: (google.cloud.videointelligence.v1beta2.LabelDetectionMode|null); + labelDetectionMode?: (google.cloud.videointelligence.v1beta2.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1beta2.LabelDetectionMode|null); /** LabelDetectionConfig stationaryCamera */ stationaryCamera?: (boolean|null); @@ -4016,7 +4016,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1beta2.ILabelDetectionConfig); /** LabelDetectionConfig labelDetectionMode. */ - public labelDetectionMode: google.cloud.videointelligence.v1beta2.LabelDetectionMode; + public labelDetectionMode: (google.cloud.videointelligence.v1beta2.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1beta2.LabelDetectionMode); /** LabelDetectionConfig stationaryCamera. */ public stationaryCamera: boolean; @@ -4876,7 +4876,7 @@ export namespace google { timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood */ - pornographyLikelihood?: (google.cloud.videointelligence.v1beta2.Likelihood|null); + pornographyLikelihood?: (google.cloud.videointelligence.v1beta2.Likelihood|keyof typeof google.cloud.videointelligence.v1beta2.Likelihood|null); } /** Represents an ExplicitContentFrame. */ @@ -4892,7 +4892,7 @@ export namespace google { public timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood. */ - public pornographyLikelihood: google.cloud.videointelligence.v1beta2.Likelihood; + public pornographyLikelihood: (google.cloud.videointelligence.v1beta2.Likelihood|keyof typeof google.cloud.videointelligence.v1beta2.Likelihood); /** * Creates a new ExplicitContentFrame instance using the specified properties. @@ -5353,7 +5353,7 @@ export namespace google { interface IFaceAnnotation { /** FaceAnnotation thumbnail */ - thumbnail?: (Uint8Array|null); + thumbnail?: (Uint8Array|string|null); /** FaceAnnotation segments */ segments?: (google.cloud.videointelligence.v1beta2.IFaceSegment[]|null); @@ -5372,7 +5372,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1beta2.IFaceAnnotation); /** FaceAnnotation thumbnail. */ - public thumbnail: Uint8Array; + public thumbnail: (Uint8Array|string); /** FaceAnnotation segments. */ public segments: google.cloud.videointelligence.v1beta2.IFaceSegment[]; @@ -5954,7 +5954,7 @@ export namespace google { inputUri?: (string|null); /** AnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); /** AnnotateVideoRequest features */ features?: (google.cloud.videointelligence.v1p1beta1.Feature[]|null); @@ -5982,7 +5982,7 @@ export namespace google { public inputUri: string; /** AnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** AnnotateVideoRequest features. 
*/ public features: google.cloud.videointelligence.v1p1beta1.Feature[]; @@ -6185,7 +6185,7 @@ export namespace google { interface ILabelDetectionConfig { /** LabelDetectionConfig labelDetectionMode */ - labelDetectionMode?: (google.cloud.videointelligence.v1p1beta1.LabelDetectionMode|null); + labelDetectionMode?: (google.cloud.videointelligence.v1p1beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p1beta1.LabelDetectionMode|null); /** LabelDetectionConfig stationaryCamera */ stationaryCamera?: (boolean|null); @@ -6204,7 +6204,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1p1beta1.ILabelDetectionConfig); /** LabelDetectionConfig labelDetectionMode. */ - public labelDetectionMode: google.cloud.videointelligence.v1p1beta1.LabelDetectionMode; + public labelDetectionMode: (google.cloud.videointelligence.v1p1beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p1beta1.LabelDetectionMode); /** LabelDetectionConfig stationaryCamera. */ public stationaryCamera: boolean; @@ -6968,7 +6968,7 @@ export namespace google { timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood */ - pornographyLikelihood?: (google.cloud.videointelligence.v1p1beta1.Likelihood|null); + pornographyLikelihood?: (google.cloud.videointelligence.v1p1beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p1beta1.Likelihood|null); } /** Represents an ExplicitContentFrame. */ @@ -6984,7 +6984,7 @@ export namespace google { public timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood. */ - public pornographyLikelihood: google.cloud.videointelligence.v1p1beta1.Likelihood; + public pornographyLikelihood: (google.cloud.videointelligence.v1p1beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p1beta1.Likelihood); /** * Creates a new ExplicitContentFrame instance using the specified properties. @@ -8154,7 +8154,7 @@ export namespace google { inputUri?: (string|null); /** AnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); /** AnnotateVideoRequest features */ features?: (google.cloud.videointelligence.v1p2beta1.Feature[]|null); @@ -8182,7 +8182,7 @@ export namespace google { public inputUri: string; /** AnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** AnnotateVideoRequest features. */ public features: google.cloud.videointelligence.v1p2beta1.Feature[]; @@ -8385,7 +8385,7 @@ export namespace google { interface ILabelDetectionConfig { /** LabelDetectionConfig labelDetectionMode */ - labelDetectionMode?: (google.cloud.videointelligence.v1p2beta1.LabelDetectionMode|null); + labelDetectionMode?: (google.cloud.videointelligence.v1p2beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p2beta1.LabelDetectionMode|null); /** LabelDetectionConfig stationaryCamera */ stationaryCamera?: (boolean|null); @@ -8404,7 +8404,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1p2beta1.ILabelDetectionConfig); /** LabelDetectionConfig labelDetectionMode. */ - public labelDetectionMode: google.cloud.videointelligence.v1p2beta1.LabelDetectionMode; + public labelDetectionMode: (google.cloud.videointelligence.v1p2beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p2beta1.LabelDetectionMode); /** LabelDetectionConfig stationaryCamera. 
*/ public stationaryCamera: boolean; @@ -9258,7 +9258,7 @@ export namespace google { timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood */ - pornographyLikelihood?: (google.cloud.videointelligence.v1p2beta1.Likelihood|null); + pornographyLikelihood?: (google.cloud.videointelligence.v1p2beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p2beta1.Likelihood|null); } /** Represents an ExplicitContentFrame. */ @@ -9274,7 +9274,7 @@ export namespace google { public timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood. */ - public pornographyLikelihood: google.cloud.videointelligence.v1p2beta1.Likelihood; + public pornographyLikelihood: (google.cloud.videointelligence.v1p2beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p2beta1.Likelihood); /** * Creates a new ExplicitContentFrame instance using the specified properties. @@ -10784,7 +10784,7 @@ export namespace google { inputUri?: (string|null); /** AnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); /** AnnotateVideoRequest features */ features?: (google.cloud.videointelligence.v1p3beta1.Feature[]|null); @@ -10812,7 +10812,7 @@ export namespace google { public inputUri: string; /** AnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** AnnotateVideoRequest features. */ public features: google.cloud.videointelligence.v1p3beta1.Feature[]; @@ -10912,12 +10912,18 @@ export namespace google { /** VideoContext explicitContentDetectionConfig */ explicitContentDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IExplicitContentDetectionConfig|null); + /** VideoContext faceDetectionConfig */ + faceDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig|null); + /** VideoContext speechTranscriptionConfig */ speechTranscriptionConfig?: (google.cloud.videointelligence.v1p3beta1.ISpeechTranscriptionConfig|null); /** VideoContext textDetectionConfig */ textDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig|null); + /** VideoContext personDetectionConfig */ + personDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig|null); + /** VideoContext objectTrackingConfig */ objectTrackingConfig?: (google.cloud.videointelligence.v1p3beta1.IObjectTrackingConfig|null); } @@ -10943,12 +10949,18 @@ export namespace google { /** VideoContext explicitContentDetectionConfig. */ public explicitContentDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IExplicitContentDetectionConfig|null); + /** VideoContext faceDetectionConfig. */ + public faceDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig|null); + /** VideoContext speechTranscriptionConfig. */ public speechTranscriptionConfig?: (google.cloud.videointelligence.v1p3beta1.ISpeechTranscriptionConfig|null); /** VideoContext textDetectionConfig. */ public textDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig|null); + /** VideoContext personDetectionConfig. */ + public personDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig|null); + /** VideoContext objectTrackingConfig. 
*/ public objectTrackingConfig?: (google.cloud.videointelligence.v1p3beta1.IObjectTrackingConfig|null); @@ -11027,7 +11039,7 @@ export namespace google { interface ILabelDetectionConfig { /** LabelDetectionConfig labelDetectionMode */ - labelDetectionMode?: (google.cloud.videointelligence.v1p3beta1.LabelDetectionMode|null); + labelDetectionMode?: (google.cloud.videointelligence.v1p3beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p3beta1.LabelDetectionMode|null); /** LabelDetectionConfig stationaryCamera */ stationaryCamera?: (boolean|null); @@ -11052,7 +11064,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1p3beta1.ILabelDetectionConfig); /** LabelDetectionConfig labelDetectionMode. */ - public labelDetectionMode: google.cloud.videointelligence.v1p3beta1.LabelDetectionMode; + public labelDetectionMode: (google.cloud.videointelligence.v1p3beta1.LabelDetectionMode|keyof typeof google.cloud.videointelligence.v1p3beta1.LabelDetectionMode); /** LabelDetectionConfig stationaryCamera. */ public stationaryCamera: boolean; @@ -11407,6 +11419,210 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a FaceDetectionConfig. */ + interface IFaceDetectionConfig { + + /** FaceDetectionConfig model */ + model?: (string|null); + + /** FaceDetectionConfig includeBoundingBoxes */ + includeBoundingBoxes?: (boolean|null); + + /** FaceDetectionConfig includeAttributes */ + includeAttributes?: (boolean|null); + } + + /** Represents a FaceDetectionConfig. */ + class FaceDetectionConfig implements IFaceDetectionConfig { + + /** + * Constructs a new FaceDetectionConfig. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig); + + /** FaceDetectionConfig model. */ + public model: string; + + /** FaceDetectionConfig includeBoundingBoxes. */ + public includeBoundingBoxes: boolean; + + /** FaceDetectionConfig includeAttributes. */ + public includeAttributes: boolean; + + /** + * Creates a new FaceDetectionConfig instance using the specified properties. + * @param [properties] Properties to set + * @returns FaceDetectionConfig instance + */ + public static create(properties?: google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig): google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig; + + /** + * Encodes the specified FaceDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify|verify} messages. + * @param message FaceDetectionConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified FaceDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify|verify} messages. + * @param message FaceDetectionConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a FaceDetectionConfig message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns FaceDetectionConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig; + + /** + * Decodes a FaceDetectionConfig message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns FaceDetectionConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig; + + /** + * Verifies a FaceDetectionConfig message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a FaceDetectionConfig message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns FaceDetectionConfig + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig; + + /** + * Creates a plain object from a FaceDetectionConfig message. Also converts values to other types if specified. + * @param message FaceDetectionConfig + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this FaceDetectionConfig to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a PersonDetectionConfig. */ + interface IPersonDetectionConfig { + + /** PersonDetectionConfig includeBoundingBoxes */ + includeBoundingBoxes?: (boolean|null); + + /** PersonDetectionConfig includePoseLandmarks */ + includePoseLandmarks?: (boolean|null); + + /** PersonDetectionConfig includeAttributes */ + includeAttributes?: (boolean|null); + } + + /** Represents a PersonDetectionConfig. */ + class PersonDetectionConfig implements IPersonDetectionConfig { + + /** + * Constructs a new PersonDetectionConfig. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig); + + /** PersonDetectionConfig includeBoundingBoxes. */ + public includeBoundingBoxes: boolean; + + /** PersonDetectionConfig includePoseLandmarks. */ + public includePoseLandmarks: boolean; + + /** PersonDetectionConfig includeAttributes. */ + public includeAttributes: boolean; + + /** + * Creates a new PersonDetectionConfig instance using the specified properties. + * @param [properties] Properties to set + * @returns PersonDetectionConfig instance + */ + public static create(properties?: google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig): google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig; + + /** + * Encodes the specified PersonDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.verify|verify} messages. 
+ * @param message PersonDetectionConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified PersonDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.verify|verify} messages. + * @param message PersonDetectionConfig message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a PersonDetectionConfig message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns PersonDetectionConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig; + + /** + * Decodes a PersonDetectionConfig message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns PersonDetectionConfig + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig; + + /** + * Verifies a PersonDetectionConfig message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a PersonDetectionConfig message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns PersonDetectionConfig + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig; + + /** + * Creates a plain object from a PersonDetectionConfig message. Also converts values to other types if specified. + * @param message PersonDetectionConfig + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this PersonDetectionConfig to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a TextDetectionConfig. */ interface ITextDetectionConfig { @@ -12008,7 +12224,7 @@ export namespace google { timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood */ - pornographyLikelihood?: (google.cloud.videointelligence.v1p3beta1.Likelihood|null); + pornographyLikelihood?: (google.cloud.videointelligence.v1p3beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p3beta1.Likelihood|null); } /** Represents an ExplicitContentFrame. */ @@ -12024,7 +12240,7 @@ export namespace google { public timeOffset?: (google.protobuf.IDuration|null); /** ExplicitContentFrame pornographyLikelihood. 
*/ - public pornographyLikelihood: google.cloud.videointelligence.v1p3beta1.Likelihood; + public pornographyLikelihood: (google.cloud.videointelligence.v1p3beta1.Likelihood|keyof typeof google.cloud.videointelligence.v1p3beta1.Likelihood); /** * Creates a new ExplicitContentFrame instance using the specified properties. @@ -12306,6 +12522,9 @@ export namespace google { /** TimestampedObject attributes */ attributes?: (google.cloud.videointelligence.v1p3beta1.IDetectedAttribute[]|null); + + /** TimestampedObject landmarks */ + landmarks?: (google.cloud.videointelligence.v1p3beta1.IDetectedLandmark[]|null); } /** Represents a TimestampedObject. */ @@ -12326,6 +12545,9 @@ export namespace google { /** TimestampedObject attributes. */ public attributes: google.cloud.videointelligence.v1p3beta1.IDetectedAttribute[]; + /** TimestampedObject landmarks. */ + public landmarks: google.cloud.videointelligence.v1p3beta1.IDetectedLandmark[]; + /** * Creates a new TimestampedObject instance using the specified properties. * @param [properties] Properties to set @@ -12994,6 +13216,294 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** Properties of a DetectedLandmark. */ + interface IDetectedLandmark { + + /** DetectedLandmark name */ + name?: (string|null); + + /** DetectedLandmark point */ + point?: (google.cloud.videointelligence.v1p3beta1.INormalizedVertex|null); + + /** DetectedLandmark confidence */ + confidence?: (number|null); + } + + /** Represents a DetectedLandmark. */ + class DetectedLandmark implements IDetectedLandmark { + + /** + * Constructs a new DetectedLandmark. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1p3beta1.IDetectedLandmark); + + /** DetectedLandmark name. */ + public name: string; + + /** DetectedLandmark point. */ + public point?: (google.cloud.videointelligence.v1p3beta1.INormalizedVertex|null); + + /** DetectedLandmark confidence. */ + public confidence: number; + + /** + * Creates a new DetectedLandmark instance using the specified properties. + * @param [properties] Properties to set + * @returns DetectedLandmark instance + */ + public static create(properties?: google.cloud.videointelligence.v1p3beta1.IDetectedLandmark): google.cloud.videointelligence.v1p3beta1.DetectedLandmark; + + /** + * Encodes the specified DetectedLandmark message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark.verify|verify} messages. + * @param message DetectedLandmark message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1p3beta1.IDetectedLandmark, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DetectedLandmark message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark.verify|verify} messages. + * @param message DetectedLandmark message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1p3beta1.IDetectedLandmark, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1p3beta1.DetectedLandmark; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1p3beta1.DetectedLandmark; + + /** + * Verifies a DetectedLandmark message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DetectedLandmark message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DetectedLandmark + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1p3beta1.DetectedLandmark; + + /** + * Creates a plain object from a DetectedLandmark message. Also converts values to other types if specified. + * @param message DetectedLandmark + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1p3beta1.DetectedLandmark, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DetectedLandmark to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a FaceDetectionAnnotation. */ + interface IFaceDetectionAnnotation { + + /** FaceDetectionAnnotation tracks */ + tracks?: (google.cloud.videointelligence.v1p3beta1.ITrack[]|null); + + /** FaceDetectionAnnotation thumbnail */ + thumbnail?: (Uint8Array|string|null); + } + + /** Represents a FaceDetectionAnnotation. */ + class FaceDetectionAnnotation implements IFaceDetectionAnnotation { + + /** + * Constructs a new FaceDetectionAnnotation. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation); + + /** FaceDetectionAnnotation tracks. */ + public tracks: google.cloud.videointelligence.v1p3beta1.ITrack[]; + + /** FaceDetectionAnnotation thumbnail. */ + public thumbnail: (Uint8Array|string); + + /** + * Creates a new FaceDetectionAnnotation instance using the specified properties. + * @param [properties] Properties to set + * @returns FaceDetectionAnnotation instance + */ + public static create(properties?: google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation): google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation; + + /** + * Encodes the specified FaceDetectionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.verify|verify} messages. 
+ * @param message FaceDetectionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified FaceDetectionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.verify|verify} messages. + * @param message FaceDetectionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a FaceDetectionAnnotation message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns FaceDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation; + + /** + * Decodes a FaceDetectionAnnotation message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns FaceDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation; + + /** + * Verifies a FaceDetectionAnnotation message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a FaceDetectionAnnotation message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns FaceDetectionAnnotation + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation; + + /** + * Creates a plain object from a FaceDetectionAnnotation message. Also converts values to other types if specified. + * @param message FaceDetectionAnnotation + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this FaceDetectionAnnotation to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + + /** Properties of a PersonDetectionAnnotation. */ + interface IPersonDetectionAnnotation { + + /** PersonDetectionAnnotation tracks */ + tracks?: (google.cloud.videointelligence.v1p3beta1.ITrack[]|null); + } + + /** Represents a PersonDetectionAnnotation. */ + class PersonDetectionAnnotation implements IPersonDetectionAnnotation { + + /** + * Constructs a new PersonDetectionAnnotation. + * @param [properties] Properties to set + */ + constructor(properties?: google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation); + + /** PersonDetectionAnnotation tracks. 
*/ + public tracks: google.cloud.videointelligence.v1p3beta1.ITrack[]; + + /** + * Creates a new PersonDetectionAnnotation instance using the specified properties. + * @param [properties] Properties to set + * @returns PersonDetectionAnnotation instance + */ + public static create(properties?: google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation): google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation; + + /** + * Encodes the specified PersonDetectionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.verify|verify} messages. + * @param message PersonDetectionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified PersonDetectionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.verify|verify} messages. + * @param message PersonDetectionAnnotation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a PersonDetectionAnnotation message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns PersonDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation; + + /** + * Decodes a PersonDetectionAnnotation message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns PersonDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation; + + /** + * Verifies a PersonDetectionAnnotation message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a PersonDetectionAnnotation message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns PersonDetectionAnnotation + */ + public static fromObject(object: { [k: string]: any }): google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation; + + /** + * Creates a plain object from a PersonDetectionAnnotation message. Also converts values to other types if specified. + * @param message PersonDetectionAnnotation + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this PersonDetectionAnnotation to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + } + /** Properties of a VideoAnnotationResults. */ interface IVideoAnnotationResults { @@ -13018,6 +13528,9 @@ export namespace google { /** VideoAnnotationResults frameLabelAnnotations */ frameLabelAnnotations?: (google.cloud.videointelligence.v1p3beta1.ILabelAnnotation[]|null); + /** VideoAnnotationResults faceDetectionAnnotations */ + faceDetectionAnnotations?: (google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation[]|null); + /** VideoAnnotationResults shotAnnotations */ shotAnnotations?: (google.cloud.videointelligence.v1p3beta1.IVideoSegment[]|null); @@ -13036,6 +13549,9 @@ export namespace google { /** VideoAnnotationResults logoRecognitionAnnotations */ logoRecognitionAnnotations?: (google.cloud.videointelligence.v1p3beta1.ILogoRecognitionAnnotation[]|null); + /** VideoAnnotationResults personDetectionAnnotations */ + personDetectionAnnotations?: (google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation[]|null); + /** VideoAnnotationResults celebrityRecognitionAnnotations */ celebrityRecognitionAnnotations?: (google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation|null); @@ -13073,6 +13589,9 @@ export namespace google { /** VideoAnnotationResults frameLabelAnnotations. */ public frameLabelAnnotations: google.cloud.videointelligence.v1p3beta1.ILabelAnnotation[]; + /** VideoAnnotationResults faceDetectionAnnotations. */ + public faceDetectionAnnotations: google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation[]; + /** VideoAnnotationResults shotAnnotations. */ public shotAnnotations: google.cloud.videointelligence.v1p3beta1.IVideoSegment[]; @@ -13091,6 +13610,9 @@ export namespace google { /** VideoAnnotationResults logoRecognitionAnnotations. */ public logoRecognitionAnnotations: google.cloud.videointelligence.v1p3beta1.ILogoRecognitionAnnotation[]; + /** VideoAnnotationResults personDetectionAnnotations. */ + public personDetectionAnnotations: google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation[]; + /** VideoAnnotationResults celebrityRecognitionAnnotations. */ public celebrityRecognitionAnnotations?: (google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation|null); @@ -13274,7 +13796,7 @@ export namespace google { updateTime?: (google.protobuf.ITimestamp|null); /** VideoAnnotationProgress feature */ - feature?: (google.cloud.videointelligence.v1p3beta1.Feature|null); + feature?: (google.cloud.videointelligence.v1p3beta1.Feature|keyof typeof google.cloud.videointelligence.v1p3beta1.Feature|null); /** VideoAnnotationProgress segment */ segment?: (google.cloud.videointelligence.v1p3beta1.IVideoSegment|null); @@ -13302,7 +13824,7 @@ export namespace google { public updateTime?: (google.protobuf.ITimestamp|null); /** VideoAnnotationProgress feature. */ - public feature: google.cloud.videointelligence.v1p3beta1.Feature; + public feature: (google.cloud.videointelligence.v1p3beta1.Feature|keyof typeof google.cloud.videointelligence.v1p3beta1.Feature); /** VideoAnnotationProgress segment. */ public segment?: (google.cloud.videointelligence.v1p3beta1.IVideoSegment|null); @@ -14600,7 +15122,7 @@ export namespace google { segment?: (google.cloud.videointelligence.v1p3beta1.IVideoSegment|null); /** ObjectTrackingAnnotation trackId */ - trackId?: (number|Long|null); + trackId?: (number|Long|string|null); } /** Represents an ObjectTrackingAnnotation. 
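// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. VideoAnnotationResults now
// exposes faceDetectionAnnotations and personDetectionAnnotations; they can be
// read like the other repeated annotation fields. Import path is an
// assumption, as in the earlier sketch.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

function logDetections(
  results: google.cloud.videointelligence.v1p3beta1.IVideoAnnotationResults
): void {
  for (const face of results.faceDetectionAnnotations || []) {
    console.log('face detection tracks:', (face.tracks || []).length);
  }
  for (const person of results.personDetectionAnnotations || []) {
    console.log('person detection tracks:', (person.tracks || []).length);
  }
}
// ---------------------------------------------------------------------------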
*/ @@ -14625,7 +15147,7 @@ export namespace google { public segment?: (google.cloud.videointelligence.v1p3beta1.IVideoSegment|null); /** ObjectTrackingAnnotation trackId. */ - public trackId: (number|Long); + public trackId: (number|Long|string); /** ObjectTrackingAnnotation trackInfo. */ public trackInfo?: ("segment"|"trackId"); @@ -14810,7 +15332,7 @@ export namespace google { videoConfig?: (google.cloud.videointelligence.v1p3beta1.IStreamingVideoConfig|null); /** StreamingAnnotateVideoRequest inputContent */ - inputContent?: (Uint8Array|null); + inputContent?: (Uint8Array|string|null); } /** Represents a StreamingAnnotateVideoRequest. */ @@ -14826,7 +15348,7 @@ export namespace google { public videoConfig?: (google.cloud.videointelligence.v1p3beta1.IStreamingVideoConfig|null); /** StreamingAnnotateVideoRequest inputContent. */ - public inputContent: Uint8Array; + public inputContent: (Uint8Array|string); /** StreamingAnnotateVideoRequest streamingRequest. */ public streamingRequest?: ("videoConfig"|"inputContent"); @@ -15734,7 +16256,7 @@ export namespace google { interface IStreamingVideoConfig { /** StreamingVideoConfig feature */ - feature?: (google.cloud.videointelligence.v1p3beta1.StreamingFeature|null); + feature?: (google.cloud.videointelligence.v1p3beta1.StreamingFeature|keyof typeof google.cloud.videointelligence.v1p3beta1.StreamingFeature|null); /** StreamingVideoConfig shotChangeDetectionConfig */ shotChangeDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IStreamingShotChangeDetectionConfig|null); @@ -15768,7 +16290,7 @@ export namespace google { constructor(properties?: google.cloud.videointelligence.v1p3beta1.IStreamingVideoConfig); /** StreamingVideoConfig feature. */ - public feature: google.cloud.videointelligence.v1p3beta1.StreamingFeature; + public feature: (google.cloud.videointelligence.v1p3beta1.StreamingFeature|keyof typeof google.cloud.videointelligence.v1p3beta1.StreamingFeature); /** StreamingVideoConfig shotChangeDetectionConfig. */ public shotChangeDetectionConfig?: (google.cloud.videointelligence.v1p3beta1.IStreamingShotChangeDetectionConfig|null); @@ -15871,11 +16393,13 @@ export namespace google { LABEL_DETECTION = 1, SHOT_CHANGE_DETECTION = 2, EXPLICIT_CONTENT_DETECTION = 3, + FACE_DETECTION = 4, SPEECH_TRANSCRIPTION = 6, TEXT_DETECTION = 7, OBJECT_TRACKING = 9, LOGO_RECOGNITION = 12, - CELEBRITY_RECOGNITION = 13 + CELEBRITY_RECOGNITION = 13, + PERSON_DETECTION = 14 } /** LabelDetectionMode enum. */ @@ -16957,10 +17481,10 @@ export namespace google { number?: (number|null); /** FieldDescriptorProto label */ - label?: (google.protobuf.FieldDescriptorProto.Label|null); + label?: (google.protobuf.FieldDescriptorProto.Label|keyof typeof google.protobuf.FieldDescriptorProto.Label|null); /** FieldDescriptorProto type */ - type?: (google.protobuf.FieldDescriptorProto.Type|null); + type?: (google.protobuf.FieldDescriptorProto.Type|keyof typeof google.protobuf.FieldDescriptorProto.Type|null); /** FieldDescriptorProto typeName */ typeName?: (string|null); @@ -16997,10 +17521,10 @@ export namespace google { public number: number; /** FieldDescriptorProto label. */ - public label: google.protobuf.FieldDescriptorProto.Label; + public label: (google.protobuf.FieldDescriptorProto.Label|keyof typeof google.protobuf.FieldDescriptorProto.Label); /** FieldDescriptorProto type. 
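// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. The Feature enum gains
// FACE_DETECTION (4) and PERSON_DETECTION (14); per the regenerated fromObject
// handling further below, features may also be requested by string name, and
// bytes fields such as inputContent now accept base64-encoded strings in place
// of a Uint8Array. Import path and URIs are placeholders.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

const request: google.cloud.videointelligence.v1p3beta1.IAnnotateVideoRequest = {
  inputUri: 'gs://example-bucket/example-video.mp4',
  features: ['FACE_DETECTION', 'PERSON_DETECTION'], // string names type-check after this change
};

const streamingChunk: google.cloud.videointelligence.v1p3beta1.IStreamingAnnotateVideoRequest = {
  inputContent: 'aGVsbG8gd29ybGQ=', // base64 string now accepted for bytes fields
};
// ---------------------------------------------------------------------------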
*/ - public type: google.protobuf.FieldDescriptorProto.Type; + public type: (google.protobuf.FieldDescriptorProto.Type|keyof typeof google.protobuf.FieldDescriptorProto.Type); /** FieldDescriptorProto typeName. */ public typeName: string; @@ -17775,7 +18299,7 @@ export namespace google { javaStringCheckUtf8?: (boolean|null); /** FileOptions optimizeFor */ - optimizeFor?: (google.protobuf.FileOptions.OptimizeMode|null); + optimizeFor?: (google.protobuf.FileOptions.OptimizeMode|keyof typeof google.protobuf.FileOptions.OptimizeMode|null); /** FileOptions goPackage */ goPackage?: (string|null); @@ -17848,7 +18372,7 @@ export namespace google { public javaStringCheckUtf8: boolean; /** FileOptions optimizeFor. */ - public optimizeFor: google.protobuf.FileOptions.OptimizeMode; + public optimizeFor: (google.protobuf.FileOptions.OptimizeMode|keyof typeof google.protobuf.FileOptions.OptimizeMode); /** FileOptions goPackage. */ public goPackage: string; @@ -18094,13 +18618,13 @@ export namespace google { interface IFieldOptions { /** FieldOptions ctype */ - ctype?: (google.protobuf.FieldOptions.CType|null); + ctype?: (google.protobuf.FieldOptions.CType|keyof typeof google.protobuf.FieldOptions.CType|null); /** FieldOptions packed */ packed?: (boolean|null); /** FieldOptions jstype */ - jstype?: (google.protobuf.FieldOptions.JSType|null); + jstype?: (google.protobuf.FieldOptions.JSType|keyof typeof google.protobuf.FieldOptions.JSType|null); /** FieldOptions lazy */ lazy?: (boolean|null); @@ -18128,13 +18652,13 @@ export namespace google { constructor(properties?: google.protobuf.IFieldOptions); /** FieldOptions ctype. */ - public ctype: google.protobuf.FieldOptions.CType; + public ctype: (google.protobuf.FieldOptions.CType|keyof typeof google.protobuf.FieldOptions.CType); /** FieldOptions packed. */ public packed: boolean; /** FieldOptions jstype. */ - public jstype: google.protobuf.FieldOptions.JSType; + public jstype: (google.protobuf.FieldOptions.JSType|keyof typeof google.protobuf.FieldOptions.JSType); /** FieldOptions lazy. */ public lazy: boolean; @@ -18633,7 +19157,7 @@ export namespace google { deprecated?: (boolean|null); /** MethodOptions idempotencyLevel */ - idempotencyLevel?: (google.protobuf.MethodOptions.IdempotencyLevel|null); + idempotencyLevel?: (google.protobuf.MethodOptions.IdempotencyLevel|keyof typeof google.protobuf.MethodOptions.IdempotencyLevel|null); /** MethodOptions uninterpretedOption */ uninterpretedOption?: (google.protobuf.IUninterpretedOption[]|null); @@ -18661,7 +19185,7 @@ export namespace google { public deprecated: boolean; /** MethodOptions idempotencyLevel. */ - public idempotencyLevel: google.protobuf.MethodOptions.IdempotencyLevel; + public idempotencyLevel: (google.protobuf.MethodOptions.IdempotencyLevel|keyof typeof google.protobuf.MethodOptions.IdempotencyLevel); /** MethodOptions uninterpretedOption. 
*/ public uninterpretedOption: google.protobuf.IUninterpretedOption[]; @@ -18757,16 +19281,16 @@ export namespace google { identifierValue?: (string|null); /** UninterpretedOption positiveIntValue */ - positiveIntValue?: (number|Long|null); + positiveIntValue?: (number|Long|string|null); /** UninterpretedOption negativeIntValue */ - negativeIntValue?: (number|Long|null); + negativeIntValue?: (number|Long|string|null); /** UninterpretedOption doubleValue */ doubleValue?: (number|null); /** UninterpretedOption stringValue */ - stringValue?: (Uint8Array|null); + stringValue?: (Uint8Array|string|null); /** UninterpretedOption aggregateValue */ aggregateValue?: (string|null); @@ -18788,16 +19312,16 @@ export namespace google { public identifierValue: string; /** UninterpretedOption positiveIntValue. */ - public positiveIntValue: (number|Long); + public positiveIntValue: (number|Long|string); /** UninterpretedOption negativeIntValue. */ - public negativeIntValue: (number|Long); + public negativeIntValue: (number|Long|string); /** UninterpretedOption doubleValue. */ public doubleValue: number; /** UninterpretedOption stringValue. */ - public stringValue: Uint8Array; + public stringValue: (Uint8Array|string); /** UninterpretedOption aggregateValue. */ public aggregateValue: string; @@ -19387,7 +19911,7 @@ export namespace google { type_url?: (string|null); /** Any value */ - value?: (Uint8Array|null); + value?: (Uint8Array|string|null); } /** Represents an Any. */ @@ -19403,7 +19927,7 @@ export namespace google { public type_url: string; /** Any value. */ - public value: Uint8Array; + public value: (Uint8Array|string); /** * Creates a new Any instance using the specified properties. @@ -19480,7 +20004,7 @@ export namespace google { interface IDuration { /** Duration seconds */ - seconds?: (number|Long|null); + seconds?: (number|Long|string|null); /** Duration nanos */ nanos?: (number|null); @@ -19496,7 +20020,7 @@ export namespace google { constructor(properties?: google.protobuf.IDuration); /** Duration seconds. */ - public seconds: (number|Long); + public seconds: (number|Long|string); /** Duration nanos. */ public nanos: number; @@ -19660,7 +20184,7 @@ export namespace google { interface ITimestamp { /** Timestamp seconds */ - seconds?: (number|Long|null); + seconds?: (number|Long|string|null); /** Timestamp nanos */ nanos?: (number|null); @@ -19676,7 +20200,7 @@ export namespace google { constructor(properties?: google.protobuf.ITimestamp); /** Timestamp seconds. */ - public seconds: (number|Long); + public seconds: (number|Long|string); /** Timestamp nanos. 
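// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. 64-bit fields typed number|Long
// now also accept decimal strings, which preserves precision for values beyond
// Number.MAX_SAFE_INTEGER. Import path is an assumption.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

const segment: google.cloud.videointelligence.v1p3beta1.IVideoSegment = {
  startTimeOffset: { seconds: 0, nanos: 0 },
  endTimeOffset: { seconds: '9007199254740993', nanos: 0 }, // string form avoids float rounding
};
// ---------------------------------------------------------------------------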
*/ public nanos: number; diff --git a/packages/google-cloud-videointelligence/protos/protos.js b/packages/google-cloud-videointelligence/protos/protos.js index b0f5c795179..64e727108e2 100644 --- a/packages/google-cloud-videointelligence/protos/protos.js +++ b/packages/google-cloud-videointelligence/protos/protos.js @@ -27162,11 +27162,13 @@ case 1: case 2: case 3: + case 4: case 6: case 7: case 9: case 12: case 13: + case 14: break; } } @@ -27226,6 +27228,10 @@ case 3: message.features[i] = 3; break; + case "FACE_DETECTION": + case 4: + message.features[i] = 4; + break; case "SPEECH_TRANSCRIPTION": case 6: message.features[i] = 6; @@ -27246,6 +27252,10 @@ case 13: message.features[i] = 13; break; + case "PERSON_DETECTION": + case 14: + message.features[i] = 14; + break; } } if (object.videoContext != null) { @@ -27330,8 +27340,10 @@ * @property {google.cloud.videointelligence.v1p3beta1.ILabelDetectionConfig|null} [labelDetectionConfig] VideoContext labelDetectionConfig * @property {google.cloud.videointelligence.v1p3beta1.IShotChangeDetectionConfig|null} [shotChangeDetectionConfig] VideoContext shotChangeDetectionConfig * @property {google.cloud.videointelligence.v1p3beta1.IExplicitContentDetectionConfig|null} [explicitContentDetectionConfig] VideoContext explicitContentDetectionConfig + * @property {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig|null} [faceDetectionConfig] VideoContext faceDetectionConfig * @property {google.cloud.videointelligence.v1p3beta1.ISpeechTranscriptionConfig|null} [speechTranscriptionConfig] VideoContext speechTranscriptionConfig * @property {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig|null} [textDetectionConfig] VideoContext textDetectionConfig + * @property {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig|null} [personDetectionConfig] VideoContext personDetectionConfig * @property {google.cloud.videointelligence.v1p3beta1.IObjectTrackingConfig|null} [objectTrackingConfig] VideoContext objectTrackingConfig */ @@ -27383,6 +27395,14 @@ */ VideoContext.prototype.explicitContentDetectionConfig = null; + /** + * VideoContext faceDetectionConfig. + * @member {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig|null|undefined} faceDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.VideoContext + * @instance + */ + VideoContext.prototype.faceDetectionConfig = null; + /** * VideoContext speechTranscriptionConfig. * @member {google.cloud.videointelligence.v1p3beta1.ISpeechTranscriptionConfig|null|undefined} speechTranscriptionConfig @@ -27399,6 +27419,14 @@ */ VideoContext.prototype.textDetectionConfig = null; + /** + * VideoContext personDetectionConfig. + * @member {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig|null|undefined} personDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.VideoContext + * @instance + */ + VideoContext.prototype.personDetectionConfig = null; + /** * VideoContext objectTrackingConfig. 
* @member {google.cloud.videointelligence.v1p3beta1.IObjectTrackingConfig|null|undefined} objectTrackingConfig @@ -27440,10 +27468,14 @@ $root.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig.encode(message.shotChangeDetectionConfig, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); if (message.explicitContentDetectionConfig != null && message.hasOwnProperty("explicitContentDetectionConfig")) $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig.encode(message.explicitContentDetectionConfig, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.faceDetectionConfig != null && message.hasOwnProperty("faceDetectionConfig")) + $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.encode(message.faceDetectionConfig, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.speechTranscriptionConfig != null && message.hasOwnProperty("speechTranscriptionConfig")) $root.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig.encode(message.speechTranscriptionConfig, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); if (message.textDetectionConfig != null && message.hasOwnProperty("textDetectionConfig")) $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.encode(message.textDetectionConfig, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.personDetectionConfig != null && message.hasOwnProperty("personDetectionConfig")) + $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.encode(message.personDetectionConfig, writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); if (message.objectTrackingConfig != null && message.hasOwnProperty("objectTrackingConfig")) $root.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig.encode(message.objectTrackingConfig, writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); return writer; @@ -27494,12 +27526,18 @@ case 4: message.explicitContentDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig.decode(reader, reader.uint32()); break; + case 5: + message.faceDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.decode(reader, reader.uint32()); + break; case 6: message.speechTranscriptionConfig = $root.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig.decode(reader, reader.uint32()); break; case 8: message.textDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.decode(reader, reader.uint32()); break; + case 11: + message.personDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.decode(reader, reader.uint32()); + break; case 13: message.objectTrackingConfig = $root.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig.decode(reader, reader.uint32()); break; @@ -27562,6 +27600,11 @@ if (error) return "explicitContentDetectionConfig." + error; } + if (message.faceDetectionConfig != null && message.hasOwnProperty("faceDetectionConfig")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify(message.faceDetectionConfig); + if (error) + return "faceDetectionConfig." + error; + } if (message.speechTranscriptionConfig != null && message.hasOwnProperty("speechTranscriptionConfig")) { var error = $root.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig.verify(message.speechTranscriptionConfig); if (error) @@ -27572,6 +27615,11 @@ if (error) return "textDetectionConfig." 
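// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. VideoContext now carries configs
// for the new detectors (faceDetectionConfig on proto field 5,
// personDetectionConfig on field 11, per the encoder above), and the
// regenerated codec round-trips them. Import path is an assumption.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

const VideoContext = google.cloud.videointelligence.v1p3beta1.VideoContext;
const context = VideoContext.fromObject({
  faceDetectionConfig: { includeBoundingBoxes: true },
  personDetectionConfig: { includePoseLandmarks: true },
});
const bytes = VideoContext.encode(context).finish(); // Uint8Array
const roundTripped = VideoContext.decode(bytes);
console.log(VideoContext.verify(VideoContext.toObject(roundTripped)) === null); // expected: true
// ---------------------------------------------------------------------------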
+ error; } + if (message.personDetectionConfig != null && message.hasOwnProperty("personDetectionConfig")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.verify(message.personDetectionConfig); + if (error) + return "personDetectionConfig." + error; + } if (message.objectTrackingConfig != null && message.hasOwnProperty("objectTrackingConfig")) { var error = $root.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig.verify(message.objectTrackingConfig); if (error) @@ -27617,6 +27665,11 @@ throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.explicitContentDetectionConfig: object expected"); message.explicitContentDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig.fromObject(object.explicitContentDetectionConfig); } + if (object.faceDetectionConfig != null) { + if (typeof object.faceDetectionConfig !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.faceDetectionConfig: object expected"); + message.faceDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.fromObject(object.faceDetectionConfig); + } if (object.speechTranscriptionConfig != null) { if (typeof object.speechTranscriptionConfig !== "object") throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.speechTranscriptionConfig: object expected"); @@ -27627,6 +27680,11 @@ throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.textDetectionConfig: object expected"); message.textDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.fromObject(object.textDetectionConfig); } + if (object.personDetectionConfig != null) { + if (typeof object.personDetectionConfig !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.personDetectionConfig: object expected"); + message.personDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.fromObject(object.personDetectionConfig); + } if (object.objectTrackingConfig != null) { if (typeof object.objectTrackingConfig !== "object") throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoContext.objectTrackingConfig: object expected"); @@ -27654,8 +27712,10 @@ object.labelDetectionConfig = null; object.shotChangeDetectionConfig = null; object.explicitContentDetectionConfig = null; + object.faceDetectionConfig = null; object.speechTranscriptionConfig = null; object.textDetectionConfig = null; + object.personDetectionConfig = null; object.objectTrackingConfig = null; } if (message.segments && message.segments.length) { @@ -27669,10 +27729,14 @@ object.shotChangeDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig.toObject(message.shotChangeDetectionConfig, options); if (message.explicitContentDetectionConfig != null && message.hasOwnProperty("explicitContentDetectionConfig")) object.explicitContentDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig.toObject(message.explicitContentDetectionConfig, options); + if (message.faceDetectionConfig != null && message.hasOwnProperty("faceDetectionConfig")) + object.faceDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.toObject(message.faceDetectionConfig, options); if (message.speechTranscriptionConfig != null && message.hasOwnProperty("speechTranscriptionConfig")) object.speechTranscriptionConfig = 
$root.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig.toObject(message.speechTranscriptionConfig, options); if (message.textDetectionConfig != null && message.hasOwnProperty("textDetectionConfig")) object.textDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.toObject(message.textDetectionConfig, options); + if (message.personDetectionConfig != null && message.hasOwnProperty("personDetectionConfig")) + object.personDetectionConfig = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.toObject(message.personDetectionConfig, options); if (message.objectTrackingConfig != null && message.hasOwnProperty("objectTrackingConfig")) object.objectTrackingConfig = $root.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig.toObject(message.objectTrackingConfig, options); return object; @@ -28552,26 +28616,26 @@ return ExplicitContentDetectionConfig; })(); - v1p3beta1.TextDetectionConfig = (function() { + v1p3beta1.FaceDetectionConfig = (function() { /** - * Properties of a TextDetectionConfig. + * Properties of a FaceDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ITextDetectionConfig - * @property {Array.|null} [languageHints] TextDetectionConfig languageHints - * @property {string|null} [model] TextDetectionConfig model + * @interface IFaceDetectionConfig + * @property {string|null} [model] FaceDetectionConfig model + * @property {boolean|null} [includeBoundingBoxes] FaceDetectionConfig includeBoundingBoxes + * @property {boolean|null} [includeAttributes] FaceDetectionConfig includeAttributes */ /** - * Constructs a new TextDetectionConfig. + * Constructs a new FaceDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a TextDetectionConfig. - * @implements ITextDetectionConfig + * @classdesc Represents a FaceDetectionConfig. + * @implements IFaceDetectionConfig * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig=} [properties] Properties to set */ - function TextDetectionConfig(properties) { - this.languageHints = []; + function FaceDetectionConfig(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -28579,91 +28643,101 @@ } /** - * TextDetectionConfig languageHints. - * @member {Array.} languageHints - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * FaceDetectionConfig model. + * @member {string} model + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @instance */ - TextDetectionConfig.prototype.languageHints = $util.emptyArray; + FaceDetectionConfig.prototype.model = ""; /** - * TextDetectionConfig model. - * @member {string} model - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * FaceDetectionConfig includeBoundingBoxes. + * @member {boolean} includeBoundingBoxes + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @instance */ - TextDetectionConfig.prototype.model = ""; + FaceDetectionConfig.prototype.includeBoundingBoxes = false; /** - * Creates a new TextDetectionConfig instance using the specified properties. + * FaceDetectionConfig includeAttributes. 
+ * @member {boolean} includeAttributes + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig + * @instance + */ + FaceDetectionConfig.prototype.includeAttributes = false; + + /** + * Creates a new FaceDetectionConfig instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig instance + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} FaceDetectionConfig instance */ - TextDetectionConfig.create = function create(properties) { - return new TextDetectionConfig(properties); + FaceDetectionConfig.create = function create(properties) { + return new FaceDetectionConfig(properties); }; /** - * Encodes the specified TextDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.verify|verify} messages. + * Encodes the specified FaceDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig} message TextDetectionConfig message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig} message FaceDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TextDetectionConfig.encode = function encode(message, writer) { + FaceDetectionConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.languageHints != null && message.languageHints.length) - for (var i = 0; i < message.languageHints.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.languageHints[i]); if (message.model != null && message.hasOwnProperty("model")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.model); + writer.uint32(/* id 1, wireType 2 =*/10).string(message.model); + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.includeBoundingBoxes); + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.includeAttributes); return writer; }; /** - * Encodes the specified TextDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.verify|verify} messages. + * Encodes the specified FaceDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify|verify} messages. 
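// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. FaceDetectionConfig carries a
// model name plus includeBoundingBoxes/includeAttributes flags (proto fields
// 1, 2 and 5 per the encoder above). The model string and import path are
// assumptions.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

const faceConfig = google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.create({
  model: 'builtin/stable',   // hypothetical model identifier
  includeBoundingBoxes: true,
  includeAttributes: false,
});
const problem = google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.verify(faceConfig);
if (problem) {
  throw new Error('invalid FaceDetectionConfig: ' + problem);
}
// ---------------------------------------------------------------------------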
* @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig} message TextDetectionConfig message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionConfig} message FaceDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TextDetectionConfig.encodeDelimited = function encodeDelimited(message, writer) { + FaceDetectionConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TextDetectionConfig message from the specified reader or buffer. + * Decodes a FaceDetectionConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} FaceDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TextDetectionConfig.decode = function decode(reader, length) { + FaceDetectionConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - if (!(message.languageHints && message.languageHints.length)) - message.languageHints = []; - message.languageHints.push(reader.string()); + message.model = reader.string(); break; case 2: - message.model = reader.string(); + message.includeBoundingBoxes = reader.bool(); + break; + case 5: + message.includeAttributes = reader.bool(); break; default: reader.skipType(tag & 7); @@ -28674,129 +28748,126 @@ }; /** - * Decodes a TextDetectionConfig message from the specified reader or buffer, length delimited. + * Decodes a FaceDetectionConfig message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} FaceDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TextDetectionConfig.decodeDelimited = function decodeDelimited(reader) { + FaceDetectionConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TextDetectionConfig message. + * Verifies a FaceDetectionConfig message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TextDetectionConfig.verify = function verify(message) { + FaceDetectionConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.languageHints != null && message.hasOwnProperty("languageHints")) { - if (!Array.isArray(message.languageHints)) - return "languageHints: array expected"; - for (var i = 0; i < message.languageHints.length; ++i) - if (!$util.isString(message.languageHints[i])) - return "languageHints: string[] expected"; - } if (message.model != null && message.hasOwnProperty("model")) if (!$util.isString(message.model)) return "model: string expected"; + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + if (typeof message.includeBoundingBoxes !== "boolean") + return "includeBoundingBoxes: boolean expected"; + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + if (typeof message.includeAttributes !== "boolean") + return "includeAttributes: boolean expected"; return null; }; /** - * Creates a TextDetectionConfig message from a plain object. Also converts values to their respective internal types. + * Creates a FaceDetectionConfig message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} FaceDetectionConfig */ - TextDetectionConfig.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig) + FaceDetectionConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig(); - if (object.languageHints) { - if (!Array.isArray(object.languageHints)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.languageHints: array expected"); - message.languageHints = []; - for (var i = 0; i < object.languageHints.length; ++i) - message.languageHints[i] = String(object.languageHints[i]); - } + var message = new $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig(); if (object.model != null) message.model = String(object.model); + if (object.includeBoundingBoxes != null) + message.includeBoundingBoxes = Boolean(object.includeBoundingBoxes); + if (object.includeAttributes != null) + message.includeAttributes = Boolean(object.includeAttributes); return message; }; /** - * Creates a plain object from a TextDetectionConfig message. Also converts values to other types if specified. + * Creates a plain object from a FaceDetectionConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} message TextDetectionConfig + * @param {google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} message FaceDetectionConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TextDetectionConfig.toObject = function toObject(message, options) { + FaceDetectionConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.languageHints = []; - if (options.defaults) + if (options.defaults) { object.model = ""; - if (message.languageHints && message.languageHints.length) { - object.languageHints = []; - for (var j = 0; j < message.languageHints.length; ++j) - object.languageHints[j] = message.languageHints[j]; + object.includeBoundingBoxes = false; + object.includeAttributes = false; } if (message.model != null && message.hasOwnProperty("model")) object.model = message.model; + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + object.includeBoundingBoxes = message.includeBoundingBoxes; + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + object.includeAttributes = message.includeAttributes; return object; }; /** - * Converts this TextDetectionConfig to JSON. + * Converts this FaceDetectionConfig to JSON. 
* @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig * @instance * @returns {Object.} JSON object */ - TextDetectionConfig.prototype.toJSON = function toJSON() { + FaceDetectionConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return TextDetectionConfig; + return FaceDetectionConfig; })(); - v1p3beta1.VideoSegment = (function() { + v1p3beta1.PersonDetectionConfig = (function() { /** - * Properties of a VideoSegment. + * Properties of a PersonDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface IVideoSegment - * @property {google.protobuf.IDuration|null} [startTimeOffset] VideoSegment startTimeOffset - * @property {google.protobuf.IDuration|null} [endTimeOffset] VideoSegment endTimeOffset + * @interface IPersonDetectionConfig + * @property {boolean|null} [includeBoundingBoxes] PersonDetectionConfig includeBoundingBoxes + * @property {boolean|null} [includePoseLandmarks] PersonDetectionConfig includePoseLandmarks + * @property {boolean|null} [includeAttributes] PersonDetectionConfig includeAttributes */ /** - * Constructs a new VideoSegment. + * Constructs a new PersonDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a VideoSegment. - * @implements IVideoSegment + * @classdesc Represents a PersonDetectionConfig. + * @implements IPersonDetectionConfig * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig=} [properties] Properties to set */ - function VideoSegment(properties) { + function PersonDetectionConfig(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -28804,88 +28875,101 @@ } /** - * VideoSegment startTimeOffset. - * @member {google.protobuf.IDuration|null|undefined} startTimeOffset - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * PersonDetectionConfig includeBoundingBoxes. + * @member {boolean} includeBoundingBoxes + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @instance */ - VideoSegment.prototype.startTimeOffset = null; + PersonDetectionConfig.prototype.includeBoundingBoxes = false; /** - * VideoSegment endTimeOffset. - * @member {google.protobuf.IDuration|null|undefined} endTimeOffset - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * PersonDetectionConfig includePoseLandmarks. + * @member {boolean} includePoseLandmarks + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @instance */ - VideoSegment.prototype.endTimeOffset = null; + PersonDetectionConfig.prototype.includePoseLandmarks = false; /** - * Creates a new VideoSegment instance using the specified properties. + * PersonDetectionConfig includeAttributes. + * @member {boolean} includeAttributes + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig + * @instance + */ + PersonDetectionConfig.prototype.includeAttributes = false; + + /** + * Creates a new PersonDetectionConfig instance using the specified properties. 
* @function create - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment instance + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} PersonDetectionConfig instance */ - VideoSegment.create = function create(properties) { - return new VideoSegment(properties); + PersonDetectionConfig.create = function create(properties) { + return new PersonDetectionConfig(properties); }; /** - * Encodes the specified VideoSegment message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.VideoSegment.verify|verify} messages. + * Encodes the specified PersonDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment} message VideoSegment message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig} message PersonDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VideoSegment.encode = function encode(message, writer) { + PersonDetectionConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) - $root.google.protobuf.Duration.encode(message.startTimeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) - $root.google.protobuf.Duration.encode(message.endTimeOffset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.includeBoundingBoxes); + if (message.includePoseLandmarks != null && message.hasOwnProperty("includePoseLandmarks")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.includePoseLandmarks); + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.includeAttributes); return writer; }; /** - * Encodes the specified VideoSegment message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.VideoSegment.verify|verify} messages. + * Encodes the specified PersonDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.verify|verify} messages. 
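// ---------------------------------------------------------------------------
// Editor's illustration, not generated code. PersonDetectionConfig exposes
// three booleans (proto fields 1-3 per the encoder above); pose landmarks,
// when requested, are surfaced through the new TimestampedObject.landmarks
// field added earlier in this patch. Import path is an assumption.
import { google } from '@google-cloud/video-intelligence/build/protos/protos';

const PersonDetectionConfig = google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
const personConfig = PersonDetectionConfig.fromObject({
  includeBoundingBoxes: true,
  includePoseLandmarks: true,
  includeAttributes: false,
});
const encoded = PersonDetectionConfig.encode(personConfig).finish();
console.log(encoded.length > 0); // expected: true, the set booleans are written
// ---------------------------------------------------------------------------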
* @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment} message VideoSegment message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionConfig} message PersonDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VideoSegment.encodeDelimited = function encodeDelimited(message, writer) { + PersonDetectionConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VideoSegment message from the specified reader or buffer. + * Decodes a PersonDetectionConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} PersonDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VideoSegment.decode = function decode(reader, length) { + PersonDetectionConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.VideoSegment(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.startTimeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + message.includeBoundingBoxes = reader.bool(); break; case 2: - message.endTimeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + message.includePoseLandmarks = reader.bool(); + break; + case 3: + message.includeAttributes = reader.bool(); break; default: reader.skipType(tag & 7); @@ -28896,127 +28980,126 @@ }; /** - * Decodes a VideoSegment message from the specified reader or buffer, length delimited. + * Decodes a PersonDetectionConfig message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} PersonDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VideoSegment.decodeDelimited = function decodeDelimited(reader) { + PersonDetectionConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VideoSegment message. + * Verifies a PersonDetectionConfig message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VideoSegment.verify = function verify(message) { + PersonDetectionConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) { - var error = $root.google.protobuf.Duration.verify(message.startTimeOffset); - if (error) - return "startTimeOffset." + error; - } - if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) { - var error = $root.google.protobuf.Duration.verify(message.endTimeOffset); - if (error) - return "endTimeOffset." + error; - } + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + if (typeof message.includeBoundingBoxes !== "boolean") + return "includeBoundingBoxes: boolean expected"; + if (message.includePoseLandmarks != null && message.hasOwnProperty("includePoseLandmarks")) + if (typeof message.includePoseLandmarks !== "boolean") + return "includePoseLandmarks: boolean expected"; + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + if (typeof message.includeAttributes !== "boolean") + return "includeAttributes: boolean expected"; return null; }; /** - * Creates a VideoSegment message from a plain object. Also converts values to their respective internal types. + * Creates a PersonDetectionConfig message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} PersonDetectionConfig */ - VideoSegment.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.VideoSegment) + PersonDetectionConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.VideoSegment(); - if (object.startTimeOffset != null) { - if (typeof object.startTimeOffset !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoSegment.startTimeOffset: object expected"); - message.startTimeOffset = $root.google.protobuf.Duration.fromObject(object.startTimeOffset); - } - if (object.endTimeOffset != null) { - if (typeof object.endTimeOffset !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoSegment.endTimeOffset: object expected"); - message.endTimeOffset = $root.google.protobuf.Duration.fromObject(object.endTimeOffset); - } + var message = new $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig(); + if (object.includeBoundingBoxes != null) + message.includeBoundingBoxes = Boolean(object.includeBoundingBoxes); + if (object.includePoseLandmarks != null) + message.includePoseLandmarks = Boolean(object.includePoseLandmarks); + if (object.includeAttributes != null) + message.includeAttributes = Boolean(object.includeAttributes); return message; }; /** - * Creates a plain object from a VideoSegment message. Also converts values to other types if specified. + * Creates a plain object from a PersonDetectionConfig message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.VideoSegment} message VideoSegment + * @param {google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} message PersonDetectionConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VideoSegment.toObject = function toObject(message, options) { + PersonDetectionConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.startTimeOffset = null; - object.endTimeOffset = null; + object.includeBoundingBoxes = false; + object.includePoseLandmarks = false; + object.includeAttributes = false; } - if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) - object.startTimeOffset = $root.google.protobuf.Duration.toObject(message.startTimeOffset, options); - if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) - object.endTimeOffset = $root.google.protobuf.Duration.toObject(message.endTimeOffset, options); + if (message.includeBoundingBoxes != null && message.hasOwnProperty("includeBoundingBoxes")) + object.includeBoundingBoxes = message.includeBoundingBoxes; + if (message.includePoseLandmarks != null && message.hasOwnProperty("includePoseLandmarks")) + object.includePoseLandmarks = message.includePoseLandmarks; + if (message.includeAttributes != null && message.hasOwnProperty("includeAttributes")) + object.includeAttributes = message.includeAttributes; return object; }; /** - * Converts this VideoSegment to JSON. + * Converts this PersonDetectionConfig to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig * @instance * @returns {Object.} JSON object */ - VideoSegment.prototype.toJSON = function toJSON() { + PersonDetectionConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return VideoSegment; + return PersonDetectionConfig; })(); - v1p3beta1.LabelSegment = (function() { + v1p3beta1.TextDetectionConfig = (function() { /** - * Properties of a LabelSegment. + * Properties of a TextDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ILabelSegment - * @property {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null} [segment] LabelSegment segment - * @property {number|null} [confidence] LabelSegment confidence + * @interface ITextDetectionConfig + * @property {Array.|null} [languageHints] TextDetectionConfig languageHints + * @property {string|null} [model] TextDetectionConfig model */ /** - * Constructs a new LabelSegment. + * Constructs a new TextDetectionConfig. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a LabelSegment. - * @implements ILabelSegment + * @classdesc Represents a TextDetectionConfig. 
+ * @implements ITextDetectionConfig * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig=} [properties] Properties to set */ - function LabelSegment(properties) { + function TextDetectionConfig(properties) { + this.languageHints = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29024,88 +29107,91 @@ } /** - * LabelSegment segment. - * @member {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null|undefined} segment - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * TextDetectionConfig languageHints. + * @member {Array.} languageHints + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @instance */ - LabelSegment.prototype.segment = null; + TextDetectionConfig.prototype.languageHints = $util.emptyArray; /** - * LabelSegment confidence. - * @member {number} confidence - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * TextDetectionConfig model. + * @member {string} model + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @instance */ - LabelSegment.prototype.confidence = 0; + TextDetectionConfig.prototype.model = ""; /** - * Creates a new LabelSegment instance using the specified properties. + * Creates a new TextDetectionConfig instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment instance + * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig instance */ - LabelSegment.create = function create(properties) { - return new LabelSegment(properties); + TextDetectionConfig.create = function create(properties) { + return new TextDetectionConfig(properties); }; /** - * Encodes the specified LabelSegment message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelSegment.verify|verify} messages. + * Encodes the specified TextDetectionConfig message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment} message LabelSegment message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig} message TextDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelSegment.encode = function encode(message, writer) { + TextDetectionConfig.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.segment != null && message.hasOwnProperty("segment")) - $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.encode(message.segment, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.confidence != null && message.hasOwnProperty("confidence")) - writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); + if (message.languageHints != null && message.languageHints.length) + for (var i = 0; i < message.languageHints.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.languageHints[i]); + if (message.model != null && message.hasOwnProperty("model")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.model); return writer; }; /** - * Encodes the specified LabelSegment message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelSegment.verify|verify} messages. + * Encodes the specified TextDetectionConfig message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment} message LabelSegment message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITextDetectionConfig} message TextDetectionConfig message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelSegment.encodeDelimited = function encodeDelimited(message, writer) { + TextDetectionConfig.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LabelSegment message from the specified reader or buffer. + * Decodes a TextDetectionConfig message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment + * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelSegment.decode = function decode(reader, length) { + TextDetectionConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelSegment(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.decode(reader, reader.uint32()); + if (!(message.languageHints && message.languageHints.length)) + message.languageHints = []; + message.languageHints.push(reader.string()); break; case 2: - message.confidence = reader.float(); + message.model = reader.string(); break; default: reader.skipType(tag & 7); @@ -29116,122 +29202,129 @@ }; /** - * Decodes a LabelSegment message from the specified reader or buffer, length delimited. + * Decodes a TextDetectionConfig message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment + * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelSegment.decodeDelimited = function decodeDelimited(reader) { + TextDetectionConfig.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LabelSegment message. + * Verifies a TextDetectionConfig message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LabelSegment.verify = function verify(message) { + TextDetectionConfig.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.segment != null && message.hasOwnProperty("segment")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.verify(message.segment); - if (error) - return "segment." + error; + if (message.languageHints != null && message.hasOwnProperty("languageHints")) { + if (!Array.isArray(message.languageHints)) + return "languageHints: array expected"; + for (var i = 0; i < message.languageHints.length; ++i) + if (!$util.isString(message.languageHints[i])) + return "languageHints: string[] expected"; } - if (message.confidence != null && message.hasOwnProperty("confidence")) - if (typeof message.confidence !== "number") - return "confidence: number expected"; + if (message.model != null && message.hasOwnProperty("model")) + if (!$util.isString(message.model)) + return "model: string expected"; return null; }; /** - * Creates a LabelSegment message from a plain object. Also converts values to their respective internal types. + * Creates a TextDetectionConfig message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment + * @returns {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} TextDetectionConfig */ - LabelSegment.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelSegment) + TextDetectionConfig.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelSegment(); - if (object.segment != null) { - if (typeof object.segment !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelSegment.segment: object expected"); - message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.fromObject(object.segment); + var message = new $root.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig(); + if (object.languageHints) { + if (!Array.isArray(object.languageHints)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TextDetectionConfig.languageHints: array expected"); + message.languageHints = []; + for (var i = 0; i < object.languageHints.length; ++i) + message.languageHints[i] = String(object.languageHints[i]); } - if (object.confidence != null) - message.confidence = Number(object.confidence); + if (object.model != null) + message.model = String(object.model); return message; }; /** - * Creates a plain object from a LabelSegment message. Also converts values to other types if specified. + * Creates a plain object from a TextDetectionConfig message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @static - * @param {google.cloud.videointelligence.v1p3beta1.LabelSegment} message LabelSegment + * @param {google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} message TextDetectionConfig * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LabelSegment.toObject = function toObject(message, options) { + TextDetectionConfig.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.defaults) { - object.segment = null; - object.confidence = 0; + if (options.arrays || options.defaults) + object.languageHints = []; + if (options.defaults) + object.model = ""; + if (message.languageHints && message.languageHints.length) { + object.languageHints = []; + for (var j = 0; j < message.languageHints.length; ++j) + object.languageHints[j] = message.languageHints[j]; } - if (message.segment != null && message.hasOwnProperty("segment")) - object.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.toObject(message.segment, options); - if (message.confidence != null && message.hasOwnProperty("confidence")) - object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + if (message.model != null && message.hasOwnProperty("model")) + object.model = message.model; return object; }; /** - * Converts this LabelSegment to JSON. + * Converts this TextDetectionConfig to JSON. 
* @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment + * @memberof google.cloud.videointelligence.v1p3beta1.TextDetectionConfig * @instance * @returns {Object.} JSON object */ - LabelSegment.prototype.toJSON = function toJSON() { + TextDetectionConfig.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return LabelSegment; + return TextDetectionConfig; })(); - v1p3beta1.LabelFrame = (function() { + v1p3beta1.VideoSegment = (function() { /** - * Properties of a LabelFrame. + * Properties of a VideoSegment. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ILabelFrame - * @property {google.protobuf.IDuration|null} [timeOffset] LabelFrame timeOffset - * @property {number|null} [confidence] LabelFrame confidence + * @interface IVideoSegment + * @property {google.protobuf.IDuration|null} [startTimeOffset] VideoSegment startTimeOffset + * @property {google.protobuf.IDuration|null} [endTimeOffset] VideoSegment endTimeOffset */ /** - * Constructs a new LabelFrame. + * Constructs a new VideoSegment. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a LabelFrame. - * @implements ILabelFrame + * @classdesc Represents a VideoSegment. + * @implements IVideoSegment * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment=} [properties] Properties to set */ - function LabelFrame(properties) { + function VideoSegment(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29239,88 +29332,88 @@ } /** - * LabelFrame timeOffset. - * @member {google.protobuf.IDuration|null|undefined} timeOffset - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * VideoSegment startTimeOffset. + * @member {google.protobuf.IDuration|null|undefined} startTimeOffset + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @instance */ - LabelFrame.prototype.timeOffset = null; + VideoSegment.prototype.startTimeOffset = null; /** - * LabelFrame confidence. - * @member {number} confidence - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * VideoSegment endTimeOffset. + * @member {google.protobuf.IDuration|null|undefined} endTimeOffset + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @instance */ - LabelFrame.prototype.confidence = 0; + VideoSegment.prototype.endTimeOffset = null; /** - * Creates a new LabelFrame instance using the specified properties. + * Creates a new VideoSegment instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame instance + * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment instance */ - LabelFrame.create = function create(properties) { - return new LabelFrame(properties); + VideoSegment.create = function create(properties) { + return new VideoSegment(properties); }; /** - * Encodes the specified LabelFrame message. 
Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelFrame.verify|verify} messages. + * Encodes the specified VideoSegment message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.VideoSegment.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame} message LabelFrame message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment} message VideoSegment message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelFrame.encode = function encode(message, writer) { + VideoSegment.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.confidence != null && message.hasOwnProperty("confidence")) - writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); + if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) + $root.google.protobuf.Duration.encode(message.startTimeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) + $root.google.protobuf.Duration.encode(message.endTimeOffset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified LabelFrame message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelFrame.verify|verify} messages. + * Encodes the specified VideoSegment message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.VideoSegment.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame} message LabelFrame message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IVideoSegment} message VideoSegment message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelFrame.encodeDelimited = function encodeDelimited(message, writer) { + VideoSegment.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LabelFrame message from the specified reader or buffer. + * Decodes a VideoSegment message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame + * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelFrame.decode = function decode(reader, length) { + VideoSegment.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelFrame(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.VideoSegment(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + message.startTimeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; case 2: - message.confidence = reader.float(); + message.endTimeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; default: reader.skipType(tag & 7); @@ -29331,123 +29424,127 @@ }; /** - * Decodes a LabelFrame message from the specified reader or buffer, length delimited. + * Decodes a VideoSegment message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame + * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelFrame.decodeDelimited = function decodeDelimited(reader) { + VideoSegment.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LabelFrame message. + * Verifies a VideoSegment message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LabelFrame.verify = function verify(message) { + VideoSegment.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { - var error = $root.google.protobuf.Duration.verify(message.timeOffset); + if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.startTimeOffset); if (error) - return "timeOffset." + error; + return "startTimeOffset." 
+ error; + } + if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.endTimeOffset); + if (error) + return "endTimeOffset." + error; } - if (message.confidence != null && message.hasOwnProperty("confidence")) - if (typeof message.confidence !== "number") - return "confidence: number expected"; return null; }; /** - * Creates a LabelFrame message from a plain object. Also converts values to their respective internal types. + * Creates a VideoSegment message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame + * @returns {google.cloud.videointelligence.v1p3beta1.VideoSegment} VideoSegment */ - LabelFrame.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelFrame) + VideoSegment.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.VideoSegment) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelFrame(); - if (object.timeOffset != null) { - if (typeof object.timeOffset !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelFrame.timeOffset: object expected"); - message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); + var message = new $root.google.cloud.videointelligence.v1p3beta1.VideoSegment(); + if (object.startTimeOffset != null) { + if (typeof object.startTimeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoSegment.startTimeOffset: object expected"); + message.startTimeOffset = $root.google.protobuf.Duration.fromObject(object.startTimeOffset); + } + if (object.endTimeOffset != null) { + if (typeof object.endTimeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoSegment.endTimeOffset: object expected"); + message.endTimeOffset = $root.google.protobuf.Duration.fromObject(object.endTimeOffset); } - if (object.confidence != null) - message.confidence = Number(object.confidence); return message; }; /** - * Creates a plain object from a LabelFrame message. Also converts values to other types if specified. + * Creates a plain object from a VideoSegment message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.LabelFrame} message LabelFrame + * @param {google.cloud.videointelligence.v1p3beta1.VideoSegment} message VideoSegment * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LabelFrame.toObject = function toObject(message, options) { + VideoSegment.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.timeOffset = null; - object.confidence = 0; + object.startTimeOffset = null; + object.endTimeOffset = null; } - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); - if (message.confidence != null && message.hasOwnProperty("confidence")) - object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + if (message.startTimeOffset != null && message.hasOwnProperty("startTimeOffset")) + object.startTimeOffset = $root.google.protobuf.Duration.toObject(message.startTimeOffset, options); + if (message.endTimeOffset != null && message.hasOwnProperty("endTimeOffset")) + object.endTimeOffset = $root.google.protobuf.Duration.toObject(message.endTimeOffset, options); return object; }; /** - * Converts this LabelFrame to JSON. + * Converts this VideoSegment to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame + * @memberof google.cloud.videointelligence.v1p3beta1.VideoSegment * @instance * @returns {Object.} JSON object */ - LabelFrame.prototype.toJSON = function toJSON() { + VideoSegment.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return LabelFrame; + return VideoSegment; })(); - v1p3beta1.Entity = (function() { + v1p3beta1.LabelSegment = (function() { /** - * Properties of an Entity. + * Properties of a LabelSegment. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface IEntity - * @property {string|null} [entityId] Entity entityId - * @property {string|null} [description] Entity description - * @property {string|null} [languageCode] Entity languageCode + * @interface ILabelSegment + * @property {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null} [segment] LabelSegment segment + * @property {number|null} [confidence] LabelSegment confidence */ /** - * Constructs a new Entity. + * Constructs a new LabelSegment. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents an Entity. - * @implements IEntity + * @classdesc Represents a LabelSegment. + * @implements ILabelSegment * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.IEntity=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment=} [properties] Properties to set */ - function Entity(properties) { + function LabelSegment(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29455,101 +29552,88 @@ } /** - * Entity entityId. - * @member {string} entityId - * @memberof google.cloud.videointelligence.v1p3beta1.Entity - * @instance - */ - Entity.prototype.entityId = ""; - - /** - * Entity description. 
- * @member {string} description - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * LabelSegment segment. + * @member {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null|undefined} segment + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @instance */ - Entity.prototype.description = ""; + LabelSegment.prototype.segment = null; /** - * Entity languageCode. - * @member {string} languageCode - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * LabelSegment confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @instance */ - Entity.prototype.languageCode = ""; + LabelSegment.prototype.confidence = 0; /** - * Creates a new Entity instance using the specified properties. + * Creates a new LabelSegment instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.IEntity=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity instance + * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment instance */ - Entity.create = function create(properties) { - return new Entity(properties); + LabelSegment.create = function create(properties) { + return new LabelSegment(properties); }; /** - * Encodes the specified Entity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Entity.verify|verify} messages. + * Encodes the specified LabelSegment message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelSegment.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.IEntity} message Entity message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment} message LabelSegment message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Entity.encode = function encode(message, writer) { + LabelSegment.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.entityId != null && message.hasOwnProperty("entityId")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.entityId); - if (message.description != null && message.hasOwnProperty("description")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.description); - if (message.languageCode != null && message.hasOwnProperty("languageCode")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.languageCode); + if (message.segment != null && message.hasOwnProperty("segment")) + $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.encode(message.segment, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); return writer; }; /** - * Encodes the specified Entity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Entity.verify|verify} messages. + * Encodes the specified LabelSegment message, length delimited. 
Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelSegment.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.IEntity} message Entity message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelSegment} message LabelSegment message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Entity.encodeDelimited = function encodeDelimited(message, writer) { + LabelSegment.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an Entity message from the specified reader or buffer. + * Decodes a LabelSegment message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity + * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Entity.decode = function decode(reader, length) { + LabelSegment.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Entity(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelSegment(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.entityId = reader.string(); + message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.decode(reader, reader.uint32()); break; case 2: - message.description = reader.string(); - break; - case 3: - message.languageCode = reader.string(); + message.confidence = reader.float(); break; default: reader.skipType(tag & 7); @@ -29560,130 +29644,122 @@ }; /** - * Decodes an Entity message from the specified reader or buffer, length delimited. + * Decodes a LabelSegment message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity + * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Entity.decodeDelimited = function decodeDelimited(reader) { + LabelSegment.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an Entity message. + * Verifies a LabelSegment message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Entity.verify = function verify(message) { + LabelSegment.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.entityId != null && message.hasOwnProperty("entityId")) - if (!$util.isString(message.entityId)) - return "entityId: string expected"; - if (message.description != null && message.hasOwnProperty("description")) - if (!$util.isString(message.description)) - return "description: string expected"; - if (message.languageCode != null && message.hasOwnProperty("languageCode")) - if (!$util.isString(message.languageCode)) - return "languageCode: string expected"; + if (message.segment != null && message.hasOwnProperty("segment")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.verify(message.segment); + if (error) + return "segment." + error; + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; return null; }; /** - * Creates an Entity message from a plain object. Also converts values to their respective internal types. + * Creates a LabelSegment message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity + * @returns {google.cloud.videointelligence.v1p3beta1.LabelSegment} LabelSegment */ - Entity.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Entity) + LabelSegment.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelSegment) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.Entity(); - if (object.entityId != null) - message.entityId = String(object.entityId); - if (object.description != null) - message.description = String(object.description); - if (object.languageCode != null) - message.languageCode = String(object.languageCode); + var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelSegment(); + if (object.segment != null) { + if (typeof object.segment !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelSegment.segment: object expected"); + message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.fromObject(object.segment); + } + if (object.confidence != null) + message.confidence = Number(object.confidence); return message; }; /** - * Creates a plain object from an Entity message. Also converts values to other types if specified. + * Creates a plain object from a LabelSegment message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @static - * @param {google.cloud.videointelligence.v1p3beta1.Entity} message Entity + * @param {google.cloud.videointelligence.v1p3beta1.LabelSegment} message LabelSegment * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Entity.toObject = function toObject(message, options) { + LabelSegment.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.entityId = ""; - object.description = ""; - object.languageCode = ""; + object.segment = null; + object.confidence = 0; } - if (message.entityId != null && message.hasOwnProperty("entityId")) - object.entityId = message.entityId; - if (message.description != null && message.hasOwnProperty("description")) - object.description = message.description; - if (message.languageCode != null && message.hasOwnProperty("languageCode")) - object.languageCode = message.languageCode; + if (message.segment != null && message.hasOwnProperty("segment")) + object.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.toObject(message.segment, options); + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; return object; }; /** - * Converts this Entity to JSON. + * Converts this LabelSegment to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelSegment * @instance * @returns {Object.} JSON object */ - Entity.prototype.toJSON = function toJSON() { + LabelSegment.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return Entity; + return LabelSegment; })(); - v1p3beta1.LabelAnnotation = (function() { + v1p3beta1.LabelFrame = (function() { /** - * Properties of a LabelAnnotation. + * Properties of a LabelFrame. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ILabelAnnotation - * @property {google.cloud.videointelligence.v1p3beta1.IEntity|null} [entity] LabelAnnotation entity - * @property {Array.|null} [categoryEntities] LabelAnnotation categoryEntities - * @property {Array.|null} [segments] LabelAnnotation segments - * @property {Array.|null} [frames] LabelAnnotation frames + * @interface ILabelFrame + * @property {google.protobuf.IDuration|null} [timeOffset] LabelFrame timeOffset + * @property {number|null} [confidence] LabelFrame confidence */ /** - * Constructs a new LabelAnnotation. + * Constructs a new LabelFrame. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a LabelAnnotation. - * @implements ILabelAnnotation + * @classdesc Represents a LabelFrame. + * @implements ILabelFrame * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame=} [properties] Properties to set */ - function LabelAnnotation(properties) { - this.categoryEntities = []; - this.segments = []; - this.frames = []; + function LabelFrame(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -29691,123 +29767,88 @@ } /** - * LabelAnnotation entity. 
- * @member {google.cloud.videointelligence.v1p3beta1.IEntity|null|undefined} entity - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation - * @instance - */ - LabelAnnotation.prototype.entity = null; - - /** - * LabelAnnotation categoryEntities. - * @member {Array.} categoryEntities - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation - * @instance - */ - LabelAnnotation.prototype.categoryEntities = $util.emptyArray; - - /** - * LabelAnnotation segments. - * @member {Array.} segments - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * LabelFrame timeOffset. + * @member {google.protobuf.IDuration|null|undefined} timeOffset + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @instance */ - LabelAnnotation.prototype.segments = $util.emptyArray; + LabelFrame.prototype.timeOffset = null; /** - * LabelAnnotation frames. - * @member {Array.} frames - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * LabelFrame confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @instance */ - LabelAnnotation.prototype.frames = $util.emptyArray; + LabelFrame.prototype.confidence = 0; /** - * Creates a new LabelAnnotation instance using the specified properties. + * Creates a new LabelFrame instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation instance + * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame instance */ - LabelAnnotation.create = function create(properties) { - return new LabelAnnotation(properties); + LabelFrame.create = function create(properties) { + return new LabelFrame(properties); }; /** - * Encodes the specified LabelAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation.verify|verify} messages. + * Encodes the specified LabelFrame message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelFrame.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation} message LabelAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame} message LabelFrame message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelAnnotation.encode = function encode(message, writer) { + LabelFrame.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.entity != null && message.hasOwnProperty("entity")) - $root.google.cloud.videointelligence.v1p3beta1.Entity.encode(message.entity, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.categoryEntities != null && message.categoryEntities.length) - for (var i = 0; i < message.categoryEntities.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.Entity.encode(message.categoryEntities[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.segments != null && message.segments.length) - for (var i = 0; i < message.segments.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.encode(message.segments[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.frames != null && message.frames.length) - for (var i = 0; i < message.frames.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.encode(message.frames[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); return writer; }; /** - * Encodes the specified LabelAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation.verify|verify} messages. + * Encodes the specified LabelFrame message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelFrame.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation} message LabelAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelFrame} message LabelFrame message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LabelAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + LabelFrame.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LabelAnnotation message from the specified reader or buffer. + * Decodes a LabelFrame message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelAnnotation.decode = function decode(reader, length) { + LabelFrame.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelFrame(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.decode(reader, reader.uint32()); + message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; case 2: - if (!(message.categoryEntities && message.categoryEntities.length)) - message.categoryEntities = []; - message.categoryEntities.push($root.google.cloud.videointelligence.v1p3beta1.Entity.decode(reader, reader.uint32())); - break; - case 3: - if (!(message.segments && message.segments.length)) - message.segments = []; - message.segments.push($root.google.cloud.videointelligence.v1p3beta1.LabelSegment.decode(reader, reader.uint32())); - break; - case 4: - if (!(message.frames && message.frames.length)) - message.frames = []; - message.frames.push($root.google.cloud.videointelligence.v1p3beta1.LabelFrame.decode(reader, reader.uint32())); + message.confidence = reader.float(); break; default: reader.skipType(tag & 7); @@ -29818,190 +29859,123 @@ }; /** - * Decodes a LabelAnnotation message from the specified reader or buffer, length delimited. + * Decodes a LabelFrame message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LabelAnnotation.decodeDelimited = function decodeDelimited(reader) { + LabelFrame.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LabelAnnotation message. + * Verifies a LabelFrame message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LabelAnnotation.verify = function verify(message) { + LabelFrame.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.entity != null && message.hasOwnProperty("entity")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.Entity.verify(message.entity); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.timeOffset); if (error) - return "entity." + error; - } - if (message.categoryEntities != null && message.hasOwnProperty("categoryEntities")) { - if (!Array.isArray(message.categoryEntities)) - return "categoryEntities: array expected"; - for (var i = 0; i < message.categoryEntities.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.Entity.verify(message.categoryEntities[i]); - if (error) - return "categoryEntities." + error; - } - } - if (message.segments != null && message.hasOwnProperty("segments")) { - if (!Array.isArray(message.segments)) - return "segments: array expected"; - for (var i = 0; i < message.segments.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.verify(message.segments[i]); - if (error) - return "segments." + error; - } - } - if (message.frames != null && message.hasOwnProperty("frames")) { - if (!Array.isArray(message.frames)) - return "frames: array expected"; - for (var i = 0; i < message.frames.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.verify(message.frames[i]); - if (error) - return "frames." + error; - } + return "timeOffset." + error; } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; return null; }; /** - * Creates a LabelAnnotation message from a plain object. Also converts values to their respective internal types. + * Creates a LabelFrame message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelFrame} LabelFrame */ - LabelAnnotation.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation) + LabelFrame.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelFrame) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation(); - if (object.entity != null) { - if (typeof object.entity !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.entity: object expected"); - message.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.fromObject(object.entity); - } - if (object.categoryEntities) { - if (!Array.isArray(object.categoryEntities)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.categoryEntities: array expected"); - message.categoryEntities = []; - for (var i = 0; i < object.categoryEntities.length; ++i) { - if (typeof object.categoryEntities[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.categoryEntities: object expected"); - message.categoryEntities[i] = $root.google.cloud.videointelligence.v1p3beta1.Entity.fromObject(object.categoryEntities[i]); - } - } - if (object.segments) { - if (!Array.isArray(object.segments)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.segments: array expected"); - message.segments = []; - for (var i = 0; i < object.segments.length; ++i) { - if (typeof object.segments[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.segments: object expected"); - message.segments[i] = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.fromObject(object.segments[i]); - } - } - if (object.frames) { - if (!Array.isArray(object.frames)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.frames: array expected"); - message.frames = []; - for (var i = 0; i < object.frames.length; ++i) { - if (typeof object.frames[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.frames: object expected"); - message.frames[i] = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.fromObject(object.frames[i]); - } + var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelFrame(); + if (object.timeOffset != null) { + if (typeof object.timeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelFrame.timeOffset: object expected"); + message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); } + if (object.confidence != null) + message.confidence = Number(object.confidence); return message; }; /** - * Creates a plain object from a LabelAnnotation message. Also converts values to other types if specified. + * Creates a plain object from a LabelFrame message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} message LabelAnnotation + * @param {google.cloud.videointelligence.v1p3beta1.LabelFrame} message LabelFrame * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LabelAnnotation.toObject = function toObject(message, options) { + LabelFrame.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) { - object.categoryEntities = []; - object.segments = []; - object.frames = []; - } - if (options.defaults) - object.entity = null; - if (message.entity != null && message.hasOwnProperty("entity")) - object.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.toObject(message.entity, options); - if (message.categoryEntities && message.categoryEntities.length) { - object.categoryEntities = []; - for (var j = 0; j < message.categoryEntities.length; ++j) - object.categoryEntities[j] = $root.google.cloud.videointelligence.v1p3beta1.Entity.toObject(message.categoryEntities[j], options); - } - if (message.segments && message.segments.length) { - object.segments = []; - for (var j = 0; j < message.segments.length; ++j) - object.segments[j] = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.toObject(message.segments[j], options); - } - if (message.frames && message.frames.length) { - object.frames = []; - for (var j = 0; j < message.frames.length; ++j) - object.frames[j] = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.toObject(message.frames[j], options); + if (options.defaults) { + object.timeOffset = null; + object.confidence = 0; } + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; return object; }; /** - * Converts this LabelAnnotation to JSON. + * Converts this LabelFrame to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelFrame * @instance * @returns {Object.} JSON object */ - LabelAnnotation.prototype.toJSON = function toJSON() { + LabelFrame.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return LabelAnnotation; + return LabelFrame; })(); - v1p3beta1.ExplicitContentFrame = (function() { + v1p3beta1.Entity = (function() { /** - * Properties of an ExplicitContentFrame. + * Properties of an Entity. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface IExplicitContentFrame - * @property {google.protobuf.IDuration|null} [timeOffset] ExplicitContentFrame timeOffset - * @property {google.cloud.videointelligence.v1p3beta1.Likelihood|null} [pornographyLikelihood] ExplicitContentFrame pornographyLikelihood + * @interface IEntity + * @property {string|null} [entityId] Entity entityId + * @property {string|null} [description] Entity description + * @property {string|null} [languageCode] Entity languageCode */ /** - * Constructs a new ExplicitContentFrame. + * Constructs a new Entity. 
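// Usage sketch for the regenerated v1p3beta1.LabelFrame type shown above: a
// plain-object round trip through fromObject / encode / decode / toObject.
// Assumptions: the static module is loadable from "./protos/protos" (the require
// path is illustrative) and the example field values are made up.
'use strict';
const {google} = require('./protos/protos');
const LabelFrame = google.cloud.videointelligence.v1p3beta1.LabelFrame;

// fromObject converts the nested Duration shape and coerces confidence to a
// number, matching the generated converter above.
const frame = LabelFrame.fromObject({
  timeOffset: {seconds: 3, nanos: 500000000},
  confidence: 0.87,
});

// encode() returns a protobuf.js Writer; finish() yields the wire bytes, which
// decode() turns back into a message instance.
const buffer = LabelFrame.encode(frame).finish();
const decoded = LabelFrame.decode(buffer);
console.log(LabelFrame.toObject(decoded, {defaults: true}));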
* @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents an ExplicitContentFrame. - * @implements IExplicitContentFrame + * @classdesc Represents an Entity. + * @implements IEntity * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IEntity=} [properties] Properties to set */ - function ExplicitContentFrame(properties) { + function Entity(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30009,88 +29983,101 @@ } /** - * ExplicitContentFrame timeOffset. - * @member {google.protobuf.IDuration|null|undefined} timeOffset - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * Entity entityId. + * @member {string} entityId + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @instance */ - ExplicitContentFrame.prototype.timeOffset = null; + Entity.prototype.entityId = ""; /** - * ExplicitContentFrame pornographyLikelihood. - * @member {google.cloud.videointelligence.v1p3beta1.Likelihood} pornographyLikelihood - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * Entity description. + * @member {string} description + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @instance */ - ExplicitContentFrame.prototype.pornographyLikelihood = 0; + Entity.prototype.description = ""; /** - * Creates a new ExplicitContentFrame instance using the specified properties. + * Entity languageCode. + * @member {string} languageCode + * @memberof google.cloud.videointelligence.v1p3beta1.Entity + * @instance + */ + Entity.prototype.languageCode = ""; + + /** + * Creates a new Entity instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame instance + * @param {google.cloud.videointelligence.v1p3beta1.IEntity=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity instance */ - ExplicitContentFrame.create = function create(properties) { - return new ExplicitContentFrame(properties); + Entity.create = function create(properties) { + return new Entity(properties); }; /** - * Encodes the specified ExplicitContentFrame message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify|verify} messages. + * Encodes the specified Entity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Entity.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame} message ExplicitContentFrame message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IEntity} message Entity message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExplicitContentFrame.encode = function encode(message, writer) { + Entity.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pornographyLikelihood); + if (message.entityId != null && message.hasOwnProperty("entityId")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.entityId); + if (message.description != null && message.hasOwnProperty("description")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.description); + if (message.languageCode != null && message.hasOwnProperty("languageCode")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.languageCode); return writer; }; /** - * Encodes the specified ExplicitContentFrame message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify|verify} messages. + * Encodes the specified Entity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Entity.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame} message ExplicitContentFrame message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IEntity} message Entity message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExplicitContentFrame.encodeDelimited = function encodeDelimited(message, writer) { + Entity.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExplicitContentFrame message from the specified reader or buffer. + * Decodes an Entity message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame + * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExplicitContentFrame.decode = function decode(reader, length) { + Entity.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Entity(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); + message.entityId = reader.string(); break; case 2: - message.pornographyLikelihood = reader.int32(); + message.description = reader.string(); + break; + case 3: + message.languageCode = reader.string(); break; default: reader.skipType(tag & 7); @@ -30101,154 +30088,129 @@ }; /** - * Decodes an ExplicitContentFrame message from the specified reader or buffer, length delimited. + * Decodes an Entity message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame + * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExplicitContentFrame.decodeDelimited = function decodeDelimited(reader) { + Entity.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExplicitContentFrame message. + * Verifies an Entity message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExplicitContentFrame.verify = function verify(message) { + Entity.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { - var error = $root.google.protobuf.Duration.verify(message.timeOffset); - if (error) - return "timeOffset." 
+ error; - } - if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) - switch (message.pornographyLikelihood) { - default: - return "pornographyLikelihood: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - break; - } + if (message.entityId != null && message.hasOwnProperty("entityId")) + if (!$util.isString(message.entityId)) + return "entityId: string expected"; + if (message.description != null && message.hasOwnProperty("description")) + if (!$util.isString(message.description)) + return "description: string expected"; + if (message.languageCode != null && message.hasOwnProperty("languageCode")) + if (!$util.isString(message.languageCode)) + return "languageCode: string expected"; return null; }; /** - * Creates an ExplicitContentFrame message from a plain object. Also converts values to their respective internal types. + * Creates an Entity message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame + * @returns {google.cloud.videointelligence.v1p3beta1.Entity} Entity */ - ExplicitContentFrame.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame) + Entity.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Entity) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame(); - if (object.timeOffset != null) { - if (typeof object.timeOffset !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.timeOffset: object expected"); - message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); - } - switch (object.pornographyLikelihood) { - case "LIKELIHOOD_UNSPECIFIED": - case 0: - message.pornographyLikelihood = 0; - break; - case "VERY_UNLIKELY": - case 1: - message.pornographyLikelihood = 1; - break; - case "UNLIKELY": - case 2: - message.pornographyLikelihood = 2; - break; - case "POSSIBLE": - case 3: - message.pornographyLikelihood = 3; - break; - case "LIKELY": - case 4: - message.pornographyLikelihood = 4; - break; - case "VERY_LIKELY": - case 5: - message.pornographyLikelihood = 5; - break; - } + var message = new $root.google.cloud.videointelligence.v1p3beta1.Entity(); + if (object.entityId != null) + message.entityId = String(object.entityId); + if (object.description != null) + message.description = String(object.description); + if (object.languageCode != null) + message.languageCode = String(object.languageCode); return message; }; /** - * Creates a plain object from an ExplicitContentFrame message. Also converts values to other types if specified. + * Creates a plain object from an Entity message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @static - * @param {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} message ExplicitContentFrame + * @param {google.cloud.videointelligence.v1p3beta1.Entity} message Entity * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExplicitContentFrame.toObject = function toObject(message, options) { + Entity.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.timeOffset = null; - object.pornographyLikelihood = options.enums === String ? "LIKELIHOOD_UNSPECIFIED" : 0; + object.entityId = ""; + object.description = ""; + object.languageCode = ""; } - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); - if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) - object.pornographyLikelihood = options.enums === String ? $root.google.cloud.videointelligence.v1p3beta1.Likelihood[message.pornographyLikelihood] : message.pornographyLikelihood; + if (message.entityId != null && message.hasOwnProperty("entityId")) + object.entityId = message.entityId; + if (message.description != null && message.hasOwnProperty("description")) + object.description = message.description; + if (message.languageCode != null && message.hasOwnProperty("languageCode")) + object.languageCode = message.languageCode; return object; }; /** - * Converts this ExplicitContentFrame to JSON. + * Converts this Entity to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame + * @memberof google.cloud.videointelligence.v1p3beta1.Entity * @instance * @returns {Object.} JSON object */ - ExplicitContentFrame.prototype.toJSON = function toJSON() { + Entity.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return ExplicitContentFrame; + return Entity; })(); - v1p3beta1.ExplicitContentAnnotation = (function() { + v1p3beta1.LabelAnnotation = (function() { /** - * Properties of an ExplicitContentAnnotation. + * Properties of a LabelAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface IExplicitContentAnnotation - * @property {Array.|null} [frames] ExplicitContentAnnotation frames + * @interface ILabelAnnotation + * @property {google.cloud.videointelligence.v1p3beta1.IEntity|null} [entity] LabelAnnotation entity + * @property {Array.|null} [categoryEntities] LabelAnnotation categoryEntities + * @property {Array.|null} [segments] LabelAnnotation segments + * @property {Array.|null} [frames] LabelAnnotation frames */ /** - * Constructs a new ExplicitContentAnnotation. + * Constructs a new LabelAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents an ExplicitContentAnnotation. - * @implements IExplicitContentAnnotation + * @classdesc Represents a LabelAnnotation. 
+ * @implements ILabelAnnotation * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation=} [properties] Properties to set */ - function ExplicitContentAnnotation(properties) { + function LabelAnnotation(properties) { + this.categoryEntities = []; + this.segments = []; this.frames = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) @@ -30257,78 +30219,123 @@ } /** - * ExplicitContentAnnotation frames. - * @member {Array.} frames - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * LabelAnnotation entity. + * @member {google.cloud.videointelligence.v1p3beta1.IEntity|null|undefined} entity + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @instance */ - ExplicitContentAnnotation.prototype.frames = $util.emptyArray; + LabelAnnotation.prototype.entity = null; /** - * Creates a new ExplicitContentAnnotation instance using the specified properties. + * LabelAnnotation categoryEntities. + * @member {Array.} categoryEntities + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @instance + */ + LabelAnnotation.prototype.categoryEntities = $util.emptyArray; + + /** + * LabelAnnotation segments. + * @member {Array.} segments + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @instance + */ + LabelAnnotation.prototype.segments = $util.emptyArray; + + /** + * LabelAnnotation frames. + * @member {Array.} frames + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @instance + */ + LabelAnnotation.prototype.frames = $util.emptyArray; + + /** + * Creates a new LabelAnnotation instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation instance + * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation instance */ - ExplicitContentAnnotation.create = function create(properties) { - return new ExplicitContentAnnotation(properties); + LabelAnnotation.create = function create(properties) { + return new LabelAnnotation(properties); }; /** - * Encodes the specified ExplicitContentAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.verify|verify} messages. + * Encodes the specified LabelAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation} message ExplicitContentAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation} message LabelAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExplicitContentAnnotation.encode = function encode(message, writer) { + LabelAnnotation.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.entity != null && message.hasOwnProperty("entity")) + $root.google.cloud.videointelligence.v1p3beta1.Entity.encode(message.entity, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.categoryEntities != null && message.categoryEntities.length) + for (var i = 0; i < message.categoryEntities.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.Entity.encode(message.categoryEntities[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.segments != null && message.segments.length) + for (var i = 0; i < message.segments.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.encode(message.segments[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); if (message.frames != null && message.frames.length) for (var i = 0; i < message.frames.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.encode(message.frames[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.encode(message.frames[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ExplicitContentAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.verify|verify} messages. + * Encodes the specified LabelAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation} message ExplicitContentAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ILabelAnnotation} message LabelAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExplicitContentAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + LabelAnnotation.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExplicitContentAnnotation message from the specified reader or buffer. + * Decodes a LabelAnnotation message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExplicitContentAnnotation.decode = function decode(reader, length) { + LabelAnnotation.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: + message.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.categoryEntities && message.categoryEntities.length)) + message.categoryEntities = []; + message.categoryEntities.push($root.google.cloud.videointelligence.v1p3beta1.Entity.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.segments && message.segments.length)) + message.segments = []; + message.segments.push($root.google.cloud.videointelligence.v1p3beta1.LabelSegment.decode(reader, reader.uint32())); + break; + case 4: if (!(message.frames && message.frames.length)) message.frames = []; - message.frames.push($root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.decode(reader, reader.uint32())); + message.frames.push($root.google.cloud.videointelligence.v1p3beta1.LabelFrame.decode(reader, reader.uint32())); break; default: reader.skipType(tag & 7); @@ -30339,37 +30346,60 @@ }; /** - * Decodes an ExplicitContentAnnotation message from the specified reader or buffer, length delimited. + * Decodes a LabelAnnotation message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExplicitContentAnnotation.decodeDelimited = function decodeDelimited(reader) { + LabelAnnotation.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExplicitContentAnnotation message. + * Verifies a LabelAnnotation message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExplicitContentAnnotation.verify = function verify(message) { + LabelAnnotation.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.entity != null && message.hasOwnProperty("entity")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Entity.verify(message.entity); + if (error) + return "entity." + error; + } + if (message.categoryEntities != null && message.hasOwnProperty("categoryEntities")) { + if (!Array.isArray(message.categoryEntities)) + return "categoryEntities: array expected"; + for (var i = 0; i < message.categoryEntities.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Entity.verify(message.categoryEntities[i]); + if (error) + return "categoryEntities." + error; + } + } + if (message.segments != null && message.hasOwnProperty("segments")) { + if (!Array.isArray(message.segments)) + return "segments: array expected"; + for (var i = 0; i < message.segments.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.verify(message.segments[i]); + if (error) + return "segments." + error; + } + } if (message.frames != null && message.hasOwnProperty("frames")) { if (!Array.isArray(message.frames)) return "frames: array expected"; for (var i = 0; i < message.frames.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify(message.frames[i]); + var error = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.verify(message.frames[i]); if (error) return "frames." + error; } @@ -30378,88 +30408,128 @@ }; /** - * Creates an ExplicitContentAnnotation message from a plain object. Also converts values to their respective internal types. + * Creates a LabelAnnotation message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} LabelAnnotation */ - ExplicitContentAnnotation.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation) + LabelAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation(); + var message = new $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation(); + if (object.entity != null) { + if (typeof object.entity !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.entity: object expected"); + message.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.fromObject(object.entity); + } + if (object.categoryEntities) { + if (!Array.isArray(object.categoryEntities)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.categoryEntities: array expected"); + message.categoryEntities = []; + for (var i = 0; i < object.categoryEntities.length; ++i) { + if (typeof object.categoryEntities[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.categoryEntities: object expected"); + message.categoryEntities[i] = $root.google.cloud.videointelligence.v1p3beta1.Entity.fromObject(object.categoryEntities[i]); + } + } + if (object.segments) { + if (!Array.isArray(object.segments)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.segments: array expected"); + message.segments = []; + for (var i = 0; i < object.segments.length; ++i) { + if (typeof object.segments[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.segments: object expected"); + message.segments[i] = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.fromObject(object.segments[i]); + } + } if (object.frames) { if (!Array.isArray(object.frames)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.frames: array expected"); + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.frames: array expected"); message.frames = []; for (var i = 0; i < object.frames.length; ++i) { if (typeof object.frames[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.frames: object expected"); - message.frames[i] = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.fromObject(object.frames[i]); + throw TypeError(".google.cloud.videointelligence.v1p3beta1.LabelAnnotation.frames: object expected"); + message.frames[i] = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.fromObject(object.frames[i]); } } return message; }; /** - * Creates a plain object from an ExplicitContentAnnotation message. Also converts values to other types if specified. + * Creates a plain object from a LabelAnnotation message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} message ExplicitContentAnnotation + * @param {google.cloud.videointelligence.v1p3beta1.LabelAnnotation} message LabelAnnotation * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExplicitContentAnnotation.toObject = function toObject(message, options) { + LabelAnnotation.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.frames = []; - if (message.frames && message.frames.length) { + if (options.arrays || options.defaults) { + object.categoryEntities = []; + object.segments = []; object.frames = []; - for (var j = 0; j < message.frames.length; ++j) - object.frames[j] = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.toObject(message.frames[j], options); } - return object; - }; - - /** - * Converts this ExplicitContentAnnotation to JSON. - * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation - * @instance - * @returns {Object.} JSON object + if (options.defaults) + object.entity = null; + if (message.entity != null && message.hasOwnProperty("entity")) + object.entity = $root.google.cloud.videointelligence.v1p3beta1.Entity.toObject(message.entity, options); + if (message.categoryEntities && message.categoryEntities.length) { + object.categoryEntities = []; + for (var j = 0; j < message.categoryEntities.length; ++j) + object.categoryEntities[j] = $root.google.cloud.videointelligence.v1p3beta1.Entity.toObject(message.categoryEntities[j], options); + } + if (message.segments && message.segments.length) { + object.segments = []; + for (var j = 0; j < message.segments.length; ++j) + object.segments[j] = $root.google.cloud.videointelligence.v1p3beta1.LabelSegment.toObject(message.segments[j], options); + } + if (message.frames && message.frames.length) { + object.frames = []; + for (var j = 0; j < message.frames.length; ++j) + object.frames[j] = $root.google.cloud.videointelligence.v1p3beta1.LabelFrame.toObject(message.frames[j], options); + } + return object; + }; + + /** + * Converts this LabelAnnotation to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.LabelAnnotation + * @instance + * @returns {Object.} JSON object */ - ExplicitContentAnnotation.prototype.toJSON = function toJSON() { + LabelAnnotation.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return ExplicitContentAnnotation; + return LabelAnnotation; })(); - v1p3beta1.NormalizedBoundingBox = (function() { + v1p3beta1.ExplicitContentFrame = (function() { /** - * Properties of a NormalizedBoundingBox. + * Properties of an ExplicitContentFrame. 
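// Usage sketch: assembling a LabelAnnotation from a plain object and checking it
// with verify(). The nested Entity / LabelSegment / LabelFrame shapes follow the
// generated converters above; the entity IDs and the require path are illustrative.
'use strict';
const {google} = require('./protos/protos');
const {LabelAnnotation} = google.cloud.videointelligence.v1p3beta1;

const annotation = LabelAnnotation.fromObject({
  entity: {entityId: '/m/01yrx', description: 'cat', languageCode: 'en-US'},
  categoryEntities: [{entityId: '/m/068hy', description: 'pet', languageCode: 'en-US'}],
  segments: [],
  frames: [{timeOffset: {seconds: 1}, confidence: 0.92}],
});

// verify() returns null for a well-formed plain object, or a dotted path naming
// the first invalid field (e.g. "frames.confidence: number expected").
console.log(LabelAnnotation.verify(LabelAnnotation.toObject(annotation)));  // null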
* @memberof google.cloud.videointelligence.v1p3beta1 - * @interface INormalizedBoundingBox - * @property {number|null} [left] NormalizedBoundingBox left - * @property {number|null} [top] NormalizedBoundingBox top - * @property {number|null} [right] NormalizedBoundingBox right - * @property {number|null} [bottom] NormalizedBoundingBox bottom + * @interface IExplicitContentFrame + * @property {google.protobuf.IDuration|null} [timeOffset] ExplicitContentFrame timeOffset + * @property {google.cloud.videointelligence.v1p3beta1.Likelihood|null} [pornographyLikelihood] ExplicitContentFrame pornographyLikelihood */ /** - * Constructs a new NormalizedBoundingBox. + * Constructs a new ExplicitContentFrame. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a NormalizedBoundingBox. - * @implements INormalizedBoundingBox + * @classdesc Represents an ExplicitContentFrame. + * @implements IExplicitContentFrame * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame=} [properties] Properties to set */ - function NormalizedBoundingBox(properties) { + function ExplicitContentFrame(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30467,114 +30537,88 @@ } /** - * NormalizedBoundingBox left. - * @member {number} left - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox - * @instance - */ - NormalizedBoundingBox.prototype.left = 0; - - /** - * NormalizedBoundingBox top. - * @member {number} top - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox - * @instance - */ - NormalizedBoundingBox.prototype.top = 0; - - /** - * NormalizedBoundingBox right. - * @member {number} right - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * ExplicitContentFrame timeOffset. + * @member {google.protobuf.IDuration|null|undefined} timeOffset + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @instance */ - NormalizedBoundingBox.prototype.right = 0; + ExplicitContentFrame.prototype.timeOffset = null; /** - * NormalizedBoundingBox bottom. - * @member {number} bottom - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * ExplicitContentFrame pornographyLikelihood. + * @member {google.cloud.videointelligence.v1p3beta1.Likelihood} pornographyLikelihood + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @instance */ - NormalizedBoundingBox.prototype.bottom = 0; + ExplicitContentFrame.prototype.pornographyLikelihood = 0; /** - * Creates a new NormalizedBoundingBox instance using the specified properties. + * Creates a new ExplicitContentFrame instance using the specified properties. 
* @function create - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox instance + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame instance */ - NormalizedBoundingBox.create = function create(properties) { - return new NormalizedBoundingBox(properties); + ExplicitContentFrame.create = function create(properties) { + return new ExplicitContentFrame(properties); }; /** - * Encodes the specified NormalizedBoundingBox message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify|verify} messages. + * Encodes the specified ExplicitContentFrame message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox} message NormalizedBoundingBox message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame} message ExplicitContentFrame message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NormalizedBoundingBox.encode = function encode(message, writer) { + ExplicitContentFrame.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.left != null && message.hasOwnProperty("left")) - writer.uint32(/* id 1, wireType 5 =*/13).float(message.left); - if (message.top != null && message.hasOwnProperty("top")) - writer.uint32(/* id 2, wireType 5 =*/21).float(message.top); - if (message.right != null && message.hasOwnProperty("right")) - writer.uint32(/* id 3, wireType 5 =*/29).float(message.right); - if (message.bottom != null && message.hasOwnProperty("bottom")) - writer.uint32(/* id 4, wireType 5 =*/37).float(message.bottom); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.pornographyLikelihood); return writer; }; /** - * Encodes the specified NormalizedBoundingBox message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify|verify} messages. + * Encodes the specified ExplicitContentFrame message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox} message NormalizedBoundingBox message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentFrame} message ExplicitContentFrame message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NormalizedBoundingBox.encodeDelimited = function encodeDelimited(message, writer) { + ExplicitContentFrame.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a NormalizedBoundingBox message from the specified reader or buffer. + * Decodes an ExplicitContentFrame message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NormalizedBoundingBox.decode = function decode(reader, length) { + ExplicitContentFrame.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.left = reader.float(); + message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; case 2: - message.top = reader.float(); - break; - case 3: - message.right = reader.float(); - break; - case 4: - message.bottom = reader.float(); + message.pornographyLikelihood = reader.int32(); break; default: reader.skipType(tag & 7); @@ -30585,135 +30629,155 @@ }; /** - * Decodes a NormalizedBoundingBox message from the specified reader or buffer, length delimited. + * Decodes an ExplicitContentFrame message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NormalizedBoundingBox.decodeDelimited = function decodeDelimited(reader) { + ExplicitContentFrame.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a NormalizedBoundingBox message. + * Verifies an ExplicitContentFrame message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - NormalizedBoundingBox.verify = function verify(message) { + ExplicitContentFrame.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.left != null && message.hasOwnProperty("left")) - if (typeof message.left !== "number") - return "left: number expected"; - if (message.top != null && message.hasOwnProperty("top")) - if (typeof message.top !== "number") - return "top: number expected"; - if (message.right != null && message.hasOwnProperty("right")) - if (typeof message.right !== "number") - return "right: number expected"; - if (message.bottom != null && message.hasOwnProperty("bottom")) - if (typeof message.bottom !== "number") - return "bottom: number expected"; + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.timeOffset); + if (error) + return "timeOffset." + error; + } + if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) + switch (message.pornographyLikelihood) { + default: + return "pornographyLikelihood: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + break; + } return null; }; /** - * Creates a NormalizedBoundingBox message from a plain object. Also converts values to their respective internal types. + * Creates an ExplicitContentFrame message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} ExplicitContentFrame */ - NormalizedBoundingBox.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox) + ExplicitContentFrame.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox(); - if (object.left != null) - message.left = Number(object.left); - if (object.top != null) - message.top = Number(object.top); - if (object.right != null) - message.right = Number(object.right); - if (object.bottom != null) - message.bottom = Number(object.bottom); + var message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame(); + if (object.timeOffset != null) { + if (typeof object.timeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.timeOffset: object expected"); + message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); + } + switch (object.pornographyLikelihood) { + case "LIKELIHOOD_UNSPECIFIED": + case 0: + message.pornographyLikelihood = 0; + break; + case "VERY_UNLIKELY": + case 1: + message.pornographyLikelihood = 1; + break; + case "UNLIKELY": + case 2: + message.pornographyLikelihood = 2; + break; + case "POSSIBLE": + case 3: + message.pornographyLikelihood = 3; + break; + case "LIKELY": + case 4: + message.pornographyLikelihood = 4; + break; + case "VERY_LIKELY": + case 5: + message.pornographyLikelihood = 5; + break; + } return message; }; /** - * Creates a plain object from a NormalizedBoundingBox message. Also converts values to other types if specified. + * Creates a plain object from an ExplicitContentFrame message. Also converts values to other types if specified. * @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @static - * @param {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} message NormalizedBoundingBox + * @param {google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame} message ExplicitContentFrame * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - NormalizedBoundingBox.toObject = function toObject(message, options) { + ExplicitContentFrame.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.defaults) { - object.left = 0; - object.top = 0; - object.right = 0; - object.bottom = 0; + object.timeOffset = null; + object.pornographyLikelihood = options.enums === String ? "LIKELIHOOD_UNSPECIFIED" : 0; } - if (message.left != null && message.hasOwnProperty("left")) - object.left = options.json && !isFinite(message.left) ? String(message.left) : message.left; - if (message.top != null && message.hasOwnProperty("top")) - object.top = options.json && !isFinite(message.top) ? 
String(message.top) : message.top; - if (message.right != null && message.hasOwnProperty("right")) - object.right = options.json && !isFinite(message.right) ? String(message.right) : message.right; - if (message.bottom != null && message.hasOwnProperty("bottom")) - object.bottom = options.json && !isFinite(message.bottom) ? String(message.bottom) : message.bottom; + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); + if (message.pornographyLikelihood != null && message.hasOwnProperty("pornographyLikelihood")) + object.pornographyLikelihood = options.enums === String ? $root.google.cloud.videointelligence.v1p3beta1.Likelihood[message.pornographyLikelihood] : message.pornographyLikelihood; return object; }; /** - * Converts this NormalizedBoundingBox to JSON. + * Converts this ExplicitContentFrame to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame * @instance * @returns {Object.} JSON object */ - NormalizedBoundingBox.prototype.toJSON = function toJSON() { + ExplicitContentFrame.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return NormalizedBoundingBox; + return ExplicitContentFrame; })(); - v1p3beta1.TimestampedObject = (function() { + v1p3beta1.ExplicitContentAnnotation = (function() { /** - * Properties of a TimestampedObject. + * Properties of an ExplicitContentAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ITimestampedObject - * @property {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox|null} [normalizedBoundingBox] TimestampedObject normalizedBoundingBox - * @property {google.protobuf.IDuration|null} [timeOffset] TimestampedObject timeOffset - * @property {Array.|null} [attributes] TimestampedObject attributes + * @interface IExplicitContentAnnotation + * @property {Array.|null} [frames] ExplicitContentAnnotation frames */ /** - * Constructs a new TimestampedObject. + * Constructs a new ExplicitContentAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a TimestampedObject. - * @implements ITimestampedObject + * @classdesc Represents an ExplicitContentAnnotation. + * @implements IExplicitContentAnnotation * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation=} [properties] Properties to set */ - function TimestampedObject(properties) { - this.attributes = []; + function ExplicitContentAnnotation(properties) { + this.frames = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30721,104 +30785,78 @@ } /** - * TimestampedObject normalizedBoundingBox. - * @member {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox|null|undefined} normalizedBoundingBox - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject - * @instance - */ - TimestampedObject.prototype.normalizedBoundingBox = null; - - /** - * TimestampedObject timeOffset. 
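// Usage sketch for the enum handling above: ExplicitContentFrame.fromObject
// accepts the Likelihood value either as its number or as its string name (the
// string form is what the updated .d.ts typings in this change allow), and
// toObject emits string names when called with {enums: String}. Field values
// and the require path are illustrative.
'use strict';
const {google} = require('./protos/protos');
const {ExplicitContentFrame} = google.cloud.videointelligence.v1p3beta1;

const frame = ExplicitContentFrame.fromObject({
  timeOffset: {seconds: 12},
  pornographyLikelihood: 'VERY_UNLIKELY',  // equivalent to passing 1
});

console.log(ExplicitContentFrame.toObject(frame, {enums: String}).pornographyLikelihood);
// -> 'VERY_UNLIKELY'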
- * @member {google.protobuf.IDuration|null|undefined} timeOffset - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject - * @instance - */ - TimestampedObject.prototype.timeOffset = null; - - /** - * TimestampedObject attributes. - * @member {Array.} attributes - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * ExplicitContentAnnotation frames. + * @member {Array.} frames + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @instance */ - TimestampedObject.prototype.attributes = $util.emptyArray; + ExplicitContentAnnotation.prototype.frames = $util.emptyArray; /** - * Creates a new TimestampedObject instance using the specified properties. + * Creates a new ExplicitContentAnnotation instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject instance + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation instance */ - TimestampedObject.create = function create(properties) { - return new TimestampedObject(properties); + ExplicitContentAnnotation.create = function create(properties) { + return new ExplicitContentAnnotation(properties); }; /** - * Encodes the specified TimestampedObject message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify|verify} messages. + * Encodes the specified ExplicitContentAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject} message TimestampedObject message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation} message ExplicitContentAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TimestampedObject.encode = function encode(message, writer) { + ExplicitContentAnnotation.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) - $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.encode(message.normalizedBoundingBox, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.attributes != null && message.attributes.length) - for (var i = 0; i < message.attributes.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.frames != null && message.frames.length) + for (var i = 0; i < message.frames.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.encode(message.frames[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified TimestampedObject message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify|verify} messages. + * Encodes the specified ExplicitContentAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject} message TimestampedObject message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation} message ExplicitContentAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TimestampedObject.encodeDelimited = function encodeDelimited(message, writer) { + ExplicitContentAnnotation.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TimestampedObject message from the specified reader or buffer. + * Decodes an ExplicitContentAnnotation message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TimestampedObject.decode = function decode(reader, length) { + ExplicitContentAnnotation.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.decode(reader, reader.uint32()); - break; - case 2: - message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); - break; - case 3: - if (!(message.attributes && message.attributes.length)) - message.attributes = []; - message.attributes.push($root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.decode(reader, reader.uint32())); + if (!(message.frames && message.frames.length)) + message.frames = []; + message.frames.push($root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.decode(reader, reader.uint32())); break; default: reader.skipType(tag & 7); @@ -30829,157 +30867,127 @@ }; /** - * Decodes a TimestampedObject message from the specified reader or buffer, length delimited. + * Decodes an ExplicitContentAnnotation message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TimestampedObject.decodeDelimited = function decodeDelimited(reader) { + ExplicitContentAnnotation.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TimestampedObject message. + * Verifies an ExplicitContentAnnotation message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TimestampedObject.verify = function verify(message) { + ExplicitContentAnnotation.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify(message.normalizedBoundingBox); - if (error) - return "normalizedBoundingBox." + error; - } - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { - var error = $root.google.protobuf.Duration.verify(message.timeOffset); - if (error) - return "timeOffset." + error; - } - if (message.attributes != null && message.hasOwnProperty("attributes")) { - if (!Array.isArray(message.attributes)) - return "attributes: array expected"; - for (var i = 0; i < message.attributes.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify(message.attributes[i]); + if (message.frames != null && message.hasOwnProperty("frames")) { + if (!Array.isArray(message.frames)) + return "frames: array expected"; + for (var i = 0; i < message.frames.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.verify(message.frames[i]); if (error) - return "attributes." + error; + return "frames." + error; } } return null; }; /** - * Creates a TimestampedObject message from a plain object. Also converts values to their respective internal types. + * Creates an ExplicitContentAnnotation message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject + * @returns {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} ExplicitContentAnnotation */ - TimestampedObject.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject) + ExplicitContentAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject(); - if (object.normalizedBoundingBox != null) { - if (typeof object.normalizedBoundingBox !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.normalizedBoundingBox: object expected"); - message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.fromObject(object.normalizedBoundingBox); - } - if (object.timeOffset != null) { - if (typeof object.timeOffset !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.timeOffset: object expected"); - message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); - } - if (object.attributes) { - if (!Array.isArray(object.attributes)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.attributes: array expected"); - message.attributes = []; - for (var i = 0; i < object.attributes.length; ++i) { - if (typeof object.attributes[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.attributes: object expected"); - message.attributes[i] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.fromObject(object.attributes[i]); + var message = new $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation(); + if (object.frames) { + if (!Array.isArray(object.frames)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.frames: array expected"); + message.frames = []; + for (var i = 0; i < object.frames.length; ++i) { + if (typeof object.frames[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation.frames: object expected"); + message.frames[i] = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.fromObject(object.frames[i]); } } return message; }; /** - * Creates a plain object from a TimestampedObject message. Also converts values to other types if specified. + * Creates a plain object from an ExplicitContentAnnotation message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.TimestampedObject} message TimestampedObject + * @param {google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} message ExplicitContentAnnotation * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TimestampedObject.toObject = function toObject(message, options) { + ExplicitContentAnnotation.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.arrays || options.defaults) - object.attributes = []; - if (options.defaults) { - object.normalizedBoundingBox = null; - object.timeOffset = null; - } - if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) - object.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.toObject(message.normalizedBoundingBox, options); - if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) - object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); - if (message.attributes && message.attributes.length) { - object.attributes = []; - for (var j = 0; j < message.attributes.length; ++j) - object.attributes[j] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.toObject(message.attributes[j], options); + object.frames = []; + if (message.frames && message.frames.length) { + object.frames = []; + for (var j = 0; j < message.frames.length; ++j) + object.frames[j] = $root.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame.toObject(message.frames[j], options); } return object; }; /** - * Converts this TimestampedObject to JSON. + * Converts this ExplicitContentAnnotation to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation * @instance * @returns {Object.} JSON object */ - TimestampedObject.prototype.toJSON = function toJSON() { + ExplicitContentAnnotation.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return TimestampedObject; + return ExplicitContentAnnotation; })(); - v1p3beta1.Track = (function() { + v1p3beta1.NormalizedBoundingBox = (function() { /** - * Properties of a Track. + * Properties of a NormalizedBoundingBox. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ITrack - * @property {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null} [segment] Track segment - * @property {Array.|null} [timestampedObjects] Track timestampedObjects - * @property {Array.|null} [attributes] Track attributes - * @property {number|null} [confidence] Track confidence + * @interface INormalizedBoundingBox + * @property {number|null} [left] NormalizedBoundingBox left + * @property {number|null} [top] NormalizedBoundingBox top + * @property {number|null} [right] NormalizedBoundingBox right + * @property {number|null} [bottom] NormalizedBoundingBox bottom */ /** - * Constructs a new Track. + * Constructs a new NormalizedBoundingBox. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a Track. - * @implements ITrack + * @classdesc Represents a NormalizedBoundingBox. 
+ * @implements INormalizedBoundingBox * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ITrack=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox=} [properties] Properties to set */ - function Track(properties) { - this.timestampedObjects = []; - this.attributes = []; + function NormalizedBoundingBox(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -30987,120 +30995,114 @@ } /** - * Track segment. - * @member {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null|undefined} segment - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * NormalizedBoundingBox left. + * @member {number} left + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @instance */ - Track.prototype.segment = null; + NormalizedBoundingBox.prototype.left = 0; /** - * Track timestampedObjects. - * @member {Array.} timestampedObjects - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * NormalizedBoundingBox top. + * @member {number} top + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @instance */ - Track.prototype.timestampedObjects = $util.emptyArray; + NormalizedBoundingBox.prototype.top = 0; /** - * Track attributes. - * @member {Array.} attributes - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * NormalizedBoundingBox right. + * @member {number} right + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @instance */ - Track.prototype.attributes = $util.emptyArray; + NormalizedBoundingBox.prototype.right = 0; /** - * Track confidence. - * @member {number} confidence - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * NormalizedBoundingBox bottom. + * @member {number} bottom + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @instance */ - Track.prototype.confidence = 0; + NormalizedBoundingBox.prototype.bottom = 0; /** - * Creates a new Track instance using the specified properties. + * Creates a new NormalizedBoundingBox instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITrack=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track instance + * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox instance */ - Track.create = function create(properties) { - return new Track(properties); + NormalizedBoundingBox.create = function create(properties) { + return new NormalizedBoundingBox(properties); }; /** - * Encodes the specified Track message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Track.verify|verify} messages. + * Encodes the specified NormalizedBoundingBox message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITrack} message Track message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox} message NormalizedBoundingBox message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Track.encode = function encode(message, writer) { + NormalizedBoundingBox.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.segment != null && message.hasOwnProperty("segment")) - $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.encode(message.segment, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.timestampedObjects != null && message.timestampedObjects.length) - for (var i = 0; i < message.timestampedObjects.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.encode(message.timestampedObjects[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.attributes != null && message.attributes.length) - for (var i = 0; i < message.attributes.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.confidence != null && message.hasOwnProperty("confidence")) - writer.uint32(/* id 4, wireType 5 =*/37).float(message.confidence); + if (message.left != null && message.hasOwnProperty("left")) + writer.uint32(/* id 1, wireType 5 =*/13).float(message.left); + if (message.top != null && message.hasOwnProperty("top")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.top); + if (message.right != null && message.hasOwnProperty("right")) + writer.uint32(/* id 3, wireType 5 =*/29).float(message.right); + if (message.bottom != null && message.hasOwnProperty("bottom")) + writer.uint32(/* id 4, wireType 5 =*/37).float(message.bottom); return writer; }; /** - * Encodes the specified Track message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Track.verify|verify} messages. + * Encodes the specified NormalizedBoundingBox message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static - * @param {google.cloud.videointelligence.v1p3beta1.ITrack} message Track message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox} message NormalizedBoundingBox message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Track.encodeDelimited = function encodeDelimited(message, writer) { + NormalizedBoundingBox.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Track message from the specified reader or buffer. + * Decodes a NormalizedBoundingBox message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track + * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Track.decode = function decode(reader, length) { + NormalizedBoundingBox.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Track(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.decode(reader, reader.uint32()); + message.left = reader.float(); break; case 2: - if (!(message.timestampedObjects && message.timestampedObjects.length)) - message.timestampedObjects = []; - message.timestampedObjects.push($root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.decode(reader, reader.uint32())); + message.top = reader.float(); break; case 3: - if (!(message.attributes && message.attributes.length)) - message.attributes = []; - message.attributes.push($root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.decode(reader, reader.uint32())); + message.right = reader.float(); break; case 4: - message.confidence = reader.float(); + message.bottom = reader.float(); break; default: reader.skipType(tag & 7); @@ -31111,175 +31113,137 @@ }; /** - * Decodes a Track message from the specified reader or buffer, length delimited. + * Decodes a NormalizedBoundingBox message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track + * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Track.decodeDelimited = function decodeDelimited(reader) { + NormalizedBoundingBox.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Track message. + * Verifies a NormalizedBoundingBox message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Track.verify = function verify(message) { + NormalizedBoundingBox.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.segment != null && message.hasOwnProperty("segment")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.verify(message.segment); - if (error) - return "segment." + error; - } - if (message.timestampedObjects != null && message.hasOwnProperty("timestampedObjects")) { - if (!Array.isArray(message.timestampedObjects)) - return "timestampedObjects: array expected"; - for (var i = 0; i < message.timestampedObjects.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify(message.timestampedObjects[i]); - if (error) - return "timestampedObjects." + error; - } - } - if (message.attributes != null && message.hasOwnProperty("attributes")) { - if (!Array.isArray(message.attributes)) - return "attributes: array expected"; - for (var i = 0; i < message.attributes.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify(message.attributes[i]); - if (error) - return "attributes." + error; - } - } - if (message.confidence != null && message.hasOwnProperty("confidence")) - if (typeof message.confidence !== "number") - return "confidence: number expected"; + if (message.left != null && message.hasOwnProperty("left")) + if (typeof message.left !== "number") + return "left: number expected"; + if (message.top != null && message.hasOwnProperty("top")) + if (typeof message.top !== "number") + return "top: number expected"; + if (message.right != null && message.hasOwnProperty("right")) + if (typeof message.right !== "number") + return "right: number expected"; + if (message.bottom != null && message.hasOwnProperty("bottom")) + if (typeof message.bottom !== "number") + return "bottom: number expected"; return null; }; /** - * Creates a Track message from a plain object. Also converts values to their respective internal types. + * Creates a NormalizedBoundingBox message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track + * @returns {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} NormalizedBoundingBox */ - Track.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Track) + NormalizedBoundingBox.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.Track(); - if (object.segment != null) { - if (typeof object.segment !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.segment: object expected"); - message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.fromObject(object.segment); - } - if (object.timestampedObjects) { - if (!Array.isArray(object.timestampedObjects)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.timestampedObjects: array expected"); - message.timestampedObjects = []; - for (var i = 0; i < object.timestampedObjects.length; ++i) { - if (typeof object.timestampedObjects[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.timestampedObjects: object expected"); - message.timestampedObjects[i] = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.fromObject(object.timestampedObjects[i]); - } - } - if (object.attributes) { - if (!Array.isArray(object.attributes)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.attributes: array expected"); - message.attributes = []; - for (var i = 0; i < object.attributes.length; ++i) { - if (typeof object.attributes[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.attributes: object expected"); - message.attributes[i] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.fromObject(object.attributes[i]); - } - } - if (object.confidence != null) - message.confidence = Number(object.confidence); + var message = new $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox(); + if (object.left != null) + message.left = Number(object.left); + if (object.top != null) + message.top = Number(object.top); + if (object.right != null) + message.right = Number(object.right); + if (object.bottom != null) + message.bottom = Number(object.bottom); return message; }; /** - * Creates a plain object from a Track message. Also converts values to other types if specified. + * Creates a plain object from a NormalizedBoundingBox message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @static - * @param {google.cloud.videointelligence.v1p3beta1.Track} message Track + * @param {google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} message NormalizedBoundingBox * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Track.toObject = function toObject(message, options) { + NormalizedBoundingBox.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) { - object.timestampedObjects = []; - object.attributes = []; - } if (options.defaults) { - object.segment = null; - object.confidence = 0; - } - if (message.segment != null && message.hasOwnProperty("segment")) - object.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.toObject(message.segment, options); - if (message.timestampedObjects && message.timestampedObjects.length) { - object.timestampedObjects = []; - for (var j = 0; j < message.timestampedObjects.length; ++j) - object.timestampedObjects[j] = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.toObject(message.timestampedObjects[j], options); - } - if (message.attributes && message.attributes.length) { - object.attributes = []; - for (var j = 0; j < message.attributes.length; ++j) - object.attributes[j] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.toObject(message.attributes[j], options); + object.left = 0; + object.top = 0; + object.right = 0; + object.bottom = 0; } - if (message.confidence != null && message.hasOwnProperty("confidence")) - object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + if (message.left != null && message.hasOwnProperty("left")) + object.left = options.json && !isFinite(message.left) ? String(message.left) : message.left; + if (message.top != null && message.hasOwnProperty("top")) + object.top = options.json && !isFinite(message.top) ? String(message.top) : message.top; + if (message.right != null && message.hasOwnProperty("right")) + object.right = options.json && !isFinite(message.right) ? String(message.right) : message.right; + if (message.bottom != null && message.hasOwnProperty("bottom")) + object.bottom = options.json && !isFinite(message.bottom) ? String(message.bottom) : message.bottom; return object; }; /** - * Converts this Track to JSON. + * Converts this NormalizedBoundingBox to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @memberof google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox * @instance * @returns {Object.} JSON object */ - Track.prototype.toJSON = function toJSON() { + NormalizedBoundingBox.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return Track; + return NormalizedBoundingBox; })(); - v1p3beta1.DetectedAttribute = (function() { + v1p3beta1.TimestampedObject = (function() { /** - * Properties of a DetectedAttribute. + * Properties of a TimestampedObject. 
* @memberof google.cloud.videointelligence.v1p3beta1 - * @interface IDetectedAttribute - * @property {string|null} [name] DetectedAttribute name - * @property {number|null} [confidence] DetectedAttribute confidence - * @property {string|null} [value] DetectedAttribute value + * @interface ITimestampedObject + * @property {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox|null} [normalizedBoundingBox] TimestampedObject normalizedBoundingBox + * @property {google.protobuf.IDuration|null} [timeOffset] TimestampedObject timeOffset + * @property {Array.|null} [attributes] TimestampedObject attributes + * @property {Array.|null} [landmarks] TimestampedObject landmarks */ /** - * Constructs a new DetectedAttribute. + * Constructs a new TimestampedObject. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a DetectedAttribute. - * @implements IDetectedAttribute + * @classdesc Represents a TimestampedObject. + * @implements ITimestampedObject * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject=} [properties] Properties to set */ - function DetectedAttribute(properties) { + function TimestampedObject(properties) { + this.attributes = []; + this.landmarks = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -31287,101 +31251,120 @@ } /** - * DetectedAttribute name. - * @member {string} name - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * TimestampedObject normalizedBoundingBox. + * @member {google.cloud.videointelligence.v1p3beta1.INormalizedBoundingBox|null|undefined} normalizedBoundingBox + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @instance */ - DetectedAttribute.prototype.name = ""; + TimestampedObject.prototype.normalizedBoundingBox = null; /** - * DetectedAttribute confidence. - * @member {number} confidence - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * TimestampedObject timeOffset. + * @member {google.protobuf.IDuration|null|undefined} timeOffset + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @instance */ - DetectedAttribute.prototype.confidence = 0; + TimestampedObject.prototype.timeOffset = null; /** - * DetectedAttribute value. - * @member {string} value - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * TimestampedObject attributes. + * @member {Array.} attributes + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @instance */ - DetectedAttribute.prototype.value = ""; + TimestampedObject.prototype.attributes = $util.emptyArray; /** - * Creates a new DetectedAttribute instance using the specified properties. + * TimestampedObject landmarks. + * @member {Array.} landmarks + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject + * @instance + */ + TimestampedObject.prototype.landmarks = $util.emptyArray; + + /** + * Creates a new TimestampedObject instance using the specified properties. 
* @function create - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute instance + * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject instance */ - DetectedAttribute.create = function create(properties) { - return new DetectedAttribute(properties); + TimestampedObject.create = function create(properties) { + return new TimestampedObject(properties); }; /** - * Encodes the specified DetectedAttribute message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify|verify} messages. + * Encodes the specified TimestampedObject message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute} message DetectedAttribute message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject} message TimestampedObject message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DetectedAttribute.encode = function encode(message, writer) { + TimestampedObject.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && message.hasOwnProperty("name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.confidence != null && message.hasOwnProperty("confidence")) - writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); - if (message.value != null && message.hasOwnProperty("value")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.value); + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) + $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.encode(message.normalizedBoundingBox, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + $root.google.protobuf.Duration.encode(message.timeOffset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.attributes != null && message.attributes.length) + for (var i = 0; i < message.attributes.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.landmarks != null && message.landmarks.length) + for (var i = 0; i < message.landmarks.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark.encode(message.landmarks[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified DetectedAttribute message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify|verify} messages. + * Encodes the specified TimestampedObject message, length delimited. 
Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute} message DetectedAttribute message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITimestampedObject} message TimestampedObject message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DetectedAttribute.encodeDelimited = function encodeDelimited(message, writer) { + TimestampedObject.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DetectedAttribute message from the specified reader or buffer. + * Decodes a TimestampedObject message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute + * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DetectedAttribute.decode = function decode(reader, length) { + TimestampedObject.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.name = reader.string(); + message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.decode(reader, reader.uint32()); break; case 2: - message.confidence = reader.float(); + message.timeOffset = $root.google.protobuf.Duration.decode(reader, reader.uint32()); break; case 3: - message.value = reader.string(); + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.landmarks && message.landmarks.length)) + message.landmarks = []; + message.landmarks.push($root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark.decode(reader, reader.uint32())); break; default: reader.skipType(tag & 7); @@ -31392,126 +31375,183 @@ }; /** - * Decodes a DetectedAttribute message from the specified reader or buffer, length delimited. + * Decodes a TimestampedObject message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute + * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DetectedAttribute.decodeDelimited = function decodeDelimited(reader) { + TimestampedObject.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DetectedAttribute message. + * Verifies a TimestampedObject message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DetectedAttribute.verify = function verify(message) { + TimestampedObject.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.confidence != null && message.hasOwnProperty("confidence")) - if (typeof message.confidence !== "number") - return "confidence: number expected"; - if (message.value != null && message.hasOwnProperty("value")) - if (!$util.isString(message.value)) - return "value: string expected"; + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.verify(message.normalizedBoundingBox); + if (error) + return "normalizedBoundingBox." + error; + } + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) { + var error = $root.google.protobuf.Duration.verify(message.timeOffset); + if (error) + return "timeOffset." + error; + } + if (message.attributes != null && message.hasOwnProperty("attributes")) { + if (!Array.isArray(message.attributes)) + return "attributes: array expected"; + for (var i = 0; i < message.attributes.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify(message.attributes[i]); + if (error) + return "attributes." + error; + } + } + if (message.landmarks != null && message.hasOwnProperty("landmarks")) { + if (!Array.isArray(message.landmarks)) + return "landmarks: array expected"; + for (var i = 0; i < message.landmarks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark.verify(message.landmarks[i]); + if (error) + return "landmarks." + error; + } + } return null; }; /** - * Creates a DetectedAttribute message from a plain object. Also converts values to their respective internal types. + * Creates a TimestampedObject message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute + * @returns {google.cloud.videointelligence.v1p3beta1.TimestampedObject} TimestampedObject */ - DetectedAttribute.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute) + TimestampedObject.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute(); - if (object.name != null) - message.name = String(object.name); - if (object.confidence != null) - message.confidence = Number(object.confidence); - if (object.value != null) - message.value = String(object.value); + var message = new $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject(); + if (object.normalizedBoundingBox != null) { + if (typeof object.normalizedBoundingBox !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.normalizedBoundingBox: object expected"); + message.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.fromObject(object.normalizedBoundingBox); + } + if (object.timeOffset != null) { + if (typeof object.timeOffset !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.timeOffset: object expected"); + message.timeOffset = $root.google.protobuf.Duration.fromObject(object.timeOffset); + } + if (object.attributes) { + if (!Array.isArray(object.attributes)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.attributes: array expected"); + message.attributes = []; + for (var i = 0; i < object.attributes.length; ++i) { + if (typeof object.attributes[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.attributes: object expected"); + message.attributes[i] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.fromObject(object.attributes[i]); + } + } + if (object.landmarks) { + if (!Array.isArray(object.landmarks)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.landmarks: array expected"); + message.landmarks = []; + for (var i = 0; i < object.landmarks.length; ++i) { + if (typeof object.landmarks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.TimestampedObject.landmarks: object expected"); + message.landmarks[i] = $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark.fromObject(object.landmarks[i]); + } + } return message; }; /** - * Creates a plain object from a DetectedAttribute message. Also converts values to other types if specified. + * Creates a plain object from a TimestampedObject message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @static - * @param {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} message DetectedAttribute + * @param {google.cloud.videointelligence.v1p3beta1.TimestampedObject} message TimestampedObject * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DetectedAttribute.toObject = function toObject(message, options) { + TimestampedObject.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) { + object.attributes = []; + object.landmarks = []; + } if (options.defaults) { - object.name = ""; - object.confidence = 0; - object.value = ""; + object.normalizedBoundingBox = null; + object.timeOffset = null; + } + if (message.normalizedBoundingBox != null && message.hasOwnProperty("normalizedBoundingBox")) + object.normalizedBoundingBox = $root.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox.toObject(message.normalizedBoundingBox, options); + if (message.timeOffset != null && message.hasOwnProperty("timeOffset")) + object.timeOffset = $root.google.protobuf.Duration.toObject(message.timeOffset, options); + if (message.attributes && message.attributes.length) { + object.attributes = []; + for (var j = 0; j < message.attributes.length; ++j) + object.attributes[j] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.toObject(message.attributes[j], options); + } + if (message.landmarks && message.landmarks.length) { + object.landmarks = []; + for (var j = 0; j < message.landmarks.length; ++j) + object.landmarks[j] = $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark.toObject(message.landmarks[j], options); } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.confidence != null && message.hasOwnProperty("confidence")) - object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; - if (message.value != null && message.hasOwnProperty("value")) - object.value = message.value; return object; }; /** - * Converts this DetectedAttribute to JSON. + * Converts this TimestampedObject to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1.TimestampedObject * @instance * @returns {Object.} JSON object */ - DetectedAttribute.prototype.toJSON = function toJSON() { + TimestampedObject.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return DetectedAttribute; + return TimestampedObject; })(); - v1p3beta1.Celebrity = (function() { + v1p3beta1.Track = (function() { /** - * Properties of a Celebrity. + * Properties of a Track. 
* @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ICelebrity - * @property {string|null} [name] Celebrity name - * @property {string|null} [displayName] Celebrity displayName - * @property {string|null} [description] Celebrity description + * @interface ITrack + * @property {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null} [segment] Track segment + * @property {Array.|null} [timestampedObjects] Track timestampedObjects + * @property {Array.|null} [attributes] Track attributes + * @property {number|null} [confidence] Track confidence */ /** - * Constructs a new Celebrity. + * Constructs a new Track. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a Celebrity. - * @implements ICelebrity + * @classdesc Represents a Track. + * @implements ITrack * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.ITrack=} [properties] Properties to set */ - function Celebrity(properties) { + function Track(properties) { + this.timestampedObjects = []; + this.attributes = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -31519,101 +31559,120 @@ } /** - * Celebrity name. - * @member {string} name - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * Track segment. + * @member {google.cloud.videointelligence.v1p3beta1.IVideoSegment|null|undefined} segment + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @instance */ - Celebrity.prototype.name = ""; + Track.prototype.segment = null; /** - * Celebrity displayName. - * @member {string} displayName - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * Track timestampedObjects. + * @member {Array.} timestampedObjects + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @instance */ - Celebrity.prototype.displayName = ""; + Track.prototype.timestampedObjects = $util.emptyArray; /** - * Celebrity description. - * @member {string} description - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * Track attributes. + * @member {Array.} attributes + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @instance */ - Celebrity.prototype.description = ""; + Track.prototype.attributes = $util.emptyArray; /** - * Creates a new Celebrity instance using the specified properties. + * Track confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.Track + * @instance + */ + Track.prototype.confidence = 0; + + /** + * Creates a new Track instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity instance + * @param {google.cloud.videointelligence.v1p3beta1.ITrack=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track instance */ - Celebrity.create = function create(properties) { - return new Celebrity(properties); + Track.create = function create(properties) { + return new Track(properties); }; /** - * Encodes the specified Celebrity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Celebrity.verify|verify} messages. 
+ * Encodes the specified Track message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Track.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity} message Celebrity message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITrack} message Track message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Celebrity.encode = function encode(message, writer) { + Track.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && message.hasOwnProperty("name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.displayName != null && message.hasOwnProperty("displayName")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.displayName); - if (message.description != null && message.hasOwnProperty("description")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.description); + if (message.segment != null && message.hasOwnProperty("segment")) + $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.encode(message.segment, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.timestampedObjects != null && message.timestampedObjects.length) + for (var i = 0; i < message.timestampedObjects.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.encode(message.timestampedObjects[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.attributes != null && message.attributes.length) + for (var i = 0; i < message.attributes.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.encode(message.attributes[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 4, wireType 5 =*/37).float(message.confidence); return writer; }; /** - * Encodes the specified Celebrity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Celebrity.verify|verify} messages. + * Encodes the specified Track message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Track.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity} message Celebrity message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.ITrack} message Track message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Celebrity.encodeDelimited = function encodeDelimited(message, writer) { + Track.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Celebrity message from the specified reader or buffer. + * Decodes a Track message from the specified reader or buffer. 
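(For orientation, a minimal encode/decode round trip for the new Track message; this is an editor's sketch, not part of the generated diff, and `protos` is an assumed variable holding the root object exported by protos.js.)

    // Editor's illustration only.
    var Track = protos.google.cloud.videointelligence.v1p3beta1.Track;
    var buffer = Track.encode({ confidence: 0.8 }).finish(); // Uint8Array with the encoded fields
    var track = Track.decode(buffer);                        // back to a Track message instance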
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Celebrity.decode = function decode(reader, length) { + Track.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Celebrity(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Track(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.name = reader.string(); + message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.decode(reader, reader.uint32()); break; case 2: - message.displayName = reader.string(); + if (!(message.timestampedObjects && message.timestampedObjects.length)) + message.timestampedObjects = []; + message.timestampedObjects.push($root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.decode(reader, reader.uint32())); break; case 3: - message.description = reader.string(); + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.decode(reader, reader.uint32())); + break; + case 4: + message.confidence = reader.float(); break; default: reader.skipType(tag & 7); @@ -31624,126 +31683,175 @@ }; /** - * Decodes a Celebrity message from the specified reader or buffer, length delimited. + * Decodes a Track message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Celebrity.decodeDelimited = function decodeDelimited(reader) { + Track.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Celebrity message. + * Verifies a Track message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Celebrity.verify = function verify(message) { + Track.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.displayName != null && message.hasOwnProperty("displayName")) - if (!$util.isString(message.displayName)) - return "displayName: string expected"; - if (message.description != null && message.hasOwnProperty("description")) - if (!$util.isString(message.description)) - return "description: string expected"; + if (message.segment != null && message.hasOwnProperty("segment")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.verify(message.segment); + if (error) + return "segment." + error; + } + if (message.timestampedObjects != null && message.hasOwnProperty("timestampedObjects")) { + if (!Array.isArray(message.timestampedObjects)) + return "timestampedObjects: array expected"; + for (var i = 0; i < message.timestampedObjects.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.verify(message.timestampedObjects[i]); + if (error) + return "timestampedObjects." + error; + } + } + if (message.attributes != null && message.hasOwnProperty("attributes")) { + if (!Array.isArray(message.attributes)) + return "attributes: array expected"; + for (var i = 0; i < message.attributes.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify(message.attributes[i]); + if (error) + return "attributes." + error; + } + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; return null; }; /** - * Creates a Celebrity message from a plain object. Also converts values to their respective internal types. + * Creates a Track message from a plain object. Also converts values to their respective internal types. 
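(As a hedged sketch of this conversion, assuming `protos` is the loaded root and using made-up values: nested plain objects are converted recursively through the corresponding fromObject helpers, and scalar values are coerced to their field types.)

    // Editor's illustration only; field values are invented.
    var Track = protos.google.cloud.videointelligence.v1p3beta1.Track;
    var track = Track.fromObject({
        segment: { startTimeOffset: { seconds: 1 }, endTimeOffset: { seconds: 4 } },
        timestampedObjects: [{ timeOffset: { seconds: 2 } }],
        confidence: '0.9' // coerced to the number 0.9
    });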
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + * @returns {google.cloud.videointelligence.v1p3beta1.Track} Track */ - Celebrity.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Celebrity) + Track.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Track) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.Celebrity(); - if (object.name != null) - message.name = String(object.name); - if (object.displayName != null) - message.displayName = String(object.displayName); - if (object.description != null) - message.description = String(object.description); + var message = new $root.google.cloud.videointelligence.v1p3beta1.Track(); + if (object.segment != null) { + if (typeof object.segment !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.segment: object expected"); + message.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.fromObject(object.segment); + } + if (object.timestampedObjects) { + if (!Array.isArray(object.timestampedObjects)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.timestampedObjects: array expected"); + message.timestampedObjects = []; + for (var i = 0; i < object.timestampedObjects.length; ++i) { + if (typeof object.timestampedObjects[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.timestampedObjects: object expected"); + message.timestampedObjects[i] = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.fromObject(object.timestampedObjects[i]); + } + } + if (object.attributes) { + if (!Array.isArray(object.attributes)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.attributes: array expected"); + message.attributes = []; + for (var i = 0; i < object.attributes.length; ++i) { + if (typeof object.attributes[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.Track.attributes: object expected"); + message.attributes[i] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.fromObject(object.attributes[i]); + } + } + if (object.confidence != null) + message.confidence = Number(object.confidence); return message; }; /** - * Creates a plain object from a Celebrity message. Also converts values to other types if specified. + * Creates a plain object from a Track message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @static - * @param {google.cloud.videointelligence.v1p3beta1.Celebrity} message Celebrity + * @param {google.cloud.videointelligence.v1p3beta1.Track} message Track * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Celebrity.toObject = function toObject(message, options) { + Track.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) { + object.timestampedObjects = []; + object.attributes = []; + } if (options.defaults) { - object.name = ""; - object.displayName = ""; - object.description = ""; + object.segment = null; + object.confidence = 0; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.displayName != null && message.hasOwnProperty("displayName")) - object.displayName = message.displayName; - if (message.description != null && message.hasOwnProperty("description")) - object.description = message.description; + if (message.segment != null && message.hasOwnProperty("segment")) + object.segment = $root.google.cloud.videointelligence.v1p3beta1.VideoSegment.toObject(message.segment, options); + if (message.timestampedObjects && message.timestampedObjects.length) { + object.timestampedObjects = []; + for (var j = 0; j < message.timestampedObjects.length; ++j) + object.timestampedObjects[j] = $root.google.cloud.videointelligence.v1p3beta1.TimestampedObject.toObject(message.timestampedObjects[j], options); + } + if (message.attributes && message.attributes.length) { + object.attributes = []; + for (var j = 0; j < message.attributes.length; ++j) + object.attributes[j] = $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute.toObject(message.attributes[j], options); + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; return object; }; /** - * Converts this Celebrity to JSON. + * Converts this Track to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.Track * @instance * @returns {Object.} JSON object */ - Celebrity.prototype.toJSON = function toJSON() { + Track.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return Celebrity; + return Track; })(); - v1p3beta1.CelebrityTrack = (function() { + v1p3beta1.DetectedAttribute = (function() { /** - * Properties of a CelebrityTrack. + * Properties of a DetectedAttribute. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ICelebrityTrack - * @property {Array.|null} [celebrities] CelebrityTrack celebrities - * @property {google.cloud.videointelligence.v1p3beta1.ITrack|null} [faceTrack] CelebrityTrack faceTrack + * @interface IDetectedAttribute + * @property {string|null} [name] DetectedAttribute name + * @property {number|null} [confidence] DetectedAttribute confidence + * @property {string|null} [value] DetectedAttribute value */ /** - * Constructs a new CelebrityTrack. + * Constructs a new DetectedAttribute. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a CelebrityTrack. - * @implements ICelebrityTrack + * @classdesc Represents a DetectedAttribute. 
+ * @implements IDetectedAttribute * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute=} [properties] Properties to set */ - function CelebrityTrack(properties) { - this.celebrities = []; + function DetectedAttribute(properties) { if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -31751,91 +31859,101 @@ } /** - * CelebrityTrack celebrities. - * @member {Array.} celebrities - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * DetectedAttribute name. + * @member {string} name + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @instance */ - CelebrityTrack.prototype.celebrities = $util.emptyArray; + DetectedAttribute.prototype.name = ""; /** - * CelebrityTrack faceTrack. - * @member {google.cloud.videointelligence.v1p3beta1.ITrack|null|undefined} faceTrack - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * DetectedAttribute confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @instance */ - CelebrityTrack.prototype.faceTrack = null; + DetectedAttribute.prototype.confidence = 0; /** - * Creates a new CelebrityTrack instance using the specified properties. + * DetectedAttribute value. + * @member {string} value + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute + * @instance + */ + DetectedAttribute.prototype.value = ""; + + /** + * Creates a new DetectedAttribute instance using the specified properties. * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack instance + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute instance */ - CelebrityTrack.create = function create(properties) { - return new CelebrityTrack(properties); + DetectedAttribute.create = function create(properties) { + return new DetectedAttribute(properties); }; /** - * Encodes the specified CelebrityTrack message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify|verify} messages. + * Encodes the specified DetectedAttribute message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify|verify} messages. 
* @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack} message CelebrityTrack message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute} message DetectedAttribute message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CelebrityTrack.encode = function encode(message, writer) { + DetectedAttribute.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.celebrities != null && message.celebrities.length) - for (var i = 0; i < message.celebrities.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.encode(message.celebrities[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) - $root.google.cloud.videointelligence.v1p3beta1.Track.encode(message.faceTrack, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.name != null && message.hasOwnProperty("name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); + if (message.value != null && message.hasOwnProperty("value")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.value); return writer; }; /** - * Encodes the specified CelebrityTrack message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify|verify} messages. + * Encodes the specified DetectedAttribute message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute.verify|verify} messages. * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack} message CelebrityTrack message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedAttribute} message DetectedAttribute message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CelebrityTrack.encodeDelimited = function encodeDelimited(message, writer) { + DetectedAttribute.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CelebrityTrack message from the specified reader or buffer. + * Decodes a DetectedAttribute message from the specified reader or buffer. 
* @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CelebrityTrack.decode = function decode(reader, length) { + DetectedAttribute.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - if (!(message.celebrities && message.celebrities.length)) - message.celebrities = []; - message.celebrities.push($root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.decode(reader, reader.uint32())); + message.name = reader.string(); + break; + case 2: + message.confidence = reader.float(); break; case 3: - message.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.decode(reader, reader.uint32()); + message.value = reader.string(); break; default: reader.skipType(tag & 7); @@ -31846,162 +31964,616 @@ }; /** - * Decodes a CelebrityTrack message from the specified reader or buffer, length delimited. + * Decodes a DetectedAttribute message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CelebrityTrack.decodeDelimited = function decodeDelimited(reader) { + DetectedAttribute.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CelebrityTrack message. + * Verifies a DetectedAttribute message. 
* @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CelebrityTrack.verify = function verify(message) { + DetectedAttribute.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.celebrities != null && message.hasOwnProperty("celebrities")) { - if (!Array.isArray(message.celebrities)) - return "celebrities: array expected"; - for (var i = 0; i < message.celebrities.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify(message.celebrities[i]); - if (error) - return "celebrities." + error; - } - } - if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.Track.verify(message.faceTrack); - if (error) - return "faceTrack." + error; - } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + if (message.value != null && message.hasOwnProperty("value")) + if (!$util.isString(message.value)) + return "value: string expected"; return null; }; /** - * Creates a CelebrityTrack message from a plain object. Also converts values to their respective internal types. + * Creates a DetectedAttribute message from a plain object. Also converts values to their respective internal types. 
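(A short sketch of the coercion this performs, for orientation only; the attribute values here are invented.)

    // Editor's illustration: fromObject normalizes strings and numbers to the declared field types.
    var DetectedAttribute = protos.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
    var attr = DetectedAttribute.fromObject({ name: 'glasses', confidence: '0.92', value: 'true' });
    // attr.name === 'glasses', attr.confidence === 0.92 (number), attr.value === 'true' (string)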
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} DetectedAttribute */ - CelebrityTrack.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack) + DetectedAttribute.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack(); - if (object.celebrities) { - if (!Array.isArray(object.celebrities)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.celebrities: array expected"); - message.celebrities = []; - for (var i = 0; i < object.celebrities.length; ++i) { - if (typeof object.celebrities[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.celebrities: object expected"); - message.celebrities[i] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.fromObject(object.celebrities[i]); - } - } - if (object.faceTrack != null) { - if (typeof object.faceTrack !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.faceTrack: object expected"); - message.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.fromObject(object.faceTrack); - } + var message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedAttribute(); + if (object.name != null) + message.name = String(object.name); + if (object.confidence != null) + message.confidence = Number(object.confidence); + if (object.value != null) + message.value = String(object.value); return message; }; /** - * Creates a plain object from a CelebrityTrack message. Also converts values to other types if specified. + * Creates a plain object from a DetectedAttribute message. Also converts values to other types if specified. 
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @static - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} message CelebrityTrack + * @param {google.cloud.videointelligence.v1p3beta1.DetectedAttribute} message DetectedAttribute * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CelebrityTrack.toObject = function toObject(message, options) { + DetectedAttribute.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; - if (options.arrays || options.defaults) - object.celebrities = []; - if (options.defaults) - object.faceTrack = null; - if (message.celebrities && message.celebrities.length) { - object.celebrities = []; - for (var j = 0; j < message.celebrities.length; ++j) - object.celebrities[j] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.toObject(message.celebrities[j], options); + if (options.defaults) { + object.name = ""; + object.confidence = 0; + object.value = ""; } - if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) - object.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.toObject(message.faceTrack, options); + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + if (message.value != null && message.hasOwnProperty("value")) + object.value = message.value; return object; }; /** - * Converts this CelebrityTrack to JSON. + * Converts this DetectedAttribute to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedAttribute * @instance * @returns {Object.} JSON object */ - CelebrityTrack.prototype.toJSON = function toJSON() { + DetectedAttribute.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - CelebrityTrack.RecognizedCelebrity = (function() { - - /** - * Properties of a RecognizedCelebrity. - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack - * @interface IRecognizedCelebrity - * @property {google.cloud.videointelligence.v1p3beta1.ICelebrity|null} [celebrity] RecognizedCelebrity celebrity - * @property {number|null} [confidence] RecognizedCelebrity confidence - */ + return DetectedAttribute; + })(); - /** - * Constructs a new RecognizedCelebrity. - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack - * @classdesc Represents a RecognizedCelebrity. - * @implements IRecognizedCelebrity - * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity=} [properties] Properties to set - */ - function RecognizedCelebrity(properties) { - if (properties) - for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + v1p3beta1.Celebrity = (function() { - /** - * RecognizedCelebrity celebrity. 
- * @member {google.cloud.videointelligence.v1p3beta1.ICelebrity|null|undefined} celebrity - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @instance - */ - RecognizedCelebrity.prototype.celebrity = null; + /** + * Properties of a Celebrity. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @interface ICelebrity + * @property {string|null} [name] Celebrity name + * @property {string|null} [displayName] Celebrity displayName + * @property {string|null} [description] Celebrity description + */ - /** - * RecognizedCelebrity confidence. - * @member {number} confidence - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @instance - */ - RecognizedCelebrity.prototype.confidence = 0; + /** + * Constructs a new Celebrity. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @classdesc Represents a Celebrity. + * @implements ICelebrity + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity=} [properties] Properties to set + */ + function Celebrity(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Creates a new RecognizedCelebrity instance using the specified properties. - * @function create - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + /** + * Celebrity name. + * @member {string} name + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @instance + */ + Celebrity.prototype.name = ""; + + /** + * Celebrity displayName. + * @member {string} displayName + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @instance + */ + Celebrity.prototype.displayName = ""; + + /** + * Celebrity description. + * @member {string} description + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @instance + */ + Celebrity.prototype.description = ""; + + /** + * Creates a new Celebrity instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity instance + */ + Celebrity.create = function create(properties) { + return new Celebrity(properties); + }; + + /** + * Encodes the specified Celebrity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Celebrity.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity} message Celebrity message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Celebrity.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && message.hasOwnProperty("name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.displayName != null && message.hasOwnProperty("displayName")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.displayName); + if (message.description != null && message.hasOwnProperty("description")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.description); + return writer; + }; + + /** + * Encodes the specified Celebrity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.Celebrity.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrity} message Celebrity message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Celebrity.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Celebrity message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Celebrity.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.Celebrity(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.displayName = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Celebrity message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Celebrity.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Celebrity message. 
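(A brief editor's sketch of the verify contract, with invented values: verify returns null for a valid plain object and a reason string otherwise; `protos` is an assumed variable holding the loaded root.)

    var Celebrity = protos.google.cloud.videointelligence.v1p3beta1.Celebrity;
    Celebrity.verify({ name: 'celebrities/abc', displayName: 'Jane Doe' }); // null (valid)
    Celebrity.verify({ name: 42 });                                         // "name: string expected"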
+ * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Celebrity.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.displayName != null && message.hasOwnProperty("displayName")) + if (!$util.isString(message.displayName)) + return "displayName: string expected"; + if (message.description != null && message.hasOwnProperty("description")) + if (!$util.isString(message.description)) + return "description: string expected"; + return null; + }; + + /** + * Creates a Celebrity message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.Celebrity} Celebrity + */ + Celebrity.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.Celebrity) + return object; + var message = new $root.google.cloud.videointelligence.v1p3beta1.Celebrity(); + if (object.name != null) + message.name = String(object.name); + if (object.displayName != null) + message.displayName = String(object.displayName); + if (object.description != null) + message.description = String(object.description); + return message; + }; + + /** + * Creates a plain object from a Celebrity message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.Celebrity} message Celebrity + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Celebrity.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.name = ""; + object.displayName = ""; + object.description = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.displayName != null && message.hasOwnProperty("displayName")) + object.displayName = message.displayName; + if (message.description != null && message.hasOwnProperty("description")) + object.description = message.description; + return object; + }; + + /** + * Converts this Celebrity to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.Celebrity + * @instance + * @returns {Object.} JSON object + */ + Celebrity.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return Celebrity; + })(); + + v1p3beta1.CelebrityTrack = (function() { + + /** + * Properties of a CelebrityTrack. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @interface ICelebrityTrack + * @property {Array.|null} [celebrities] CelebrityTrack celebrities + * @property {google.cloud.videointelligence.v1p3beta1.ITrack|null} [faceTrack] CelebrityTrack faceTrack + */ + + /** + * Constructs a new CelebrityTrack. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @classdesc Represents a CelebrityTrack. 
+ * @implements ICelebrityTrack + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack=} [properties] Properties to set + */ + function CelebrityTrack(properties) { + this.celebrities = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CelebrityTrack celebrities. + * @member {Array.} celebrities + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @instance + */ + CelebrityTrack.prototype.celebrities = $util.emptyArray; + + /** + * CelebrityTrack faceTrack. + * @member {google.cloud.videointelligence.v1p3beta1.ITrack|null|undefined} faceTrack + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @instance + */ + CelebrityTrack.prototype.faceTrack = null; + + /** + * Creates a new CelebrityTrack instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack instance + */ + CelebrityTrack.create = function create(properties) { + return new CelebrityTrack(properties); + }; + + /** + * Encodes the specified CelebrityTrack message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack} message CelebrityTrack message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CelebrityTrack.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.celebrities != null && message.celebrities.length) + for (var i = 0; i < message.celebrities.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.encode(message.celebrities[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) + $root.google.cloud.videointelligence.v1p3beta1.Track.encode(message.faceTrack, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CelebrityTrack message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityTrack} message CelebrityTrack message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CelebrityTrack.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CelebrityTrack message from the specified reader or buffer. 
+ * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CelebrityTrack.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.celebrities && message.celebrities.length)) + message.celebrities = []; + message.celebrities.push($root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.decode(reader, reader.uint32())); + break; + case 3: + message.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CelebrityTrack message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CelebrityTrack.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CelebrityTrack message. + * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CelebrityTrack.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.celebrities != null && message.hasOwnProperty("celebrities")) { + if (!Array.isArray(message.celebrities)) + return "celebrities: array expected"; + for (var i = 0; i < message.celebrities.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify(message.celebrities[i]); + if (error) + return "celebrities." + error; + } + } + if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Track.verify(message.faceTrack); + if (error) + return "faceTrack." + error; + } + return null; + }; + + /** + * Creates a CelebrityTrack message from a plain object. Also converts values to their respective internal types. 
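(Hedged sketch with invented values: nested message fields are converted through their own fromObject helpers, and a non-object value for a message field throws a TypeError.)

    var CelebrityTrack = protos.google.cloud.videointelligence.v1p3beta1.CelebrityTrack;
    var track = CelebrityTrack.fromObject({
        celebrities: [{ celebrity: { displayName: 'Jane Doe' }, confidence: 0.95 }],
        faceTrack: { confidence: 0.9 }
    });
    // CelebrityTrack.fromObject({ faceTrack: 'oops' }) would throw:
    // TypeError: .google.cloud.videointelligence.v1p3beta1.CelebrityTrack.faceTrack: object expected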
+ * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} CelebrityTrack + */ + CelebrityTrack.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack) + return object; + var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack(); + if (object.celebrities) { + if (!Array.isArray(object.celebrities)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.celebrities: array expected"); + message.celebrities = []; + for (var i = 0; i < object.celebrities.length; ++i) { + if (typeof object.celebrities[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.celebrities: object expected"); + message.celebrities[i] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.fromObject(object.celebrities[i]); + } + } + if (object.faceTrack != null) { + if (typeof object.faceTrack !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.faceTrack: object expected"); + message.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.fromObject(object.faceTrack); + } + return message; + }; + + /** + * Creates a plain object from a CelebrityTrack message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @static + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack} message CelebrityTrack + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CelebrityTrack.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.celebrities = []; + if (options.defaults) + object.faceTrack = null; + if (message.celebrities && message.celebrities.length) { + object.celebrities = []; + for (var j = 0; j < message.celebrities.length; ++j) + object.celebrities[j] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.toObject(message.celebrities[j], options); + } + if (message.faceTrack != null && message.hasOwnProperty("faceTrack")) + object.faceTrack = $root.google.cloud.videointelligence.v1p3beta1.Track.toObject(message.faceTrack, options); + return object; + }; + + /** + * Converts this CelebrityTrack to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @instance + * @returns {Object.} JSON object + */ + CelebrityTrack.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + CelebrityTrack.RecognizedCelebrity = (function() { + + /** + * Properties of a RecognizedCelebrity. + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @interface IRecognizedCelebrity + * @property {google.cloud.videointelligence.v1p3beta1.ICelebrity|null} [celebrity] RecognizedCelebrity celebrity + * @property {number|null} [confidence] RecognizedCelebrity confidence + */ + + /** + * Constructs a new RecognizedCelebrity. + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack + * @classdesc Represents a RecognizedCelebrity. 
+ * @implements IRecognizedCelebrity + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity=} [properties] Properties to set + */ + function RecognizedCelebrity(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * RecognizedCelebrity celebrity. + * @member {google.cloud.videointelligence.v1p3beta1.ICelebrity|null|undefined} celebrity + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @instance + */ + RecognizedCelebrity.prototype.celebrity = null; + + /** + * RecognizedCelebrity confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @instance + */ + RecognizedCelebrity.prototype.confidence = 0; + + /** + * Creates a new RecognizedCelebrity instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity * @static * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity=} [properties] Properties to set * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity instance @@ -32010,190 +32582,875 @@ return new RecognizedCelebrity(properties); }; - /** - * Encodes the specified RecognizedCelebrity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify|verify} messages. - * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity} message RecognizedCelebrity message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - RecognizedCelebrity.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.celebrity != null && message.hasOwnProperty("celebrity")) - $root.google.cloud.videointelligence.v1p3beta1.Celebrity.encode(message.celebrity, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.confidence != null && message.hasOwnProperty("confidence")) - writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); - return writer; - }; + /** + * Encodes the specified RecognizedCelebrity message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity} message RecognizedCelebrity message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RecognizedCelebrity.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.celebrity != null && message.hasOwnProperty("celebrity")) + $root.google.cloud.videointelligence.v1p3beta1.Celebrity.encode(message.celebrity, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 2, wireType 5 =*/21).float(message.confidence); + return writer; + }; + + /** + * Encodes the specified RecognizedCelebrity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity} message RecognizedCelebrity message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RecognizedCelebrity.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a RecognizedCelebrity message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RecognizedCelebrity.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.decode(reader, reader.uint32()); + break; + case 2: + message.confidence = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a RecognizedCelebrity message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RecognizedCelebrity.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a RecognizedCelebrity message. + * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + RecognizedCelebrity.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.celebrity != null && message.hasOwnProperty("celebrity")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.verify(message.celebrity); + if (error) + return "celebrity." + error; + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + return null; + }; + + /** + * Creates a RecognizedCelebrity message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity + */ + RecognizedCelebrity.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity) + return object; + var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity(); + if (object.celebrity != null) { + if (typeof object.celebrity !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.celebrity: object expected"); + message.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.fromObject(object.celebrity); + } + if (object.confidence != null) + message.confidence = Number(object.confidence); + return message; + }; + + /** + * Creates a plain object from a RecognizedCelebrity message. Also converts values to other types if specified. 
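(For orientation, an editor's sketch of how the conversion options behave here; values are illustrative.)

    var RecognizedCelebrity = protos.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity;
    var msg = RecognizedCelebrity.fromObject({ confidence: Infinity });
    RecognizedCelebrity.toObject(msg, { json: true });     // { confidence: "Infinity" } (non-finite floats become strings in JSON mode)
    RecognizedCelebrity.toObject(msg, { defaults: true }); // { celebrity: null, confidence: Infinity }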
+ * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @static + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} message RecognizedCelebrity + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + RecognizedCelebrity.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.celebrity = null; + object.confidence = 0; + } + if (message.celebrity != null && message.hasOwnProperty("celebrity")) + object.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.toObject(message.celebrity, options); + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + return object; + }; + + /** + * Converts this RecognizedCelebrity to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity + * @instance + * @returns {Object.} JSON object + */ + RecognizedCelebrity.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return RecognizedCelebrity; + })(); + + return CelebrityTrack; + })(); + + v1p3beta1.CelebrityRecognitionAnnotation = (function() { + + /** + * Properties of a CelebrityRecognitionAnnotation. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @interface ICelebrityRecognitionAnnotation + * @property {Array.|null} [celebrityTracks] CelebrityRecognitionAnnotation celebrityTracks + */ + + /** + * Constructs a new CelebrityRecognitionAnnotation. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @classdesc Represents a CelebrityRecognitionAnnotation. + * @implements ICelebrityRecognitionAnnotation + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation=} [properties] Properties to set + */ + function CelebrityRecognitionAnnotation(properties) { + this.celebrityTracks = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CelebrityRecognitionAnnotation celebrityTracks. + * @member {Array.} celebrityTracks + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @instance + */ + CelebrityRecognitionAnnotation.prototype.celebrityTracks = $util.emptyArray; + + /** + * Creates a new CelebrityRecognitionAnnotation instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation instance + */ + CelebrityRecognitionAnnotation.create = function create(properties) { + return new CelebrityRecognitionAnnotation(properties); + }; + + /** + * Encodes the specified CelebrityRecognitionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.verify|verify} messages. 
+ * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CelebrityRecognitionAnnotation.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.celebrityTracks != null && message.celebrityTracks.length) + for (var i = 0; i < message.celebrityTracks.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.encode(message.celebrityTracks[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CelebrityRecognitionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CelebrityRecognitionAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CelebrityRecognitionAnnotation message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CelebrityRecognitionAnnotation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.celebrityTracks && message.celebrityTracks.length)) + message.celebrityTracks = []; + message.celebrityTracks.push($root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CelebrityRecognitionAnnotation message from the specified reader or buffer, length delimited. 
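// A minimal round-trip sketch for the generated helpers above, assuming the
// generated $root namespace from this file is in scope and using protobuf.js's
// Writer.finish() to obtain the encoded bytes; the empty celebrityTracks value
// is only illustrative:
var CelebrityRecognitionAnnotation =
    $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation;
var annotation = CelebrityRecognitionAnnotation.create({ celebrityTracks: [] });
var bytes = CelebrityRecognitionAnnotation.encode(annotation).finish(); // Uint8Array of wire bytes
var decoded = CelebrityRecognitionAnnotation.decode(bytes);             // repeated field 1 restored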
+ * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CelebrityRecognitionAnnotation.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CelebrityRecognitionAnnotation message. + * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CelebrityRecognitionAnnotation.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.celebrityTracks != null && message.hasOwnProperty("celebrityTracks")) { + if (!Array.isArray(message.celebrityTracks)) + return "celebrityTracks: array expected"; + for (var i = 0; i < message.celebrityTracks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify(message.celebrityTracks[i]); + if (error) + return "celebrityTracks." + error; + } + } + return null; + }; + + /** + * Creates a CelebrityRecognitionAnnotation message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + */ + CelebrityRecognitionAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation) + return object; + var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation(); + if (object.celebrityTracks) { + if (!Array.isArray(object.celebrityTracks)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.celebrityTracks: array expected"); + message.celebrityTracks = []; + for (var i = 0; i < object.celebrityTracks.length; ++i) { + if (typeof object.celebrityTracks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.celebrityTracks: object expected"); + message.celebrityTracks[i] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.fromObject(object.celebrityTracks[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a CelebrityRecognitionAnnotation message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CelebrityRecognitionAnnotation.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.celebrityTracks = []; + if (message.celebrityTracks && message.celebrityTracks.length) { + object.celebrityTracks = []; + for (var j = 0; j < message.celebrityTracks.length; ++j) + object.celebrityTracks[j] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.toObject(message.celebrityTracks[j], options); + } + return object; + }; + + /** + * Converts this CelebrityRecognitionAnnotation to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @instance + * @returns {Object.} JSON object + */ + CelebrityRecognitionAnnotation.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + return CelebrityRecognitionAnnotation; + })(); + + v1p3beta1.DetectedLandmark = (function() { + + /** + * Properties of a DetectedLandmark. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @interface IDetectedLandmark + * @property {string|null} [name] DetectedLandmark name + * @property {google.cloud.videointelligence.v1p3beta1.INormalizedVertex|null} [point] DetectedLandmark point + * @property {number|null} [confidence] DetectedLandmark confidence + */ + + /** + * Constructs a new DetectedLandmark. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @classdesc Represents a DetectedLandmark. + * @implements IDetectedLandmark + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedLandmark=} [properties] Properties to set + */ + function DetectedLandmark(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DetectedLandmark name. + * @member {string} name + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.name = ""; + + /** + * DetectedLandmark point. + * @member {google.cloud.videointelligence.v1p3beta1.INormalizedVertex|null|undefined} point + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.point = null; + + /** + * DetectedLandmark confidence. + * @member {number} confidence + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @instance + */ + DetectedLandmark.prototype.confidence = 0; + + /** + * Creates a new DetectedLandmark instance using the specified properties. + * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedLandmark=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedLandmark} DetectedLandmark instance + */ + DetectedLandmark.create = function create(properties) { + return new DetectedLandmark(properties); + }; + + /** + * Encodes the specified DetectedLandmark message. 
Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedLandmark} message DetectedLandmark message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedLandmark.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && message.hasOwnProperty("name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.point != null && message.hasOwnProperty("point")) + $root.google.cloud.videointelligence.v1p3beta1.NormalizedVertex.encode(message.point, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.confidence != null && message.hasOwnProperty("confidence")) + writer.uint32(/* id 3, wireType 5 =*/29).float(message.confidence); + return writer; + }; + + /** + * Encodes the specified DetectedLandmark message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IDetectedLandmark} message DetectedLandmark message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DetectedLandmark.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedLandmark} DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedLandmark.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.point = $root.google.cloud.videointelligence.v1p3beta1.NormalizedVertex.decode(reader, reader.uint32()); + break; + case 3: + message.confidence = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DetectedLandmark message from the specified reader or buffer, length delimited. 
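// Sketch of how the DetectedLandmark helpers above fit together: verify()
// returns null for a well-formed plain object, and encode() then writes
// name (field 1), point (field 2) and confidence (field 3) as shown above.
// The "left_eye" name and the x/y vertex values are illustrative assumptions,
// not values taken from this patch:
var DetectedLandmark = $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
var landmark = { name: "left_eye", point: { x: 0.25, y: 0.5 }, confidence: 0.9 };
if (DetectedLandmark.verify(landmark) === null)
    var encodedLandmark = DetectedLandmark.encode(landmark).finish();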
+ * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedLandmark} DetectedLandmark + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DetectedLandmark.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DetectedLandmark message. + * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DetectedLandmark.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.point != null && message.hasOwnProperty("point")) { + var error = $root.google.cloud.videointelligence.v1p3beta1.NormalizedVertex.verify(message.point); + if (error) + return "point." + error; + } + if (message.confidence != null && message.hasOwnProperty("confidence")) + if (typeof message.confidence !== "number") + return "confidence: number expected"; + return null; + }; + + /** + * Creates a DetectedLandmark message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.DetectedLandmark} DetectedLandmark + */ + DetectedLandmark.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark) + return object; + var message = new $root.google.cloud.videointelligence.v1p3beta1.DetectedLandmark(); + if (object.name != null) + message.name = String(object.name); + if (object.point != null) { + if (typeof object.point !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.DetectedLandmark.point: object expected"); + message.point = $root.google.cloud.videointelligence.v1p3beta1.NormalizedVertex.fromObject(object.point); + } + if (object.confidence != null) + message.confidence = Number(object.confidence); + return message; + }; - /** - * Encodes the specified RecognizedCelebrity message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.verify|verify} messages. - * @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.IRecognizedCelebrity} message RecognizedCelebrity message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - RecognizedCelebrity.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Creates a plain object from a DetectedLandmark message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @static + * @param {google.cloud.videointelligence.v1p3beta1.DetectedLandmark} message DetectedLandmark + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DetectedLandmark.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.defaults) { + object.name = ""; + object.point = null; + object.confidence = 0; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.point != null && message.hasOwnProperty("point")) + object.point = $root.google.cloud.videointelligence.v1p3beta1.NormalizedVertex.toObject(message.point, options); + if (message.confidence != null && message.hasOwnProperty("confidence")) + object.confidence = options.json && !isFinite(message.confidence) ? String(message.confidence) : message.confidence; + return object; + }; - /** - * Decodes a RecognizedCelebrity message from the specified reader or buffer. - * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - RecognizedCelebrity.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity(); - while (reader.pos < end) { - var tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.decode(reader, reader.uint32()); - break; - case 2: - message.confidence = reader.float(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + /** + * Converts this DetectedLandmark to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.DetectedLandmark + * @instance + * @returns {Object.} JSON object + */ + DetectedLandmark.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Decodes a RecognizedCelebrity message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - RecognizedCelebrity.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + return DetectedLandmark; + })(); - /** - * Verifies a RecognizedCelebrity message. 
- * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - RecognizedCelebrity.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.celebrity != null && message.hasOwnProperty("celebrity")) { - var error = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.verify(message.celebrity); - if (error) - return "celebrity." + error; - } - if (message.confidence != null && message.hasOwnProperty("confidence")) - if (typeof message.confidence !== "number") - return "confidence: number expected"; - return null; - }; + v1p3beta1.FaceDetectionAnnotation = (function() { - /** - * Creates a RecognizedCelebrity message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} RecognizedCelebrity - */ - RecognizedCelebrity.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity) - return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity(); - if (object.celebrity != null) { - if (typeof object.celebrity !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity.celebrity: object expected"); - message.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.fromObject(object.celebrity); + /** + * Properties of a FaceDetectionAnnotation. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @interface IFaceDetectionAnnotation + * @property {Array.|null} [tracks] FaceDetectionAnnotation tracks + * @property {Uint8Array|null} [thumbnail] FaceDetectionAnnotation thumbnail + */ + + /** + * Constructs a new FaceDetectionAnnotation. + * @memberof google.cloud.videointelligence.v1p3beta1 + * @classdesc Represents a FaceDetectionAnnotation. + * @implements IFaceDetectionAnnotation + * @constructor + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation=} [properties] Properties to set + */ + function FaceDetectionAnnotation(properties) { + this.tracks = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * FaceDetectionAnnotation tracks. + * @member {Array.} tracks + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @instance + */ + FaceDetectionAnnotation.prototype.tracks = $util.emptyArray; + + /** + * FaceDetectionAnnotation thumbnail. + * @member {Uint8Array} thumbnail + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @instance + */ + FaceDetectionAnnotation.prototype.thumbnail = $util.newBuffer([]); + + /** + * Creates a new FaceDetectionAnnotation instance using the specified properties. 
+ * @function create + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} FaceDetectionAnnotation instance + */ + FaceDetectionAnnotation.create = function create(properties) { + return new FaceDetectionAnnotation(properties); + }; + + /** + * Encodes the specified FaceDetectionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.verify|verify} messages. + * @function encode + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation} message FaceDetectionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FaceDetectionAnnotation.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tracks != null && message.tracks.length) + for (var i = 0; i < message.tracks.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.Track.encode(message.tracks[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.thumbnail != null && message.hasOwnProperty("thumbnail")) + writer.uint32(/* id 4, wireType 2 =*/34).bytes(message.thumbnail); + return writer; + }; + + /** + * Encodes the specified FaceDetectionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.verify|verify} messages. + * @function encodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.IFaceDetectionAnnotation} message FaceDetectionAnnotation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FaceDetectionAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a FaceDetectionAnnotation message from the specified reader or buffer. + * @function decode + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} FaceDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FaceDetectionAnnotation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + if (!(message.tracks && message.tracks.length)) + message.tracks = []; + message.tracks.push($root.google.cloud.videointelligence.v1p3beta1.Track.decode(reader, reader.uint32())); + break; + case 4: + message.thumbnail = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; } - if (object.confidence != null) - message.confidence = Number(object.confidence); - return message; - }; + } + return message; + }; + + /** + * Decodes a FaceDetectionAnnotation message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} FaceDetectionAnnotation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FaceDetectionAnnotation.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Creates a plain object from a RecognizedCelebrity message. Also converts values to other types if specified. - * @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @static - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity} message RecognizedCelebrity - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - RecognizedCelebrity.toObject = function toObject(message, options) { - if (!options) - options = {}; - var object = {}; - if (options.defaults) { - object.celebrity = null; - object.confidence = 0; + /** + * Verifies a FaceDetectionAnnotation message. + * @function verify + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + FaceDetectionAnnotation.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tracks != null && message.hasOwnProperty("tracks")) { + if (!Array.isArray(message.tracks)) + return "tracks: array expected"; + for (var i = 0; i < message.tracks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Track.verify(message.tracks[i]); + if (error) + return "tracks." + error; } - if (message.celebrity != null && message.hasOwnProperty("celebrity")) - object.celebrity = $root.google.cloud.videointelligence.v1p3beta1.Celebrity.toObject(message.celebrity, options); - if (message.confidence != null && message.hasOwnProperty("confidence")) - object.confidence = options.json && !isFinite(message.confidence) ? 
String(message.confidence) : message.confidence; + } + if (message.thumbnail != null && message.hasOwnProperty("thumbnail")) + if (!(message.thumbnail && typeof message.thumbnail.length === "number" || $util.isString(message.thumbnail))) + return "thumbnail: buffer expected"; + return null; + }; + + /** + * Creates a FaceDetectionAnnotation message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {Object.} object Plain object + * @returns {google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} FaceDetectionAnnotation + */ + FaceDetectionAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation) return object; - }; + var message = new $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation(); + if (object.tracks) { + if (!Array.isArray(object.tracks)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.tracks: array expected"); + message.tracks = []; + for (var i = 0; i < object.tracks.length; ++i) { + if (typeof object.tracks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.tracks: object expected"); + message.tracks[i] = $root.google.cloud.videointelligence.v1p3beta1.Track.fromObject(object.tracks[i]); + } + } + if (object.thumbnail != null) + if (typeof object.thumbnail === "string") + $util.base64.decode(object.thumbnail, message.thumbnail = $util.newBuffer($util.base64.length(object.thumbnail)), 0); + else if (object.thumbnail.length) + message.thumbnail = object.thumbnail; + return message; + }; - /** - * Converts this RecognizedCelebrity to JSON. - * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity - * @instance - * @returns {Object.} JSON object - */ - RecognizedCelebrity.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Creates a plain object from a FaceDetectionAnnotation message. Also converts values to other types if specified. + * @function toObject + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @static + * @param {google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} message FaceDetectionAnnotation + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + FaceDetectionAnnotation.toObject = function toObject(message, options) { + if (!options) + options = {}; + var object = {}; + if (options.arrays || options.defaults) + object.tracks = []; + if (options.defaults) + if (options.bytes === String) + object.thumbnail = ""; + else { + object.thumbnail = []; + if (options.bytes !== Array) + object.thumbnail = $util.newBuffer(object.thumbnail); + } + if (message.tracks && message.tracks.length) { + object.tracks = []; + for (var j = 0; j < message.tracks.length; ++j) + object.tracks[j] = $root.google.cloud.videointelligence.v1p3beta1.Track.toObject(message.tracks[j], options); + } + if (message.thumbnail != null && message.hasOwnProperty("thumbnail")) + object.thumbnail = options.bytes === String ? $util.base64.encode(message.thumbnail, 0, message.thumbnail.length) : options.bytes === Array ? 
Array.prototype.slice.call(message.thumbnail) : message.thumbnail; + return object; + }; - return RecognizedCelebrity; - })(); + /** + * Converts this FaceDetectionAnnotation to JSON. + * @function toJSON + * @memberof google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation + * @instance + * @returns {Object.} JSON object + */ + FaceDetectionAnnotation.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - return CelebrityTrack; + return FaceDetectionAnnotation; })(); - v1p3beta1.CelebrityRecognitionAnnotation = (function() { + v1p3beta1.PersonDetectionAnnotation = (function() { /** - * Properties of a CelebrityRecognitionAnnotation. + * Properties of a PersonDetectionAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @interface ICelebrityRecognitionAnnotation - * @property {Array.|null} [celebrityTracks] CelebrityRecognitionAnnotation celebrityTracks + * @interface IPersonDetectionAnnotation + * @property {Array.|null} [tracks] PersonDetectionAnnotation tracks */ /** - * Constructs a new CelebrityRecognitionAnnotation. + * Constructs a new PersonDetectionAnnotation. * @memberof google.cloud.videointelligence.v1p3beta1 - * @classdesc Represents a CelebrityRecognitionAnnotation. - * @implements ICelebrityRecognitionAnnotation + * @classdesc Represents a PersonDetectionAnnotation. + * @implements IPersonDetectionAnnotation * @constructor - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation=} [properties] Properties to set + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation=} [properties] Properties to set */ - function CelebrityRecognitionAnnotation(properties) { - this.celebrityTracks = []; + function PersonDetectionAnnotation(properties) { + this.tracks = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -32201,78 +33458,78 @@ } /** - * CelebrityRecognitionAnnotation celebrityTracks. - * @member {Array.} celebrityTracks - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * PersonDetectionAnnotation tracks. + * @member {Array.} tracks + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @instance */ - CelebrityRecognitionAnnotation.prototype.celebrityTracks = $util.emptyArray; + PersonDetectionAnnotation.prototype.tracks = $util.emptyArray; /** - * Creates a new CelebrityRecognitionAnnotation instance using the specified properties. + * Creates a new PersonDetectionAnnotation instance using the specified properties. 
* @function create - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation=} [properties] Properties to set - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation instance + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation=} [properties] Properties to set + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} PersonDetectionAnnotation instance */ - CelebrityRecognitionAnnotation.create = function create(properties) { - return new CelebrityRecognitionAnnotation(properties); + PersonDetectionAnnotation.create = function create(properties) { + return new PersonDetectionAnnotation(properties); }; /** - * Encodes the specified CelebrityRecognitionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.verify|verify} messages. + * Encodes the specified PersonDetectionAnnotation message. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.verify|verify} messages. * @function encode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation} message PersonDetectionAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CelebrityRecognitionAnnotation.encode = function encode(message, writer) { + PersonDetectionAnnotation.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.celebrityTracks != null && message.celebrityTracks.length) - for (var i = 0; i < message.celebrityTracks.length; ++i) - $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.encode(message.celebrityTracks[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tracks != null && message.tracks.length) + for (var i = 0; i < message.tracks.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.Track.encode(message.tracks[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified CelebrityRecognitionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.verify|verify} messages. + * Encodes the specified PersonDetectionAnnotation message, length delimited. Does not implicitly {@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.verify|verify} messages. 
* @function encodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation message or plain object to encode + * @param {google.cloud.videointelligence.v1p3beta1.IPersonDetectionAnnotation} message PersonDetectionAnnotation message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CelebrityRecognitionAnnotation.encodeDelimited = function encodeDelimited(message, writer) { + PersonDetectionAnnotation.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CelebrityRecognitionAnnotation message from the specified reader or buffer. + * Decodes a PersonDetectionAnnotation message from the specified reader or buffer. * @function decode - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} PersonDetectionAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CelebrityRecognitionAnnotation.decode = function decode(reader, length) { + PersonDetectionAnnotation.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation(); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation(); while (reader.pos < end) { var tag = reader.uint32(); switch (tag >>> 3) { case 1: - if (!(message.celebrityTracks && message.celebrityTracks.length)) - message.celebrityTracks = []; - message.celebrityTracks.push($root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.decode(reader, reader.uint32())); + if (!(message.tracks && message.tracks.length)) + message.tracks = []; + message.tracks.push($root.google.cloud.videointelligence.v1p3beta1.Track.decode(reader, reader.uint32())); break; default: reader.skipType(tag & 7); @@ -32283,104 +33540,104 @@ }; /** - * Decodes a CelebrityRecognitionAnnotation message from the specified reader or buffer, length delimited. + * Decodes a PersonDetectionAnnotation message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} PersonDetectionAnnotation * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CelebrityRecognitionAnnotation.decodeDelimited = function decodeDelimited(reader) { + PersonDetectionAnnotation.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CelebrityRecognitionAnnotation message. + * Verifies a PersonDetectionAnnotation message. * @function verify - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CelebrityRecognitionAnnotation.verify = function verify(message) { + PersonDetectionAnnotation.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.celebrityTracks != null && message.hasOwnProperty("celebrityTracks")) { - if (!Array.isArray(message.celebrityTracks)) - return "celebrityTracks: array expected"; - for (var i = 0; i < message.celebrityTracks.length; ++i) { - var error = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.verify(message.celebrityTracks[i]); + if (message.tracks != null && message.hasOwnProperty("tracks")) { + if (!Array.isArray(message.tracks)) + return "tracks: array expected"; + for (var i = 0; i < message.tracks.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.Track.verify(message.tracks[i]); if (error) - return "celebrityTracks." + error; + return "tracks." + error; } } return null; }; /** - * Creates a CelebrityRecognitionAnnotation message from a plain object. Also converts values to their respective internal types. + * Creates a PersonDetectionAnnotation message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static * @param {Object.} object Plain object - * @returns {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} CelebrityRecognitionAnnotation + * @returns {google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} PersonDetectionAnnotation */ - CelebrityRecognitionAnnotation.fromObject = function fromObject(object) { - if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation) + PersonDetectionAnnotation.fromObject = function fromObject(object) { + if (object instanceof $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation) return object; - var message = new $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation(); - if (object.celebrityTracks) { - if (!Array.isArray(object.celebrityTracks)) - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.celebrityTracks: array expected"); - message.celebrityTracks = []; - for (var i = 0; i < object.celebrityTracks.length; ++i) { - if (typeof object.celebrityTracks[i] !== "object") - throw TypeError(".google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.celebrityTracks: object expected"); - message.celebrityTracks[i] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.fromObject(object.celebrityTracks[i]); + var message = new $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation(); + if (object.tracks) { + if (!Array.isArray(object.tracks)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.tracks: array expected"); + message.tracks = []; + for (var i = 0; i < object.tracks.length; ++i) { + if (typeof object.tracks[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.tracks: object expected"); + message.tracks[i] = $root.google.cloud.videointelligence.v1p3beta1.Track.fromObject(object.tracks[i]); } } return message; }; /** - * Creates a plain object from a CelebrityRecognitionAnnotation message. Also converts values to other types if specified. + * Creates a plain object from a PersonDetectionAnnotation message. Also converts values to other types if specified. 
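// Sketch of the conversion options honored by the generated toObject()
// converters (the PersonDetectionAnnotation converter below and the
// FaceDetectionAnnotation converter above): `arrays: true` materializes empty
// repeated fields and `bytes: String` base64-encodes byte fields. The
// three-byte thumbnail is only an illustrative value:
var FaceDetectionAnnotation = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation;
var face = FaceDetectionAnnotation.create({ thumbnail: new Uint8Array([1, 2, 3]) });
var plain = FaceDetectionAnnotation.toObject(face, { bytes: String, arrays: true });
// plain.tracks -> [] and plain.thumbnail -> "AQID" (base64)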
* @function toObject - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @static - * @param {google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation} message CelebrityRecognitionAnnotation + * @param {google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} message PersonDetectionAnnotation * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CelebrityRecognitionAnnotation.toObject = function toObject(message, options) { + PersonDetectionAnnotation.toObject = function toObject(message, options) { if (!options) options = {}; var object = {}; if (options.arrays || options.defaults) - object.celebrityTracks = []; - if (message.celebrityTracks && message.celebrityTracks.length) { - object.celebrityTracks = []; - for (var j = 0; j < message.celebrityTracks.length; ++j) - object.celebrityTracks[j] = $root.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.toObject(message.celebrityTracks[j], options); + object.tracks = []; + if (message.tracks && message.tracks.length) { + object.tracks = []; + for (var j = 0; j < message.tracks.length; ++j) + object.tracks[j] = $root.google.cloud.videointelligence.v1p3beta1.Track.toObject(message.tracks[j], options); } return object; }; /** - * Converts this CelebrityRecognitionAnnotation to JSON. + * Converts this PersonDetectionAnnotation to JSON. * @function toJSON - * @memberof google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation * @instance * @returns {Object.} JSON object */ - CelebrityRecognitionAnnotation.prototype.toJSON = function toJSON() { + PersonDetectionAnnotation.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; - return CelebrityRecognitionAnnotation; + return PersonDetectionAnnotation; })(); v1p3beta1.VideoAnnotationResults = (function() { @@ -32396,12 +33653,14 @@ * @property {Array.|null} [shotLabelAnnotations] VideoAnnotationResults shotLabelAnnotations * @property {Array.|null} [shotPresenceLabelAnnotations] VideoAnnotationResults shotPresenceLabelAnnotations * @property {Array.|null} [frameLabelAnnotations] VideoAnnotationResults frameLabelAnnotations + * @property {Array.|null} [faceDetectionAnnotations] VideoAnnotationResults faceDetectionAnnotations * @property {Array.|null} [shotAnnotations] VideoAnnotationResults shotAnnotations * @property {google.cloud.videointelligence.v1p3beta1.IExplicitContentAnnotation|null} [explicitAnnotation] VideoAnnotationResults explicitAnnotation * @property {Array.|null} [speechTranscriptions] VideoAnnotationResults speechTranscriptions * @property {Array.|null} [textAnnotations] VideoAnnotationResults textAnnotations * @property {Array.|null} [objectAnnotations] VideoAnnotationResults objectAnnotations * @property {Array.|null} [logoRecognitionAnnotations] VideoAnnotationResults logoRecognitionAnnotations + * @property {Array.|null} [personDetectionAnnotations] VideoAnnotationResults personDetectionAnnotations * @property {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation|null} [celebrityRecognitionAnnotations] VideoAnnotationResults celebrityRecognitionAnnotations * @property {google.rpc.IStatus|null} [error] VideoAnnotationResults error */ @@ -32420,11 +33679,13 @@ this.shotLabelAnnotations = []; 
this.shotPresenceLabelAnnotations = []; this.frameLabelAnnotations = []; + this.faceDetectionAnnotations = []; this.shotAnnotations = []; this.speechTranscriptions = []; this.textAnnotations = []; this.objectAnnotations = []; this.logoRecognitionAnnotations = []; + this.personDetectionAnnotations = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -32487,6 +33748,14 @@ */ VideoAnnotationResults.prototype.frameLabelAnnotations = $util.emptyArray; + /** + * VideoAnnotationResults faceDetectionAnnotations. + * @member {Array.} faceDetectionAnnotations + * @memberof google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.faceDetectionAnnotations = $util.emptyArray; + /** * VideoAnnotationResults shotAnnotations. * @member {Array.} shotAnnotations @@ -32535,6 +33804,14 @@ */ VideoAnnotationResults.prototype.logoRecognitionAnnotations = $util.emptyArray; + /** + * VideoAnnotationResults personDetectionAnnotations. + * @member {Array.} personDetectionAnnotations + * @memberof google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults + * @instance + */ + VideoAnnotationResults.prototype.personDetectionAnnotations = $util.emptyArray; + /** * VideoAnnotationResults celebrityRecognitionAnnotations. * @member {google.cloud.videointelligence.v1p3beta1.ICelebrityRecognitionAnnotation|null|undefined} celebrityRecognitionAnnotations @@ -32601,12 +33878,18 @@ if (message.textAnnotations != null && message.textAnnotations.length) for (var i = 0; i < message.textAnnotations.length; ++i) $root.google.cloud.videointelligence.v1p3beta1.TextAnnotation.encode(message.textAnnotations[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.faceDetectionAnnotations != null && message.faceDetectionAnnotations.length) + for (var i = 0; i < message.faceDetectionAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.encode(message.faceDetectionAnnotations[i], writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); if (message.objectAnnotations != null && message.objectAnnotations.length) for (var i = 0; i < message.objectAnnotations.length; ++i) $root.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation.encode(message.objectAnnotations[i], writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); if (message.logoRecognitionAnnotations != null && message.logoRecognitionAnnotations.length) for (var i = 0; i < message.logoRecognitionAnnotations.length; ++i) $root.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation.encode(message.logoRecognitionAnnotations[i], writer.uint32(/* id 19, wireType 2 =*/154).fork()).ldelim(); + if (message.personDetectionAnnotations != null && message.personDetectionAnnotations.length) + for (var i = 0; i < message.personDetectionAnnotations.length; ++i) + $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.encode(message.personDetectionAnnotations[i], writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); if (message.celebrityRecognitionAnnotations != null && message.hasOwnProperty("celebrityRecognitionAnnotations")) $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.encode(message.celebrityRecognitionAnnotations, writer.uint32(/* id 21, wireType 2 =*/170).fork()).ldelim(); if (message.segmentPresenceLabelAnnotations != null && message.segmentPresenceLabelAnnotations.length) @@ -32680,6 +33963,11 @@ 
message.frameLabelAnnotations = []; message.frameLabelAnnotations.push($root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation.decode(reader, reader.uint32())); break; + case 13: + if (!(message.faceDetectionAnnotations && message.faceDetectionAnnotations.length)) + message.faceDetectionAnnotations = []; + message.faceDetectionAnnotations.push($root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.decode(reader, reader.uint32())); + break; case 6: if (!(message.shotAnnotations && message.shotAnnotations.length)) message.shotAnnotations = []; @@ -32708,6 +33996,11 @@ message.logoRecognitionAnnotations = []; message.logoRecognitionAnnotations.push($root.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation.decode(reader, reader.uint32())); break; + case 20: + if (!(message.personDetectionAnnotations && message.personDetectionAnnotations.length)) + message.personDetectionAnnotations = []; + message.personDetectionAnnotations.push($root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.decode(reader, reader.uint32())); + break; case 21: message.celebrityRecognitionAnnotations = $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.decode(reader, reader.uint32()); break; @@ -32802,6 +34095,15 @@ return "frameLabelAnnotations." + error; } } + if (message.faceDetectionAnnotations != null && message.hasOwnProperty("faceDetectionAnnotations")) { + if (!Array.isArray(message.faceDetectionAnnotations)) + return "faceDetectionAnnotations: array expected"; + for (var i = 0; i < message.faceDetectionAnnotations.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.verify(message.faceDetectionAnnotations[i]); + if (error) + return "faceDetectionAnnotations." + error; + } + } if (message.shotAnnotations != null && message.hasOwnProperty("shotAnnotations")) { if (!Array.isArray(message.shotAnnotations)) return "shotAnnotations: array expected"; @@ -32852,6 +34154,15 @@ return "logoRecognitionAnnotations." + error; } } + if (message.personDetectionAnnotations != null && message.hasOwnProperty("personDetectionAnnotations")) { + if (!Array.isArray(message.personDetectionAnnotations)) + return "personDetectionAnnotations: array expected"; + for (var i = 0; i < message.personDetectionAnnotations.length; ++i) { + var error = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.verify(message.personDetectionAnnotations[i]); + if (error) + return "personDetectionAnnotations." 
+ error; + } + } if (message.celebrityRecognitionAnnotations != null && message.hasOwnProperty("celebrityRecognitionAnnotations")) { var error = $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.verify(message.celebrityRecognitionAnnotations); if (error) @@ -32934,6 +34245,16 @@ message.frameLabelAnnotations[i] = $root.google.cloud.videointelligence.v1p3beta1.LabelAnnotation.fromObject(object.frameLabelAnnotations[i]); } } + if (object.faceDetectionAnnotations) { + if (!Array.isArray(object.faceDetectionAnnotations)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.faceDetectionAnnotations: array expected"); + message.faceDetectionAnnotations = []; + for (var i = 0; i < object.faceDetectionAnnotations.length; ++i) { + if (typeof object.faceDetectionAnnotations[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.faceDetectionAnnotations: object expected"); + message.faceDetectionAnnotations[i] = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.fromObject(object.faceDetectionAnnotations[i]); + } + } if (object.shotAnnotations) { if (!Array.isArray(object.shotAnnotations)) throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.shotAnnotations: array expected"); @@ -32989,6 +34310,16 @@ message.logoRecognitionAnnotations[i] = $root.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation.fromObject(object.logoRecognitionAnnotations[i]); } } + if (object.personDetectionAnnotations) { + if (!Array.isArray(object.personDetectionAnnotations)) + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.personDetectionAnnotations: array expected"); + message.personDetectionAnnotations = []; + for (var i = 0; i < object.personDetectionAnnotations.length; ++i) { + if (typeof object.personDetectionAnnotations[i] !== "object") + throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.personDetectionAnnotations: object expected"); + message.personDetectionAnnotations[i] = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.fromObject(object.personDetectionAnnotations[i]); + } + } if (object.celebrityRecognitionAnnotations != null) { if (typeof object.celebrityRecognitionAnnotations !== "object") throw TypeError(".google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.celebrityRecognitionAnnotations: object expected"); @@ -33022,8 +34353,10 @@ object.shotAnnotations = []; object.speechTranscriptions = []; object.textAnnotations = []; + object.faceDetectionAnnotations = []; object.objectAnnotations = []; object.logoRecognitionAnnotations = []; + object.personDetectionAnnotations = []; object.segmentPresenceLabelAnnotations = []; object.shotPresenceLabelAnnotations = []; } @@ -33072,6 +34405,11 @@ for (var j = 0; j < message.textAnnotations.length; ++j) object.textAnnotations[j] = $root.google.cloud.videointelligence.v1p3beta1.TextAnnotation.toObject(message.textAnnotations[j], options); } + if (message.faceDetectionAnnotations && message.faceDetectionAnnotations.length) { + object.faceDetectionAnnotations = []; + for (var j = 0; j < message.faceDetectionAnnotations.length; ++j) + object.faceDetectionAnnotations[j] = $root.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.toObject(message.faceDetectionAnnotations[j], options); + } if (message.objectAnnotations && message.objectAnnotations.length) { object.objectAnnotations = []; for (var j = 0; j 
< message.objectAnnotations.length; ++j) @@ -33082,6 +34420,11 @@ for (var j = 0; j < message.logoRecognitionAnnotations.length; ++j) object.logoRecognitionAnnotations[j] = $root.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation.toObject(message.logoRecognitionAnnotations[j], options); } + if (message.personDetectionAnnotations && message.personDetectionAnnotations.length) { + object.personDetectionAnnotations = []; + for (var j = 0; j < message.personDetectionAnnotations.length; ++j) + object.personDetectionAnnotations[j] = $root.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.toObject(message.personDetectionAnnotations[j], options); + } if (message.celebrityRecognitionAnnotations != null && message.hasOwnProperty("celebrityRecognitionAnnotations")) object.celebrityRecognitionAnnotations = $root.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation.toObject(message.celebrityRecognitionAnnotations, options); if (message.segmentPresenceLabelAnnotations && message.segmentPresenceLabelAnnotations.length) { @@ -33543,11 +34886,13 @@ case 1: case 2: case 3: + case 4: case 6: case 7: case 9: case 12: case 13: + case 14: break; } if (message.segment != null && message.hasOwnProperty("segment")) { @@ -33601,6 +34946,10 @@ case 3: message.feature = 3; break; + case "FACE_DETECTION": + case 4: + message.feature = 4; + break; case "SPEECH_TRANSCRIPTION": case 6: message.feature = 6; @@ -33621,6 +34970,10 @@ case 13: message.feature = 13; break; + case "PERSON_DETECTION": + case 14: + message.feature = 14; + break; } if (object.segment != null) { if (typeof object.segment !== "object") @@ -39781,11 +41134,13 @@ * @property {number} LABEL_DETECTION=1 LABEL_DETECTION value * @property {number} SHOT_CHANGE_DETECTION=2 SHOT_CHANGE_DETECTION value * @property {number} EXPLICIT_CONTENT_DETECTION=3 EXPLICIT_CONTENT_DETECTION value + * @property {number} FACE_DETECTION=4 FACE_DETECTION value * @property {number} SPEECH_TRANSCRIPTION=6 SPEECH_TRANSCRIPTION value * @property {number} TEXT_DETECTION=7 TEXT_DETECTION value * @property {number} OBJECT_TRACKING=9 OBJECT_TRACKING value * @property {number} LOGO_RECOGNITION=12 LOGO_RECOGNITION value * @property {number} CELEBRITY_RECOGNITION=13 CELEBRITY_RECOGNITION value + * @property {number} PERSON_DETECTION=14 PERSON_DETECTION value */ v1p3beta1.Feature = (function() { var valuesById = {}, values = Object.create(valuesById); @@ -39793,11 +41148,13 @@ values[valuesById[1] = "LABEL_DETECTION"] = 1; values[valuesById[2] = "SHOT_CHANGE_DETECTION"] = 2; values[valuesById[3] = "EXPLICIT_CONTENT_DETECTION"] = 3; + values[valuesById[4] = "FACE_DETECTION"] = 4; values[valuesById[6] = "SPEECH_TRANSCRIPTION"] = 6; values[valuesById[7] = "TEXT_DETECTION"] = 7; values[valuesById[9] = "OBJECT_TRACKING"] = 9; values[valuesById[12] = "LOGO_RECOGNITION"] = 12; values[valuesById[13] = "CELEBRITY_RECOGNITION"] = 13; + values[valuesById[14] = "PERSON_DETECTION"] = 14; return values; })(); diff --git a/packages/google-cloud-videointelligence/protos/protos.json b/packages/google-cloud-videointelligence/protos/protos.json index 23e6e570f45..6c026992bb4 100644 --- a/packages/google-cloud-videointelligence/protos/protos.json +++ b/packages/google-cloud-videointelligence/protos/protos.json @@ -757,13 +757,20 @@ }, "nested": { "VideoIntelligenceService": { + "options": { + "(google.api.default_host)": "videointelligence.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform" + }, "methods": { 
"AnnotateVideo": { "requestType": "AnnotateVideoRequest", "responseType": "google.longrunning.Operation", "options": { "(google.api.http).post": "/v1beta2/videos:annotate", - "(google.api.http).body": "*" + "(google.api.http).body": "*", + "(google.api.method_signature)": "input_uri,features", + "(google.longrunning.operation_info).response_type": "AnnotateVideoResponse", + "(google.longrunning.operation_info).metadata_type": "AnnotateVideoProgress" } } } @@ -781,7 +788,10 @@ "features": { "rule": "repeated", "type": "Feature", - "id": 2 + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } }, "videoContext": { "type": "VideoContext", @@ -789,11 +799,17 @@ }, "outputUri": { "type": "string", - "id": 4 + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "locationId": { "type": "string", - "id": 5 + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -1141,13 +1157,20 @@ }, "nested": { "VideoIntelligenceService": { + "options": { + "(google.api.default_host)": "videointelligence.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform" + }, "methods": { "AnnotateVideo": { "requestType": "AnnotateVideoRequest", "responseType": "google.longrunning.Operation", "options": { "(google.api.http).post": "/v1p1beta1/videos:annotate", - "(google.api.http).body": "*" + "(google.api.http).body": "*", + "(google.api.method_signature)": "input_uri,features", + "(google.longrunning.operation_info).response_type": "AnnotateVideoResponse", + "(google.longrunning.operation_info).metadata_type": "AnnotateVideoProgress" } } } @@ -1165,7 +1188,10 @@ "features": { "rule": "repeated", "type": "Feature", - "id": 2 + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } }, "videoContext": { "type": "VideoContext", @@ -1173,11 +1199,17 @@ }, "outputUri": { "type": "string", - "id": 4 + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "locationId": { "type": "string", - "id": 5 + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -1417,29 +1449,47 @@ "fields": { "languageCode": { "type": "string", - "id": 1 + "id": 1, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } }, "maxAlternatives": { "type": "int32", - "id": 2 + "id": 2, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "filterProfanity": { "type": "bool", - "id": 3 + "id": 3, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "speechContexts": { "rule": "repeated", "type": "SpeechContext", - "id": 4 + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "enableAutomaticPunctuation": { "type": "bool", - "id": 5 + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "audioTracks": { "rule": "repeated", "type": "int32", - "id": 6 + "id": 6, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -1448,7 +1498,10 @@ "phrases": { "rule": "repeated", "type": "string", - "id": 1 + "id": 1, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -1469,7 +1522,10 @@ }, "confidence": { "type": "float", - "id": 2 + "id": 2, + "options": { + "(google.api.field_behavior)": "OUTPUT_ONLY" + } }, "words": { "rule": "repeated", @@ -1535,13 +1591,20 @@ }, "nested": { "VideoIntelligenceService": { + "options": { + "(google.api.default_host)": "videointelligence.googleapis.com", + "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform" + }, "methods": { 
"AnnotateVideo": { "requestType": "AnnotateVideoRequest", "responseType": "google.longrunning.Operation", "options": { "(google.api.http).post": "/v1p2beta1/videos:annotate", - "(google.api.http).body": "*" + "(google.api.http).body": "*", + "(google.api.method_signature)": "input_uri,features", + "(google.longrunning.operation_info).response_type": "AnnotateVideoResponse", + "(google.longrunning.operation_info).metadata_type": "AnnotateVideoProgress" } } } @@ -1559,7 +1622,10 @@ "features": { "rule": "repeated", "type": "Feature", - "id": 2 + "id": 2, + "options": { + "(google.api.field_behavior)": "REQUIRED" + } }, "videoContext": { "type": "VideoContext", @@ -1567,11 +1633,17 @@ }, "outputUri": { "type": "string", - "id": 4 + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } }, "locationId": { "type": "string", - "id": 5 + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -2067,6 +2139,10 @@ "type": "ExplicitContentDetectionConfig", "id": 4 }, + "faceDetectionConfig": { + "type": "FaceDetectionConfig", + "id": 5 + }, "speechTranscriptionConfig": { "type": "SpeechTranscriptionConfig", "id": 6 @@ -2075,6 +2151,10 @@ "type": "TextDetectionConfig", "id": 8 }, + "personDetectionConfig": { + "type": "PersonDetectionConfig", + "id": 11 + }, "objectTrackingConfig": { "type": "ObjectTrackingConfig", "id": 13 @@ -2129,6 +2209,38 @@ } } }, + "FaceDetectionConfig": { + "fields": { + "model": { + "type": "string", + "id": 1 + }, + "includeBoundingBoxes": { + "type": "bool", + "id": 2 + }, + "includeAttributes": { + "type": "bool", + "id": 5 + } + } + }, + "PersonDetectionConfig": { + "fields": { + "includeBoundingBoxes": { + "type": "bool", + "id": 1 + }, + "includePoseLandmarks": { + "type": "bool", + "id": 2 + }, + "includeAttributes": { + "type": "bool", + "id": 3 + } + } + }, "TextDetectionConfig": { "fields": { "languageHints": { @@ -2275,6 +2387,14 @@ "options": { "(google.api.field_behavior)": "OPTIONAL" } + }, + "landmarks": { + "rule": "repeated", + "type": "DetectedLandmark", + "id": 4, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } }, @@ -2374,6 +2494,44 @@ } } }, + "DetectedLandmark": { + "fields": { + "name": { + "type": "string", + "id": 1 + }, + "point": { + "type": "NormalizedVertex", + "id": 2 + }, + "confidence": { + "type": "float", + "id": 3 + } + } + }, + "FaceDetectionAnnotation": { + "fields": { + "tracks": { + "rule": "repeated", + "type": "Track", + "id": 3 + }, + "thumbnail": { + "type": "bytes", + "id": 4 + } + } + }, + "PersonDetectionAnnotation": { + "fields": { + "tracks": { + "rule": "repeated", + "type": "Track", + "id": 1 + } + } + }, "VideoAnnotationResults": { "fields": { "inputUri": { @@ -2409,6 +2567,11 @@ "type": "LabelAnnotation", "id": 4 }, + "faceDetectionAnnotations": { + "rule": "repeated", + "type": "FaceDetectionAnnotation", + "id": 13 + }, "shotAnnotations": { "rule": "repeated", "type": "VideoSegment", @@ -2438,6 +2601,11 @@ "type": "LogoRecognitionAnnotation", "id": 19 }, + "personDetectionAnnotations": { + "rule": "repeated", + "type": "PersonDetectionAnnotation", + "id": 20 + }, "celebrityRecognitionAnnotations": { "type": "CelebrityRecognitionAnnotation", "id": 21 @@ -2928,11 +3096,13 @@ "LABEL_DETECTION": 1, "SHOT_CHANGE_DETECTION": 2, "EXPLICIT_CONTENT_DETECTION": 3, + "FACE_DETECTION": 4, "SPEECH_TRANSCRIPTION": 6, "TEXT_DETECTION": 7, "OBJECT_TRACKING": 9, "LOGO_RECOGNITION": 12, - "CELEBRITY_RECOGNITION": 13 + "CELEBRITY_RECOGNITION": 13, + "PERSON_DETECTION": 14 } }, 
"LabelDetectionMode": { diff --git a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js index 18960fed46e..4019788cd52 100644 --- a/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js +++ b/packages/google-cloud-videointelligence/src/v1beta2/doc/google/cloud/videointelligence/v1beta2/doc_video_intelligence.js @@ -36,7 +36,7 @@ * If set, `input_uri` should be unset. * * @property {number[]} features - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1beta2.Feature} * @@ -46,7 +46,7 @@ * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1beta2.VideoContext} * * @property {string} outputUri - * Optional location where the output (in JSON format) should be stored. + * Optional. Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return @@ -54,7 +54,7 @@ * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * @property {string} locationId - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. * @@ -71,8 +71,8 @@ const AnnotateVideoRequest = { * * @property {Object[]} segments * Video segments to annotate. The segments may overlap and are not required - * to be contiguous or span the whole video. If unspecified, each video - * is treated as a single segment. + * to be contiguous or span the whole video. If unspecified, each video is + * treated as a single segment. * * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1beta2.VideoSegment} * diff --git a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js index 26ac21660a1..46a2571d732 100644 --- a/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js +++ b/packages/google-cloud-videointelligence/src/v1beta2/video_intelligence_service_client.js @@ -260,7 +260,7 @@ class VideoIntelligenceServiceClient { * If unset, the input video(s) should be specified via `input_uri`. * If set, `input_uri` should be unset. * @param {number[]} [request.features] - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1beta2.Feature} * @param {Object} [request.videoContext] @@ -268,14 +268,14 @@ class VideoIntelligenceServiceClient { * * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1beta2.VideoContext} * @param {string} [request.outputUri] - * Optional location where the output (in JSON format) should be stored. + * Optional. 
Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return * google.rpc.Code.INVALID_ARGUMENT). For * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * @param {string} [request.locationId] - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. * @param {Object} [options] diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js index f74de028f31..0ab262f5f3b 100644 --- a/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js +++ b/packages/google-cloud-videointelligence/src/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/doc_video_intelligence.js @@ -23,10 +23,10 @@ * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are * supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video - * URI may include wildcards in `object-id`, and thus identify multiple - * videos. Supported wildcards: '*' to match 0 or more characters; + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; * '?' to match 1 character. If unset, the input video should be embedded * in the request as `input_content`. If set, `input_content` should be unset. * @@ -36,7 +36,7 @@ * If set, `input_uri` should be unset. * * @property {number[]} features - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p1beta1.Feature} * @@ -46,15 +46,15 @@ * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p1beta1.VideoContext} * * @property {string} outputUri - * Optional location where the output (in JSON format) should be stored. + * Optional. Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * * @property {string} locationId - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. 
Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. * @@ -445,32 +445,32 @@ const AnnotateVideoProgress = { * Config for SPEECH_TRANSCRIPTION. * * @property {string} languageCode - * *Required* The language of the supplied audio as a + * Required. *Required* The language of the supplied audio as a * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. * Example: "en-US". * See [Language Support](https://cloud.google.com/speech/docs/languages) * for a list of the currently supported language codes. * * @property {number} maxAlternatives - * *Optional* Maximum number of recognition hypotheses to be returned. + * Optional. Maximum number of recognition hypotheses to be returned. * Specifically, the maximum number of `SpeechRecognitionAlternative` messages - * within each `SpeechRecognitionResult`. The server may return fewer than + * within each `SpeechTranscription`. The server may return fewer than * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will * return a maximum of one. If omitted, will return a maximum of one. * * @property {boolean} filterProfanity - * *Optional* If set to `true`, the server will attempt to filter out + * Optional. If set to `true`, the server will attempt to filter out * profanities, replacing all but the initial character in each filtered word * with asterisks, e.g. "f***". If set to `false` or omitted, profanities * won't be filtered out. * * @property {Object[]} speechContexts - * *Optional* A means to provide context to assist the speech recognition. + * Optional. A means to provide context to assist the speech recognition. * * This object should have the same structure as [SpeechContext]{@link google.cloud.videointelligence.v1p1beta1.SpeechContext} * * @property {boolean} enableAutomaticPunctuation - * *Optional* If 'true', adds punctuation to recognition result hypotheses. + * Optional. If 'true', adds punctuation to recognition result hypotheses. * This feature is only available in select languages. Setting this for * requests in other languages has no effect at all. The default 'false' value * does not add punctuation to result hypotheses. NOTE: "This is currently @@ -478,7 +478,7 @@ const AnnotateVideoProgress = { * future this may be exclusively available as a premium feature." * * @property {number[]} audioTracks - * *Optional* For file formats, such as MXF or MKV, supporting multiple audio + * Optional. For file formats, such as MXF or MKV, supporting multiple audio * tracks, specify up to two tracks. Default: track 0. * * @typedef SpeechTranscriptionConfig @@ -494,7 +494,7 @@ const SpeechTranscriptionConfig = { * in the results. * * @property {string[]} phrases - * *Optional* A list of strings containing words and phrases "hints" so that + * Optional. A list of strings containing words and phrases "hints" so that * the speech recognition is more likely to recognize them. This can be used * to improve the accuracy for specific words and phrases, for example, if * specific commands are typically spoken by the user. This can also be used @@ -513,10 +513,10 @@ const SpeechContext = { * A speech recognition result corresponding to a portion of the audio. * * @property {Object[]} alternatives - * Output only. May contain one or more recognition hypotheses (up to the - * maximum specified in `max_alternatives`). 
- * These alternatives are ordered in terms of accuracy, with the top (first) - * alternative being the most probable, as ranked by the recognizer. + * May contain one or more recognition hypotheses (up to the maximum specified + * in `max_alternatives`). These alternatives are ordered in terms of + * accuracy, with the top (first) alternative being the most probable, as + * ranked by the recognizer. * * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative} * @@ -537,9 +537,9 @@ const SpeechTranscription = { * @property {number} confidence * Output only. The confidence estimate between 0.0 and 1.0. A higher number * indicates an estimated greater likelihood that the recognized words are - * correct. This field is typically provided only for the top hypothesis, and - * only for `is_final=true` results. Clients should not rely on the - * `confidence` field as it is not guaranteed to be accurate or consistent. + * correct. This field is set only for the top alternative. + * This field is not guaranteed to be accurate and users should not rely on it + * to be always provided. * The default of 0.0 is a sentinel value indicating `confidence` was not set. * * @property {Object[]} words diff --git a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js index e34b6561895..bea8f82bc97 100644 --- a/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js +++ b/packages/google-cloud-videointelligence/src/v1p1beta1/video_intelligence_service_client.js @@ -249,10 +249,10 @@ class VideoIntelligenceServiceClient { * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are * supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video - * URI may include wildcards in `object-id`, and thus identify multiple - * videos. Supported wildcards: '*' to match 0 or more characters; + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; * '?' to match 1 character. If unset, the input video should be embedded * in the request as `input_content`. If set, `input_content` should be unset. * @param {Buffer} [request.inputContent] @@ -260,7 +260,7 @@ class VideoIntelligenceServiceClient { * If unset, the input video(s) should be specified via `input_uri`. * If set, `input_uri` should be unset. * @param {number[]} [request.features] - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p1beta1.Feature} * @param {Object} [request.videoContext] @@ -268,14 +268,14 @@ class VideoIntelligenceServiceClient { * * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p1beta1.VideoContext} * @param {string} [request.outputUri] - * Optional location where the output (in JSON format) should be stored. + * Optional. 
Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * @param {string} [request.locationId] - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. * @param {Object} [options] diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js index 058d50a01b5..31a4ea321b3 100644 --- a/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js +++ b/packages/google-cloud-videointelligence/src/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/doc_video_intelligence.js @@ -23,10 +23,10 @@ * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are * supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video - * URI may include wildcards in `object-id`, and thus identify multiple - * videos. Supported wildcards: '*' to match 0 or more characters; + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; * '?' to match 1 character. If unset, the input video should be embedded * in the request as `input_content`. If set, `input_content` should be unset. * @@ -36,7 +36,7 @@ * If set, `input_uri` should be unset. * * @property {number[]} features - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p2beta1.Feature} * @@ -46,15 +46,15 @@ * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p2beta1.VideoContext} * * @property {string} outputUri - * Optional location where the output (in JSON format) should be stored. + * Optional. Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). 
* * @property {string} locationId - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. * diff --git a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js index 5a6bf4535b8..1b9511df368 100644 --- a/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js +++ b/packages/google-cloud-videointelligence/src/v1p2beta1/video_intelligence_service_client.js @@ -249,10 +249,10 @@ class VideoIntelligenceServiceClient { * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are * supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). A video - * URI may include wildcards in `object-id`, and thus identify multiple - * videos. Supported wildcards: '*' to match 0 or more characters; + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; * '?' to match 1 character. If unset, the input video should be embedded * in the request as `input_content`. If set, `input_content` should be unset. * @param {Buffer} [request.inputContent] @@ -260,7 +260,7 @@ class VideoIntelligenceServiceClient { * If unset, the input video(s) should be specified via `input_uri`. * If set, `input_uri` should be unset. * @param {number[]} [request.features] - * Requested video annotation features. + * Required. Requested video annotation features. * * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p2beta1.Feature} * @param {Object} [request.videoContext] @@ -268,14 +268,14 @@ class VideoIntelligenceServiceClient { * * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p2beta1.VideoContext} * @param {string} [request.outputUri] - * Optional location where the output (in JSON format) should be stored. + * Optional. Location where the output (in JSON format) should be stored. * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) * URIs are supported, which must be specified in the following format: * `gs://bucket-id/object-id` (other URI formats return - * google.rpc.Code.INVALID_ARGUMENT). For - * more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). * @param {string} [request.locationId] - * Optional cloud region where annotation should take place. Supported cloud + * Optional. Cloud region where annotation should take place. Supported cloud * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region * is specified, a region will be determined based on video file location. 
* @param {Object} [options] diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js index 4e4a1ba0bad..ae450b25b92 100644 --- a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js @@ -91,6 +91,11 @@ const AnnotateVideoRequest = { * * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig} * + * @property {Object} faceDetectionConfig + * Config for FACE_DETECTION. + * + * This object should have the same structure as [FaceDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig} + * * @property {Object} speechTranscriptionConfig * Config for SPEECH_TRANSCRIPTION. * @@ -101,6 +106,11 @@ * * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} * + * @property {Object} personDetectionConfig + * Config for PERSON_DETECTION. + * + * This object should have the same structure as [PersonDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig} + * * @property {Object} objectTrackingConfig * Config for OBJECT_TRACKING. * * @@ -206,6 +216,54 @@ const ExplicitContentDetectionConfig = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * Config for FACE_DETECTION. + * + * @property {string} model + * Model to use for face detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @property {boolean} includeBoundingBoxes + * Whether bounding boxes are included in the face annotation output. + * + * @property {boolean} includeAttributes + * Whether to enable face attributes detection, such as glasses, dark_glasses, + * mouth_open, etc. Ignored if 'include_bounding_boxes' is false. + * + * @typedef FaceDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const FaceDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for PERSON_DETECTION. + * + * @property {boolean} includeBoundingBoxes + * Whether bounding boxes are included in the person detection annotation + * output. + * + * @property {boolean} includePoseLandmarks + * Whether to enable pose landmarks detection. Ignored if + * 'include_bounding_boxes' is false. + * + * @property {boolean} includeAttributes + * Whether to enable person attributes detection, such as cloth color (black, + * blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair + * color (black, blonde, etc), hair length (long, short, bald), etc. + * Ignored if 'include_bounding_boxes' is false.
+ * + * @typedef PersonDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const PersonDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * Config for TEXT_DETECTION. * @@ -433,6 +491,11 @@ const NormalizedBoundingBox = { * * This object should have the same structure as [DetectedAttribute]{@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute} * + * @property {Object[]} landmarks + * Optional. The detected landmarks. + * + * This object should have the same structure as [DetectedLandmark]{@link google.cloud.videointelligence.v1p3beta1.DetectedLandmark} + * * @typedef TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1 * @see [google.cloud.videointelligence.v1p3beta1.TimestampedObject definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} * @@ -574,6 +637,65 @@ const CelebrityRecognitionAnnotation = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * A generic detected landmark represented by name in string format and a 2D + * location. + * + * @property {string} name + * The name of this landmark, e.g. left_hand, right_shoulder. + * + * @property {Object} point + * The 2D point of the detected landmark using the normalized image + * coordinate system. The normalized coordinates have the range from 0 to 1. + * + * This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1p3beta1.NormalizedVertex} + * + * @property {number} confidence + * The confidence score of the detected landmark. Range [0, 1]. + * + * @typedef DetectedLandmark + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.DetectedLandmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const DetectedLandmark = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Face detection annotation. + * + * @property {Object[]} tracks + * The face tracks with attributes. + * + * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track} + * + * @property {Buffer} thumbnail + * The thumbnail of a person's face. + * + * @typedef FaceDetectionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const FaceDetectionAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Person detection annotation per video. + * + * @property {Object[]} tracks + * The detected tracks of a person.
+ * + * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track} + * + * @typedef PersonDetectionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const PersonDetectionAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * Annotation results for a single video. * @@ -623,6 +745,11 @@ const CelebrityRecognitionAnnotation = { * * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation} * + * @property {Object[]} faceDetectionAnnotations + * Face detection annotations. + * + * This object should have the same structure as [FaceDetectionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation} + * * @property {Object[]} shotAnnotations * Shot annotations. Each shot is represented as a video segment. * @@ -655,6 +782,11 @@ const CelebrityRecognitionAnnotation = { * * This object should have the same structure as [LogoRecognitionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation} * + * @property {Object[]} personDetectionAnnotations + * Person detection annotations. + * + * This object should have the same structure as [PersonDetectionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation} + * * @property {Object} celebrityRecognitionAnnotations * Celebrity recognition annotations. * @@ -1423,6 +1555,11 @@ const Feature = { */ EXPLICIT_CONTENT_DETECTION: 3, + /** + * Human face detection. + */ + FACE_DETECTION: 4, + /** * Speech transcription. */ @@ -1446,7 +1583,12 @@ const Feature = { /** * Celebrity recognition. */ - CELEBRITY_RECOGNITION: 13 + CELEBRITY_RECOGNITION: 13, + + /** + * Person detection. 
+ */ + PERSON_DETECTION: 14 }; /** diff --git a/packages/google-cloud-videointelligence/synth.metadata b/packages/google-cloud-videointelligence/synth.metadata index 93246f6e085..9e1992306c4 100644 --- a/packages/google-cloud-videointelligence/synth.metadata +++ b/packages/google-cloud-videointelligence/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2020-01-24T12:41:24.060995Z", + "updateTime": "2020-01-28T22:44:43.779282Z", "sources": [ { "generator": { "name": "artman", - "version": "0.44.1", - "dockerImage": "googleapis/artman@sha256:5599b61e56a372d21b671969ee915fbca0f6c3a0daaeb898d01f8f685f1bbc8b" + "version": "0.44.3", + "dockerImage": "googleapis/artman@sha256:62b8b29acaae54b06a4183aa772e65b106e92d4bc466eb4db07953ab78bdb90c" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e26cab8afd19d396b929039dac5d874cf0b5336c", - "internalRef": "291240093" + "sha": "9c483584f8fd5a1b862ae07973f4cc7bb3e46648", + "internalRef": "292009868" } }, { @@ -98,15 +98,9 @@ { "path": ".github/release-please.yml" }, - { - "path": ".gitignore" - }, { "path": ".jsdoc.js" }, - { - "path": ".kokoro/.gitattributes" - }, { "path": ".kokoro/common.cfg" }, @@ -146,9 +140,6 @@ { "path": ".kokoro/lint.sh" }, - { - "path": ".kokoro/pre-samples-test.sh" - }, { "path": ".kokoro/presubmit/node10/common.cfg" }, @@ -221,12 +212,6 @@ { "path": ".prettierrc" }, - { - "path": ".repo-metadata.json" - }, - { - "path": "CHANGELOG.md" - }, { "path": "CODE_OF_CONDUCT.md" }, @@ -242,18 +227,9 @@ { "path": "codecov.yaml" }, - { - "path": "linkinator.config.json" - }, - { - "path": "package.json" - }, { "path": "protos/google/cloud/videointelligence/v1/video_intelligence.proto" }, - { - "path": "protos/google/cloud/videointelligence/v1beta1/video_intelligence.proto" - }, { "path": "protos/google/cloud/videointelligence/v1beta2/video_intelligence.proto" }, @@ -278,81 +254,12 @@ { "path": "renovate.json" }, - { - "path": "samples/.eslintrc.yml" - }, { "path": "samples/README.md" }, - { - "path": "samples/analyze-streaming-annotation-to-storage.js" - }, - { - "path": "samples/analyze-streaming-automl-classification.js" - }, - { - "path": "samples/analyze-streaming-labels.js" - }, - { - "path": "samples/analyze-streaming-object.js" - }, - { - "path": "samples/analyze-streaming-safe-search.js" - }, - { - "path": "samples/analyze-streaming-shot-change.js" - }, - { - "path": "samples/analyze.js" - }, - { - "path": "samples/analyze.v1p2beta1.js" - }, - { - "path": "samples/package.json" - }, - { - "path": "samples/quickstart.js" - }, - { - "path": "samples/resources/cat.mp4" - }, - { - "path": "samples/resources/googlework_short.mp4" - }, - { - "path": "samples/system-test/analyze-streaming-annotation-to-storage.test.js" - }, - { - "path": "samples/system-test/analyze-streaming-automl-classification.test.js" - }, - { - "path": "samples/system-test/analyze-streaming-labels.test.js" - }, - { - "path": "samples/system-test/analyze-streaming-object.test.js" - }, - { - "path": "samples/system-test/analyze-streaming-safe-search.test.js" - }, - { - "path": "samples/system-test/analyze-streaming-shot-change.test.js" - }, - { - "path": "samples/system-test/analyze.test.js" - }, - { - "path": "samples/system-test/analyze.v1p2beta1.test.js" - }, - { - "path": "samples/system-test/quickstart.test.js" - }, { "path": "src/browser.js" }, - { - "path": "src/index.js" - }, { "path": "src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js" }, @@ -497,15 +404,6 @@ { "path": 
"src/v1p3beta1/video_intelligence_service_proto_list.json" }, - { - "path": "synth.py" - }, - { - "path": "system-test/.eslintrc.yml" - }, - { - "path": "system-test/video_intelligence_service_smoke_test.js" - }, { "path": "test/gapic-v1.js" }, @@ -521,9 +419,6 @@ { "path": "test/gapic-v1p3beta1.js" }, - { - "path": "test/mocha.opts" - }, { "path": "webpack.config.js" }