From 1b2f4ccc67fc6a4d50b60970eacb4a528d470990 Mon Sep 17 00:00:00 2001 From: Jonathan Lui Date: Thu, 21 Feb 2019 13:45:36 -0800 Subject: [PATCH] feat: generate v1p3beta1 with streaming support (#198) --- .../google-cloud-videointelligence/.gitignore | 1 + .../package.json | 5 +- .../v1p3beta1/video_intelligence.proto | 627 ++++++++++ .../src/index.js | 15 + .../v1p3beta1/doc_video_intelligence.js | 1023 +++++++++++++++++ .../doc/google/longrunning/doc_operations.js | 63 + .../v1p3beta1/doc/google/protobuf/doc_any.js | 136 +++ .../doc/google/protobuf/doc_duration.js | 97 ++ .../v1p3beta1/doc/google/rpc/doc_status.js | 92 ++ .../src/v1p3beta1/index.js | 21 + ...aming_video_intelligence_service_client.js | 222 ++++ ...eo_intelligence_service_client_config.json | 31 + .../video_intelligence_service_client.js | 342 ++++++ ...eo_intelligence_service_client_config.json | 31 + .../synth.metadata | 12 +- .../google-cloud-videointelligence/synth.py | 2 +- .../test/gapic-v1p3beta1.js | 229 ++++ 17 files changed, 2945 insertions(+), 4 deletions(-) create mode 100644 packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/index.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js create mode 100644 packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json create mode 100644 packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js diff --git a/packages/google-cloud-videointelligence/.gitignore b/packages/google-cloud-videointelligence/.gitignore index 8db114fab10..c199edd9e91 100644 --- a/packages/google-cloud-videointelligence/.gitignore +++ b/packages/google-cloud-videointelligence/.gitignore @@ -9,3 +9,4 @@ system-test/*key.json *.lock package-lock.json .vscode +__pycache__ diff --git a/packages/google-cloud-videointelligence/package.json b/packages/google-cloud-videointelligence/package.json index 06b0a6dd34d..b13e765e15b 100644 --- a/packages/google-cloud-videointelligence/package.json +++ b/packages/google-cloud-videointelligence/package.json @@ -53,13 +53,14 @@ "eslint-config-prettier": "^4.0.0", "eslint-plugin-node": "^8.0.0", "eslint-plugin-prettier": "^3.0.0", - "jsdoc-baseline": "git+https://github.com/hegemonic/jsdoc-baseline.git", "intelli-espower-loader": "^1.0.1", "jsdoc": "^3.5.5", + "jsdoc-baseline": "git+https://github.com/hegemonic/jsdoc-baseline.git", + "linkinator": "^1.1.2", "mocha": "^6.0.0", "nyc": "^13.0.0", "power-assert": "^1.6.0", "prettier": "^1.13.5", - "linkinator": "^1.1.2" + "through2": "^3.0.0" } } diff --git 
a/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto new file mode 100644 index 00000000000..fa5b7515d56 --- /dev/null +++ b/packages/google-cloud-videointelligence/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto @@ -0,0 +1,627 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p3beta1; + +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P3Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p3beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1"; + + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p3beta1/videos:annotate" + body: "*" + }; + } +} + +// Service that implements Google Cloud Video Intelligence Streaming API. +service StreamingVideoIntelligenceService { + // Performs video annotation with bidirectional streaming: emitting results + // while sending video/audio bytes. + // This method is only available via the gRPC API (not REST). + rpc StreamingAnnotateVideo(stream StreamingAnnotateVideoRequest) + returns (stream StreamingAnnotateVideoResponse); +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. 
+  // If unset, the input video(s) should be specified via `input_uri`.
+  // If set, `input_uri` should be unset.
+  bytes input_content = 6;
+
+  // Requested video annotation features.
+  repeated Feature features = 2;
+
+  // Additional video context and/or feature-specific parameters.
+  VideoContext video_context = 3;
+
+  // Optional location where the output (in JSON format) should be stored.
+  // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
+  // URIs are supported, which must be specified in the following format:
+  // `gs://bucket-id/object-id` (other URI formats return
+  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+  // [Request URIs](/storage/docs/reference-uris).
+  string output_uri = 4;
+
+  // Optional cloud region where annotation should take place. Supported cloud
+  // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
+  // is specified, a region will be determined based on video file location.
+  string location_id = 5;
+}
+
+// Video context and/or feature-specific parameters.
+message VideoContext {
+  // Video segments to annotate. The segments may overlap and are not required
+  // to be contiguous or span the whole video. If unspecified, each video is
+  // treated as a single segment.
+  repeated VideoSegment segments = 1;
+
+  // Config for LABEL_DETECTION.
+  LabelDetectionConfig label_detection_config = 2;
+
+  // Config for SHOT_CHANGE_DETECTION.
+  ShotChangeDetectionConfig shot_change_detection_config = 3;
+
+  // Config for EXPLICIT_CONTENT_DETECTION.
+  ExplicitContentDetectionConfig explicit_content_detection_config = 4;
+
+  // Config for TEXT_DETECTION.
+  TextDetectionConfig text_detection_config = 8;
+}
+
+// Config for LABEL_DETECTION.
+message LabelDetectionConfig {
+  // What labels should be detected with LABEL_DETECTION, in addition to
+  // video-level labels or segment-level labels.
+  // If unspecified, defaults to `SHOT_MODE`.
+  LabelDetectionMode label_detection_mode = 1;
+
+  // Whether the video has been shot from a stationary (i.e. non-moving) camera.
+  // When set to true, might improve detection accuracy for moving objects.
+  // Should be used with `SHOT_AND_FRAME_MODE` enabled.
+  bool stationary_camera = 2;
+
+  // Model to use for label detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 3;
+}
+
+// Config for SHOT_CHANGE_DETECTION.
+message ShotChangeDetectionConfig {
+  // Model to use for shot change detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+}
+
+// Config for EXPLICIT_CONTENT_DETECTION.
+message ExplicitContentDetectionConfig {
+  // Model to use for explicit content detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+}
+
+// Config for TEXT_DETECTION.
+message TextDetectionConfig {
+  // A language hint can be specified if the language to be detected is known a
+  // priori. It can increase the accuracy of the detection. The language hint
+  // must be a language code in BCP-47 format.
+  //
+  // Automatic language detection is performed if no hint is provided.
+  repeated string language_hints = 1;
+}
+
+// Video segment.
+message VideoSegment {
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the start of the segment (inclusive).
+  google.protobuf.Duration start_time_offset = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the end of the segment (inclusive).
+  google.protobuf.Duration end_time_offset = 2;
+}
+
+// Video segment level annotation results for label detection.
+message LabelSegment {
+  // Video segment where a label was detected.
+  VideoSegment segment = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Video frame level annotation results for label detection.
+message LabelFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Detected entity from video analysis.
+message Entity {
+  // Opaque entity ID. Some IDs may be available in
+  // [Google Knowledge Graph Search
+  // API](https://developers.google.com/knowledge-graph/).
+  string entity_id = 1;
+
+  // Textual description, e.g. `Fixed-gear bicycle`.
+  string description = 2;
+
+  // Language code for `description` in BCP-47 format.
+  string language_code = 3;
+}
+
+// Label annotation.
+message LabelAnnotation {
+  // Detected entity.
+  Entity entity = 1;
+
+  // Common categories for the detected entity.
+  // E.g. when the label is `Terrier` the category is likely `dog`. And in some
+  // cases there might be more than one category, e.g. `Terrier` could also be
+  // a `pet`.
+  repeated Entity category_entities = 2;
+
+  // All video segments where a label was detected.
+  repeated LabelSegment segments = 3;
+
+  // All video frames where a label was detected.
+  repeated LabelFrame frames = 4;
+}
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+  // Left X coordinate.
+  float left = 1;
+
+  // Top Y coordinate.
+  float top = 2;
+
+  // Right X coordinate.
+  float right = 3;
+
+  // Bottom Y coordinate.
+  float bottom = 4;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Label annotations on video level or user specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Label annotations on shot level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation shot_label_annotations = 3;
+
+  // Label annotations on frame level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation frame_label_annotations = 4;
+
+  // Shot annotations. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 6;
+
+  // Explicit content annotation.
+  ExplicitContentAnnotation explicit_annotation = 7;
+
+  // OCR text detection and tracking.
+  // Annotations for the list of detected text snippets. Each will have a list
+  // of frame information associated with it.
+  repeated TextAnnotation text_annotations = 12;
+
+  // Annotations for the list of objects detected and tracked in the video.
+  repeated ObjectTrackingAnnotation object_annotations = 14;
+
+  // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+  // some videos may succeed and some may fail.
+  google.rpc.Status error = 9;
+}
+
+// Video annotation response. Included in the `response`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoResponse {
+  // Annotation results for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationResults annotation_results = 1;
+}
+
+// Annotation progress for a single video.
+message VideoAnnotationProgress {
+  // Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Approximate percentage processed thus far. Guaranteed to be
+  // 100 when fully processed.
+  int32 progress_percent = 2;
+
+  // Time when the request was received.
+  google.protobuf.Timestamp start_time = 3;
+
+  // Time of the most recent update.
+  google.protobuf.Timestamp update_time = 4;
+}
+
+// Video annotation progress. Included in the `metadata`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoProgress {
+  // Progress metadata for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationProgress annotation_progress = 1;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+  // X coordinate.
+  float x = 1;
+
+  // Y coordinate.
+  float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with the
+// axis). Contains a list of the corner points in clockwise order starting
+// from the top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+//         0----1
+//         |    |
+//         3----2
+//
+// When it's rotated 180 degrees clockwise around the top-left corner it
+// becomes:
+//         2----3
+//         |    |
+//         1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be
+// less than 0, or greater than 1, due to trigonometric calculations for the
+// location of the box.
+message NormalizedBoundingPoly {
+  // Normalized vertices of the bounding polygon.
+  repeated NormalizedVertex vertices = 1;
+}
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+  // Video segment where a text snippet was detected.
+  VideoSegment segment = 1;
+
+  // Confidence for the track of detected text. It is calculated as the highest
+  // over all frames where OCR detected text appears.
+  float confidence = 2;
+
+  // Information related to the frames where OCR detected text appears.
+  repeated TextFrame frames = 3;
+}
+
+// Video frame level annotation results for text annotation (OCR).
+// Contains information regarding timestamp and bounding box locations for the
+// frames containing detected OCR text snippets.
+message TextFrame {
+  // Bounding polygon of the detected text for this frame.
+  NormalizedBoundingPoly rotated_bounding_box = 1;
+
+  // Timestamp of this frame.
+  google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations related to one detected OCR text snippet. This will contain the
+// corresponding text, confidence value, and frame level information for each
+// detection.
+message TextAnnotation {
+  // The detected text.
+  string text = 1;
+
+  // All video segments where OCR detected text appears.
+  repeated TextSegment segments = 2;
+}
+
+// Video frame level annotations for object detection and tracking. This field
+// stores per frame location, time offset, and confidence.
+message ObjectTrackingFrame {
+  // The normalized bounding box location of this object track for the frame.
+  NormalizedBoundingBox normalized_bounding_box = 1;
+
+  // The timestamp of the frame in microseconds.
+  google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations corresponding to one tracked object.
+message ObjectTrackingAnnotation {
+  // Entity to specify the object category that this track is labeled as.
+  Entity entity = 1;
+
+  // Object category's labeling confidence of this track.
+  float confidence = 4;
+
+  // Information corresponding to all frames where this object track appears.
+  // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+  // messages in frames.
+  // Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+  repeated ObjectTrackingFrame frames = 2;
+
+  // Different representation of tracking info in non-streaming batch
+  // and streaming modes.
+  oneof track_info {
+    // Non-streaming batch mode ONLY.
+    // Each object track corresponds to one video segment where it appears.
+    VideoSegment segment = 3;
+    // Streaming mode ONLY.
+    // In streaming mode, we do not know the end time of a tracked object
+    // before it is completed. Hence, there is no VideoSegment info returned.
+    // Instead, we provide a unique integer track_id so that customers can
+    // correlate the results of the ongoing ObjectTrackingAnnotation of the
+    // same track_id over time.
+    int64 track_id = 5;
+  }
+}
+
+// The top-level message sent by the client for the `StreamingAnnotateVideo`
+// method. Multiple `StreamingAnnotateVideoRequest` messages are sent.
+// The first message must only contain a `StreamingVideoConfig` message.
+// All subsequent messages must only contain `input_content` data.
+message StreamingAnnotateVideoRequest {
+  // *Required* The streaming request, which is either a streaming config or
+  // video content.
+  oneof streaming_request {
+    // Provides information to the annotator, specifying how to process the
+    // request. The first `StreamingAnnotateVideoRequest` message must only
+    // contain a `video_config` message.
+    StreamingVideoConfig video_config = 1;
+
+    // The video data to be annotated. Chunks of video data are sequentially
+    // sent in `StreamingAnnotateVideoRequest` messages. Except for the initial
+    // `StreamingAnnotateVideoRequest` message containing only
+    // `video_config`, all subsequent `StreamingAnnotateVideoRequest`
+    // messages must only contain the `input_content` field.
+    bytes input_content = 2;
+  }
+}
+
+// `StreamingAnnotateVideoResponse` is the only message returned to the client
+// by `StreamingAnnotateVideo`. A series of zero or more
+// `StreamingAnnotateVideoResponse` messages are streamed back to the client.
+message StreamingAnnotateVideoResponse {
+  // If set, returns a [google.rpc.Status][] message that
+  // specifies the error for the operation.
+  google.rpc.Status error = 1;
+
+  // Streaming annotation results.
+  StreamingVideoAnnotationResults annotation_results = 2;
+
+  // GCS URI that stores annotation results of one streaming session.
+  // It is a directory that can hold multiple files in JSON format.
+  // Example URI format:
+  // gs://bucket_id/object_id/cloud_project_name-session_id
+  string annotation_results_uri = 3;
+}
+
+// Config for EXPLICIT_CONTENT_DETECTION in streaming mode.
+message StreamingExplicitContentDetectionConfig {
+  // No customized config support.
+}
+
+// Config for LABEL_DETECTION in streaming mode.
+message StreamingLabelDetectionConfig {
+  // Whether the video has been captured from a stationary (i.e. non-moving)
+  // camera. When set to true, might improve detection accuracy for moving
+  // objects. Default: false.
+  bool stationary_camera = 1;
+}
+
+// Config for STREAMING_OBJECT_TRACKING.
+message StreamingObjectTrackingConfig {
+  // No customized config support.
+}
+
+// Config for SHOT_CHANGE_DETECTION in streaming mode.
+message StreamingShotChangeDetectionConfig {
+  // No customized config support.
+}
+
+// Config for streaming storage option.
+message StreamingStorageConfig {
+  // Enable streaming storage. Default: false.
+  bool enable_storage_annotation_result = 1;
+
+  // GCS URI to store all annotation results for one client. The client should
+  // specify this field as the top-level storage directory. Annotation results
+  // of different sessions will be put into different sub-directories denoted
+  // by project_name and session_id. All sub-directories will be auto-generated
+  // by the program and made accessible to the client in the response proto.
+  // URIs must be specified in the following format: `gs://bucket-id/object-id`,
+  // where `bucket-id` should be a valid GCS bucket created by the client, with
+  // bucket permissions also configured properly. `object-id` can be an
+  // arbitrary string that makes sense to the client. Other URI formats will
+  // return an error and cause a GCS write failure.
+  string annotation_result_storage_directory = 3;
+}
+
+// Streaming annotation results corresponding to a portion of the video
+// that is currently being processed.
+message StreamingVideoAnnotationResults {
+  // Shot annotation results. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 1;
+
+  // Label annotation results.
+  repeated LabelAnnotation label_annotations = 2;
+
+  // Explicit content detection results.
+  ExplicitContentAnnotation explicit_annotation = 3;
+
+  // Object tracking results.
+  repeated ObjectTrackingAnnotation object_annotations = 4;
+}
+
+// Provides information to the annotator that specifies how to process the
+// request.
+message StreamingVideoConfig {
+  // Requested annotation feature.
+  StreamingFeature feature = 1;
+
+  // Config for requested annotation feature.
+  oneof streaming_config {
+    // Config for SHOT_CHANGE_DETECTION.
+    StreamingShotChangeDetectionConfig shot_change_detection_config = 2;
+
+    // Config for LABEL_DETECTION.
+    StreamingLabelDetectionConfig label_detection_config = 3;
+
+    // Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
+    StreamingExplicitContentDetectionConfig explicit_content_detection_config =
+        4;
+
+    // Config for STREAMING_OBJECT_TRACKING.
+    StreamingObjectTrackingConfig object_tracking_config = 5;
+  }
+
+  // Streaming storage option. By default: storage is disabled.
+  StreamingStorageConfig storage_config = 30;
+}
+
+// Video annotation feature.
+enum Feature {
+  // Unspecified.
+ FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + +// Streaming video annotation feature. +enum StreamingFeature { + // Unspecified. + STREAMING_FEATURE_UNSPECIFIED = 0; + // Label detection. Detect objects, such as dog or flower. + STREAMING_LABEL_DETECTION = 1; + // Shot change detection. + STREAMING_SHOT_CHANGE_DETECTION = 2; + // Explicit content detection. + STREAMING_EXPLICIT_CONTENT_DETECTION = 3; + // Object detection and tracking. + STREAMING_OBJECT_TRACKING = 4; +} diff --git a/packages/google-cloud-videointelligence/src/index.js b/packages/google-cloud-videointelligence/src/index.js index c6928ebff16..d9954a53c3e 100644 --- a/packages/google-cloud-videointelligence/src/index.js +++ b/packages/google-cloud-videointelligence/src/index.js @@ -39,6 +39,9 @@ /** * @namespace google.cloud.videointelligence.v1p2beta1 */ +/** + * @namespace google.cloud.videointelligence.v1p3beta1 + */ 'use strict'; @@ -49,6 +52,7 @@ const gapic = Object.freeze({ v1beta1: require('./v1beta1'), v1p1beta1: require('./v1p1beta1'), v1p2beta1: require('./v1p2beta1'), + v1p3beta1: require('./v1p3beta1'), }); /** @@ -76,6 +80,10 @@ const gapic = Object.freeze({ * particular backend service version. It exports: * - `VideoIntelligenceServiceClient` - Reference to * {@link v1p2beta1.VideoIntelligenceServiceClient} + * - `v1p3beta1` - This is used for selecting or pinning a + * particular backend service version. It exports: + * - `VideoIntelligenceServiceClient` - Reference to + * {@link v1p3beta1.VideoIntelligenceServiceClient} * * @module {object} @google-cloud/video-intelligence * @alias nodejs-video-intelligence @@ -142,5 +150,12 @@ module.exports.v1p1beta1 = gapic.v1p1beta1; */ module.exports.v1p2beta1 = gapic.v1p2beta1; +/** + * @type {object} + * @property {constructor} VideoIntelligenceServiceClient + * Reference to {@link v1p3beta1.VideoIntelligenceServiceClient} + */ +module.exports.v1p3beta1 = gapic.v1p3beta1; + // Alias `module.exports` as `module.exports.default`, for future-proofing. 
module.exports.default = Object.assign({}, module.exports); diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js new file mode 100644 index 00000000000..6362370ed2f --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js @@ -0,0 +1,1023 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * Video annotation request. + * + * @property {string} inputUri + * Input video location. Currently, only + * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + * + * @property {string} inputContent + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + * + * @property {number[]} features + * Requested video annotation features. + * + * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p3beta1.Feature} + * + * @property {Object} videoContext + * Additional video context and/or feature-specific parameters. + * + * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p3beta1.VideoContext} + * + * @property {string} outputUri + * Optional location where the output (in JSON format) should be stored. + * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * + * @property {string} locationId + * Optional cloud region where annotation should take place. Supported cloud + * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + * is specified, a region will be determined based on video file location. 
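+ *
+ * @example <caption>Illustrative sketch only (not part of the generated
+ * file): sending an `AnnotateVideoRequest` through the v1p3beta1 client and
+ * waiting on the long-running operation. The bucket and file names are
+ * placeholders.</caption>
+ * const videoIntelligence = require('@google-cloud/video-intelligence');
+ * const client =
+ *   new videoIntelligence.v1p3beta1.VideoIntelligenceServiceClient();
+ *
+ * const request = {
+ *   inputUri: 'gs://my-bucket/my-video.mp4',
+ *   features: ['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION'],
+ * };
+ *
+ * // annotateVideo returns a long-running operation; per the proto above,
+ * // Operation.metadata carries AnnotateVideoProgress and Operation.response
+ * // carries AnnotateVideoResponse.
+ * client.annotateVideo(request)
+ *   .then(responses => responses[0].promise())
+ *   .then(responses => {
+ *     // responses[0] is the AnnotateVideoResponse.
+ *     console.log(responses[0].annotationResults);
+ *   })
+ *   .catch(err => console.error(err));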
+ * + * @typedef AnnotateVideoRequest + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const AnnotateVideoRequest = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video context and/or feature-specific parameters. + * + * @property {Object[]} segments + * Video segments to annotate. The segments may overlap and are not required + * to be contiguous or span the whole video. If unspecified, each video is + * treated as a single segment. + * + * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment} + * + * @property {Object} labelDetectionConfig + * Config for LABEL_DETECTION. + * + * This object should have the same structure as [LabelDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig} + * + * @property {Object} shotChangeDetectionConfig + * Config for SHOT_CHANGE_DETECTION. + * + * This object should have the same structure as [ShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig} + * + * @property {Object} explicitContentDetectionConfig + * Config for EXPLICIT_CONTENT_DETECTION. + * + * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig} + * + * @property {Object} textDetectionConfig + * Config for TEXT_DETECTION. + * + * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} + * + * @typedef VideoContext + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const VideoContext = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for LABEL_DETECTION. + * + * @property {number} labelDetectionMode + * What labels should be detected with LABEL_DETECTION, in addition to + * video-level labels or segment-level labels. + * If unspecified, defaults to `SHOT_MODE`. + * + * The number should be among the values of [LabelDetectionMode]{@link google.cloud.videointelligence.v1p3beta1.LabelDetectionMode} + * + * @property {boolean} stationaryCamera + * Whether the video has been shot from a stationary (i.e. non-moving) camera. + * When set to true, might improve detection accuracy for moving objects. + * Should be used with `SHOT_AND_FRAME_MODE` enabled. + * + * @property {string} model + * Model to use for label detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @typedef LabelDetectionConfig + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const LabelDetectionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Config for SHOT_CHANGE_DETECTION. 
+ *
+ * @property {string} model
+ *   Model to use for shot change detection.
+ *   Supported values: "builtin/stable" (the default if unset) and
+ *   "builtin/latest".
+ *
+ * @typedef ShotChangeDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ShotChangeDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for EXPLICIT_CONTENT_DETECTION.
+ *
+ * @property {string} model
+ *   Model to use for explicit content detection.
+ *   Supported values: "builtin/stable" (the default if unset) and
+ *   "builtin/latest".
+ *
+ * @typedef ExplicitContentDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ExplicitContentDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for TEXT_DETECTION.
+ *
+ * @property {string[]} languageHints
+ *   A language hint can be specified if the language to be detected is known
+ *   a priori. It can increase the accuracy of the detection. The language
+ *   hint must be a language code in BCP-47 format.
+ *
+ *   Automatic language detection is performed if no hint is provided.
+ *
+ * @typedef TextDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const TextDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video segment.
+ *
+ * @property {Object} startTimeOffset
+ *   Time-offset, relative to the beginning of the video,
+ *   corresponding to the start of the segment (inclusive).
+ *
+ *   This object should have the same structure as [Duration]{@link google.protobuf.Duration}
+ *
+ * @property {Object} endTimeOffset
+ *   Time-offset, relative to the beginning of the video,
+ *   corresponding to the end of the segment (inclusive).
+ *
+ *   This object should have the same structure as [Duration]{@link google.protobuf.Duration}
+ *
+ * @typedef VideoSegment
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.VideoSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const VideoSegment = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video segment level annotation results for label detection.
+ *
+ * @property {Object} segment
+ *   Video segment where a label was detected.
+ *
+ *   This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
+ *
+ * @property {number} confidence
+ *   Confidence that the label is accurate. Range: [0, 1].
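+ *
+ * @example <caption>Illustrative sketch only: reading `LabelSegment`s out of
+ * an `AnnotateVideoResponse`; the `response` variable is assumed to come from
+ * a finished `annotateVideo` operation.</caption>
+ * for (const annotation of response.annotationResults[0]
+ *   .segmentLabelAnnotations) {
+ *   for (const labelSegment of annotation.segments) {
+ *     // Each segment pairs a VideoSegment with a [0, 1] confidence.
+ *     console.log(annotation.entity.description, labelSegment.confidence);
+ *   }
+ * }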
+ *
+ * @typedef LabelSegment
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.LabelSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const LabelSegment = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video frame level annotation results for label detection.
+ *
+ * @property {Object} timeOffset
+ *   Time-offset, relative to the beginning of the video, corresponding to the
+ *   video frame for this location.
+ *
+ *   This object should have the same structure as [Duration]{@link google.protobuf.Duration}
+ *
+ * @property {number} confidence
+ *   Confidence that the label is accurate. Range: [0, 1].
+ *
+ * @typedef LabelFrame
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.LabelFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const LabelFrame = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Detected entity from video analysis.
+ *
+ * @property {string} entityId
+ *   Opaque entity ID. Some IDs may be available in
+ *   [Google Knowledge Graph Search
+ *   API](https://developers.google.com/knowledge-graph/).
+ *
+ * @property {string} description
+ *   Textual description, e.g. `Fixed-gear bicycle`.
+ *
+ * @property {string} languageCode
+ *   Language code for `description` in BCP-47 format.
+ *
+ * @typedef Entity
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.Entity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const Entity = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Label annotation.
+ *
+ * @property {Object} entity
+ *   Detected entity.
+ *
+ *   This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
+ *
+ * @property {Object[]} categoryEntities
+ *   Common categories for the detected entity.
+ *   E.g. when the label is `Terrier` the category is likely `dog`. And in
+ *   some cases there might be more than one category, e.g. `Terrier` could
+ *   also be a `pet`.
+ *
+ *   This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
+ *
+ * @property {Object[]} segments
+ *   All video segments where a label was detected.
+ *
+ *   This object should have the same structure as [LabelSegment]{@link google.cloud.videointelligence.v1p3beta1.LabelSegment}
+ *
+ * @property {Object[]} frames
+ *   All video frames where a label was detected.
+ *
+ *   This object should have the same structure as [LabelFrame]{@link google.cloud.videointelligence.v1p3beta1.LabelFrame}
+ *
+ * @typedef LabelAnnotation
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.LabelAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const LabelAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video frame level annotation results for explicit content.
+ *
+ * @property {Object} timeOffset
+ *   Time-offset, relative to the beginning of the video, corresponding to the
+ *   video frame for this location.
+ *
+ *   This object should have the same structure as [Duration]{@link google.protobuf.Duration}
+ *
+ * @property {number} pornographyLikelihood
+ *   Likelihood of the pornography content.
+ *
+ *   The number should be among the values of [Likelihood]{@link google.cloud.videointelligence.v1p3beta1.Likelihood}
+ *
+ * @typedef ExplicitContentFrame
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ExplicitContentFrame = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Explicit content annotation (based on per-frame visual signals only).
+ * If no explicit content has been detected in a frame, no annotations are
+ * present for that frame.
+ *
+ * @property {Object[]} frames
+ *   All video frames where explicit content was detected.
+ *
+ *   This object should have the same structure as [ExplicitContentFrame]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame}
+ *
+ * @typedef ExplicitContentAnnotation
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ExplicitContentAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Normalized bounding box.
+ * The normalized vertex coordinates are relative to the original image.
+ * Range: [0, 1].
+ *
+ * @property {number} left
+ *   Left X coordinate.
+ *
+ * @property {number} top
+ *   Top Y coordinate.
+ *
+ * @property {number} right
+ *   Right X coordinate.
+ *
+ * @property {number} bottom
+ *   Bottom Y coordinate.
+ *
+ * @typedef NormalizedBoundingBox
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const NormalizedBoundingBox = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Annotation results for a single video.
+ *
+ * @property {string} inputUri
+ *   Video file location in
+ *   [Google Cloud Storage](https://cloud.google.com/storage/).
+ *
+ * @property {Object[]} segmentLabelAnnotations
+ *   Label annotations on video level or user specified segment level.
+ *   There is exactly one element for each unique label.
+ *
+ *   This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
+ *
+ * @property {Object[]} shotLabelAnnotations
+ *   Label annotations on shot level.
+ *   There is exactly one element for each unique label.
+ *
+ *   This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
+ *
+ * @property {Object[]} frameLabelAnnotations
+ *   Label annotations on frame level.
+ *   There is exactly one element for each unique label.
+ *
+ *   This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
+ *
+ * @property {Object[]} shotAnnotations
+ *   Shot annotations. Each shot is represented as a video segment.
+ *
+ *   This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
+ *
+ * @property {Object} explicitAnnotation
+ *   Explicit content annotation.
+ *
+ *   This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation}
+ *
+ * @property {Object[]} textAnnotations
+ *   OCR text detection and tracking.
+ *   Annotations for the list of detected text snippets. Each will have a list
+ *   of frame information associated with it.
+ *
+ *   This object should have the same structure as [TextAnnotation]{@link google.cloud.videointelligence.v1p3beta1.TextAnnotation}
+ *
+ * @property {Object[]} objectAnnotations
+ *   Annotations for the list of objects detected and tracked in the video.
+ *
+ *   This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation}
+ *
+ * @property {Object} error
+ *   If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+ *   some videos may succeed and some may fail.
+ *
+ *   This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @typedef VideoAnnotationResults
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const VideoAnnotationResults = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video annotation response. Included in the `response`
+ * field of the `Operation` returned by the `GetOperation`
+ * call of the `google::longrunning::Operations` service.
+ *
+ * @property {Object[]} annotationResults
+ *   Annotation results for all videos specified in `AnnotateVideoRequest`.
+ *
+ *   This object should have the same structure as [VideoAnnotationResults]{@link google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults}
+ *
+ * @typedef AnnotateVideoResponse
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const AnnotateVideoResponse = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Annotation progress for a single video.
+ *
+ * @property {string} inputUri
+ *   Video file location in
+ *   [Google Cloud Storage](https://cloud.google.com/storage/).
+ *
+ * @property {number} progressPercent
+ *   Approximate percentage processed thus far. Guaranteed to be
+ *   100 when fully processed.
+ *
+ * @property {Object} startTime
+ *   Time when the request was received.
+ *
+ *   This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @property {Object} updateTime
+ *   Time of the most recent update.
+ *
+ *   This object should have the same structure as [Timestamp]{@link google.protobuf.Timestamp}
+ *
+ * @typedef VideoAnnotationProgress
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const VideoAnnotationProgress = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video annotation progress. Included in the `metadata`
+ * field of the `Operation` returned by the `GetOperation`
+ * call of the `google::longrunning::Operations` service.
+ *
+ * @property {Object[]} annotationProgress
+ *   Progress metadata for all videos specified in `AnnotateVideoRequest`.
+ *
+ *   This object should have the same structure as [VideoAnnotationProgress]{@link google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress}
+ *
+ * @typedef AnnotateVideoProgress
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const AnnotateVideoProgress = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * A vertex represents a 2D point in the image.
+ * NOTE: the normalized vertex coordinates are relative to the original image
+ * and range from 0 to 1.
+ *
+ * @property {number} x
+ *   X coordinate.
+ *
+ * @property {number} y
+ *   Y coordinate.
+ *
+ * @typedef NormalizedVertex
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.NormalizedVertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const NormalizedVertex = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Normalized bounding polygon for text (that might not be aligned with the
+ * axis). Contains a list of the corner points in clockwise order starting
+ * from the top-left corner. For example, for a rectangular bounding box:
+ * When the text is horizontal it might look like:
+ *         0----1
+ *         |    |
+ *         3----2
+ *
+ * When it's rotated 180 degrees clockwise around the top-left corner it
+ * becomes:
+ *         2----3
+ *         |    |
+ *         1----0
+ *
+ * and the vertex order will still be (0, 1, 2, 3). Note that values can be
+ * less than 0, or greater than 1, due to trigonometric calculations for the
+ * location of the box.
+ *
+ * @property {Object[]} vertices
+ *   Normalized vertices of the bounding polygon.
+ *
+ *   This object should have the same structure as [NormalizedVertex]{@link google.cloud.videointelligence.v1p3beta1.NormalizedVertex}
+ *
+ * @typedef NormalizedBoundingPoly
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const NormalizedBoundingPoly = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video segment level annotation results for text detection.
+ *
+ * @property {Object} segment
+ *   Video segment where a text snippet was detected.
+ * + * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment} + * + * @property {number} confidence + * Confidence for the track of detected text. It is calculated as the highest + * over all frames where OCR detected text appears. + * + * @property {Object[]} frames + * Information related to the frames where OCR detected text appears. + * + * This object should have the same structure as [TextFrame]{@link google.cloud.videointelligence.v1p3beta1.TextFrame} + * + * @typedef TextSegment + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.TextSegment definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const TextSegment = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video frame level annotation results for text annotation (OCR). + * Contains information regarding timestamp and bounding box locations for the + * frames containing detected OCR text snippets. + * + * @property {Object} rotatedBoundingBox + * Bounding polygon of the detected text for this frame. + * + * This object should have the same structure as [NormalizedBoundingPoly]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly} + * + * @property {Object} timeOffset + * Timestamp of this frame. + * + * This object should have the same structure as [Duration]{@link google.protobuf.Duration} + * + * @typedef TextFrame + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.TextFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const TextFrame = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Annotations related to one detected OCR text snippet. This will contain the + * corresponding text, confidence value, and frame level information for each + * detection. + * + * @property {string} text + * The detected text. + * + * @property {Object[]} segments + * All video segments where OCR detected text appears. + * + * This object should have the same structure as [TextSegment]{@link google.cloud.videointelligence.v1p3beta1.TextSegment} + * + * @typedef TextAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const TextAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Video frame level annotations for object detection and tracking. This field + * stores per frame location, time offset, and confidence. + * + * @property {Object} normalizedBoundingBox + * The normalized bounding box location of this object track for the frame. + * + * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} + * + * @property {Object} timeOffset + * The timestamp of the frame in microseconds. 
+ *
+ *   This object should have the same structure as [Duration]{@link google.protobuf.Duration}
+ *
+ * @typedef ObjectTrackingFrame
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ObjectTrackingFrame = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Annotations corresponding to one tracked object.
+ *
+ * @property {Object} entity
+ *   Entity to specify the object category that this track is labeled as.
+ *
+ *   This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity}
+ *
+ * @property {number} confidence
+ *   Object category's labeling confidence of this track.
+ *
+ * @property {Object[]} frames
+ *   Information corresponding to all frames where this object track appears.
+ *   Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ *   messages in frames.
+ *   Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ *
+ *   This object should have the same structure as [ObjectTrackingFrame]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame}
+ *
+ * @property {Object} segment
+ *   Non-streaming batch mode ONLY.
+ *   Each object track corresponds to one video segment where it appears.
+ *
+ *   This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
+ *
+ * @property {number} trackId
+ *   Streaming mode ONLY.
+ *   In streaming mode, we do not know the end time of a tracked object
+ *   before it is completed. Hence, there is no VideoSegment info returned.
+ *   Instead, we provide a unique integer track_id so that customers can
+ *   correlate the results of the ongoing ObjectTrackingAnnotation of the
+ *   same track_id over time.
+ *
+ * @typedef ObjectTrackingAnnotation
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const ObjectTrackingAnnotation = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * The top-level message sent by the client for the `StreamingAnnotateVideo`
+ * method. Multiple `StreamingAnnotateVideoRequest` messages are sent.
+ * The first message must only contain a `StreamingVideoConfig` message.
+ * All subsequent messages must only contain `input_content` data.
+ *
+ * @property {Object} videoConfig
+ *   Provides information to the annotator, specifying how to process the
+ *   request. The first `StreamingAnnotateVideoRequest` message must only
+ *   contain a `video_config` message.
+ *
+ *   This object should have the same structure as [StreamingVideoConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig}
+ *
+ * @property {string} inputContent
+ *   The video data to be annotated. Chunks of video data are sequentially
+ *   sent in `StreamingAnnotateVideoRequest` messages. Except for the initial
+ *   `StreamingAnnotateVideoRequest` message containing only
+ *   `video_config`, all subsequent `StreamingAnnotateVideoRequest`
+ *   messages must only contain the `input_content` field.
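+ *
+ * @example <caption>Illustrative sketch only (not generated output): the
+ * required message ordering on the bidirectional stream. Assumes a
+ * `StreamingVideoIntelligenceServiceClient` from this package and a `chunks`
+ * array of Buffers read elsewhere.</caption>
+ * const stream = client.streamingAnnotateVideo()
+ *   .on('data', response => console.log(response.annotationResults))
+ *   .on('error', console.error);
+ *
+ * // First message: config only, no video bytes.
+ * stream.write({
+ *   videoConfig: {feature: 'STREAMING_LABEL_DETECTION'},
+ * });
+ * // Every following message: video bytes only.
+ * for (const chunk of chunks) {
+ *   stream.write({inputContent: chunk});
+ * }
+ * stream.end();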
+ *
+ * @typedef StreamingAnnotateVideoRequest
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingAnnotateVideoRequest = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * `StreamingAnnotateVideoResponse` is the only message returned to the client
+ * by `StreamingAnnotateVideo`. A series of zero or more
+ * `StreamingAnnotateVideoResponse` messages is streamed back to the client.
+ *
+ * @property {Object} error
+ * If set, returns a google.rpc.Status message that
+ * specifies the error for the operation.
+ *
+ * This object should have the same structure as [Status]{@link google.rpc.Status}
+ *
+ * @property {Object} annotationResults
+ * Streaming annotation results.
+ *
+ * This object should have the same structure as [StreamingVideoAnnotationResults]{@link google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults}
+ *
+ * @property {string} annotationResultsUri
+ * GCS URI that stores annotation results of one streaming session.
+ * It is a directory that can hold multiple files in JSON format.
+ * Example URI format:
+ * gs://bucket_id/object_id/cloud_project_name-session_id
+ *
+ * @typedef StreamingAnnotateVideoResponse
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingAnnotateVideoResponse = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for EXPLICIT_CONTENT_DETECTION in streaming mode.
+ * No customized config support.
+ * @typedef StreamingExplicitContentDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingExplicitContentDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for LABEL_DETECTION in streaming mode.
+ *
+ * @property {boolean} stationaryCamera
+ * Whether the video has been captured from a stationary (i.e. non-moving)
+ * camera. When set to true, this might improve detection accuracy for moving
+ * objects. Default: false.
+ *
+ * @typedef StreamingLabelDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingLabelDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for STREAMING_OBJECT_TRACKING.
+ * No customized config support.
+ * @typedef StreamingObjectTrackingConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingObjectTrackingConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for SHOT_CHANGE_DETECTION in streaming mode.
+ * No customized config support.
+ * @typedef StreamingShotChangeDetectionConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingShotChangeDetectionConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Config for streaming storage option.
+ *
+ * @property {boolean} enableStorageAnnotationResult
+ * Enable streaming storage. Default: false.
+ *
+ * @property {string} annotationResultStorageDirectory
+ * GCS URI to store all annotation results for one client. The client should
+ * specify this field as the top-level storage directory. Annotation results
+ * of different sessions are put into different sub-directories denoted by
+ * project_name and session_id. All sub-directories are generated
+ * automatically and made accessible to the client in the response proto.
+ * URIs must be specified in the following format: `gs://bucket-id/object-id`,
+ * where `bucket-id` is a valid GCS bucket created by the client with bucket
+ * permissions configured properly, and `object-id` is an arbitrary string
+ * meaningful to the client. Other URI formats return an error and cause a
+ * GCS write failure.
+ *
+ * @typedef StreamingStorageConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingStorageConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Streaming annotation results corresponding to a portion of the video
+ * that is currently being processed.
+ *
+ * @property {Object[]} shotAnnotations
+ * Shot annotation results. Each shot is represented as a video segment.
+ *
+ * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment}
+ *
+ * @property {Object[]} labelAnnotations
+ * Label annotation results.
+ *
+ * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation}
+ *
+ * @property {Object} explicitAnnotation
+ * Explicit content detection results.
+ *
+ * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation}
+ *
+ * @property {Object[]} objectAnnotations
+ * Object tracking results.
+ *
+ * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation}
+ *
+ * @typedef StreamingVideoAnnotationResults
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingVideoAnnotationResults = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Provides information to the annotator that specifies how to process the
+ * request.
+ *
+ * @property {number} feature
+ * Requested annotation feature.
+ *
+ * The number should be among the values of [StreamingFeature]{@link google.cloud.videointelligence.v1p3beta1.StreamingFeature}
+ *
+ * @property {Object} shotChangeDetectionConfig
+ * Config for SHOT_CHANGE_DETECTION.
+ *
+ * This object should have the same structure as [StreamingShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig}
+ *
+ * @property {Object} labelDetectionConfig
+ * Config for LABEL_DETECTION.
+ *
+ * This object should have the same structure as [StreamingLabelDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig}
+ *
+ * @property {Object} explicitContentDetectionConfig
+ * Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
+ *
+ * This object should have the same structure as [StreamingExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig}
+ *
+ * @property {Object} objectTrackingConfig
+ * Config for STREAMING_OBJECT_TRACKING.
+ *
+ * This object should have the same structure as [StreamingObjectTrackingConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig}
+ *
+ * @property {Object} storageConfig
+ * Streaming storage option. By default, storage is disabled.
+ *
+ * This object should have the same structure as [StreamingStorageConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig}
+ *
+ * @typedef StreamingVideoConfig
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ * @see [google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto}
+ */
+const StreamingVideoConfig = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
+
+/**
+ * Video annotation feature.
+ *
+ * @enum {number}
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ */
+const Feature = {
+
+  /**
+   * Unspecified.
+   */
+  FEATURE_UNSPECIFIED: 0,
+
+  /**
+   * Label detection. Detect objects, such as dog or flower.
+   */
+  LABEL_DETECTION: 1,
+
+  /**
+   * Shot change detection.
+   */
+  SHOT_CHANGE_DETECTION: 2,
+
+  /**
+   * Explicit content detection.
+   */
+  EXPLICIT_CONTENT_DETECTION: 3,
+
+  /**
+   * OCR text detection and tracking.
+   */
+  TEXT_DETECTION: 7,
+
+  /**
+   * Object detection and tracking.
+   */
+  OBJECT_TRACKING: 9
+};
+
+/**
+ * Label detection mode.
+ *
+ * @enum {number}
+ * @memberof google.cloud.videointelligence.v1p3beta1
+ */
+const LabelDetectionMode = {
+
+  /**
+   * Unspecified.
+   */
+  LABEL_DETECTION_MODE_UNSPECIFIED: 0,
+
+  /**
+   * Detect shot-level labels.
+   */
+  SHOT_MODE: 1,
+
+  /**
+   * Detect frame-level labels.
+ */ + FRAME_MODE: 2, + + /** + * Detect both shot-level and frame-level labels. + */ + SHOT_AND_FRAME_MODE: 3 +}; + +/** + * Bucketized representation of likelihood. + * + * @enum {number} + * @memberof google.cloud.videointelligence.v1p3beta1 + */ +const Likelihood = { + + /** + * Unspecified likelihood. + */ + LIKELIHOOD_UNSPECIFIED: 0, + + /** + * Very unlikely. + */ + VERY_UNLIKELY: 1, + + /** + * Unlikely. + */ + UNLIKELY: 2, + + /** + * Possible. + */ + POSSIBLE: 3, + + /** + * Likely. + */ + LIKELY: 4, + + /** + * Very likely. + */ + VERY_LIKELY: 5 +}; + +/** + * Streaming video annotation feature. + * + * @enum {number} + * @memberof google.cloud.videointelligence.v1p3beta1 + */ +const StreamingFeature = { + + /** + * Unspecified. + */ + STREAMING_FEATURE_UNSPECIFIED: 0, + + /** + * Label detection. Detect objects, such as dog or flower. + */ + STREAMING_LABEL_DETECTION: 1, + + /** + * Shot change detection. + */ + STREAMING_SHOT_CHANGE_DETECTION: 2, + + /** + * Explicit content detection. + */ + STREAMING_EXPLICIT_CONTENT_DETECTION: 3, + + /** + * Object detection and tracking. + */ + STREAMING_OBJECT_TRACKING: 4 +}; \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js new file mode 100644 index 00000000000..bd03cc3da0e --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/longrunning/doc_operations.js @@ -0,0 +1,63 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * This resource represents a long-running operation that is the result of a + * network API call. + * + * @property {string} name + * The server-assigned name, which is only unique within the same service that + * originally returns it. If you use the default HTTP mapping, the + * `name` should have the format of `operations/some/unique/name`. + * + * @property {Object} metadata + * Service-specific metadata associated with the operation. It typically + * contains progress information and common metadata such as create time. + * Some services might not provide such metadata. Any method that returns a + * long-running operation should document the metadata type, if any. + * + * This object should have the same structure as [Any]{@link google.protobuf.Any} + * + * @property {boolean} done + * If the value is `false`, it means the operation is still in progress. + * If true, the operation is completed, and either `error` or `response` is + * available. + * + * @property {Object} error + * The error result of the operation in case of failure or cancellation. + * + * This object should have the same structure as [Status]{@link google.rpc.Status} + * + * @property {Object} response + * The normal response of the operation in case of success. 
If the original + * method returns no data on success, such as `Delete`, the response is + * `google.protobuf.Empty`. If the original method is standard + * `Get`/`Create`/`Update`, the response should be the resource. For other + * methods, the response should have the type `XxxResponse`, where `Xxx` + * is the original method name. For example, if the original method name + * is `TakeSnapshot()`, the inferred response type is + * `TakeSnapshotResponse`. + * + * This object should have the same structure as [Any]{@link google.protobuf.Any} + * + * @typedef Operation + * @memberof google.longrunning + * @see [google.longrunning.Operation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto} + */ +const Operation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js new file mode 100644 index 00000000000..f3278b34e66 --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_any.js @@ -0,0 +1,136 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * `Any` contains an arbitrary serialized protocol buffer message along with a + * URL that describes the type of the serialized message. + * + * Protobuf library provides support to pack/unpack Any values in the form + * of utility functions or additional generated methods of the Any type. + * + * Example 1: Pack and unpack a message in C++. + * + * Foo foo = ...; + * Any any; + * any.PackFrom(foo); + * ... + * if (any.UnpackTo(&foo)) { + * ... + * } + * + * Example 2: Pack and unpack a message in Java. + * + * Foo foo = ...; + * Any any = Any.pack(foo); + * ... + * if (any.is(Foo.class)) { + * foo = any.unpack(Foo.class); + * } + * + * Example 3: Pack and unpack a message in Python. + * + * foo = Foo(...) + * any = Any() + * any.Pack(foo) + * ... + * if any.Is(Foo.DESCRIPTOR): + * any.Unpack(foo) + * ... + * + * Example 4: Pack and unpack a message in Go + * + * foo := &pb.Foo{...} + * any, err := ptypes.MarshalAny(foo) + * ... + * foo := &pb.Foo{} + * if err := ptypes.UnmarshalAny(any, foo); err != nil { + * ... + * } + * + * The pack methods provided by protobuf library will by default use + * 'type.googleapis.com/full.type.name' as the type URL and the unpack + * methods only use the fully qualified type name after the last '/' + * in the type URL, for example "foo.bar.com/x/y.z" will yield type + * name "y.z". + * + * + * # JSON + * + * The JSON representation of an `Any` value uses the regular + * representation of the deserialized, embedded message, with an + * additional field `@type` which contains the type URL. 
Example:
+ *
+ *     package google.profile;
+ *     message Person {
+ *       string first_name = 1;
+ *       string last_name = 2;
+ *     }
+ *
+ *     {
+ *       "@type": "type.googleapis.com/google.profile.Person",
+ *       "firstName": <string>,
+ *       "lastName": <string>
+ *     }
+ *
+ * If the embedded message type is well-known and has a custom JSON
+ * representation, that representation will be embedded adding a field
+ * `value` which holds the custom JSON in addition to the `@type`
+ * field. Example (for message google.protobuf.Duration):
+ *
+ *     {
+ *       "@type": "type.googleapis.com/google.protobuf.Duration",
+ *       "value": "1.212s"
+ *     }
+ *
+ * @property {string} typeUrl
+ * A URL/resource name that uniquely identifies the type of the serialized
+ * protocol buffer message. The last segment of the URL's path must represent
+ * the fully qualified name of the type (as in
+ * `path/google.protobuf.Duration`). The name should be in a canonical form
+ * (e.g., leading "." is not accepted).
+ *
+ * In practice, teams usually precompile into the binary all types that they
+ * expect it to use in the context of Any. However, for URLs which use the
+ * scheme `http`, `https`, or no scheme, one can optionally set up a type
+ * server that maps type URLs to message definitions as follows:
+ *
+ * * If no scheme is provided, `https` is assumed.
+ * * An HTTP GET on the URL must yield a google.protobuf.Type
+ *   value in binary format, or produce an error.
+ * * Applications are allowed to cache lookup results based on the
+ *   URL, or have them precompiled into a binary to avoid any
+ *   lookup. Therefore, binary compatibility needs to be preserved
+ *   on changes to types. (Use versioned type names to manage
+ *   breaking changes.)
+ *
+ * Note: this functionality is not currently available in the official
+ * protobuf release, and it is not used for type URLs beginning with
+ * type.googleapis.com.
+ *
+ * Schemes other than `http`, `https` (or the empty scheme) might be
+ * used with implementation specific semantics.
+ *
+ * @property {string} value
+ * Must be a valid serialized protocol buffer of the above specified type.
+ *
+ * @typedef Any
+ * @memberof google.protobuf
+ * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto}
+ */
+const Any = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
+};
\ No newline at end of file
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js
new file mode 100644
index 00000000000..1275f8f4d13
--- /dev/null
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/protobuf/doc_duration.js
@@ -0,0 +1,97 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Note: this file is purely for documentation. Any contents are not expected
+// to be loaded as the JS file.
+
+/**
+ * A Duration represents a signed, fixed-length span of time represented
+ * as a count of seconds and fractions of seconds at nanosecond
+ * resolution. It is independent of any calendar and concepts like "day"
+ * or "month". It is related to Timestamp in that the difference between
+ * two Timestamp values is a Duration and it can be added or subtracted
+ * from a Timestamp. Range is approximately +-10,000 years.
+ *
+ * # Examples
+ *
+ * Example 1: Compute Duration from two Timestamps in pseudo code.
+ *
+ *     Timestamp start = ...;
+ *     Timestamp end = ...;
+ *     Duration duration = ...;
+ *
+ *     duration.seconds = end.seconds - start.seconds;
+ *     duration.nanos = end.nanos - start.nanos;
+ *
+ *     if (duration.seconds < 0 && duration.nanos > 0) {
+ *       duration.seconds += 1;
+ *       duration.nanos -= 1000000000;
+ *     } else if (duration.seconds > 0 && duration.nanos < 0) {
+ *       duration.seconds -= 1;
+ *       duration.nanos += 1000000000;
+ *     }
+ *
+ * Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+ *
+ *     Timestamp start = ...;
+ *     Duration duration = ...;
+ *     Timestamp end = ...;
+ *
+ *     end.seconds = start.seconds + duration.seconds;
+ *     end.nanos = start.nanos + duration.nanos;
+ *
+ *     if (end.nanos < 0) {
+ *       end.seconds -= 1;
+ *       end.nanos += 1000000000;
+ *     } else if (end.nanos >= 1000000000) {
+ *       end.seconds += 1;
+ *       end.nanos -= 1000000000;
+ *     }
+ *
+ * Example 3: Compute Duration from datetime.timedelta in Python.
+ *
+ *     td = datetime.timedelta(days=3, minutes=10)
+ *     duration = Duration()
+ *     duration.FromTimedelta(td)
+ *
+ * # JSON Mapping
+ *
+ * In JSON format, the Duration type is encoded as a string rather than an
+ * object, where the string ends in the suffix "s" (indicating seconds) and
+ * is preceded by the number of seconds, with nanoseconds expressed as
+ * fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+ * encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+ * be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+ * microsecond should be expressed in JSON format as "3.000001s".
+ *
+ * @property {number} seconds
+ * Signed seconds of the span of time. Must be from -315,576,000,000
+ * to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ * 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ *
+ * @property {number} nanos
+ * Signed fractions of a second at nanosecond resolution of the span
+ * of time. Durations less than one second are represented with a 0
+ * `seconds` field and a positive or negative `nanos` field. For durations
+ * of one second or more, a non-zero value for the `nanos` field must be
+ * of the same sign as the `seconds` field. Must be from -999,999,999
+ * to +999,999,999 inclusive.
+ *
+ * @typedef Duration
+ * @memberof google.protobuf
+ * @see [google.protobuf.Duration definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/duration.proto}
+ */
+const Duration = {
+  // This is for documentation. Actual contents will be loaded by gRPC.
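+  // As a sketch of the decoded shape: a 1.5 second offset arrives as
+  // {seconds: 1, nanos: 500000000}.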
+}; \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js new file mode 100644 index 00000000000..fc4b5be93f0 --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/doc/google/rpc/doc_status.js @@ -0,0 +1,92 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. + +/** + * The `Status` type defines a logical error model that is suitable for different + * programming environments, including REST APIs and RPC APIs. It is used by + * [gRPC](https://github.com/grpc). The error model is designed to be: + * + * - Simple to use and understand for most users + * - Flexible enough to meet unexpected needs + * + * # Overview + * + * The `Status` message contains three pieces of data: error code, error message, + * and error details. The error code should be an enum value of + * google.rpc.Code, but it may accept additional error codes if needed. The + * error message should be a developer-facing English message that helps + * developers *understand* and *resolve* the error. If a localized user-facing + * error message is needed, put the localized message in the error details or + * localize it in the client. The optional error details may contain arbitrary + * information about the error. There is a predefined set of error detail types + * in the package `google.rpc` that can be used for common error conditions. + * + * # Language mapping + * + * The `Status` message is the logical representation of the error model, but it + * is not necessarily the actual wire format. When the `Status` message is + * exposed in different client libraries and different wire protocols, it can be + * mapped differently. For example, it will likely be mapped to some exceptions + * in Java, but more likely mapped to some error codes in C. + * + * # Other uses + * + * The error model and the `Status` message can be used in a variety of + * environments, either with or without APIs, to provide a + * consistent developer experience across different environments. + * + * Example uses of this error model include: + * + * - Partial errors. If a service needs to return partial errors to the client, + * it may embed the `Status` in the normal response to indicate the partial + * errors. + * + * - Workflow errors. A typical workflow has multiple steps. Each step may + * have a `Status` message for error reporting. + * + * - Batch operations. If a client uses batch request and batch response, the + * `Status` message should be used directly inside batch response, one for + * each error sub-response. + * + * - Asynchronous operations. If an API call embeds asynchronous operation + * results in its response, the status of those operations should be + * represented directly using the `Status` message. 
+ * + * - Logging. If some API errors are stored in logs, the message `Status` could + * be used directly after any stripping needed for security/privacy reasons. + * + * @property {number} code + * The status code, which should be an enum value of google.rpc.Code. + * + * @property {string} message + * A developer-facing error message, which should be in English. Any + * user-facing error message should be localized and sent in the + * google.rpc.Status.details field, or localized by the client. + * + * @property {Object[]} details + * A list of messages that carry the error details. There is a common set of + * message types for APIs to use. + * + * This object should have the same structure as [Any]{@link google.protobuf.Any} + * + * @typedef Status + * @memberof google.rpc + * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto} + */ +const Status = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/index.js b/packages/google-cloud-videointelligence/src/v1p3beta1/index.js new file mode 100644 index 00000000000..21857997950 --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/index.js @@ -0,0 +1,21 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const VideoIntelligenceServiceClient = require('./video_intelligence_service_client'); +const StreamingVideoIntelligenceServiceClient = require('./streaming_video_intelligence_service_client'); + +module.exports.VideoIntelligenceServiceClient = VideoIntelligenceServiceClient; +module.exports.StreamingVideoIntelligenceServiceClient = StreamingVideoIntelligenceServiceClient; diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js new file mode 100644 index 00000000000..bb36f74dcdf --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client.js @@ -0,0 +1,222 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+'use strict';
+
+const gapicConfig = require('./streaming_video_intelligence_service_client_config');
+const gax = require('google-gax');
+const merge = require('lodash.merge');
+const path = require('path');
+
+const VERSION = require('../../package.json').version;
+
+/**
+ * Service that implements Google Cloud Video Intelligence Streaming API.
+ *
+ * @class
+ * @memberof v1p3beta1
+ */
+class StreamingVideoIntelligenceServiceClient {
+  /**
+   * Construct an instance of StreamingVideoIntelligenceServiceClient.
+   *
+   * @param {object} [options] - The configuration object. See the subsequent
+   *   parameters for more details.
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID. If your
+   *     app is running in an environment which supports
+   *     {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *     your project ID will be detected automatically.
+   * @param {function} [options.promise] - Custom promise module to use instead
+   *     of native Promises.
+   * @param {string} [options.servicePath] - The domain name of the
+   *     API remote host.
+   */
+  constructor(opts) {
+    this._descriptors = {};
+
+    // Ensure that options include the service address and port.
+    opts = Object.assign(
+      {
+        clientConfig: {},
+        port: this.constructor.port,
+        servicePath: this.constructor.servicePath,
+      },
+      opts
+    );
+
+    // Create a `gaxGrpc` object, with any grpc-specific options
+    // sent to the client.
+    opts.scopes = this.constructor.scopes;
+    const gaxGrpc = new gax.GrpcClient(opts);
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = gaxGrpc.auth;
+
+    // Determine the client header string.
+    const clientHeader = [
+      `gl-node/${process.version}`,
+      `grpc/${gaxGrpc.grpcVersion}`,
+      `gax/${gax.version}`,
+      `gapic/${VERSION}`,
+    ];
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    const protos = merge(
+      {},
+      gaxGrpc.loadProto(
+        path.join(__dirname, '..', '..', 'protos'),
+        'google/cloud/videointelligence/v1p3beta1/video_intelligence.proto'
+      )
+    );
+
+    // Some of the methods on this service provide streaming responses.
+    // Provide descriptors for these.
+    this._descriptors.stream = {
+      streamingAnnotateVideo: new gax.StreamDescriptor(
+        gax.StreamType.BIDI_STREAMING
+      ),
+    };
+
+    // Put together the default options sent with requests.
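+    // These merge the bundled client config, any caller-supplied overrides,
+    // and the x-goog-api-client header assembled above.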
+    const defaults = gaxGrpc.constructSettings(
+      'google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService',
+      gapicConfig,
+      opts.clientConfig,
+      {'x-goog-api-client': clientHeader.join(' ')}
+    );
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this._innerApiCalls = {};
+
+    // Put together the "service stub" for
+    // google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService.
+    const streamingVideoIntelligenceServiceStub = gaxGrpc.createStub(
+      protos.google.cloud.videointelligence.v1p3beta1
+        .StreamingVideoIntelligenceService,
+      opts
+    );
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
+    const streamingVideoIntelligenceServiceStubMethods = [
+      'streamingAnnotateVideo',
+    ];
+    for (const methodName of streamingVideoIntelligenceServiceStubMethods) {
+      this._innerApiCalls[methodName] = gax.createApiCall(
+        streamingVideoIntelligenceServiceStub.then(
+          stub =>
+            function() {
+              const args = Array.prototype.slice.call(arguments, 0);
+              return stub[methodName].apply(stub, args);
+            },
+          err =>
+            function() {
+              throw err;
+            }
+        ),
+        defaults[methodName],
+        this._descriptors.stream[methodName]
+      );
+    }
+  }
+
+  /**
+   * The DNS address for this API service.
+   */
+  static get servicePath() {
+    return 'videointelligence.googleapis.com';
+  }
+
+  /**
+   * The port for this API service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   */
+  static get scopes() {
+    return ['https://www.googleapis.com/auth/cloud-platform'];
+  }
+
+  /**
+   * Return the project ID used by this class.
+   * @param {function(Error, string)} callback - the callback to
+   *   be called with the current project Id.
+   */
+  getProjectId(callback) {
+    return this.auth.getProjectId(callback);
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+  /**
+   * Performs video annotation with bidirectional streaming: emitting results
+   * while sending video/audio bytes.
+   * This method is only available via the gRPC API (not REST).
+   *
+   * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g. timeout,
+   *   retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+   * @returns {Stream}
+   *   An object stream which is both readable and writable. It accepts objects
+   *   representing [StreamingAnnotateVideoRequest]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest} for the write() method, and
+   *   will emit objects representing [StreamingAnnotateVideoResponse]{@link google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse} on the 'data' event asynchronously.
+   *
+   * @example
+   *
+   * const videoIntelligence = require('@google-cloud/video-intelligence');
+   *
+   * const client = new videoIntelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient({
+   *   // optional auth parameters.
+   * });
+   *
+   * const stream = client.streamingAnnotateVideo().on('data', response => {
+   *   // doThingsWith(response)
+   * });
+   * const request = {};
+   * // Write request objects.
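+   * // The first object written must carry only `videoConfig`; each
+   * // subsequent write must carry only `inputContent` bytes. Call
+   * // stream.end() once all chunks have been written.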
+   * stream.write(request);
+   */
+  streamingAnnotateVideo(options) {
+    options = options || {};
+
+    return this._innerApiCalls.streamingAnnotateVideo(options);
+  }
+}
+
+module.exports = StreamingVideoIntelligenceServiceClient;
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
new file mode 100644
index 00000000000..7638632a36e
--- /dev/null
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/streaming_video_intelligence_service_client_config.json
@@ -0,0 +1,31 @@
+{
+  "interfaces": {
+    "google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService": {
+      "retry_codes": {
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ],
+        "non_idempotent": []
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 10800000,
+          "rpc_timeout_multiplier": 1.0,
+          "max_rpc_timeout_millis": 10800000,
+          "total_timeout_millis": 10800000
+        }
+      },
+      "methods": {
+        "StreamingAnnotateVideo": {
+          "timeout_millis": 10800000,
+          "retry_codes_name": "idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js
new file mode 100644
index 00000000000..b1890d0a94e
--- /dev/null
+++ b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client.js
@@ -0,0 +1,342 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+'use strict';
+
+const gapicConfig = require('./video_intelligence_service_client_config');
+const gax = require('google-gax');
+const merge = require('lodash.merge');
+const path = require('path');
+const protobuf = require('protobufjs');
+
+const VERSION = require('../../package.json').version;
+
+/**
+ * Service that implements Google Cloud Video Intelligence API.
+ *
+ * @class
+ * @memberof v1p3beta1
+ */
+class VideoIntelligenceServiceClient {
+  /**
+   * Construct an instance of VideoIntelligenceServiceClient.
+   *
+   * @param {object} [options] - The configuration object. See the subsequent
+   *   parameters for more details.
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID. If your
+   *     app is running in an environment which supports
+   *     {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *     your project ID will be detected automatically.
+   * @param {function} [options.promise] - Custom promise module to use instead
+   *     of native Promises.
+   * @param {string} [options.servicePath] - The domain name of the
+   *     API remote host.
+   */
+  constructor(opts) {
+    this._descriptors = {};
+
+    // Ensure that options include the service address and port.
+    opts = Object.assign(
+      {
+        clientConfig: {},
+        port: this.constructor.port,
+        servicePath: this.constructor.servicePath,
+      },
+      opts
+    );
+
+    // Create a `gaxGrpc` object, with any grpc-specific options
+    // sent to the client.
+    opts.scopes = this.constructor.scopes;
+    const gaxGrpc = new gax.GrpcClient(opts);
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = gaxGrpc.auth;
+
+    // Determine the client header string.
+    const clientHeader = [
+      `gl-node/${process.version}`,
+      `grpc/${gaxGrpc.grpcVersion}`,
+      `gax/${gax.version}`,
+      `gapic/${VERSION}`,
+    ];
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    const protos = merge(
+      {},
+      gaxGrpc.loadProto(
+        path.join(__dirname, '..', '..', 'protos'),
+        'google/cloud/videointelligence/v1p3beta1/video_intelligence.proto'
+      )
+    );
+    let protoFilesRoot = new gax.GoogleProtoFilesRoot();
+    protoFilesRoot = protobuf.loadSync(
+      path.join(
+        __dirname,
+        '..',
+        '..',
+        'protos',
+        'google/cloud/videointelligence/v1p3beta1/video_intelligence.proto'
+      ),
+      protoFilesRoot
+    );
+
+    // This API contains "long-running operations", which return
+    // an Operation object that allows for tracking of the operation,
+    // rather than holding a request open.
+    this.operationsClient = new gax.lro({
+      auth: gaxGrpc.auth,
+      grpc: gaxGrpc.grpc,
+    }).operationsClient(opts);
+
+    const annotateVideoResponse = protoFilesRoot.lookup(
+      'google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse'
+    );
+    const annotateVideoMetadata = protoFilesRoot.lookup(
+      'google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress'
+    );
+
+    this._descriptors.longrunning = {
+      annotateVideo: new gax.LongrunningDescriptor(
+        this.operationsClient,
+        annotateVideoResponse.decode.bind(annotateVideoResponse),
+        annotateVideoMetadata.decode.bind(annotateVideoMetadata)
+      ),
+    };
+
+    // Put together the default options sent with requests.
+    const defaults = gaxGrpc.constructSettings(
+      'google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService',
+      gapicConfig,
+      opts.clientConfig,
+      {'x-goog-api-client': clientHeader.join(' ')}
+    );
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this._innerApiCalls = {};
+
+    // Put together the "service stub" for
+    // google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService.
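+    // createStub resolves asynchronously; the wrappers built below wait on
+    // the resulting promise before dispatching each call.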
+ const videoIntelligenceServiceStub = gaxGrpc.createStub( + protos.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService, + opts + ); + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const videoIntelligenceServiceStubMethods = ['annotateVideo']; + for (const methodName of videoIntelligenceServiceStubMethods) { + this._innerApiCalls[methodName] = gax.createApiCall( + videoIntelligenceServiceStub.then( + stub => + function() { + const args = Array.prototype.slice.call(arguments, 0); + return stub[methodName].apply(stub, args); + }, + err => + function() { + throw err; + } + ), + defaults[methodName], + this._descriptors.longrunning[methodName] + ); + } + } + + /** + * The DNS address for this API service. + */ + static get servicePath() { + return 'videointelligence.googleapis.com'; + } + + /** + * The port for this API service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + */ + static get scopes() { + return ['https://www.googleapis.com/auth/cloud-platform']; + } + + /** + * Return the project ID used by this class. + * @param {function(Error, string)} callback - the callback to + * be called with the current project Id. + */ + getProjectId(callback) { + return this.auth.getProjectId(callback); + } + + // ------------------- + // -- Service calls -- + // ------------------- + + /** + * Performs asynchronous video annotation. Progress and results can be + * retrieved through the `google.longrunning.Operations` interface. + * `Operation.metadata` contains `AnnotateVideoProgress` (progress). + * `Operation.response` contains `AnnotateVideoResponse` (results). + * + * @param {Object} request + * The request object that will be sent. + * @param {string} [request.inputUri] + * Input video location. Currently, only + * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + * supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT). For more information, see + * [Request URIs](https://cloud.google.com/storage/docs/reference-uris). + * A video URI may include wildcards in `object-id`, and thus identify + * multiple videos. Supported wildcards: '*' to match 0 or more characters; + * '?' to match 1 character. If unset, the input video should be embedded + * in the request as `input_content`. If set, `input_content` should be unset. + * @param {string} [request.inputContent] + * The video data bytes. + * If unset, the input video(s) should be specified via `input_uri`. + * If set, `input_uri` should be unset. + * @param {number[]} [request.features] + * Requested video annotation features. + * + * The number should be among the values of [Feature]{@link google.cloud.videointelligence.v1p3beta1.Feature} + * @param {Object} [request.videoContext] + * Additional video context and/or feature-specific parameters. + * + * This object should have the same structure as [VideoContext]{@link google.cloud.videointelligence.v1p3beta1.VideoContext} + * @param {string} [request.outputUri] + * Optional location where the output (in JSON format) should be stored. + * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + * URIs are supported, which must be specified in the following format: + * `gs://bucket-id/object-id` (other URI formats return + * google.rpc.Code.INVALID_ARGUMENT). 
For more information, see
+   *   [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+   * @param {string} [request.locationId]
+   *   Optional cloud region where annotation should take place. Supported cloud
+   *   regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
+   *   is specified, a region will be determined based on video file location.
+   * @param {Object} [options]
+   *   Optional parameters. You can override the default settings for this call, e.g. timeout,
+   *   retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details.
+   * @param {function(?Error, ?Object)} [callback]
+   *   The function which will be called with the result of the API call.
+   *
+   *   The second parameter to the callback is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+   * @returns {Promise} - The promise which resolves to an array.
+   *   The first element of the array is a [gax.Operation]{@link https://googleapis.github.io/gax-nodejs/Operation} object.
+   *   The promise has a method named "cancel" which cancels the ongoing API call.
+   *
+   * @example
+   *
+   * const videoIntelligence = require('@google-cloud/video-intelligence');
+   *
+   * const client = new videoIntelligence.v1p3beta1.VideoIntelligenceServiceClient({
+   *   // optional auth parameters.
+   * });
+   *
+   * const inputUri = 'gs://demomaker/cat.mp4';
+   * const featuresElement = 'LABEL_DETECTION';
+   * const features = [featuresElement];
+   * const request = {
+   *   inputUri: inputUri,
+   *   features: features,
+   * };
+   *
+   * // Handle the operation using the promise pattern.
+   * client.annotateVideo(request)
+   *   .then(responses => {
+   *     const [operation, initialApiResponse] = responses;
+   *
+   *     // Operation#promise starts polling for the completion of the LRO.
+   *     return operation.promise();
+   *   })
+   *   .then(responses => {
+   *     const result = responses[0];
+   *     const metadata = responses[1];
+   *     const finalApiResponse = responses[2];
+   *   })
+   *   .catch(err => {
+   *     console.error(err);
+   *   });
+   *
+   * const inputUri = 'gs://demomaker/cat.mp4';
+   * const featuresElement = 'LABEL_DETECTION';
+   * const features = [featuresElement];
+   * const request = {
+   *   inputUri: inputUri,
+   *   features: features,
+   * };
+   *
+   * // Handle the operation using the event emitter pattern.
+   * client.annotateVideo(request)
+   *   .then(responses => {
+   *     const [operation, initialApiResponse] = responses;
+   *
+   *     // Adding a listener for the "complete" event starts polling for the
+   *     // completion of the operation.
+   *     operation.on('complete', (result, metadata, finalApiResponse) => {
+   *       // doSomethingWith(result);
+   *     });
+   *
+   *     // Adding a listener for the "progress" event causes the callback to be
+   *     // called on any change in metadata when the operation is polled.
+   *     operation.on('progress', (metadata, apiResponse) => {
+   *       // doSomethingWith(metadata)
+   *     });
+   *
+   *     // Adding a listener for the "error" event handles any errors found during polling.
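+   *     // (The same error also rejects operation.promise() in the promise
+   *     // pattern shown above.)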
+ * operation.on('error', err => { + * // throw(err); + * }); + * }) + * .catch(err => { + * console.error(err); + * }); + */ + annotateVideo(request, options, callback) { + if (options instanceof Function && callback === undefined) { + callback = options; + options = {}; + } + options = options || {}; + + return this._innerApiCalls.annotateVideo(request, options, callback); + } +} + +module.exports = VideoIntelligenceServiceClient; diff --git a/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json new file mode 100644 index 00000000000..2023e52ad84 --- /dev/null +++ b/packages/google-cloud-videointelligence/src/v1p3beta1/video_intelligence_service_client_config.json @@ -0,0 +1,31 @@ +{ + "interfaces": { + "google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService": { + "retry_codes": { + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ], + "non_idempotent": [] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.5, + "max_retry_delay_millis": 120000, + "initial_rpc_timeout_millis": 120000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 120000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "AnnotateVideo": { + "timeout_millis": 600000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/packages/google-cloud-videointelligence/synth.metadata b/packages/google-cloud-videointelligence/synth.metadata index 3962c6f4efd..4902f8ce09a 100644 --- a/packages/google-cloud-videointelligence/synth.metadata +++ b/packages/google-cloud-videointelligence/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2019-02-21T12:20:49.083829Z", + "updateTime": "2019-02-21T19:19:06.179395Z", "sources": [ { "generator": { @@ -74,6 +74,16 @@ "generator": "gapic", "config": "google/cloud/videointelligence/artman_videointelligence_v1p2beta1.yaml" } + }, + { + "client": { + "source": "googleapis", + "apiName": "video-intelligence", + "apiVersion": "v1p3beta1", + "language": "nodejs", + "generator": "gapic", + "config": "google/cloud/videointelligence/artman_videointelligence_v1p3beta1.yaml" + } } ] } \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/synth.py b/packages/google-cloud-videointelligence/synth.py index c82da634984..46afa7a7028 100644 --- a/packages/google-cloud-videointelligence/synth.py +++ b/packages/google-cloud-videointelligence/synth.py @@ -8,7 +8,7 @@ gapic = gcp.GAPICGenerator() common_templates = gcp.CommonTemplates() -versions = ["v1", "v1beta1", "v1beta2", "v1p1beta1", "v1p2beta1"] +versions = ["v1", "v1beta1", "v1beta2", "v1p1beta1", "v1p2beta1", "v1p3beta1"] for version in versions: library = gapic.node_library( diff --git a/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js b/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js new file mode 100644 index 00000000000..79c56511cae --- /dev/null +++ b/packages/google-cloud-videointelligence/test/gapic-v1p3beta1.js @@ -0,0 +1,229 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +'use strict'; + +const assert = require('assert'); +const through2 = require('through2'); + +const videoIntelligenceModule = require('../src'); + +const FAKE_STATUS_CODE = 1; +const error = new Error(); +error.code = FAKE_STATUS_CODE; + +describe('VideoIntelligenceServiceClient', () => { + describe('annotateVideo', function() { + it('invokes annotateVideo without error', done => { + const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + + // Mock request + const inputUri = 'gs://demomaker/cat.mp4'; + const featuresElement = 'LABEL_DETECTION'; + const features = [featuresElement]; + const request = { + inputUri: inputUri, + features: features, + }; + + // Mock response + const expectedResponse = {}; + + // Mock Grpc layer + client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod( + request, + expectedResponse + ); + + client + .annotateVideo(request) + .then(responses => { + const operation = responses[0]; + return operation.promise(); + }) + .then(responses => { + assert.deepStrictEqual(responses[0], expectedResponse); + done(); + }) + .catch(err => { + done(err); + }); + }); + + it('invokes annotateVideo with error', done => { + const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + + // Mock request + const inputUri = 'gs://demomaker/cat.mp4'; + const featuresElement = 'LABEL_DETECTION'; + const features = [featuresElement]; + const request = { + inputUri: inputUri, + features: features, + }; + + // Mock Grpc layer + client._innerApiCalls.annotateVideo = mockLongRunningGrpcMethod( + request, + null, + error + ); + + client + .annotateVideo(request) + .then(responses => { + const operation = responses[0]; + return operation.promise(); + }) + .then(() => { + assert.fail(); + }) + .catch(err => { + assert(err instanceof Error); + assert.strictEqual(err.code, FAKE_STATUS_CODE); + done(); + }); + }); + + it('has longrunning decoder functions', () => { + const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + assert( + client._descriptors.longrunning.annotateVideo.responseDecoder instanceof + Function + ); + assert( + client._descriptors.longrunning.annotateVideo.metadataDecoder instanceof + Function + ); + }); + }); +}); +describe('StreamingVideoIntelligenceServiceClient', () => { + describe('streamingAnnotateVideo', () => { + it('invokes streamingAnnotateVideo without error', done => { + const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + + // Mock request + const request = {}; + + // Mock response + const annotationResultsUri = 'annotationResultsUri-238075757'; + const expectedResponse = { + annotationResultsUri: annotationResultsUri, + }; + + 
// Mock Grpc layer + client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod( + request, + expectedResponse + ); + + const stream = client + .streamingAnnotateVideo() + .on('data', response => { + assert.deepStrictEqual(response, expectedResponse); + done(); + }) + .on('error', err => { + done(err); + }); + + stream.write(request); + }); + + it('invokes streamingAnnotateVideo with error', done => { + const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient( + { + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + } + ); + + // Mock request + const request = {}; + + // Mock Grpc layer + client._innerApiCalls.streamingAnnotateVideo = mockBidiStreamingGrpcMethod( + request, + null, + error + ); + + const stream = client + .streamingAnnotateVideo() + .on('data', () => { + assert.fail(); + }) + .on('error', err => { + assert(err instanceof Error); + assert.strictEqual(err.code, FAKE_STATUS_CODE); + done(); + }); + + stream.write(request); + }); + }); +}); + +function mockBidiStreamingGrpcMethod(expectedRequest, response, error) { + return () => { + const mockStream = through2.obj((chunk, enc, callback) => { + assert.deepStrictEqual(chunk, expectedRequest); + if (error) { + callback(error); + } else { + callback(null, response); + } + }); + return mockStream; + }; +} + +function mockLongRunningGrpcMethod(expectedRequest, response, error) { + return request => { + assert.deepStrictEqual(request, expectedRequest); + const mockOperation = { + promise: function() { + return new Promise((resolve, reject) => { + if (error) { + reject(error); + } else { + resolve([response]); + } + }); + }, + }; + return Promise.resolve([mockOperation]); + }; +}