samples: update shared config (#2443)
* update shared config

* Update to 1.0.13

* lint

* Fix linting

* lint

* fix imports

Co-authored-by: Les Vogel <lesv@users.noreply.github.com>
2 people authored and chingor13 committed Aug 24, 2020
1 parent d442bfd commit 4e4b0ab
Showing 28 changed files with 301 additions and 316 deletions.
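
The diffs below appear to be formatting-only: the updated shared config reflows builder chains (breaking after the assignment's "=" and indenting the chain beneath it) and removes stray blank lines between imports, without changing behavior. A minimal sketch of the target style, assuming the google-cloud-video-intelligence client library is on the classpath:

import com.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig;

public class FormattingExample {
  public static void main(String[] args) {
    // Style after the shared-config update: break after "=", then indent the
    // builder chain under the new line instead of aligning it with the receiver.
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")
            .setEnableAutomaticPunctuation(true)
            .build();
    System.out.println(config.getLanguageCode());
  }
}
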
46 changes: 23 additions & 23 deletions video/src/main/java/com/example/video/Detect.java
@@ -33,8 +33,8 @@

public class Detect {
/**
* Detects video transcription using the Video Intelligence
* API
* Detects video transcription using the Video Intelligence API
*
* @param args specifies features to detect and the path to the video on Google Cloud Storage.
*/
public static void main(String[] args) {
@@ -48,8 +48,8 @@ public static void main(String[] args) {

/**
* Helper that handles the input passed to the program.
* @param args specifies features to detect and the path to the video on Google Cloud Storage.
*
* @param args specifies features to detect and the path to the video on Google Cloud Storage.
* @throws IOException on Input/Output errors.
*/
public static void argsHelper(String[] args) throws Exception {
@@ -82,31 +82,31 @@ public static void speechTranscription(String gcsUri) throws Exception {
// Instantiate a com.google.cloud.videointelligence.v1p1beta1.VideoIntelligenceServiceClient
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Set the language code
SpeechTranscriptionConfig config = SpeechTranscriptionConfig.newBuilder()
.setLanguageCode("en-US")
.setEnableAutomaticPunctuation(true)
.build();
SpeechTranscriptionConfig config =
SpeechTranscriptionConfig.newBuilder()
.setLanguageCode("en-US")
.setEnableAutomaticPunctuation(true)
.build();

// Set the video context with the above configuration
VideoContext context = VideoContext.newBuilder()
.setSpeechTranscriptionConfig(config)
.build();
VideoContext context = VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build();

// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.SPEECH_TRANSCRIPTION)
.setVideoContext(context)
.build();
AnnotateVideoRequest request =
AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.SPEECH_TRANSCRIPTION)
.setVideoContext(context)
.build();

// asynchronously perform speech transcription on videos
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> response =
client.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
// Display the results
for (VideoAnnotationResults results : response.get(300, TimeUnit.SECONDS)
.getAnnotationResultsList()) {
for (VideoAnnotationResults results :
response.get(300, TimeUnit.SECONDS).getAnnotationResultsList()) {
for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
try {
// Print the transcription
@@ -118,12 +118,12 @@ public static void speechTranscription(String gcsUri) throws Exception {

System.out.println("Word level information:");
for (WordInfo wordInfo : alternative.getWordsList()) {
double startTime = wordInfo.getStartTime().getSeconds()
+ wordInfo.getStartTime().getNanos() / 1e9;
double endTime = wordInfo.getEndTime().getSeconds()
+ wordInfo.getEndTime().getNanos() / 1e9;
System.out.printf("\t%4.2fs - %4.2fs: %s\n",
startTime, endTime, wordInfo.getWord());
double startTime =
wordInfo.getStartTime().getSeconds() + wordInfo.getStartTime().getNanos() / 1e9;
double endTime =
wordInfo.getEndTime().getSeconds() + wordInfo.getEndTime().getNanos() / 1e9;
System.out.printf(
"\t%4.2fs - %4.2fs: %s\n", startTime, endTime, wordInfo.getWord());
}
} else {
System.out.println("No transcription found");
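
For reference, a minimal sketch of calling the transcription sample above from another class; the GCS URI is illustrative, and the call assumes Application Default Credentials and the video-intelligence client on the classpath:

import com.example.video.Detect;

public class TranscribeDemo {
  public static void main(String[] args) throws Exception {
    // Blocks (up to 300 seconds, per the sample) on the long-running operation,
    // then prints the transcript, confidence, and per-word timestamps.
    Detect.speechTranscription("gs://YOUR_BUCKET/path/to/video.mp4");
  }
}
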
44 changes: 20 additions & 24 deletions video/src/main/java/com/example/video/DetectFaces.java
@@ -33,12 +33,9 @@
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ExecutionException;

public class DetectFaces {

@@ -51,31 +48,31 @@ public static void detectFaces() throws Exception {
// Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
public static void detectFaces(String localFilePath) throws Exception {
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
VideoIntelligenceServiceClient.create()) {
VideoIntelligenceServiceClient.create()) {
// Reads a local video file and converts it to base64.
Path path = Paths.get(localFilePath);
byte[] data = Files.readAllBytes(path);
ByteString inputContent = ByteString.copyFrom(data);

FaceDetectionConfig faceDetectionConfig =
FaceDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get facial attributes.
.setIncludeBoundingBoxes(true)
.setIncludeAttributes(true)
.build();
FaceDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get facial attributes.
.setIncludeBoundingBoxes(true)
.setIncludeAttributes(true)
.build();
VideoContext videoContext =
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

AnnotateVideoRequest request =
AnnotateVideoRequest.newBuilder()
.setInputContent(inputContent)
.addFeatures(Feature.FACE_DETECTION)
.setVideoContext(videoContext)
.build();
AnnotateVideoRequest.newBuilder()
.setInputContent(inputContent)
.addFeatures(Feature.FACE_DETECTION)
.setVideoContext(videoContext)
.build();

// Detects faces in a video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
videoIntelligenceServiceClient.annotateVideoAsync(request);
videoIntelligenceServiceClient.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
AnnotateVideoResponse response = future.get();
@@ -85,18 +82,17 @@ public static void detectFaces(String localFilePath) throws Exception {

// Annotations for list of faces detected, tracked and recognized in video.
for (FaceDetectionAnnotation faceDetectionAnnotation :
annotationResult.getFaceDetectionAnnotationsList()) {
annotationResult.getFaceDetectionAnnotationsList()) {
System.out.print("Face detected:\n");
for (Track track : faceDetectionAnnotation.getTracksList()) {
VideoSegment segment = track.getSegment();
System.out.printf(
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
System.out.printf(
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(),
segment.getEndTimeOffset().getNanos() / 1e6);
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);

// Each segment includes timestamped objects that
// include characteristics of the face detected.
@@ -111,4 +107,4 @@ public static void detectFaces(String localFilePath) throws Exception {
}
}
}
// [END video_detect_faces_beta]
// [END video_detect_faces_beta]
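
A short, hypothetical invocation of the local-file face detection sample; the file path is illustrative:

import com.example.video.DetectFaces;

public class DetectFacesDemo {
  public static void main(String[] args) throws Exception {
    // Reads the local file, sends its bytes inline with the request, and prints
    // each detected face track with segment start/end times and attributes.
    DetectFaces.detectFaces("path/to/your-video.mp4");
  }
}
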
39 changes: 19 additions & 20 deletions video/src/main/java/com/example/video/DetectFacesGcs.java
@@ -44,27 +44,27 @@ public static void detectFacesGcs() throws Exception {
// Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
public static void detectFacesGcs(String gcsUri) throws Exception {
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
VideoIntelligenceServiceClient.create()) {
VideoIntelligenceServiceClient.create()) {

FaceDetectionConfig faceDetectionConfig =
FaceDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get facial attributes.
.setIncludeBoundingBoxes(true)
.setIncludeAttributes(true)
.build();
FaceDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get facial attributes.
.setIncludeBoundingBoxes(true)
.setIncludeAttributes(true)
.build();
VideoContext videoContext =
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

AnnotateVideoRequest request =
AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.FACE_DETECTION)
.setVideoContext(videoContext)
.build();
AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.FACE_DETECTION)
.setVideoContext(videoContext)
.build();

// Detects faces in a video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
videoIntelligenceServiceClient.annotateVideoAsync(request);
videoIntelligenceServiceClient.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
AnnotateVideoResponse response = future.get();
@@ -74,18 +74,17 @@ public static void detectFacesGcs(String gcsUri) throws Exception {

// Annotations for list of people detected, tracked and recognized in video.
for (FaceDetectionAnnotation faceDetectionAnnotation :
annotationResult.getFaceDetectionAnnotationsList()) {
annotationResult.getFaceDetectionAnnotationsList()) {
System.out.print("Face detected:\n");
for (Track track : faceDetectionAnnotation.getTracksList()) {
VideoSegment segment = track.getSegment();
System.out.printf(
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
System.out.printf(
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(),
segment.getEndTimeOffset().getNanos() / 1e6);
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);

// Each segment includes timestamped objects that
// include characteristics of the face detected.
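
The GCS variant takes a gs:// URI instead of a local path; a hypothetical invocation (URI illustrative):

import com.example.video.DetectFacesGcs;

public class DetectFacesGcsDemo {
  public static void main(String[] args) throws Exception {
    // Same output as the local-file sample, but the service reads the video
    // from Cloud Storage rather than receiving it inline.
    DetectFacesGcs.detectFacesGcs("gs://YOUR_BUCKET/path/to/video.mp4");
  }
}
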
2 changes: 0 additions & 2 deletions video/src/main/java/com/example/video/DetectLogo.java
@@ -31,12 +31,10 @@
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Duration;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import java.util.concurrent.ExecutionException;

public class DetectLogo {
2 changes: 0 additions & 2 deletions video/src/main/java/com/example/video/DetectLogoGcs.java
@@ -30,9 +30,7 @@
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.Duration;

import java.io.IOException;

import java.util.concurrent.ExecutionException;

public class DetectLogoGcs {
51 changes: 24 additions & 27 deletions video/src/main/java/com/example/video/DetectPerson.java
@@ -34,7 +34,6 @@
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -47,37 +46,36 @@ public static void detectPerson() throws Exception {
detectPerson(localFilePath);
}


// Detects people in a video stored in a local file using the Cloud Video Intelligence API.
public static void detectPerson(String localFilePath) throws Exception {
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
VideoIntelligenceServiceClient.create()) {
VideoIntelligenceServiceClient.create()) {
// Reads a local video file and converts it to base64.
Path path = Paths.get(localFilePath);
byte[] data = Files.readAllBytes(path);
ByteString inputContent = ByteString.copyFrom(data);

PersonDetectionConfig personDetectionConfig =
PersonDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get poses and attributes.
.setIncludeBoundingBoxes(true)
.setIncludePoseLandmarks(true)
.setIncludeAttributes(true)
.build();
PersonDetectionConfig.newBuilder()
// Must set includeBoundingBoxes to true to get poses and attributes.
.setIncludeBoundingBoxes(true)
.setIncludePoseLandmarks(true)
.setIncludeAttributes(true)
.build();
VideoContext videoContext =
VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

AnnotateVideoRequest request =
AnnotateVideoRequest.newBuilder()
.setInputContent(inputContent)
.addFeatures(Feature.PERSON_DETECTION)
.setVideoContext(videoContext)
.build();
AnnotateVideoRequest.newBuilder()
.setInputContent(inputContent)
.addFeatures(Feature.PERSON_DETECTION)
.setVideoContext(videoContext)
.build();

// Detects people in a video
// We get the first result because only one video is processed.
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
videoIntelligenceServiceClient.annotateVideoAsync(request);
videoIntelligenceServiceClient.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
AnnotateVideoResponse response = future.get();
@@ -87,18 +85,17 @@ public static void detectPerson(String localFilePath) throws Exception {

// Annotations for list of people detected, tracked and recognized in video.
for (PersonDetectionAnnotation personDetectionAnnotation :
annotationResult.getPersonDetectionAnnotationsList()) {
annotationResult.getPersonDetectionAnnotationsList()) {
System.out.print("Person detected:\n");
for (Track track : personDetectionAnnotation.getTracksList()) {
VideoSegment segment = track.getSegment();
System.out.printf(
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
"\tStart: %d.%.0fs\n",
segment.getStartTimeOffset().getSeconds(),
segment.getStartTimeOffset().getNanos() / 1e6);
System.out.printf(
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(),
segment.getEndTimeOffset().getNanos() / 1e6);
"\tEnd: %d.%.0fs\n",
segment.getEndTimeOffset().getSeconds(), segment.getEndTimeOffset().getNanos() / 1e6);

// Each segment includes timestamped objects that include characteristics--e.g. clothes,
// posture of the person detected.
@@ -107,18 +104,18 @@ public static void detectPerson(String localFilePath) throws Exception {
// Attributes include unique pieces of clothing, poses, or hair color.
for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
System.out.printf(
"\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
"\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
}

// Landmarks in person detection include body parts.
for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
System.out.printf(
"\tLandmark: %s; Vertex: %f, %f\n",
attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
"\tLandmark: %s; Vertex: %f, %f\n",
attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
}
}
}
}
}
}
// [END video_detect_person_beta]
// [END video_detect_person_beta]
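
And a hypothetical invocation of the person detection sample (local path illustrative); it prints person tracks with attributes such as clothing plus body-landmark coordinates:

import com.example.video.DetectPerson;

public class DetectPersonDemo {
  public static void main(String[] args) throws Exception {
    // Bounding boxes must be enabled (as the sample's config does) for poses
    // and attributes to be returned.
    DetectPerson.detectPerson("path/to/your-video.mp4");
  }
}
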
[Diffs for the remaining 22 changed files not shown]
