chore(samples): drop obsolete samples (#210)
Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly:
- [x] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-videointelligence/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea.
- [ ] Ensure the tests and linter pass
- [ ] Code coverage does not decrease (if any source code was changed)
- [ ] Appropriate docs were updated (if necessary)

Fixes #178 🦕
andrewferlitsch authored Sep 1, 2021
1 parent 85b3e75 commit 15e2a16
Showing 2 changed files with 0 additions and 165 deletions.
@@ -24,8 +24,6 @@
 python beta_snippets.py video-text-gcs \
 gs://python-docs-samples-tests/video/googlework_tiny.mp4
-python beta_snippets.py track-objects resources/cat.mp4
 python beta_snippets.py streaming-labels resources/cat.mp4
 python beta_snippets.py streaming-shot-change resources/cat.mp4
@@ -212,130 +210,6 @@ def video_detect_text(path):
     return annotation_result.text_annotations


-def track_objects_gcs(gcs_uri):
-    # [START video_object_tracking_gcs_beta]
-    """Object Tracking."""
-    from google.cloud import videointelligence_v1p2beta1 as videointelligence
-
-    # It is recommended to use location_id as 'us-east1' for the best latency
-    # due to different types of processors used in this region and others.
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.Feature.OBJECT_TRACKING]
-    operation = video_client.annotate_video(
-        request={
-            "features": features,
-            "input_uri": gcs_uri,
-            "location_id": "us-east1",
-        }
-    )
-    print("\nProcessing video for object annotations.")
-
-    result = operation.result(timeout=500)
-    print("\nFinished processing.\n")
-
-    # The first result is retrieved because a single video was processed.
-    object_annotations = result.annotation_results[0].object_annotations
-
-    # Get only the first annotation for demo purposes.
-    object_annotation = object_annotations[0]
-    # description is in Unicode
-    print(u"Entity description: {}".format(object_annotation.entity.description))
-    if object_annotation.entity.entity_id:
-        print("Entity id: {}".format(object_annotation.entity.entity_id))
-
-    print(
-        "Segment: {}s to {}s".format(
-            object_annotation.segment.start_time_offset.seconds
-            + object_annotation.segment.start_time_offset.microseconds / 1e6,
-            object_annotation.segment.end_time_offset.seconds
-            + object_annotation.segment.end_time_offset.microseconds / 1e6,
-        )
-    )
-
-    print("Confidence: {}".format(object_annotation.confidence))
-
-    # Here we print only the bounding box of the first frame in this segment
-    frame = object_annotation.frames[0]
-    box = frame.normalized_bounding_box
-    print(
-        "Time offset of the first frame: {}s".format(
-            frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
-        )
-    )
-    print("Bounding box position:")
-    print("\tleft : {}".format(box.left))
-    print("\ttop : {}".format(box.top))
-    print("\tright : {}".format(box.right))
-    print("\tbottom: {}".format(box.bottom))
-    print("\n")
-    # [END video_object_tracking_gcs_beta]
-    return object_annotations
-
-
-def track_objects(path):
-    # [START video_object_tracking_beta]
-    """Object Tracking."""
-    from google.cloud import videointelligence_v1p2beta1 as videointelligence
-
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.Feature.OBJECT_TRACKING]
-
-    with io.open(path, "rb") as file:
-        input_content = file.read()
-
-    # It is recommended to use location_id as 'us-east1' for the best latency
-    # due to different types of processors used in this region and others.
-    operation = video_client.annotate_video(
-        request={
-            "features": features,
-            "input_content": input_content,
-            "location_id": "us-east1",
-        }
-    )
-    print("\nProcessing video for object annotations.")
-
-    result = operation.result(timeout=500)
-    print("\nFinished processing.\n")
-
-    # The first result is retrieved because a single video was processed.
-    object_annotations = result.annotation_results[0].object_annotations
-
-    # Get only the first annotation for demo purposes.
-    object_annotation = object_annotations[0]
-    # description is in Unicode
-    print(u"Entity description: {}".format(object_annotation.entity.description))
-    if object_annotation.entity.entity_id:
-        print("Entity id: {}".format(object_annotation.entity.entity_id))
-
-    print(
-        "Segment: {}s to {}s".format(
-            object_annotation.segment.start_time_offset.seconds
-            + object_annotation.segment.start_time_offset.microseconds / 1e6,
-            object_annotation.segment.end_time_offset.seconds
-            + object_annotation.segment.end_time_offset.microseconds / 1e6,
-        )
-    )
-
-    print("Confidence: {}".format(object_annotation.confidence))
-
-    # Here we print only the bounding box of the first frame in this segment
-    frame = object_annotation.frames[0]
-    box = frame.normalized_bounding_box
-    print(
-        "Time offset of the first frame: {}s".format(
-            frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
-        )
-    )
-    print("Bounding box position:")
-    print("\tleft : {}".format(box.left))
-    print("\ttop : {}".format(box.top))
-    print("\tright : {}".format(box.right))
-    print("\tbottom: {}".format(box.bottom))
-    print("\n")
-    # [END video_object_tracking_beta]
-    return object_annotations
-
-
 def detect_labels_streaming(path):
     # [START video_streaming_label_detection_beta]
     from google.cloud import videointelligence_v1p3beta1 as videointelligence
@@ -890,16 +764,6 @@ def stream_generator():
     )
     video_text_parser.add_argument("path")

-    video_object_tracking_gcs_parser = subparsers.add_parser(
-        "track-objects-gcs", help=track_objects_gcs.__doc__
-    )
-    video_object_tracking_gcs_parser.add_argument("gcs_uri")
-
-    video_object_tracking_parser = subparsers.add_parser(
-        "track-objects", help=track_objects.__doc__
-    )
-    video_object_tracking_parser.add_argument("path")
-
     video_streaming_labels_parser = subparsers.add_parser(
         "streaming-labels", help=detect_labels_streaming.__doc__
     )
Expand Down Expand Up @@ -948,10 +812,6 @@ def stream_generator():
         video_detect_text_gcs(args.gcs_uri)
     elif args.command == "video-text":
         video_detect_text(args.path)
-    elif args.command == "track-objects-gcs":
-        track_objects_gcs(args.gcs_uri)
-    elif args.command == "track-objects":
-        track_objects(args.path)
     elif args.command == "streaming-labels":
         detect_labels_streaming(args.path)
     elif args.command == "streaming-shot-change":
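The dropped functions target the v1p2beta1 surface; object tracking has since graduated to the GA v1 API, which is what makes these beta samples obsolete. For reference, a minimal sketch of the equivalent v1 call, assuming google-cloud-videointelligence 2.x (the name track_objects_v1 is hypothetical and not part of this commit):

from google.cloud import videointelligence


def track_objects_v1(gcs_uri):
    """Sketch: track objects in a Cloud Storage video via the GA v1 API."""
    client = videointelligence.VideoIntelligenceServiceClient()
    operation = client.annotate_video(
        request={
            "features": [videointelligence.Feature.OBJECT_TRACKING],
            "input_uri": gcs_uri,
        }
    )
    # Object tracking is long-running; block until the operation completes.
    result = operation.result(timeout=500)
    # A single video was sent, so there is exactly one annotation result.
    return result.annotation_results[0].object_annotations

The second changed file, the sample's test module, drops the corresponding tests: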
@@ -143,31 +143,6 @@ def test_detect_text_gcs(capsys):
     assert "Text" in out


-# Flaky InvalidArgument
-@pytest.mark.flaky(max_runs=3, min_passes=1)
-def test_track_objects(capsys):
-    in_file = "./resources/googlework_tiny.mp4"
-    beta_snippets.track_objects(in_file)
-    out, _ = capsys.readouterr()
-    assert "Entity id" in out
-
-
-# Flaky exceeding designed timeout
-@pytest.mark.slow
-@pytest.mark.flaky(max_runs=3, min_passes=1)
-def test_track_objects_gcs():
-    in_file = "gs://cloud-samples-data/video/cat.mp4"
-    object_annotations = beta_snippets.track_objects_gcs(in_file)
-
-    text_exists = False
-    for object_annotation in object_annotations:
-        if "CAT" in object_annotation.entity.description.upper():
-            text_exists = True
-    assert text_exists
-    assert object_annotations[0].frames[0].normalized_bounding_box.left >= 0.0
-    assert object_annotations[0].frames[0].normalized_bounding_box.left <= 1.0
-
-
 # Flaky Gateway
 @pytest.mark.flaky(max_runs=3, min_passes=1)
 def test_streaming_automl_classification(capsys, video_path):
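If equivalent coverage were wanted against the GA surface, a replacement test might look like the following sketch (hypothetical, not part of this commit; assumes the track_objects_v1 sketch above and the same pytest flaky plugin the removed tests used):

import pytest


# Long-running API call; retried like the removed beta tests.
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_track_objects_v1():
    object_annotations = track_objects_v1("gs://cloud-samples-data/video/cat.mp4")
    # Normalized bounding-box coordinates must fall within [0.0, 1.0].
    box = object_annotations[0].frames[0].normalized_bounding_box
    assert 0.0 <= box.left <= 1.0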
