1414# See the License for the specific language governing permissions and
1515# limitations under the License.
1616
17- """This application demonstrates face detection, label detection,
17+ """This application demonstrates label detection,
1818explicit content, and shot change detection using the Google Cloud API.
1919
2020Usage Examples:
2121
22- python analyze.py faces gs://demomaker/google_gmail.mp4
2322 python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
2423 python analyze.py labels_file resources/cat.mp4
2524 python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
@@ -55,52 +54,6 @@ def analyze_explicit_content(path):
5554 likely_string [frame .pornography_likelihood ]))
5655
5756
def analyze_faces(path, timeout=600):
    """ Detects faces given a GCS path.

    Args:
        path: GCS URI of the video to annotate, e.g.
            ``gs://bucket/video.mp4``.
        timeout: Maximum number of seconds to wait for the long-running
            annotate operation to complete (default 600).

    Prints, for each detected face, its thumbnail size, the time range of
    every segment it appears in, and the bounding box of its first frame.
    """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.FACE_DETECTION]

    # Request per-frame bounding boxes in addition to segment info.
    config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True)
    context = videointelligence.types.VideoContext(
        face_detection_config=config)

    operation = video_client.annotate_video(
        path, features=features, video_context=context)
    print('\nProcessing video for face annotations:')

    result = operation.result(timeout=timeout)
    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    faces = result.annotation_results[0].face_annotations
    for face_id, face in enumerate(faces):
        print('Face {}'.format(face_id))
        print('Thumbnail size: {}'.format(len(face.thumbnail)))

        for segment_id, segment in enumerate(face.segments):
            # Offsets are split into whole seconds plus nanoseconds;
            # combine them into fractional seconds for display.
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(segment_id, positions))

        # There are typically many frames for each face,
        # here we print information on only the first frame.
        # Guard against a face annotation with no frames to avoid
        # an IndexError on malformed/edge-case API responses.
        if not face.frames:
            print('\n')
            continue
        frame = face.frames[0]
        time_offset = (frame.time_offset.seconds +
                       frame.time_offset.nanos / 1e9)
        box = frame.normalized_bounding_boxes[0]
        print('First frame time offset: {}s'.format(time_offset))
        print('First frame normalized bounding box:')
        print('\tleft: {}'.format(box.left))
        print('\ttop: {}'.format(box.top))
        print('\tright: {}'.format(box.right))
        print('\tbottom: {}'.format(box.bottom))
        print('\n')
103-
10457def analyze_labels (path ):
10558 """ Detects labels given a GCS path. """
10659 video_client = videointelligence .VideoIntelligenceServiceClient ()
@@ -275,9 +228,6 @@ def analyze_shots(path):
275228 description = __doc__ ,
276229 formatter_class = argparse .RawDescriptionHelpFormatter )
277230 subparsers = parser .add_subparsers (dest = 'command' )
278- analyze_faces_parser = subparsers .add_parser (
279- 'faces' , help = analyze_faces .__doc__ )
280- analyze_faces_parser .add_argument ('path' )
281231 analyze_labels_parser = subparsers .add_parser (
282232 'labels' , help = analyze_labels .__doc__ )
283233 analyze_labels_parser .add_argument ('path' )
@@ -293,8 +243,6 @@ def analyze_shots(path):
293243
294244 args = parser .parse_args ()
295245
296- if args .command == 'faces' :
297- analyze_faces (args .path )
298246 if args .command == 'labels' :
299247 analyze_labels (args .path )
300248 if args .command == 'labels_file' :
0 commit comments