    python beta_snippets.py streaming-automl-classification resources/cat.mp4 \
    $PROJECT_ID $MODEL_ID

    python beta_snippets.py streaming-automl-object-tracking resources/cat.mp4 \
    $PROJECT_ID $MODEL_ID
"""

import argparse
@@ -762,6 +765,110 @@ def stream_generator():
    # [END video_streaming_automl_classification_beta]


def streaming_automl_object_tracking(path, project_id, model_id):
    # [START video_streaming_automl_object_tracking_beta]
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'
    # project_id = 'project_id'
    # model_id = 'automl_object_tracking_model_id'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id
    )

    automl_config = videointelligence.StreamingAutomlObjectTrackingConfig(
        model_name=model_path
    )

    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_OBJECT_TRACKING,
        automl_object_tracking_config=automl_config,
    )

    # config_request should be the first in the stream of requests.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config
    )

    # Set the chunk size to 5MB (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024

    # Load file content.
    # Note: Input videos must have supported video codecs. See
    # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
    # for more details.
    stream = []
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            stream.append(data)
    def stream_generator():
        yield config_request
        for chunk in stream:
            yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)

    requests = stream_generator()

    # streaming_annotate_video returns a generator.
    # The default timeout is about 300 seconds.
    # To process longer videos it should be set to
    # larger than the length (in seconds) of the stream.
    responses = client.streaming_annotate_video(requests, timeout=900)

    # Each response corresponds to about 1 second of video.
    for response in responses:
        # Check for errors.
        if response.error.message:
            print(response.error.message)
            break

        object_annotations = response.annotation_results.object_annotations

        # object_annotations could be empty
        if not object_annotations:
            continue

        for annotation in object_annotations:
            # Each annotation has one frame, which has a time offset.
            frame = annotation.frames[0]
            time_offset = (
                frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
            )

            description = annotation.entity.description
            confidence = annotation.confidence

            # track_id tracks the same object in the video.
            track_id = annotation.track_id

            # description is in Unicode
            print("{}s".format(time_offset))
            print(u"\tEntity description: {}".format(description))
            print("\tTrack Id: {}".format(track_id))
            if annotation.entity.entity_id:
                print("\tEntity id: {}".format(annotation.entity.entity_id))

            print("\tConfidence: {}".format(confidence))

            # Every annotation has only one frame
            frame = annotation.frames[0]
            box = frame.normalized_bounding_box
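            # The bounding box coordinates are normalized to the [0, 1] range,
            # relative to the width and height of the frame.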
            print("\tBounding box position:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}\n".format(box.bottom))
    # [END video_streaming_automl_object_tracking_beta]


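# The sample above buffers the whole video in memory before streaming it.
# Below is a minimal, illustrative variation (not part of the original sample):
# a request generator that reads chunks lazily from the file, keeping memory
# usage flat for long videos. The helper name `_lazy_request_generator` is
# hypothetical; it reuses the same request types as the function above.
def _lazy_request_generator(path, config_request, chunk_size=5 * 1024 * 1024):
    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # The config request must still be the first message in the stream.
    yield config_request
    with io.open(path, "rb") as video_file:
        while True:
            data = video_file.read(chunk_size)
            if not data:
                break
            yield videointelligence.StreamingAnnotateVideoRequest(
                input_content=data
            )

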
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
@@ -826,6 +933,13 @@ def stream_generator():
    video_streaming_automl_classification_parser.add_argument("project_id")
    video_streaming_automl_classification_parser.add_argument("model_id")

    video_streaming_automl_object_tracking_parser = subparsers.add_parser(
        "streaming-automl-object-tracking", help=streaming_automl_object_tracking.__doc__
    )
    video_streaming_automl_object_tracking_parser.add_argument("path")
    video_streaming_automl_object_tracking_parser.add_argument("project_id")
    video_streaming_automl_object_tracking_parser.add_argument("model_id")

    args = parser.parse_args()

    if args.command == "transcription":
@@ -850,3 +964,5 @@ def stream_generator():
        annotation_to_storage_streaming(args.path, args.output_uri)
    elif args.command == "streaming-automl-classification":
        streaming_automl_classification(args.path, args.project_id, args.model_id)
    elif args.command == "streaming-automl-object-tracking":
        streaming_automl_object_tracking(args.path, args.project_id, args.model_id)