|
15 | 15 |
|
16 | 16 | 'use strict'; |
17 | 17 |
|
/**
 * Analyzes the faces in a video stored in Google Cloud Storage and logs each
 * face's thumbnail size and appearance segments (start/end offsets) to the
 * console. Errors from the long-running operation are logged, not thrown.
 *
 * @param {string} gcsUri - GCS URI of the video to analyze,
 *     e.g. 'gs://my-bucket/my-video.mp4'.
 */
function analyzeFaces(gcsUri) {
  // [START analyze_faces]
  // Imports the Google Cloud Video Intelligence library
  const video = require('@google-cloud/video-intelligence').v1;

  // Creates a client
  const client = new video.VideoIntelligenceServiceClient();

  /**
   * TODO(developer): Uncomment the following line before running the sample.
   */
  // const gcsUri = 'GCS URI of the video to analyze, e.g. gs://my-bucket/my-video.mp4';

  const request = {
    inputUri: gcsUri,
    features: ['FACE_DETECTION'],
  };

  // Formats a protobuf Duration-like offset ({seconds, nanos}) as `S.mmms`.
  // Missing fields default to 0, and nanos are rendered as zero-padded
  // milliseconds so that e.g. {seconds: 1, nanos: 5e6} prints `1.005s`
  // rather than the misleading `1.5s`. Avoids mutating the API response.
  const formatOffset = offset => {
    const seconds = offset.seconds === undefined ? 0 : offset.seconds;
    const nanos = offset.nanos === undefined ? 0 : offset.nanos;
    const millis = String(Math.round(nanos / 1e6)).padStart(3, '0');
    return `${seconds}.${millis}s`;
  };

  // Detects faces in a video
  client
    .annotateVideo(request)
    .then(results => {
      const operation = results[0];
      console.log('Waiting for operation to complete...');
      return operation.promise();
    })
    .then(results => {
      // Gets faces
      const faces = results[0].annotationResults[0].faceAnnotations;
      faces.forEach((face, faceIdx) => {
        console.log(`Face #${faceIdx}`);
        console.log(`\tThumbnail size: ${face.thumbnail.length}`);
        face.segments.forEach((segment, segmentIdx) => {
          // Each entry wraps the actual VideoSegment in a `segment` field.
          const {startTimeOffset, endTimeOffset} = segment.segment;
          console.log(`\tAppearance #${segmentIdx}:`);
          console.log(`\t\tStart: ${formatOffset(startTimeOffset)}`);
          console.log(`\t\tEnd: ${formatOffset(endTimeOffset)}`);
        });
        console.log(`\tLocations:`);
      });
    })
    .catch(err => {
      console.error('ERROR:', err);
    });
  // [END analyze_faces]
}
82 | | - |
83 | 18 | function analyzeLabelsGCS(gcsUri) { |
84 | 19 | // [START analyze_labels_gcs] |
85 | 20 | // Imports the Google Cloud Video Intelligence library |
@@ -410,12 +345,6 @@ function analyzeVideoTranscription(gcsUri) { |
410 | 345 |
|
411 | 346 | require(`yargs`) |
412 | 347 | .demand(1) |
413 | | - .command( |
414 | | - `faces <gcsUri>`, |
415 | | - `Analyzes faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.`, |
416 | | - {}, |
417 | | - opts => analyzeFaces(opts.gcsUri) |
418 | | - ) |
419 | 348 | .command( |
420 | 349 | `shots <gcsUri>`, |
421 | 350 | `Analyzes shot angles in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.`, |
@@ -446,7 +375,6 @@ require(`yargs`) |
446 | 375 | {}, |
447 | 376 | opts => analyzeVideoTranscription(opts.gcsUri) |
448 | 377 | ) |
449 | | - .example(`node $0 faces gs://demomaker/larry_sergey_ice_bucket_short.mp4`) |
450 | 378 | .example(`node $0 shots gs://demomaker/sushi.mp4`) |
451 | 379 | .example(`node $0 labels-gcs gs://demomaker/tomatoes.mp4`) |
452 | 380 | .example(`node $0 labels-file cat.mp4`) |
|
0 commit comments