type: com.google.api.codegen.ConfigProto
config_schema_version: 2.0.0
language_settings:
  python:
    package_name: google.cloud.videointelligence_v1p3beta1.gapic
  go:
    package_name: cloud.google.com/go/videointelligence/apiv1p3beta1
  csharp:
    package_name: Google.Cloud.VideoIntelligence.V1P3Beta1
  ruby:
    package_name: Google::Cloud::VideoIntelligence::V1p3beta1
    release_level: BETA
  php:
    package_name: Google\Cloud\VideoIntelligence\V1p3beta1
  nodejs:
    package_name: video-intelligence.v1p3beta1
    domain_layer_location: google-cloud
interfaces:
- name: google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService
  smoke_test:
    method: AnnotateVideo
    init_fields:
    - input_uri=gs://cloud-samples-data/video/cat.mp4
    - features[0]=LABEL_DETECTION
  methods:
  - name: AnnotateVideo
    long_running:
      initial_poll_delay_millis: 20000
      poll_delay_multiplier: 1.5
      max_poll_delay_millis: 45000
      total_poll_timeout_millis: 86400000
    sample_code_init_fields:
    - input_uri=gs://cloud-samples-data/video/cat.mp4
    - features[0]=LABEL_DETECTION
    samples:
      standalone:
      - region_tag: video_detect_logo_beta
        value_sets: [video_detect_logo_beta]
      - region_tag: video_detect_logo_gcs_beta
        value_sets: [video_detect_logo_gcs_beta]
    sample_value_sets:
    - id: video_detect_logo_beta
      description: "Performs asynchronous video annotation for logo recognition from inline video content."
      parameters:
        defaults:
        - input_content="resources/googlework_short.mp4"
        - features[0]=LOGO_RECOGNITION
        attributes:
        - parameter: input_content
          sample_argument_name: local_file_path
          read_file: true
          description: "Path to local video file, e.g. /path/video.mp4"
      on_success:
      - comment: ["Get the first response, since we sent only one video."]
      - define: annotation_result=$resp.annotation_results[0]
      - comment: ["Annotations for list of logos detected, tracked and recognized in video."]
      - loop:
          collection: annotation_result.logo_recognition_annotations
          variable: logo_recognition_annotation
          body:
          - define: entity=logo_recognition_annotation.entity
          - comment: ["Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/)."]
          - print: ["Entity Id : %s", entity.entity_id]
          - comment: ["Textual description, e.g. `Google`."]
          - print: ["Description : %s", entity.description]
          - comment: ["All logo tracks where the recognized logo appears. Each track corresponds to one logo instance appearing in consecutive frames."]
          - loop:
              collection: logo_recognition_annotation.tracks
              variable: track
              body:
              - comment: ["Video segment of a track."]
              - define: segment=track.segment
              - define: segment_start_time_offset=segment.start_time_offset
              - print: [" Start Time Offset : %s.%s", segment_start_time_offset.seconds, segment_start_time_offset.nanos]
              - define: segment_end_time_offset=segment.end_time_offset
              - print: [" End Time Offset : %s.%s", segment_end_time_offset.seconds, segment_end_time_offset.nanos]
              - print: [" Confidence : %s", track.confidence]
              - comment: ["The object with timestamp and attributes per frame in the track."]
              - loop:
                  collection: track.timestamped_objects
                  variable: timestamped_object
                  body:
                  - comment: ["Normalized Bounding box in a frame, where the object is located."]
                  - define: normalized_bounding_box=timestamped_object.normalized_bounding_box
                  - print: [" Left : %s", normalized_bounding_box.left]
                  - print: [" Top : %s", normalized_bounding_box.top]
                  - print: [" Right : %s", normalized_bounding_box.right]
                  - print: [" Bottom : %s", normalized_bounding_box.bottom]
                  - comment: ["Optional. The attributes of the object in the bounding box."]
                  - loop:
                      collection: timestamped_object.attributes
                      variable: attribute
                      body:
                      - print: [" Name : %s", attribute.name]
                      - print: [" Confidence : %s", attribute.confidence]
                      - print: [" Value : %s", attribute.value]
              - comment: ["Optional. Attributes in the track level."]
              - loop:
                  collection: track.attributes
                  variable: track_attribute
                  body:
                  - print: [" Name : %s", track_attribute.name]
                  - print: [" Confidence : %s", track_attribute.confidence]
                  - print: [" Value : %s", track_attribute.value]
          - comment: ["All video segments where the recognized logo appears. There might be multiple instances of the same logo class appearing in one VideoSegment."]
          - loop:
              collection: logo_recognition_annotation.segments
              variable: logo_recognition_annotation_segment
              body:
              - define: logo_recognition_annotation_segment_start_time_offset=logo_recognition_annotation_segment.start_time_offset
              - print: [" Start Time Offset : %s.%s", logo_recognition_annotation_segment_start_time_offset.seconds, logo_recognition_annotation_segment_start_time_offset.nanos]
              - define: logo_recognition_annotation_segment_end_time_offset=logo_recognition_annotation_segment.end_time_offset
              - print: [" End Time Offset : %s.%s", logo_recognition_annotation_segment_end_time_offset.seconds, logo_recognition_annotation_segment_end_time_offset.nanos]
    - id: video_detect_logo_gcs_beta
      description: "Performs asynchronous video annotation for logo recognition on a file hosted in GCS."
      parameters:
        defaults:
        - input_uri=gs://cloud-samples-data/video/googlework_short.mp4
        - features[0]=LOGO_RECOGNITION
      on_success:
      - comment: ["Get the first response, since we sent only one video."]
      - define: annotation_result=$resp.annotation_results[0]
      - comment: ["Annotations for list of logos detected, tracked and recognized in video."]
      - loop:
          collection: annotation_result.logo_recognition_annotations
          variable: logo_recognition_annotation
          body:
          - define: entity=logo_recognition_annotation.entity
          - comment: ["Opaque entity ID. Some IDs may be available in [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/)."]
          - print: ["Entity Id : %s", entity.entity_id]
          - comment: ["Textual description, e.g. `Google`."]
          - print: ["Description : %s", entity.description]
          - comment: ["All logo tracks where the recognized logo appears. Each track corresponds to one logo instance appearing in consecutive frames."]
          - loop:
              collection: logo_recognition_annotation.tracks
              variable: track
              body:
              - comment: ["Video segment of a track."]
              - define: segment=track.segment
              - define: segment_start_time_offset=segment.start_time_offset
              - print: [" Start Time Offset : %s.%s", segment_start_time_offset.seconds, segment_start_time_offset.nanos]
              - define: segment_end_time_offset=segment.end_time_offset
              - print: [" End Time Offset : %s.%s", segment_end_time_offset.seconds, segment_end_time_offset.nanos]
              - print: [" Confidence : %s", track.confidence]
              - comment: ["The object with timestamp and attributes per frame in the track."]
              - loop:
                  collection: track.timestamped_objects
                  variable: timestamped_object
                  body:
                  - comment: ["Normalized Bounding box in a frame, where the object is located."]
                  - define: normalized_bounding_box=timestamped_object.normalized_bounding_box
                  - print: [" Left : %s", normalized_bounding_box.left]
                  - print: [" Top : %s", normalized_bounding_box.top]
                  - print: [" Right : %s", normalized_bounding_box.right]
                  - print: [" Bottom : %s", normalized_bounding_box.bottom]
                  - comment: ["Optional. The attributes of the object in the bounding box."]
                  - loop:
                      collection: timestamped_object.attributes
                      variable: attribute
                      body:
                      - print: [" Name : %s", attribute.name]
                      - print: [" Confidence : %s", attribute.confidence]
                      - print: [" Value : %s", attribute.value]
              - comment: ["Optional. Attributes in the track level."]
              - loop:
                  collection: track.attributes
                  variable: track_attribute
                  body:
                  - print: [" Name : %s", track_attribute.name]
                  - print: [" Confidence : %s", track_attribute.confidence]
                  - print: [" Value : %s", track_attribute.value]
          - comment: ["All video segments where the recognized logo appears. There might be multiple instances of the same logo class appearing in one VideoSegment."]
          - loop:
              collection: logo_recognition_annotation.segments
              variable: logo_recognition_annotation_segment
              body:
              - define: logo_recognition_annotation_segment_start_time_offset=logo_recognition_annotation_segment.start_time_offset
              - print: [" Start Time Offset : %s.%s", logo_recognition_annotation_segment_start_time_offset.seconds, logo_recognition_annotation_segment_start_time_offset.nanos]
              - define: logo_recognition_annotation_segment_end_time_offset=logo_recognition_annotation_segment.end_time_offset
              - print: [" End Time Offset : %s.%s", logo_recognition_annotation_segment_end_time_offset.seconds, logo_recognition_annotation_segment_end_time_offset.nanos]
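# For reference, a rough sketch of the Python sample that the
# video_detect_logo_beta value set above is intended to generate. This is an
# illustration only, not generator output; it assumes the pre-2.0
# google-cloud-videointelligence client, and `local_file_path` is the
# hypothetical argument named by sample_argument_name above.
#
#     from google.cloud import videointelligence_v1p3beta1 as videointelligence
#
#     client = videointelligence.VideoIntelligenceServiceClient()
#     # Read the local video file into memory for inline annotation.
#     with open(local_file_path, "rb") as f:
#         input_content = f.read()
#     operation = client.annotate_video(
#         input_content=input_content,
#         features=[videointelligence.enums.Feature.LOGO_RECOGNITION],
#     )
#     response = operation.result(timeout=300)
#     # Get the first response, since we sent only one video.
#     annotation_result = response.annotation_results[0]
#     for annotation in annotation_result.logo_recognition_annotations:
#         entity = annotation.entity
#         print("Entity Id : {}".format(entity.entity_id))
#         print("Description : {}".format(entity.description))
#         for track in annotation.tracks:
#             print(" Confidence : {}".format(track.confidence))
#
# The video_detect_logo_gcs_beta value set follows the same shape, but passes
# input_uri=gs://... instead of reading a local file into input_content.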