# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'

module Google
  module Apis
    module VideointelligenceV1p2beta1

      # Video annotation progress. Included in the `metadata`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1AnnotateVideoProgress
        include Google::Apis::Core::Hashable

        # Progress metadata for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationProgress`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoAnnotationProgress>]
        attr_accessor :annotation_progress

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress)
        end
      end

      # Video annotation response. Included in the `response`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1AnnotateVideoResponse
        include Google::Apis::Core::Hashable

        # Annotation results for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationResults`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoAnnotationResults>]
        attr_accessor :annotation_results

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_results = args[:annotation_results] if args.key?(:annotation_results)
        end
      end

      # Detected entity from video analysis.
      class GoogleCloudVideointelligenceV1Entity
        include Google::Apis::Core::Hashable

        # Textual description, e.g. `Fixed-gear bicycle`.
        # Corresponds to the JSON property `description`
        # @return [String]
        attr_accessor :description

        # Opaque entity ID. Some IDs may be available in
        # [Google Knowledge Graph Search
        # API](https://developers.google.com/knowledge-graph/).
        # Corresponds to the JSON property `entityId`
        # @return [String]
        attr_accessor :entity_id

        # Language code for `description` in BCP-47 format.
        # Corresponds to the JSON property `languageCode`
        # @return [String]
        attr_accessor :language_code

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @description = args[:description] if args.key?(:description)
          @entity_id = args[:entity_id] if args.key?(:entity_id)
          @language_code = args[:language_code] if args.key?(:language_code)
        end
      end

      # Explicit content annotation (based on per-frame visual signals only).
      # If no explicit content has been detected in a frame, no annotations are
      # present for that frame.
      class GoogleCloudVideointelligenceV1ExplicitContentAnnotation
        include Google::Apis::Core::Hashable

        # All video frames where explicit content was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ExplicitContentFrame>]
        attr_accessor :frames

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @frames = args[:frames] if args.key?(:frames)
        end
      end

      # Video frame level annotation results for explicit content.
      class GoogleCloudVideointelligenceV1ExplicitContentFrame
        include Google::Apis::Core::Hashable

        # Likelihood of the pornography content.
        # Corresponds to the JSON property `pornographyLikelihood`
        # @return [String]
        attr_accessor :pornography_likelihood

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Label annotation.
      class GoogleCloudVideointelligenceV1LabelAnnotation
        include Google::Apis::Core::Hashable

        # Common categories for the detected entity.
        # E.g. when the label is `Terrier`, the category is likely `dog`. In some
        # cases there might be more than one category, e.g. `Terrier` could also be
        # a `pet`.
        # Corresponds to the JSON property `categoryEntities`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity>]
        attr_accessor :category_entities

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity]
        attr_accessor :entity

        # All video frames where a label was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelFrame>]
        attr_accessor :frames

        # All video segments where a label was detected.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelSegment>]
        attr_accessor :segments

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @category_entities = args[:category_entities] if args.key?(:category_entities)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segments = args[:segments] if args.key?(:segments)
        end
      end

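      # A minimal reading sketch (illustrative only; `annotation` is an assumed
      # GoogleCloudVideointelligenceV1LabelAnnotation instance taken from an
      # annotation response). It only uses the accessors defined above:
      #
      #   puts annotation.entity.description
      #   (annotation.segments || []).each do |label_segment|
      #     printf("  %s-%s confidence=%.2f\n",
      #            label_segment.segment.start_time_offset,
      #            label_segment.segment.end_time_offset,
      #            label_segment.confidence)
      #   end
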
      # Video frame level annotation results for label detection.
      class GoogleCloudVideointelligenceV1LabelFrame
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for label detection.
      class GoogleCloudVideointelligenceV1LabelSegment
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @segment = args[:segment] if args.key?(:segment)
        end
      end

      # Normalized bounding box.
      # The normalized vertex coordinates are relative to the original image.
      # Range: [0, 1].
      class GoogleCloudVideointelligenceV1NormalizedBoundingBox
        include Google::Apis::Core::Hashable

        # Bottom Y coordinate.
        # Corresponds to the JSON property `bottom`
        # @return [Float]
        attr_accessor :bottom

        # Left X coordinate.
        # Corresponds to the JSON property `left`
        # @return [Float]
        attr_accessor :left

        # Right X coordinate.
        # Corresponds to the JSON property `right`
        # @return [Float]
        attr_accessor :right

        # Top Y coordinate.
        # Corresponds to the JSON property `top`
        # @return [Float]
        attr_accessor :top

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @bottom = args[:bottom] if args.key?(:bottom)
          @left = args[:left] if args.key?(:left)
          @right = args[:right] if args.key?(:right)
          @top = args[:top] if args.key?(:top)
        end
      end

      # Normalized bounding polygon for text (that might not be aligned with axis).
      # Contains list of the corner points in clockwise order starting from
      # top-left corner. For example, for a rectangular bounding box:
      # When the text is horizontal it might look like:
      #         0----1
      #         |    |
      #         3----2
      # When it's clockwise rotated 180 degrees around the top-left corner it
      # becomes:
      #         2----3
      #         |    |
      #         1----0
      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
      # than 0, or greater than 1 due to trigonometric calculations for location of
      # the box.
      class GoogleCloudVideointelligenceV1NormalizedBoundingPoly
        include Google::Apis::Core::Hashable

        # Normalized vertices of the bounding polygon.
        # Corresponds to the JSON property `vertices`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedVertex>]
        attr_accessor :vertices

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @vertices = args[:vertices] if args.key?(:vertices)
        end
      end

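      # A minimal construction sketch (illustrative only; coordinate values are
      # assumed examples, and class names are written relative to
      # Google::Apis::VideointelligenceV1p2beta1). Vertices follow the clockwise
      # order documented above, starting from the top-left corner:
      #
      #   vertex = lambda do |x, y|
      #     GoogleCloudVideointelligenceV1NormalizedVertex.new(x: x, y: y)
      #   end
      #   poly = GoogleCloudVideointelligenceV1NormalizedBoundingPoly.new(
      #     vertices: [vertex.call(0.1, 0.1), vertex.call(0.9, 0.1),
      #                vertex.call(0.9, 0.2), vertex.call(0.1, 0.2)])
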
      # A vertex represents a 2D point in the image.
      # NOTE: the normalized vertex coordinates are relative to the original image
      # and range from 0 to 1.
      class GoogleCloudVideointelligenceV1NormalizedVertex
        include Google::Apis::Core::Hashable

        # X coordinate.
        # Corresponds to the JSON property `x`
        # @return [Float]
        attr_accessor :x

        # Y coordinate.
        # Corresponds to the JSON property `y`
        # @return [Float]
        attr_accessor :y

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @x = args[:x] if args.key?(:x)
          @y = args[:y] if args.key?(:y)
        end
      end

      # Annotations corresponding to one tracked object.
      class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation
        include Google::Apis::Core::Hashable

        # Object category's labeling confidence of this track.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity]
        attr_accessor :entity

        # Information corresponding to all frames where this object track appears.
        # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
        # messages in frames.
        # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
        attr_accessor :segment

        # Streaming mode ONLY.
        # In streaming mode, we do not know the end time of a tracked object
        # before it is completed. Hence, there is no VideoSegment info returned.
        # Instead, we provide a unique identifiable integer track_id so that
        # the customers can correlate the results of the ongoing
        # ObjectTrackAnnotation of the same track_id over time.
        # Corresponds to the JSON property `trackId`
        # @return [Fixnum]
        attr_accessor :track_id

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
          @track_id = args[:track_id] if args.key?(:track_id)
        end
      end

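      # A minimal reading sketch (illustrative only; `track` is an assumed
      # GoogleCloudVideointelligenceV1ObjectTrackingAnnotation instance). Each frame
      # carries a normalized bounding box plus a time offset:
      #
      #   puts "#{track.entity&.description} (confidence #{track.confidence})"
      #   (track.frames || []).each do |frame|
      #     box = frame.normalized_bounding_box
      #     puts "#{frame.time_offset}: [#{box.left}, #{box.top}, #{box.right}, #{box.bottom}]"
      #   end
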
      # Video frame level annotations for object detection and tracking. This field
      # stores per frame location, time offset, and confidence.
      class GoogleCloudVideointelligenceV1ObjectTrackingFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding box.
        # The normalized vertex coordinates are relative to the original image.
        # Range: [0, 1].
        # Corresponds to the JSON property `normalizedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox]
        attr_accessor :normalized_bounding_box

        # The timestamp of the frame in microseconds.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Alternative hypotheses (a.k.a. n-best list).
      class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Transcript text representing the words that the user spoke.
        # Corresponds to the JSON property `transcript`
        # @return [String]
        attr_accessor :transcript

        # Output only. A list of word-specific information for each recognized word.
        # Note: When `enable_speaker_diarization` is true, you will see all the words
        # from the beginning of the audio.
        # Corresponds to the JSON property `words`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1WordInfo>]
        attr_accessor :words

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @transcript = args[:transcript] if args.key?(:transcript)
          @words = args[:words] if args.key?(:words)
        end
      end

      # A speech recognition result corresponding to a portion of the audio.
      class GoogleCloudVideointelligenceV1SpeechTranscription
        include Google::Apis::Core::Hashable

        # May contain one or more recognition hypotheses (up to the maximum specified
        # in `max_alternatives`). These alternatives are ordered in terms of
        # accuracy, with the top (first) alternative being the most probable, as
        # ranked by the recognizer.
        # Corresponds to the JSON property `alternatives`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechRecognitionAlternative>]
        attr_accessor :alternatives

        # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
        # language tag of the language in this result. This language code was
        # detected to have the most likelihood of being spoken in the audio.
        # Corresponds to the JSON property `languageCode`
        # @return [String]
        attr_accessor :language_code

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @alternatives = args[:alternatives] if args.key?(:alternatives)
          @language_code = args[:language_code] if args.key?(:language_code)
        end
      end

      # Annotations related to one detected OCR text snippet. This will contain the
      # corresponding text, confidence value, and frame level information for each
      # detection.
      class GoogleCloudVideointelligenceV1TextAnnotation
        include Google::Apis::Core::Hashable

        # All video segments where OCR detected text appears.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextSegment>]
        attr_accessor :segments

        # The detected text.
        # Corresponds to the JSON property `text`
        # @return [String]
        attr_accessor :text

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @segments = args[:segments] if args.key?(:segments)
          @text = args[:text] if args.key?(:text)
        end
      end

      # Video frame level annotation results for text annotation (OCR).
      # Contains information regarding timestamp and bounding box locations for the
      # frames containing detected OCR text snippets.
      class GoogleCloudVideointelligenceV1TextFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding polygon for text (that might not be aligned with axis).
        # Contains list of the corner points in clockwise order starting from
        # top-left corner. For example, for a rectangular bounding box:
        # When the text is horizontal it might look like:
        #         0----1
        #         |    |
        #         3----2
        # When it's clockwise rotated 180 degrees around the top-left corner it
        # becomes:
        #         2----3
        #         |    |
        #         1----0
        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
        # than 0, or greater than 1 due to trigonometric calculations for location of
        # the box.
        # Corresponds to the JSON property `rotatedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly]
        attr_accessor :rotated_bounding_box

        # Timestamp of this frame.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for text detection.
      class GoogleCloudVideointelligenceV1TextSegment
        include Google::Apis::Core::Hashable

        # Confidence for the track of detected text. It is calculated as the highest
        # over all frames where OCR detected text appears.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Information related to the frames where OCR detected text appears.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
        end
      end

      # Annotation progress for a single video.
      class GoogleCloudVideointelligenceV1VideoAnnotationProgress
        include Google::Apis::Core::Hashable

        # Specifies which feature is being tracked if the request contains more than
        # one feature.
        # Corresponds to the JSON property `feature`
        # @return [String]
        attr_accessor :feature

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Approximate percentage processed thus far. Guaranteed to be
        # 100 when fully processed.
        # Corresponds to the JSON property `progressPercent`
        # @return [Fixnum]
        attr_accessor :progress_percent

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
        attr_accessor :segment

        # Time when the request was received.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # Time of the most recent update.
        # Corresponds to the JSON property `updateTime`
        # @return [String]
        attr_accessor :update_time

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @feature = args[:feature] if args.key?(:feature)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @progress_percent = args[:progress_percent] if args.key?(:progress_percent)
          @segment = args[:segment] if args.key?(:segment)
          @start_time = args[:start_time] if args.key?(:start_time)
          @update_time = args[:update_time] if args.key?(:update_time)
        end
      end

      # Annotation results for a single video.
      class GoogleCloudVideointelligenceV1VideoAnnotationResults
        include Google::Apis::Core::Hashable

        # The `Status` type defines a logical error model that is suitable for
        # different programming environments, including REST APIs and RPC APIs. It is
        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
        # three pieces of data: error code, error message, and error details.
        # You can find out more about this error model and how to work with it in the
        # [API Design Guide](https://cloud.google.com/apis/design/errors).
        # Corresponds to the JSON property `error`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
        attr_accessor :error

        # Explicit content annotation (based on per-frame visual signals only).
        # If no explicit content has been detected in a frame, no annotations are
        # present for that frame.
        # Corresponds to the JSON property `explicitAnnotation`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ExplicitContentAnnotation]
        attr_accessor :explicit_annotation

        # Label annotations on frame level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `frameLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
        attr_accessor :frame_label_annotations

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Annotations for list of objects detected and tracked in video.
        # Corresponds to the JSON property `objectAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation>]
        attr_accessor :object_annotations

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment]
        attr_accessor :segment

        # Topical label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `segmentLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
        attr_accessor :segment_label_annotations

        # Presence label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label. Compared to the
        # existing topical `segment_label_annotations`, this field presents more
        # fine-grained, segment-level labels detected in video content and is made
        # available only when the client sets `LabelDetectionConfig.model` to
        # "builtin/latest" in the request.
        # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
        attr_accessor :segment_presence_label_annotations

        # Shot annotations. Each shot is represented as a video segment.
        # Corresponds to the JSON property `shotAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment>]
        attr_accessor :shot_annotations

        # Topical label annotations on shot level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `shotLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
        attr_accessor :shot_label_annotations

        # Presence label annotations on shot level. There is exactly one element for
        # each unique label. Compared to the existing topical
        # `shot_label_annotations`, this field presents more fine-grained, shot-level
        # labels detected in video content and is made available only when the client
        # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
        # Corresponds to the JSON property `shotPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation>]
        attr_accessor :shot_presence_label_annotations

        # Speech transcription.
        # Corresponds to the JSON property `speechTranscriptions`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechTranscription>]
        attr_accessor :speech_transcriptions

        # OCR text detection and tracking.
        # Annotations for list of detected text snippets. Each will have list of
        # frame information associated with it.
        # Corresponds to the JSON property `textAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextAnnotation>]
        attr_accessor :text_annotations

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @error = args[:error] if args.key?(:error)
          @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
          @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
          @segment = args[:segment] if args.key?(:segment)
          @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
          @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
          @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
          @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations)
          @shot_presence_label_annotations = args[:shot_presence_label_annotations] if args.key?(:shot_presence_label_annotations)
          @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions)
          @text_annotations = args[:text_annotations] if args.key?(:text_annotations)
        end
      end

      # Video segment.
      class GoogleCloudVideointelligenceV1VideoSegment
        include Google::Apis::Core::Hashable

        # Time-offset, relative to the beginning of the video,
        # corresponding to the end of the segment (inclusive).
        # Corresponds to the JSON property `endTimeOffset`
        # @return [String]
        attr_accessor :end_time_offset

        # Time-offset, relative to the beginning of the video,
        # corresponding to the start of the segment (inclusive).
        # Corresponds to the JSON property `startTimeOffset`
        # @return [String]
        attr_accessor :start_time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset)
          @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset)
        end
      end

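      # A minimal construction sketch (illustrative only; the duration strings are
      # assumed example values). Like the other classes in this file, attributes can
      # be supplied as keyword arguments, which `initialize` forwards to `update!`:
      #
      #   segment = GoogleCloudVideointelligenceV1VideoSegment.new(
      #     start_time_offset: '0s', end_time_offset: '5s')
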
      # Word-specific information for recognized words. Word information is only
      # included in the response when certain request parameters are set, such
      # as `enable_word_time_offsets`.
      class GoogleCloudVideointelligenceV1WordInfo
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time offset relative to the beginning of the audio, and
        # corresponding to the end of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `endTime`
        # @return [String]
        attr_accessor :end_time

        # Output only. A distinct integer value is assigned for every speaker within
        # the audio. This field specifies which one of those speakers was detected to
        # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
        # and is only set if speaker diarization is enabled.
        # Corresponds to the JSON property `speakerTag`
        # @return [Fixnum]
        attr_accessor :speaker_tag

        # Time offset relative to the beginning of the audio, and
        # corresponding to the start of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # The word corresponding to this set of information.
        # Corresponds to the JSON property `word`
        # @return [String]
        attr_accessor :word

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @end_time = args[:end_time] if args.key?(:end_time)
          @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
          @start_time = args[:start_time] if args.key?(:start_time)
          @word = args[:word] if args.key?(:word)
        end
      end

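      # A minimal reading sketch (illustrative only; `alternative` is an assumed
      # GoogleCloudVideointelligenceV1SpeechRecognitionAlternative instance). Word
      # timing and speaker tags are read straight from the accessors defined above:
      #
      #   (alternative.words || []).each do |info|
      #     puts "#{info.start_time}-#{info.end_time} speaker=#{info.speaker_tag}: #{info.word}"
      #   end
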
      # Video annotation progress. Included in the `metadata`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress
        include Google::Apis::Core::Hashable

        # Progress metadata for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationProgress`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress>]
        attr_accessor :annotation_progress

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress)
        end
      end

      # Video annotation response. Included in the `response`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse
        include Google::Apis::Core::Hashable

        # Annotation results for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationResults`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoAnnotationResults>]
        attr_accessor :annotation_results

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_results = args[:annotation_results] if args.key?(:annotation_results)
        end
      end

      # Detected entity from video analysis.
      class GoogleCloudVideointelligenceV1beta2Entity
        include Google::Apis::Core::Hashable

        # Textual description, e.g. `Fixed-gear bicycle`.
        # Corresponds to the JSON property `description`
        # @return [String]
        attr_accessor :description

        # Opaque entity ID. Some IDs may be available in
        # [Google Knowledge Graph Search
        # API](https://developers.google.com/knowledge-graph/).
        # Corresponds to the JSON property `entityId`
        # @return [String]
        attr_accessor :entity_id

        # Language code for `description` in BCP-47 format.
        # Corresponds to the JSON property `languageCode`
        # @return [String]
        attr_accessor :language_code

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @description = args[:description] if args.key?(:description)
          @entity_id = args[:entity_id] if args.key?(:entity_id)
          @language_code = args[:language_code] if args.key?(:language_code)
        end
      end

      # Explicit content annotation (based on per-frame visual signals only).
      # If no explicit content has been detected in a frame, no annotations are
      # present for that frame.
      class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation
        include Google::Apis::Core::Hashable

        # All video frames where explicit content was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentFrame>]
        attr_accessor :frames

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @frames = args[:frames] if args.key?(:frames)
        end
      end

      # Video frame level annotation results for explicit content.
      class GoogleCloudVideointelligenceV1beta2ExplicitContentFrame
        include Google::Apis::Core::Hashable

        # Likelihood of the pornography content.
        # Corresponds to the JSON property `pornographyLikelihood`
        # @return [String]
        attr_accessor :pornography_likelihood

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Label annotation.
      class GoogleCloudVideointelligenceV1beta2LabelAnnotation
        include Google::Apis::Core::Hashable

        # Common categories for the detected entity.
        # E.g. when the label is `Terrier`, the category is likely `dog`. In some
        # cases there might be more than one category, e.g. `Terrier` could also be
        # a `pet`.
        # Corresponds to the JSON property `categoryEntities`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity>]
        attr_accessor :category_entities

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity]
        attr_accessor :entity

        # All video frames where a label was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelFrame>]
        attr_accessor :frames

        # All video segments where a label was detected.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelSegment>]
        attr_accessor :segments

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @category_entities = args[:category_entities] if args.key?(:category_entities)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segments = args[:segments] if args.key?(:segments)
        end
      end

      # Video frame level annotation results for label detection.
      class GoogleCloudVideointelligenceV1beta2LabelFrame
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for label detection.
      class GoogleCloudVideointelligenceV1beta2LabelSegment
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @segment = args[:segment] if args.key?(:segment)
        end
      end

      # Normalized bounding box.
      # The normalized vertex coordinates are relative to the original image.
      # Range: [0, 1].
      class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox
        include Google::Apis::Core::Hashable

        # Bottom Y coordinate.
        # Corresponds to the JSON property `bottom`
        # @return [Float]
        attr_accessor :bottom

        # Left X coordinate.
        # Corresponds to the JSON property `left`
        # @return [Float]
        attr_accessor :left

        # Right X coordinate.
        # Corresponds to the JSON property `right`
        # @return [Float]
        attr_accessor :right

        # Top Y coordinate.
        # Corresponds to the JSON property `top`
        # @return [Float]
        attr_accessor :top

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @bottom = args[:bottom] if args.key?(:bottom)
          @left = args[:left] if args.key?(:left)
          @right = args[:right] if args.key?(:right)
          @top = args[:top] if args.key?(:top)
        end
      end

      # Normalized bounding polygon for text (that might not be aligned with axis).
      # Contains list of the corner points in clockwise order starting from
      # top-left corner. For example, for a rectangular bounding box:
      # When the text is horizontal it might look like:
      #         0----1
      #         |    |
      #         3----2
      # When it's clockwise rotated 180 degrees around the top-left corner it
      # becomes:
      #         2----3
      #         |    |
      #         1----0
      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
      # than 0, or greater than 1 due to trigonometric calculations for location of
      # the box.
      class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly
        include Google::Apis::Core::Hashable

        # Normalized vertices of the bounding polygon.
        # Corresponds to the JSON property `vertices`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex>]
        attr_accessor :vertices

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @vertices = args[:vertices] if args.key?(:vertices)
        end
      end

      # A vertex represents a 2D point in the image.
      # NOTE: the normalized vertex coordinates are relative to the original image
      # and range from 0 to 1.
      class GoogleCloudVideointelligenceV1beta2NormalizedVertex
        include Google::Apis::Core::Hashable

        # X coordinate.
        # Corresponds to the JSON property `x`
        # @return [Float]
        attr_accessor :x

        # Y coordinate.
        # Corresponds to the JSON property `y`
        # @return [Float]
        attr_accessor :y

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @x = args[:x] if args.key?(:x)
          @y = args[:y] if args.key?(:y)
        end
      end

      # Annotations corresponding to one tracked object.
      class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation
        include Google::Apis::Core::Hashable

        # Object category's labeling confidence of this track.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity]
        attr_accessor :entity

        # Information corresponding to all frames where this object track appears.
        # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
        # messages in frames.
        # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
        attr_accessor :segment

        # Streaming mode ONLY.
        # In streaming mode, we do not know the end time of a tracked object
        # before it is completed. Hence, there is no VideoSegment info returned.
        # Instead, we provide a unique identifiable integer track_id so that
        # the customers can correlate the results of the ongoing
        # ObjectTrackAnnotation of the same track_id over time.
        # Corresponds to the JSON property `trackId`
        # @return [Fixnum]
        attr_accessor :track_id

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
          @track_id = args[:track_id] if args.key?(:track_id)
        end
      end

      # Video frame level annotations for object detection and tracking. This field
      # stores per frame location, time offset, and confidence.
      class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding box.
        # The normalized vertex coordinates are relative to the original image.
        # Range: [0, 1].
        # Corresponds to the JSON property `normalizedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox]
        attr_accessor :normalized_bounding_box

        # The timestamp of the frame in microseconds.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Alternative hypotheses (a.k.a. n-best list).
      class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Transcript text representing the words that the user spoke.
        # Corresponds to the JSON property `transcript`
        # @return [String]
        attr_accessor :transcript

        # Output only. A list of word-specific information for each recognized word.
        # Note: When `enable_speaker_diarization` is true, you will see all the words
        # from the beginning of the audio.
        # Corresponds to the JSON property `words`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2WordInfo>]
        attr_accessor :words

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @transcript = args[:transcript] if args.key?(:transcript)
          @words = args[:words] if args.key?(:words)
        end
      end

# A speech recognition result corresponding to a portion of the audio.
|
|
|
|
class GoogleCloudVideointelligenceV1beta2SpeechTranscription
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# May contain one or more recognition hypotheses (up to the maximum specified
|
|
|
|
# in `max_alternatives`). These alternatives are ordered in terms of
|
|
|
|
# accuracy, with the top (first) alternative being the most probable, as
|
|
|
|
# ranked by the recognizer.
|
|
|
|
# Corresponds to the JSON property `alternatives`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative>]
|
|
|
|
attr_accessor :alternatives
|
|
|
|
|
2019-10-22 00:37:07 +00:00
|
|
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
|
|
|
# language tag of
|
|
|
|
# the language in this result. This language code was detected to have the
|
|
|
|
# most likelihood of being spoken in the audio.
|
2019-02-22 00:36:49 +00:00
|
|
|
# Corresponds to the JSON property `languageCode`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :language_code
|
2019-01-31 00:36:50 +00:00
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
2019-02-22 00:36:49 +00:00
|
|
|
@alternatives = args[:alternatives] if args.key?(:alternatives)
|
|
|
|
@language_code = args[:language_code] if args.key?(:language_code)
|
2019-01-31 00:36:50 +00:00
|
|
|
end
|
|
|
|
end
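
      # Illustrative usage sketch (comment only, not part of the generated API
      # surface): picking the top-ranked hypothesis out of a speech
      # transcription. `transcription` is a hypothetical
      # GoogleCloudVideointelligenceV1beta2SpeechTranscription taken from an
      # annotation response.
      #
      #   top = transcription.alternatives&.first
      #   if top
      #     puts "#{top.transcript} (confidence #{top.confidence})"
      #     (top.words || []).each { |w| puts "  #{w.word} #{w.start_time}-#{w.end_time}" }
      #   end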

      # Annotations related to one detected OCR text snippet. This will contain the
      # corresponding text, confidence value, and frame level information for each
      # detection.
      class GoogleCloudVideointelligenceV1beta2TextAnnotation
        include Google::Apis::Core::Hashable

        # All video segments where OCR detected text appears.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextSegment>]
        attr_accessor :segments

        # The detected text.
        # Corresponds to the JSON property `text`
        # @return [String]
        attr_accessor :text

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @segments = args[:segments] if args.key?(:segments)
          @text = args[:text] if args.key?(:text)
        end
      end

      # Video frame level annotation results for text annotation (OCR).
      # Contains information regarding timestamp and bounding box locations for the
      # frames containing detected OCR text snippets.
      class GoogleCloudVideointelligenceV1beta2TextFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding polygon for text (that might not be aligned with axis).
        # Contains a list of the corner points in clockwise order starting from the
        # top-left corner. For example, for a rectangular bounding box:
        # When the text is horizontal it might look like:
        # 0----1
        # |    |
        # 3----2
        # When it's rotated 180 degrees clockwise around the top-left corner it
        # becomes:
        # 2----3
        # |    |
        # 1----0
        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
        # than 0, or greater than 1 due to trigonometric calculations for location of
        # the box.
        # Corresponds to the JSON property `rotatedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly]
        attr_accessor :rotated_bounding_box

        # Timestamp of this frame.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for text detection.
      class GoogleCloudVideointelligenceV1beta2TextSegment
        include Google::Apis::Core::Hashable

        # Confidence for the track of detected text. It is calculated as the highest
        # over all frames where OCR detected text appears.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Information related to the frames where OCR detected text appears.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
        end
      end
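
      # Illustrative usage sketch (comment only): walking the OCR results of a
      # hypothetical `text_annotation`
      # (GoogleCloudVideointelligenceV1beta2TextAnnotation). Each segment carries
      # its own confidence and per-frame rotated bounding boxes.
      #
      #   (text_annotation.segments || []).each do |seg|
      #     puts "#{text_annotation.text} (confidence #{seg.confidence})"
      #     (seg.frames || []).each do |frame|
      #       vertices = frame.rotated_bounding_box&.vertices || []
      #       puts "  at #{frame.time_offset}: #{vertices.map { |v| [v.x, v.y] }.inspect}"
      #     end
      #   end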

      # Annotation progress for a single video.
      class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress
        include Google::Apis::Core::Hashable

        # Specifies which feature is being tracked if the request contains more than
        # one feature.
        # Corresponds to the JSON property `feature`
        # @return [String]
        attr_accessor :feature

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Approximate percentage processed thus far. Guaranteed to be
        # 100 when fully processed.
        # Corresponds to the JSON property `progressPercent`
        # @return [Fixnum]
        attr_accessor :progress_percent

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
        attr_accessor :segment

        # Time when the request was received.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # Time of the most recent update.
        # Corresponds to the JSON property `updateTime`
        # @return [String]
        attr_accessor :update_time

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @feature = args[:feature] if args.key?(:feature)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @progress_percent = args[:progress_percent] if args.key?(:progress_percent)
          @segment = args[:segment] if args.key?(:segment)
          @start_time = args[:start_time] if args.key?(:start_time)
          @update_time = args[:update_time] if args.key?(:update_time)
        end
      end
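
      # Illustrative sketch (comment only): reporting progress from a
      # hypothetical `progress` object
      # (GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress), e.g. while
      # polling a long-running operation.
      #
      #   puts "#{progress.input_uri}: #{progress.progress_percent}% " \
      #        "(feature #{progress.feature}, started #{progress.start_time}, " \
      #        "updated #{progress.update_time})"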

      # Annotation results for a single video.
      class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults
        include Google::Apis::Core::Hashable

        # The `Status` type defines a logical error model that is suitable for
        # different programming environments, including REST APIs and RPC APIs. It is
        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
        # three pieces of data: error code, error message, and error details.
        # You can find out more about this error model and how to work with it in the
        # [API Design Guide](https://cloud.google.com/apis/design/errors).
        # Corresponds to the JSON property `error`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
        attr_accessor :error

        # Explicit content annotation (based on per-frame visual signals only).
        # If no explicit content has been detected in a frame, no annotations are
        # present for that frame.
        # Corresponds to the JSON property `explicitAnnotation`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation]
        attr_accessor :explicit_annotation

        # Label annotations on frame level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `frameLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
        attr_accessor :frame_label_annotations

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Annotations for a list of objects detected and tracked in the video.
        # Corresponds to the JSON property `objectAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation>]
        attr_accessor :object_annotations

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment]
        attr_accessor :segment

        # Topical label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `segmentLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
        attr_accessor :segment_label_annotations

        # Presence label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label. Compared to the
        # existing topical `segment_label_annotations`, this field presents more
        # fine-grained, segment-level labels detected in video content and is made
        # available only when the client sets `LabelDetectionConfig.model` to
        # "builtin/latest" in the request.
        # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
        attr_accessor :segment_presence_label_annotations

        # Shot annotations. Each shot is represented as a video segment.
        # Corresponds to the JSON property `shotAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment>]
        attr_accessor :shot_annotations

        # Topical label annotations on shot level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `shotLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
        attr_accessor :shot_label_annotations

        # Presence label annotations on shot level. There is exactly one element for
        # each unique label. Compared to the existing topical
        # `shot_label_annotations`, this field presents more fine-grained, shot-level
        # labels detected in video content and is made available only when the client
        # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
        # Corresponds to the JSON property `shotPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation>]
        attr_accessor :shot_presence_label_annotations

        # Speech transcription.
        # Corresponds to the JSON property `speechTranscriptions`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription>]
        attr_accessor :speech_transcriptions

        # OCR text detection and tracking.
        # Annotations for a list of detected text snippets. Each has a list of
        # frame information associated with it.
        # Corresponds to the JSON property `textAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation>]
        attr_accessor :text_annotations

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @error = args[:error] if args.key?(:error)
          @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
          @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
          @segment = args[:segment] if args.key?(:segment)
          @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
          @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
          @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
          @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations)
          @shot_presence_label_annotations = args[:shot_presence_label_annotations] if args.key?(:shot_presence_label_annotations)
          @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions)
          @text_annotations = args[:text_annotations] if args.key?(:text_annotations)
        end
      end
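
      # Illustrative sketch (comment only): summarising one video's results from
      # a hypothetical `results` object
      # (GoogleCloudVideointelligenceV1beta2VideoAnnotationResults).
      #
      #   if results.error
      #     warn "annotation failed: #{results.error.message}"
      #   else
      #     (results.segment_label_annotations || []).each do |label|
      #       best = (label.segments || []).map(&:confidence).compact.max
      #       puts "#{label.entity&.description}: #{best}"
      #     end
      #   end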

      # Video segment.
      class GoogleCloudVideointelligenceV1beta2VideoSegment
        include Google::Apis::Core::Hashable

        # Time-offset, relative to the beginning of the video,
        # corresponding to the end of the segment (inclusive).
        # Corresponds to the JSON property `endTimeOffset`
        # @return [String]
        attr_accessor :end_time_offset

        # Time-offset, relative to the beginning of the video,
        # corresponding to the start of the segment (inclusive).
        # Corresponds to the JSON property `startTimeOffset`
        # @return [String]
        attr_accessor :start_time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset)
          @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset)
        end
      end
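
      # Illustrative sketch (comment only, assuming the segment offsets arrive as
      # JSON duration strings such as "12.500s"): converting a hypothetical
      # `segment` (GoogleCloudVideointelligenceV1beta2VideoSegment) into numeric
      # seconds.
      #
      #   start_s = segment.start_time_offset.to_s.delete_suffix('s').to_f
      #   end_s   = segment.end_time_offset.to_s.delete_suffix('s').to_f
      #   duration = end_s - start_s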

      # Word-specific information for recognized words. Word information is only
      # included in the response when certain request parameters are set, such
      # as `enable_word_time_offsets`.
      class GoogleCloudVideointelligenceV1beta2WordInfo
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time offset relative to the beginning of the audio, and
        # corresponding to the end of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `endTime`
        # @return [String]
        attr_accessor :end_time

        # Output only. A distinct integer value is assigned for every speaker within
        # the audio. This field specifies which one of those speakers was detected to
        # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
        # and is only set if speaker diarization is enabled.
        # Corresponds to the JSON property `speakerTag`
        # @return [Fixnum]
        attr_accessor :speaker_tag

        # Time offset relative to the beginning of the audio, and
        # corresponding to the start of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # The word corresponding to this set of information.
        # Corresponds to the JSON property `word`
        # @return [String]
        attr_accessor :word

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @end_time = args[:end_time] if args.key?(:end_time)
          @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
          @start_time = args[:start_time] if args.key?(:start_time)
          @word = args[:word] if args.key?(:word)
        end
      end
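
      # Illustrative sketch (comment only): grouping recognized words by speaker
      # when speaker diarization was enabled. `words` is a hypothetical array of
      # GoogleCloudVideointelligenceV1beta2WordInfo.
      #
      #   by_speaker = words.group_by(&:speaker_tag)
      #   by_speaker.each do |speaker, spoken|
      #     puts "speaker #{speaker}: #{spoken.map(&:word).join(' ')}"
      #   end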

      # Video annotation progress. Included in the `metadata`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoProgress
        include Google::Apis::Core::Hashable

        # Progress metadata for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationProgress`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress>]
        attr_accessor :annotation_progress

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress)
        end
      end

      # Video annotation response. Included in the `response`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1p1beta1AnnotateVideoResponse
        include Google::Apis::Core::Hashable

        # Annotation results for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationResults`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults>]
        attr_accessor :annotation_results

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_results = args[:annotation_results] if args.key?(:annotation_results)
        end
      end

      # Detected entity from video analysis.
      class GoogleCloudVideointelligenceV1p1beta1Entity
        include Google::Apis::Core::Hashable

        # Textual description, e.g. `Fixed-gear bicycle`.
        # Corresponds to the JSON property `description`
        # @return [String]
        attr_accessor :description

        # Opaque entity ID. Some IDs may be available in
        # [Google Knowledge Graph Search
        # API](https://developers.google.com/knowledge-graph/).
        # Corresponds to the JSON property `entityId`
        # @return [String]
        attr_accessor :entity_id

        # Language code for `description` in BCP-47 format.
        # Corresponds to the JSON property `languageCode`
        # @return [String]
        attr_accessor :language_code

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @description = args[:description] if args.key?(:description)
          @entity_id = args[:entity_id] if args.key?(:entity_id)
          @language_code = args[:language_code] if args.key?(:language_code)
        end
      end

      # Explicit content annotation (based on per-frame visual signals only).
      # If no explicit content has been detected in a frame, no annotations are
      # present for that frame.
      class GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation
        include Google::Apis::Core::Hashable

        # All video frames where explicit content was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame>]
        attr_accessor :frames

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @frames = args[:frames] if args.key?(:frames)
        end
      end

      # Video frame level annotation results for explicit content.
      class GoogleCloudVideointelligenceV1p1beta1ExplicitContentFrame
        include Google::Apis::Core::Hashable

        # Likelihood of the pornography content.
        # Corresponds to the JSON property `pornographyLikelihood`
        # @return [String]
        attr_accessor :pornography_likelihood

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Label annotation.
      class GoogleCloudVideointelligenceV1p1beta1LabelAnnotation
        include Google::Apis::Core::Hashable

        # Common categories for the detected entity.
        # E.g., when the label is `Terrier`, the category is likely `dog`. In some
        # cases there might be more than one category, e.g. `Terrier` could also be
        # a `pet`.
        # Corresponds to the JSON property `categoryEntities`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity>]
        attr_accessor :category_entities

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
        attr_accessor :entity

        # All video frames where a label was detected.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelFrame>]
        attr_accessor :frames

        # All video segments where a label was detected.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelSegment>]
        attr_accessor :segments

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @category_entities = args[:category_entities] if args.key?(:category_entities)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segments = args[:segments] if args.key?(:segments)
        end
      end

      # Video frame level annotation results for label detection.
      class GoogleCloudVideointelligenceV1p1beta1LabelFrame
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time-offset, relative to the beginning of the video, corresponding to the
        # video frame for this location.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for label detection.
      class GoogleCloudVideointelligenceV1p1beta1LabelSegment
        include Google::Apis::Core::Hashable

        # Confidence that the label is accurate. Range: [0, 1].
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @segment = args[:segment] if args.key?(:segment)
        end
      end

      # Normalized bounding box.
      # The normalized vertex coordinates are relative to the original image.
      # Range: [0, 1].
      class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox
        include Google::Apis::Core::Hashable

        # Bottom Y coordinate.
        # Corresponds to the JSON property `bottom`
        # @return [Float]
        attr_accessor :bottom

        # Left X coordinate.
        # Corresponds to the JSON property `left`
        # @return [Float]
        attr_accessor :left

        # Right X coordinate.
        # Corresponds to the JSON property `right`
        # @return [Float]
        attr_accessor :right

        # Top Y coordinate.
        # Corresponds to the JSON property `top`
        # @return [Float]
        attr_accessor :top

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @bottom = args[:bottom] if args.key?(:bottom)
          @left = args[:left] if args.key?(:left)
          @right = args[:right] if args.key?(:right)
          @top = args[:top] if args.key?(:top)
        end
      end
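
      # Illustrative sketch (comment only): converting a hypothetical `box`
      # (GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox) into pixel
      # coordinates for a frame of known size. Coordinates are normalized to
      # [0, 1], so they scale by the frame dimensions.
      #
      #   width, height = 1920, 1080   # hypothetical frame size
      #   x1 = (box.left   * width).round
      #   x2 = (box.right  * width).round
      #   y1 = (box.top    * height).round
      #   y2 = (box.bottom * height).round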

      # Normalized bounding polygon for text (that might not be aligned with axis).
      # Contains a list of the corner points in clockwise order starting from the
      # top-left corner. For example, for a rectangular bounding box:
      # When the text is horizontal it might look like:
      # 0----1
      # |    |
      # 3----2
      # When it's rotated 180 degrees clockwise around the top-left corner it
      # becomes:
      # 2----3
      # |    |
      # 1----0
      # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
      # than 0, or greater than 1 due to trigonometric calculations for location of
      # the box.
      class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly
        include Google::Apis::Core::Hashable

        # Normalized vertices of the bounding polygon.
        # Corresponds to the JSON property `vertices`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex>]
        attr_accessor :vertices

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @vertices = args[:vertices] if args.key?(:vertices)
        end
      end

      # A vertex represents a 2D point in the image.
      # NOTE: the normalized vertex coordinates are relative to the original image
      # and range from 0 to 1.
      class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex
        include Google::Apis::Core::Hashable

        # X coordinate.
        # Corresponds to the JSON property `x`
        # @return [Float]
        attr_accessor :x

        # Y coordinate.
        # Corresponds to the JSON property `y`
        # @return [Float]
        attr_accessor :y

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @x = args[:x] if args.key?(:x)
          @y = args[:y] if args.key?(:y)
        end
      end
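
      # Illustrative sketch (comment only): building a normalized bounding
      # polygon by hand, using the vertex order described above (clockwise from
      # the top-left corner). The coordinates here are hypothetical.
      #
      #   poly = GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly.new(
      #     vertices: [[0.1, 0.1], [0.4, 0.1], [0.4, 0.2], [0.1, 0.2]].map { |(x, y)|
      #       GoogleCloudVideointelligenceV1p1beta1NormalizedVertex.new(x: x, y: y)
      #     }
      #   )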

      # Annotations corresponding to one tracked object.
      class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation
        include Google::Apis::Core::Hashable

        # Object category's labeling confidence of this track.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Detected entity from video analysis.
        # Corresponds to the JSON property `entity`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity]
        attr_accessor :entity

        # Information corresponding to all frames where this object track appears.
        # Non-streaming batch mode: there may be one or more ObjectTrackingFrame
        # messages in frames.
        # Streaming mode: only one ObjectTrackingFrame message is present in frames.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
        attr_accessor :segment

        # Streaming mode ONLY.
        # In streaming mode, we do not know the end time of a tracked object
        # before it is completed. Hence, there is no VideoSegment info returned.
        # Instead, we provide a unique, identifiable integer track_id so that
        # customers can correlate the results of the ongoing
        # ObjectTrackAnnotation of the same track_id over time.
        # Corresponds to the JSON property `trackId`
        # @return [Fixnum]
        attr_accessor :track_id

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @entity = args[:entity] if args.key?(:entity)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
          @track_id = args[:track_id] if args.key?(:track_id)
        end
      end

      # Video frame level annotations for object detection and tracking. This field
      # stores per frame location, time offset, and confidence.
      class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding box.
        # The normalized vertex coordinates are relative to the original image.
        # Range: [0, 1].
        # Corresponds to the JSON property `normalizedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox]
        attr_accessor :normalized_bounding_box

        # The timestamp of the frame in microseconds.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end
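
      # Illustrative sketch (comment only): following one tracked object from a
      # hypothetical `annotation`
      # (GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation).
      #
      #   puts "#{annotation.entity&.description} (confidence #{annotation.confidence})"
      #   (annotation.frames || []).each do |frame|
      #     box = frame.normalized_bounding_box
      #     puts "  #{frame.time_offset}: [#{box.left}, #{box.top}, #{box.right}, #{box.bottom}]"
      #   end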

      # Alternative hypotheses (a.k.a. n-best list).
      class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Transcript text representing the words that the user spoke.
        # Corresponds to the JSON property `transcript`
        # @return [String]
        attr_accessor :transcript

        # Output only. A list of word-specific information for each recognized word.
        # Note: When `enable_speaker_diarization` is true, you will see all the words
        # from the beginning of the audio.
        # Corresponds to the JSON property `words`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1WordInfo>]
        attr_accessor :words

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @transcript = args[:transcript] if args.key?(:transcript)
          @words = args[:words] if args.key?(:words)
        end
      end

      # A speech recognition result corresponding to a portion of the audio.
      class GoogleCloudVideointelligenceV1p1beta1SpeechTranscription
        include Google::Apis::Core::Hashable

        # May contain one or more recognition hypotheses (up to the maximum specified
        # in `max_alternatives`). These alternatives are ordered in terms of
        # accuracy, with the top (first) alternative being the most probable, as
        # ranked by the recognizer.
        # Corresponds to the JSON property `alternatives`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative>]
        attr_accessor :alternatives

        # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
        # language tag of the language in this result. This language code was
        # detected as the most likely language spoken in the audio.
        # Corresponds to the JSON property `languageCode`
        # @return [String]
        attr_accessor :language_code

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @alternatives = args[:alternatives] if args.key?(:alternatives)
          @language_code = args[:language_code] if args.key?(:language_code)
        end
      end

      # Annotations related to one detected OCR text snippet. This will contain the
      # corresponding text, confidence value, and frame level information for each
      # detection.
      class GoogleCloudVideointelligenceV1p1beta1TextAnnotation
        include Google::Apis::Core::Hashable

        # All video segments where OCR detected text appears.
        # Corresponds to the JSON property `segments`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextSegment>]
        attr_accessor :segments

        # The detected text.
        # Corresponds to the JSON property `text`
        # @return [String]
        attr_accessor :text

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @segments = args[:segments] if args.key?(:segments)
          @text = args[:text] if args.key?(:text)
        end
      end

      # Video frame level annotation results for text annotation (OCR).
      # Contains information regarding timestamp and bounding box locations for the
      # frames containing detected OCR text snippets.
      class GoogleCloudVideointelligenceV1p1beta1TextFrame
        include Google::Apis::Core::Hashable

        # Normalized bounding polygon for text (that might not be aligned with axis).
        # Contains a list of the corner points in clockwise order starting from the
        # top-left corner. For example, for a rectangular bounding box:
        # When the text is horizontal it might look like:
        # 0----1
        # |    |
        # 3----2
        # When it's rotated 180 degrees clockwise around the top-left corner it
        # becomes:
        # 2----3
        # |    |
        # 1----0
        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
        # than 0, or greater than 1 due to trigonometric calculations for location of
        # the box.
        # Corresponds to the JSON property `rotatedBoundingBox`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly]
        attr_accessor :rotated_bounding_box

        # Timestamp of this frame.
        # Corresponds to the JSON property `timeOffset`
        # @return [String]
        attr_accessor :time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
          @time_offset = args[:time_offset] if args.key?(:time_offset)
        end
      end

      # Video segment level annotation results for text detection.
      class GoogleCloudVideointelligenceV1p1beta1TextSegment
        include Google::Apis::Core::Hashable

        # Confidence for the track of detected text. It is calculated as the highest
        # over all frames where OCR detected text appears.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Information related to the frames where OCR detected text appears.
        # Corresponds to the JSON property `frames`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextFrame>]
        attr_accessor :frames

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
        attr_accessor :segment

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @frames = args[:frames] if args.key?(:frames)
          @segment = args[:segment] if args.key?(:segment)
        end
      end

      # Annotation progress for a single video.
      class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress
        include Google::Apis::Core::Hashable

        # Specifies which feature is being tracked if the request contains more than
        # one feature.
        # Corresponds to the JSON property `feature`
        # @return [String]
        attr_accessor :feature

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Approximate percentage processed thus far. Guaranteed to be
        # 100 when fully processed.
        # Corresponds to the JSON property `progressPercent`
        # @return [Fixnum]
        attr_accessor :progress_percent

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
        attr_accessor :segment

        # Time when the request was received.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # Time of the most recent update.
        # Corresponds to the JSON property `updateTime`
        # @return [String]
        attr_accessor :update_time

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @feature = args[:feature] if args.key?(:feature)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @progress_percent = args[:progress_percent] if args.key?(:progress_percent)
          @segment = args[:segment] if args.key?(:segment)
          @start_time = args[:start_time] if args.key?(:start_time)
          @update_time = args[:update_time] if args.key?(:update_time)
        end
      end

      # Annotation results for a single video.
      class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationResults
        include Google::Apis::Core::Hashable

        # The `Status` type defines a logical error model that is suitable for
        # different programming environments, including REST APIs and RPC APIs. It is
        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
        # three pieces of data: error code, error message, and error details.
        # You can find out more about this error model and how to work with it in the
        # [API Design Guide](https://cloud.google.com/apis/design/errors).
        # Corresponds to the JSON property `error`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
        attr_accessor :error

        # Explicit content annotation (based on per-frame visual signals only).
        # If no explicit content has been detected in a frame, no annotations are
        # present for that frame.
        # Corresponds to the JSON property `explicitAnnotation`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ExplicitContentAnnotation]
        attr_accessor :explicit_annotation

        # Label annotations on frame level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `frameLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
        attr_accessor :frame_label_annotations

        # Video file location in
        # [Google Cloud Storage](https://cloud.google.com/storage/).
        # Corresponds to the JSON property `inputUri`
        # @return [String]
        attr_accessor :input_uri

        # Annotations for a list of objects detected and tracked in the video.
        # Corresponds to the JSON property `objectAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation>]
        attr_accessor :object_annotations

        # Video segment.
        # Corresponds to the JSON property `segment`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment]
        attr_accessor :segment

        # Topical label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `segmentLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
        attr_accessor :segment_label_annotations

        # Presence label annotations on video level or user specified segment level.
        # There is exactly one element for each unique label. Compared to the
        # existing topical `segment_label_annotations`, this field presents more
        # fine-grained, segment-level labels detected in video content and is made
        # available only when the client sets `LabelDetectionConfig.model` to
        # "builtin/latest" in the request.
        # Corresponds to the JSON property `segmentPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
        attr_accessor :segment_presence_label_annotations

        # Shot annotations. Each shot is represented as a video segment.
        # Corresponds to the JSON property `shotAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment>]
        attr_accessor :shot_annotations

        # Topical label annotations on shot level.
        # There is exactly one element for each unique label.
        # Corresponds to the JSON property `shotLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
        attr_accessor :shot_label_annotations

        # Presence label annotations on shot level. There is exactly one element for
        # each unique label. Compared to the existing topical
        # `shot_label_annotations`, this field presents more fine-grained, shot-level
        # labels detected in video content and is made available only when the client
        # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
        # Corresponds to the JSON property `shotPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation>]
        attr_accessor :shot_presence_label_annotations

        # Speech transcription.
        # Corresponds to the JSON property `speechTranscriptions`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription>]
        attr_accessor :speech_transcriptions

        # OCR text detection and tracking.
        # Annotations for a list of detected text snippets. Each has a list of
        # frame information associated with it.
        # Corresponds to the JSON property `textAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation>]
        attr_accessor :text_annotations

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @error = args[:error] if args.key?(:error)
          @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
          @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
          @segment = args[:segment] if args.key?(:segment)
          @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
          @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
          @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
          @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations)
          @shot_presence_label_annotations = args[:shot_presence_label_annotations] if args.key?(:shot_presence_label_annotations)
          @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions)
          @text_annotations = args[:text_annotations] if args.key?(:text_annotations)
        end
      end

      # Video segment.
      class GoogleCloudVideointelligenceV1p1beta1VideoSegment
        include Google::Apis::Core::Hashable

        # Time-offset, relative to the beginning of the video,
        # corresponding to the end of the segment (inclusive).
        # Corresponds to the JSON property `endTimeOffset`
        # @return [String]
        attr_accessor :end_time_offset

        # Time-offset, relative to the beginning of the video,
        # corresponding to the start of the segment (inclusive).
        # Corresponds to the JSON property `startTimeOffset`
        # @return [String]
        attr_accessor :start_time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset)
          @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset)
        end
      end

      # Word-specific information for recognized words. Word information is only
      # included in the response when certain request parameters are set, such
      # as `enable_word_time_offsets`.
      class GoogleCloudVideointelligenceV1p1beta1WordInfo
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time offset relative to the beginning of the audio, and
        # corresponding to the end of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `endTime`
        # @return [String]
        attr_accessor :end_time

        # Output only. A distinct integer value is assigned for every speaker within
        # the audio. This field specifies which one of those speakers was detected to
        # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
        # and is only set if speaker diarization is enabled.
        # Corresponds to the JSON property `speakerTag`
        # @return [Fixnum]
        attr_accessor :speaker_tag

        # Time offset relative to the beginning of the audio, and
        # corresponding to the start of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # The word corresponding to this set of information.
        # Corresponds to the JSON property `word`
        # @return [String]
        attr_accessor :word

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @end_time = args[:end_time] if args.key?(:end_time)
          @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
          @start_time = args[:start_time] if args.key?(:start_time)
          @word = args[:word] if args.key?(:word)
        end
      end

      # Video annotation progress. Included in the `metadata`
      # field of the `Operation` returned by the `GetOperation`
      # call of the `google::longrunning::Operations` service.
      class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoProgress
        include Google::Apis::Core::Hashable

        # Progress metadata for all videos specified in `AnnotateVideoRequest`.
        # Corresponds to the JSON property `annotationProgress`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress>]
        attr_accessor :annotation_progress

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress)
        end
      end

# Video annotation request.
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest
include Google::Apis::Core::Hashable

# Required. Requested video annotation features.
# Corresponds to the JSON property `features`
# @return [Array<String>]
attr_accessor :features

# The video data bytes.
# If unset, the input video(s) should be specified via `input_uri`.
# If set, `input_uri` should be unset.
# Corresponds to the JSON property `inputContent`
# NOTE: Values are automatically base64 encoded/decoded in the client library.
# @return [String]
attr_accessor :input_content

# Input video location. Currently, only
# [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
# supported, which must be specified in the following format:
# `gs://bucket-id/object-id` (other URI formats return
# google.rpc.Code.INVALID_ARGUMENT). For more information, see
# [Request URIs](/storage/docs/reference-uris).
# A video URI may include wildcards in `object-id`, and thus identify
# multiple videos. Supported wildcards: '*' to match 0 or more characters;
# '?' to match 1 character. If unset, the input video should be embedded
# in the request as `input_content`. If set, `input_content` should be unset.
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

# Optional. Cloud region where annotation should take place. Supported cloud
# regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
# is specified, a region will be determined based on video file location.
# Corresponds to the JSON property `locationId`
# @return [String]
attr_accessor :location_id

# Optional. Location where the output (in JSON format) should be stored.
# Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
# URIs are supported, which must be specified in the following format:
# `gs://bucket-id/object-id` (other URI formats return
# google.rpc.Code.INVALID_ARGUMENT). For more information, see
# [Request URIs](/storage/docs/reference-uris).
# Corresponds to the JSON property `outputUri`
# @return [String]
attr_accessor :output_uri

# Video context and/or feature-specific parameters.
# Corresponds to the JSON property `videoContext`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoContext]
attr_accessor :video_context

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@features = args[:features] if args.key?(:features)
@input_content = args[:input_content] if args.key?(:input_content)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@location_id = args[:location_id] if args.key?(:location_id)
@output_uri = args[:output_uri] if args.key?(:output_uri)
@video_context = args[:video_context] if args.key?(:video_context)
end
end
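
# A minimal usage sketch (not part of the generated surface): building an
# annotate-video request for a Cloud Storage file. The attribute names mirror
# the fields defined above; the bucket/object and feature selection below are
# assumptions for illustration, and how the request is ultimately submitted
# depends on the service class elsewhere in this library.
#
#   request = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1AnnotateVideoRequest.new(
#     features: ['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION'],
#     input_uri: 'gs://my-bucket/my-video.mp4', # hypothetical bucket/object
#     location_id: 'us-east1'
#   )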

# Video annotation response. Included in the `response`
# field of the `Operation` returned by the `GetOperation`
# call of the `google::longrunning::Operations` service.
class GoogleCloudVideointelligenceV1p2beta1AnnotateVideoResponse
include Google::Apis::Core::Hashable

# Annotation results for all videos specified in `AnnotateVideoRequest`.
# Corresponds to the JSON property `annotationResults`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults>]
attr_accessor :annotation_results

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@annotation_results = args[:annotation_results] if args.key?(:annotation_results)
end
end

# Detected entity from video analysis.
class GoogleCloudVideointelligenceV1p2beta1Entity
include Google::Apis::Core::Hashable

# Textual description, e.g. `Fixed-gear bicycle`.
# Corresponds to the JSON property `description`
# @return [String]
attr_accessor :description

# Opaque entity ID. Some IDs may be available in
# [Google Knowledge Graph Search
# API](https://developers.google.com/knowledge-graph/).
# Corresponds to the JSON property `entityId`
# @return [String]
attr_accessor :entity_id

# Language code for `description` in BCP-47 format.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@description = args[:description] if args.key?(:description)
@entity_id = args[:entity_id] if args.key?(:entity_id)
@language_code = args[:language_code] if args.key?(:language_code)
end
end

# Explicit content annotation (based on per-frame visual signals only).
# If no explicit content has been detected in a frame, no annotations are
# present for that frame.
class GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation
include Google::Apis::Core::Hashable

# All video frames where explicit content was detected.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentFrame>]
attr_accessor :frames

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@frames = args[:frames] if args.key?(:frames)
end
end

# Config for EXPLICIT_CONTENT_DETECTION.
class GoogleCloudVideointelligenceV1p2beta1ExplicitContentDetectionConfig
include Google::Apis::Core::Hashable

# Model to use for explicit content detection.
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
# Corresponds to the JSON property `model`
# @return [String]
attr_accessor :model

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@model = args[:model] if args.key?(:model)
end
end

# Video frame level annotation results for explicit content.
class GoogleCloudVideointelligenceV1p2beta1ExplicitContentFrame
include Google::Apis::Core::Hashable

# Likelihood of the pornography content.
# Corresponds to the JSON property `pornographyLikelihood`
# @return [String]
attr_accessor :pornography_likelihood

# Time-offset, relative to the beginning of the video, corresponding to the
# video frame for this location.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
@time_offset = args[:time_offset] if args.key?(:time_offset)
end
end

# Label annotation.
class GoogleCloudVideointelligenceV1p2beta1LabelAnnotation
include Google::Apis::Core::Hashable

# Common categories for the detected entity.
# E.g. when the label is `Terrier`, the category is likely `dog`. In some
# cases there might be more than one category, e.g. `Terrier` could also be
# a `pet`.
# Corresponds to the JSON property `categoryEntities`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1Entity>]
attr_accessor :category_entities

# Detected entity from video analysis.
# Corresponds to the JSON property `entity`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
attr_accessor :entity

# All video frames where a label was detected.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelFrame>]
attr_accessor :frames

# All video segments where a label was detected.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelSegment>]
attr_accessor :segments

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@category_entities = args[:category_entities] if args.key?(:category_entities)
@entity = args[:entity] if args.key?(:entity)
@frames = args[:frames] if args.key?(:frames)
@segments = args[:segments] if args.key?(:segments)
end
end

# Config for LABEL_DETECTION.
class GoogleCloudVideointelligenceV1p2beta1LabelDetectionConfig
include Google::Apis::Core::Hashable

# The confidence threshold used to filter the labels from
# frame-level detection. If not set, it is set to 0.4 by default. The valid
# range for this threshold is [0.1, 0.9]. Any value set outside of this
# range will be clipped.
# Note: for best results please use the default threshold. We will update
# the default threshold every time we release a new model.
# Corresponds to the JSON property `frameConfidenceThreshold`
# @return [Float]
attr_accessor :frame_confidence_threshold

# What labels should be detected with LABEL_DETECTION, in addition to
# video-level labels or segment-level labels.
# If unspecified, defaults to `SHOT_MODE`.
# Corresponds to the JSON property `labelDetectionMode`
# @return [String]
attr_accessor :label_detection_mode

# Model to use for label detection.
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
# Corresponds to the JSON property `model`
# @return [String]
attr_accessor :model

# Whether the video has been shot from a stationary (i.e. non-moving) camera.
# When set to true, might improve detection accuracy for moving objects.
# Should be used with `SHOT_AND_FRAME_MODE` enabled.
# Corresponds to the JSON property `stationaryCamera`
# @return [Boolean]
attr_accessor :stationary_camera
alias_method :stationary_camera?, :stationary_camera

# The confidence threshold used to filter the labels from
# video-level and shot-level detections. If not set, it is set to 0.3 by
# default. The valid range for this threshold is [0.1, 0.9]. Any value set
# outside of this range will be clipped.
# Note: for best results please use the default threshold. We will update
# the default threshold every time we release a new model.
# Corresponds to the JSON property `videoConfidenceThreshold`
# @return [Float]
attr_accessor :video_confidence_threshold

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@frame_confidence_threshold = args[:frame_confidence_threshold] if args.key?(:frame_confidence_threshold)
@label_detection_mode = args[:label_detection_mode] if args.key?(:label_detection_mode)
@model = args[:model] if args.key?(:model)
@stationary_camera = args[:stationary_camera] if args.key?(:stationary_camera)
@video_confidence_threshold = args[:video_confidence_threshold] if args.key?(:video_confidence_threshold)
end
end
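
# Illustrative sketch only (not generated code): a label detection config that
# enables frame-level labels alongside shot-level labels. The mode value is a
# documented enum; the threshold below simply restates the documented default
# and is an assumption chosen for demonstration, not a recommendation.
#
#   label_config = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelDetectionConfig.new(
#     label_detection_mode: 'SHOT_AND_FRAME_MODE',
#     frame_confidence_threshold: 0.4,
#     stationary_camera: false
#   )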

# Video frame level annotation results for label detection.
class GoogleCloudVideointelligenceV1p2beta1LabelFrame
include Google::Apis::Core::Hashable

# Confidence that the label is accurate. Range: [0, 1].
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Time-offset, relative to the beginning of the video, corresponding to the
# video frame for this location.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@time_offset = args[:time_offset] if args.key?(:time_offset)
end
end

# Video segment level annotation results for label detection.
class GoogleCloudVideointelligenceV1p2beta1LabelSegment
include Google::Apis::Core::Hashable

# Confidence that the label is accurate. Range: [0, 1].
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
attr_accessor :segment

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@segment = args[:segment] if args.key?(:segment)
end
end

# Normalized bounding box.
# The normalized vertex coordinates are relative to the original image.
# Range: [0, 1].
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox
include Google::Apis::Core::Hashable

# Bottom Y coordinate.
# Corresponds to the JSON property `bottom`
# @return [Float]
attr_accessor :bottom

# Left X coordinate.
# Corresponds to the JSON property `left`
# @return [Float]
attr_accessor :left

# Right X coordinate.
# Corresponds to the JSON property `right`
# @return [Float]
attr_accessor :right

# Top Y coordinate.
# Corresponds to the JSON property `top`
# @return [Float]
attr_accessor :top

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@bottom = args[:bottom] if args.key?(:bottom)
@left = args[:left] if args.key?(:left)
@right = args[:right] if args.key?(:right)
@top = args[:top] if args.key?(:top)
end
end

# Normalized bounding polygon for text (that might not be aligned with axis).
# Contains a list of the corner points in clockwise order starting from the
# top-left corner. For example, for a rectangular bounding box:
# When the text is horizontal it might look like:
# 0----1
# | |
# 3----2
# When it's clockwise rotated 180 degrees around the top-left corner it
# becomes:
# 2----3
# | |
# 1----0
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
# than 0, or greater than 1, due to trigonometric calculations for the location
# of the box.
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly
include Google::Apis::Core::Hashable

# Normalized vertices of the bounding polygon.
# Corresponds to the JSON property `vertices`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedVertex>]
attr_accessor :vertices

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@vertices = args[:vertices] if args.key?(:vertices)
end
end

# A vertex represents a 2D point in the image.
# NOTE: the normalized vertex coordinates are relative to the original image
# and range from 0 to 1.
class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex
include Google::Apis::Core::Hashable

# X coordinate.
# Corresponds to the JSON property `x`
# @return [Float]
attr_accessor :x

# Y coordinate.
# Corresponds to the JSON property `y`
# @return [Float]
attr_accessor :y

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@x = args[:x] if args.key?(:x)
@y = args[:y] if args.key?(:y)
end
end

# Annotations corresponding to one tracked object.
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation
include Google::Apis::Core::Hashable

# Object category's labeling confidence of this track.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Detected entity from video analysis.
# Corresponds to the JSON property `entity`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1Entity]
attr_accessor :entity

# Information corresponding to all frames where this object track appears.
# Non-streaming batch mode: there may be one or more ObjectTrackingFrame
# messages in frames.
# Streaming mode: there can be only one ObjectTrackingFrame message in frames.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame>]
attr_accessor :frames

# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
attr_accessor :segment

# Streaming mode ONLY.
# In streaming mode, we do not know the end time of a tracked object
# before it is completed. Hence, there is no VideoSegment info returned.
# Instead, we provide a unique, identifiable integer track_id so that
# customers can correlate the results of the ongoing
# ObjectTrackAnnotation of the same track_id over time.
# Corresponds to the JSON property `trackId`
# @return [Fixnum]
attr_accessor :track_id

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@entity = args[:entity] if args.key?(:entity)
@frames = args[:frames] if args.key?(:frames)
@segment = args[:segment] if args.key?(:segment)
@track_id = args[:track_id] if args.key?(:track_id)
end
end

# Config for OBJECT_TRACKING.
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingConfig
include Google::Apis::Core::Hashable

# Model to use for object tracking.
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
# Corresponds to the JSON property `model`
# @return [String]
attr_accessor :model

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@model = args[:model] if args.key?(:model)
end
end

# Video frame level annotations for object detection and tracking. This field
# stores per frame location, time offset, and confidence.
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame
include Google::Apis::Core::Hashable

# Normalized bounding box.
# The normalized vertex coordinates are relative to the original image.
# Range: [0, 1].
# Corresponds to the JSON property `normalizedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox]
attr_accessor :normalized_bounding_box

# The timestamp of the frame in microseconds.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
@time_offset = args[:time_offset] if args.key?(:time_offset)
end
end

# Config for SHOT_CHANGE_DETECTION.
class GoogleCloudVideointelligenceV1p2beta1ShotChangeDetectionConfig
include Google::Apis::Core::Hashable

# Model to use for shot change detection.
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
# Corresponds to the JSON property `model`
# @return [String]
attr_accessor :model

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@model = args[:model] if args.key?(:model)
end
end

# Provides "hints" to the speech recognizer to favor specific words and phrases
# in the results.
class GoogleCloudVideointelligenceV1p2beta1SpeechContext
include Google::Apis::Core::Hashable

# Optional. A list of strings containing word and phrase "hints" so that
# the speech recognition is more likely to recognize them. This can be used
# to improve the accuracy for specific words and phrases, for example, if
# specific commands are typically spoken by the user. This can also be used
# to add additional words to the vocabulary of the recognizer. See
# [usage limits](https://cloud.google.com/speech/limits#content).
# Corresponds to the JSON property `phrases`
# @return [Array<String>]
attr_accessor :phrases

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@phrases = args[:phrases] if args.key?(:phrases)
end
end

# Alternative hypotheses (a.k.a. n-best list).
class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
# correct. This field is set only for the top alternative.
# This field is not guaranteed to be accurate and users should not rely on it
# to be always provided.
# The default of 0.0 is a sentinel value indicating `confidence` was not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Transcript text representing the words that the user spoke.
# Corresponds to the JSON property `transcript`
# @return [String]
attr_accessor :transcript

# Output only. A list of word-specific information for each recognized word.
# Note: When `enable_speaker_diarization` is true, you will see all the words
# from the beginning of the audio.
# Corresponds to the JSON property `words`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1WordInfo>]
attr_accessor :words

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@transcript = args[:transcript] if args.key?(:transcript)
@words = args[:words] if args.key?(:words)
end
end

# A speech recognition result corresponding to a portion of the audio.
class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription
include Google::Apis::Core::Hashable

# May contain one or more recognition hypotheses (up to the maximum specified
# in `max_alternatives`). These alternatives are ordered in terms of
# accuracy, with the top (first) alternative being the most probable, as
# ranked by the recognizer.
# Corresponds to the JSON property `alternatives`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative>]
attr_accessor :alternatives

# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
# language tag of the language in this result. This language code was
# detected as the most likely language spoken in the audio.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@alternatives = args[:alternatives] if args.key?(:alternatives)
@language_code = args[:language_code] if args.key?(:language_code)
end
end

# Config for SPEECH_TRANSCRIPTION.
class GoogleCloudVideointelligenceV1p2beta1SpeechTranscriptionConfig
include Google::Apis::Core::Hashable

# Optional. For file formats, such as MXF or MKV, supporting multiple audio
# tracks, specify up to two tracks. Default: track 0.
# Corresponds to the JSON property `audioTracks`
# @return [Array<Fixnum>]
attr_accessor :audio_tracks

# Optional. If set, specifies the estimated number of speakers in the
# conversation.
# If not set, defaults to '2'.
# Ignored unless enable_speaker_diarization is set to true.
# Corresponds to the JSON property `diarizationSpeakerCount`
# @return [Fixnum]
attr_accessor :diarization_speaker_count

# Optional. If 'true', adds punctuation to recognition result hypotheses.
# This feature is only available in select languages. Setting this for
# requests in other languages has no effect at all. The default 'false' value
# does not add punctuation to result hypotheses. NOTE: "This is currently
# offered as an experimental service, complimentary to all users. In the
# future this may be exclusively available as a premium feature."
# Corresponds to the JSON property `enableAutomaticPunctuation`
# @return [Boolean]
attr_accessor :enable_automatic_punctuation
alias_method :enable_automatic_punctuation?, :enable_automatic_punctuation

# Optional. If 'true', enables speaker detection for each recognized word in
# the top alternative of the recognition result using a speaker_tag provided
# in the WordInfo.
# Note: When this is true, we send all the words from the beginning of the
# audio for the top alternative in every consecutive response.
# This is done in order to improve our speaker tags as our models learn to
# identify the speakers in the conversation over time.
# Corresponds to the JSON property `enableSpeakerDiarization`
# @return [Boolean]
attr_accessor :enable_speaker_diarization
alias_method :enable_speaker_diarization?, :enable_speaker_diarization

# Optional. If `true`, the top result includes a list of words and the
# confidence for those words. If `false`, no word-level confidence
# information is returned. The default is `false`.
# Corresponds to the JSON property `enableWordConfidence`
# @return [Boolean]
attr_accessor :enable_word_confidence
alias_method :enable_word_confidence?, :enable_word_confidence

# Optional. If set to `true`, the server will attempt to filter out
# profanities, replacing all but the initial character in each filtered word
# with asterisks, e.g. "f***". If set to `false` or omitted, profanities
# won't be filtered out.
# Corresponds to the JSON property `filterProfanity`
# @return [Boolean]
attr_accessor :filter_profanity
alias_method :filter_profanity?, :filter_profanity

# Required. The language of the supplied audio as a
# [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
# Example: "en-US".
# See [Language Support](https://cloud.google.com/speech/docs/languages)
# for a list of the currently supported language codes.
# Corresponds to the JSON property `languageCode`
# @return [String]
attr_accessor :language_code

# Optional. Maximum number of recognition hypotheses to be returned.
# Specifically, the maximum number of `SpeechRecognitionAlternative` messages
# within each `SpeechTranscription`. The server may return fewer than
# `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
# return a maximum of one. If omitted, will return a maximum of one.
# Corresponds to the JSON property `maxAlternatives`
# @return [Fixnum]
attr_accessor :max_alternatives

# Optional. A means to provide context to assist the speech recognition.
# Corresponds to the JSON property `speechContexts`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1SpeechContext>]
attr_accessor :speech_contexts

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@audio_tracks = args[:audio_tracks] if args.key?(:audio_tracks)
@diarization_speaker_count = args[:diarization_speaker_count] if args.key?(:diarization_speaker_count)
@enable_automatic_punctuation = args[:enable_automatic_punctuation] if args.key?(:enable_automatic_punctuation)
@enable_speaker_diarization = args[:enable_speaker_diarization] if args.key?(:enable_speaker_diarization)
@enable_word_confidence = args[:enable_word_confidence] if args.key?(:enable_word_confidence)
@filter_profanity = args[:filter_profanity] if args.key?(:filter_profanity)
@language_code = args[:language_code] if args.key?(:language_code)
@max_alternatives = args[:max_alternatives] if args.key?(:max_alternatives)
@speech_contexts = args[:speech_contexts] if args.key?(:speech_contexts)
end
end
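
# Illustrative sketch only (not generated code): a transcription config with
# speaker diarization enabled. `language_code` is the one required field per
# the comments above; the remaining values are assumptions chosen for the
# example.
#
#   speech_config = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscriptionConfig.new(
#     language_code: 'en-US',
#     enable_speaker_diarization: true,
#     diarization_speaker_count: 2,
#     enable_automatic_punctuation: true
#   )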

# Annotations related to one detected OCR text snippet. This will contain the
# corresponding text, confidence value, and frame level information for each
# detection.
class GoogleCloudVideointelligenceV1p2beta1TextAnnotation
include Google::Apis::Core::Hashable

# All video segments where OCR detected text appears.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1TextSegment>]
attr_accessor :segments

# The detected text.
# Corresponds to the JSON property `text`
# @return [String]
attr_accessor :text

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@segments = args[:segments] if args.key?(:segments)
@text = args[:text] if args.key?(:text)
end
end

# Config for TEXT_DETECTION.
class GoogleCloudVideointelligenceV1p2beta1TextDetectionConfig
include Google::Apis::Core::Hashable

# Language hints can be specified if the language to be detected is known a
# priori. They can increase the accuracy of the detection. Each language hint
# must be a language code in BCP-47 format.
# Automatic language detection is performed if no hint is provided.
# Corresponds to the JSON property `languageHints`
# @return [Array<String>]
attr_accessor :language_hints

# Model to use for text detection.
# Supported values: "builtin/stable" (the default if unset) and
# "builtin/latest".
# Corresponds to the JSON property `model`
# @return [String]
attr_accessor :model

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@language_hints = args[:language_hints] if args.key?(:language_hints)
@model = args[:model] if args.key?(:model)
end
end

# Video frame level annotation results for text annotation (OCR).
# Contains information regarding timestamp and bounding box locations for the
# frames containing detected OCR text snippets.
class GoogleCloudVideointelligenceV1p2beta1TextFrame
include Google::Apis::Core::Hashable

# Normalized bounding polygon for text (that might not be aligned with axis).
# Contains a list of the corner points in clockwise order starting from the
# top-left corner. For example, for a rectangular bounding box:
# When the text is horizontal it might look like:
# 0----1
# | |
# 3----2
# When it's clockwise rotated 180 degrees around the top-left corner it
# becomes:
# 2----3
# | |
# 1----0
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
# than 0, or greater than 1, due to trigonometric calculations for the location
# of the box.
# Corresponds to the JSON property `rotatedBoundingBox`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly]
attr_accessor :rotated_bounding_box

# Timestamp of this frame.
# Corresponds to the JSON property `timeOffset`
# @return [String]
attr_accessor :time_offset

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
@time_offset = args[:time_offset] if args.key?(:time_offset)
end
end

# Video segment level annotation results for text detection.
class GoogleCloudVideointelligenceV1p2beta1TextSegment
include Google::Apis::Core::Hashable

# Confidence for the track of detected text. It is calculated as the highest
# over all frames where OCR detected text appears.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Information related to the frames where OCR detected text appears.
# Corresponds to the JSON property `frames`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1TextFrame>]
attr_accessor :frames

# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
attr_accessor :segment

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@frames = args[:frames] if args.key?(:frames)
@segment = args[:segment] if args.key?(:segment)
end
end

# Annotation progress for a single video.
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress
include Google::Apis::Core::Hashable

# Specifies which feature is being tracked if the request contains more than
# one feature.
# Corresponds to the JSON property `feature`
# @return [String]
attr_accessor :feature

# Video file location in
# [Google Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

# Approximate percentage processed thus far. Guaranteed to be
# 100 when fully processed.
# Corresponds to the JSON property `progressPercent`
# @return [Fixnum]
attr_accessor :progress_percent

# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
attr_accessor :segment

# Time when the request was received.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time

# Time of the most recent update.
# Corresponds to the JSON property `updateTime`
# @return [String]
attr_accessor :update_time

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@feature = args[:feature] if args.key?(:feature)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@progress_percent = args[:progress_percent] if args.key?(:progress_percent)
@segment = args[:segment] if args.key?(:segment)
@start_time = args[:start_time] if args.key?(:start_time)
@update_time = args[:update_time] if args.key?(:update_time)
end
end

# Annotation results for a single video.
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults
include Google::Apis::Core::Hashable

# The `Status` type defines a logical error model that is suitable for
# different programming environments, including REST APIs and RPC APIs. It is
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
# three pieces of data: error code, error message, and error details.
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
# Corresponds to the JSON property `error`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
attr_accessor :error

# Explicit content annotation (based on per-frame visual signals only).
# If no explicit content has been detected in a frame, no annotations are
# present for that frame.
# Corresponds to the JSON property `explicitAnnotation`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation]
attr_accessor :explicit_annotation

# Label annotations on frame level.
# There is exactly one element for each unique label.
# Corresponds to the JSON property `frameLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
attr_accessor :frame_label_annotations

# Video file location in
# [Google Cloud Storage](https://cloud.google.com/storage/).
# Corresponds to the JSON property `inputUri`
# @return [String]
attr_accessor :input_uri

# Annotations for the list of objects detected and tracked in the video.
# Corresponds to the JSON property `objectAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation>]
attr_accessor :object_annotations

# Video segment.
# Corresponds to the JSON property `segment`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment]
attr_accessor :segment

# Topical label annotations on video level or user specified segment level.
# There is exactly one element for each unique label.
# Corresponds to the JSON property `segmentLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
attr_accessor :segment_label_annotations

# Presence label annotations on video level or user specified segment level.
# There is exactly one element for each unique label. Compared to the
# existing topical `segment_label_annotations`, this field presents more
# fine-grained, segment-level labels detected in video content and is made
# available only when the client sets `LabelDetectionConfig.model` to
# "builtin/latest" in the request.
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
attr_accessor :segment_presence_label_annotations

# Shot annotations. Each shot is represented as a video segment.
# Corresponds to the JSON property `shotAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
attr_accessor :shot_annotations

# Topical label annotations on shot level.
# There is exactly one element for each unique label.
# Corresponds to the JSON property `shotLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
attr_accessor :shot_label_annotations

# Presence label annotations on shot level. There is exactly one element for
# each unique label. Compared to the existing topical
# `shot_label_annotations`, this field presents more fine-grained, shot-level
# labels detected in video content and is made available only when the client
# sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
# Corresponds to the JSON property `shotPresenceLabelAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelAnnotation>]
attr_accessor :shot_presence_label_annotations

# Speech transcription.
# Corresponds to the JSON property `speechTranscriptions`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscription>]
attr_accessor :speech_transcriptions

# OCR text detection and tracking.
# Annotations for the list of detected text snippets. Each will have a list of
# frame information associated with it.
# Corresponds to the JSON property `textAnnotations`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1TextAnnotation>]
attr_accessor :text_annotations

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@error = args[:error] if args.key?(:error)
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
@frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
@input_uri = args[:input_uri] if args.key?(:input_uri)
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
@segment = args[:segment] if args.key?(:segment)
@segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
@segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
@shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
@shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations)
@shot_presence_label_annotations = args[:shot_presence_label_annotations] if args.key?(:shot_presence_label_annotations)
@speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions)
@text_annotations = args[:text_annotations] if args.key?(:text_annotations)
end
end

# Video context and/or feature-specific parameters.
class GoogleCloudVideointelligenceV1p2beta1VideoContext
include Google::Apis::Core::Hashable

# Config for EXPLICIT_CONTENT_DETECTION.
# Corresponds to the JSON property `explicitContentDetectionConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ExplicitContentDetectionConfig]
attr_accessor :explicit_content_detection_config

# Config for LABEL_DETECTION.
# Corresponds to the JSON property `labelDetectionConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1LabelDetectionConfig]
attr_accessor :label_detection_config

# Config for OBJECT_TRACKING.
# Corresponds to the JSON property `objectTrackingConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ObjectTrackingConfig]
attr_accessor :object_tracking_config

# Video segments to annotate. The segments may overlap and are not required
# to be contiguous or span the whole video. If unspecified, each video is
# treated as a single segment.
# Corresponds to the JSON property `segments`
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment>]
attr_accessor :segments

# Config for SHOT_CHANGE_DETECTION.
# Corresponds to the JSON property `shotChangeDetectionConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1ShotChangeDetectionConfig]
attr_accessor :shot_change_detection_config

# Config for SPEECH_TRANSCRIPTION.
# Corresponds to the JSON property `speechTranscriptionConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1SpeechTranscriptionConfig]
attr_accessor :speech_transcription_config

# Config for TEXT_DETECTION.
# Corresponds to the JSON property `textDetectionConfig`
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1TextDetectionConfig]
attr_accessor :text_detection_config

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@explicit_content_detection_config = args[:explicit_content_detection_config] if args.key?(:explicit_content_detection_config)
@label_detection_config = args[:label_detection_config] if args.key?(:label_detection_config)
@object_tracking_config = args[:object_tracking_config] if args.key?(:object_tracking_config)
@segments = args[:segments] if args.key?(:segments)
@shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config)
@speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config)
@text_detection_config = args[:text_detection_config] if args.key?(:text_detection_config)
end
end
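
# Illustrative sketch only (not generated code): assembling a video context
# that restricts analysis to a single segment and attaches a speech config
# like the one sketched earlier. Segment offsets are duration strings; the
# exact values here are assumptions chosen for the example.
#
#   segment = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoSegment.new(
#     start_time_offset: '0s',
#     end_time_offset: '30s'
#   )
#   context = Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p2beta1VideoContext.new(
#     segments: [segment],
#     speech_transcription_config: speech_config
#   )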

# Video segment.
class GoogleCloudVideointelligenceV1p2beta1VideoSegment
include Google::Apis::Core::Hashable

# Time-offset, relative to the beginning of the video,
# corresponding to the end of the segment (inclusive).
# Corresponds to the JSON property `endTimeOffset`
# @return [String]
attr_accessor :end_time_offset

# Time-offset, relative to the beginning of the video,
# corresponding to the start of the segment (inclusive).
# Corresponds to the JSON property `startTimeOffset`
# @return [String]
attr_accessor :start_time_offset

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset)
@start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset)
end
end

# Word-specific information for recognized words. Word information is only
# included in the response when certain request parameters are set, such
# as `enable_word_time_offsets`.
class GoogleCloudVideointelligenceV1p2beta1WordInfo
include Google::Apis::Core::Hashable

# Output only. The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
# correct. This field is set only for the top alternative.
# This field is not guaranteed to be accurate and users should not rely on it
# to be always provided.
# The default of 0.0 is a sentinel value indicating `confidence` was not set.
# Corresponds to the JSON property `confidence`
# @return [Float]
attr_accessor :confidence

# Time offset relative to the beginning of the audio, and
# corresponding to the end of the spoken word. This field is only set if
# `enable_word_time_offsets=true` and only in the top hypothesis. This is an
# experimental feature and the accuracy of the time offset can vary.
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time

# Output only. A distinct integer value is assigned for every speaker within
# the audio. This field specifies which one of those speakers was detected to
# have spoken this word. Value ranges from 1 up to diarization_speaker_count,
# and is only set if speaker diarization is enabled.
# Corresponds to the JSON property `speakerTag`
# @return [Fixnum]
attr_accessor :speaker_tag

# Time offset relative to the beginning of the audio, and
# corresponding to the start of the spoken word. This field is only set if
# `enable_word_time_offsets=true` and only in the top hypothesis. This is an
# experimental feature and the accuracy of the time offset can vary.
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time

# The word corresponding to this set of information.
# Corresponds to the JSON property `word`
# @return [String]
attr_accessor :word

def initialize(**args)
update!(**args)
end

# Update properties of this object
def update!(**args)
@confidence = args[:confidence] if args.key?(:confidence)
@end_time = args[:end_time] if args.key?(:end_time)
@speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
@start_time = args[:start_time] if args.key?(:start_time)
@word = args[:word] if args.key?(:word)
end
end
|
|
|
|
|
|
|
|
# Video annotation progress. Included in the `metadata`
|
|
|
|
# field of the `Operation` returned by the `GetOperation`
|
|
|
|
# call of the `google::longrunning::Operations` service.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoProgress
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Progress metadata for all videos specified in `AnnotateVideoRequest`.
|
|
|
|
# Corresponds to the JSON property `annotationProgress`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :annotation_progress
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video annotation response. Included in the `response`
|
|
|
|
# field of the `Operation` returned by the `GetOperation`
|
|
|
|
# call of the `google::longrunning::Operations` service.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1AnnotateVideoResponse
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Annotation results for all videos specified in `AnnotateVideoRequest`.
|
|
|
|
# Corresponds to the JSON property `annotationResults`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :annotation_results
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@annotation_results = args[:annotation_results] if args.key?(:annotation_results)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-11-12 00:37:27 +00:00
|
|
|
# Celebrity definition.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1Celebrity
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Textual description of additional information about the celebrity, if
|
|
|
|
# applicable.
|
|
|
|
# Corresponds to the JSON property `description`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :description
|
|
|
|
|
|
|
|
# The celebrity name.
|
|
|
|
# Corresponds to the JSON property `displayName`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :display_name
|
|
|
|
|
|
|
|
# The resource name of the celebrity. Have the format
|
|
|
|
# `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery.
|
|
|
|
# kg-mid is the id in Google knowledge graph, which is unique for the
|
|
|
|
# celebrity.
|
|
|
|
# Corresponds to the JSON property `name`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :name
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@description = args[:description] if args.key?(:description)
|
|
|
|
@display_name = args[:display_name] if args.key?(:display_name)
|
|
|
|
@name = args[:name] if args.key?(:name)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Celebrity recognition annotation per video.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# The tracks detected from the input video, including recognized celebrities
|
|
|
|
# and other detected faces in the video.
|
|
|
|
# Corresponds to the JSON property `celebrityTracks`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityTrack>]
|
|
|
|
attr_accessor :celebrity_tracks
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@celebrity_tracks = args[:celebrity_tracks] if args.key?(:celebrity_tracks)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# The annotation result of a celebrity face track. RecognizedCelebrity field
|
|
|
|
# could be empty if the face track does not have any matched celebrities.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1CelebrityTrack
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Top N match of the celebrities for the face in this track.
|
|
|
|
# Corresponds to the JSON property `celebrities`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1RecognizedCelebrity>]
|
|
|
|
attr_accessor :celebrities
|
|
|
|
|
|
|
|
# A track of an object instance.
|
|
|
|
# Corresponds to the JSON property `faceTrack`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Track]
|
|
|
|
attr_accessor :face_track
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@celebrities = args[:celebrities] if args.key?(:celebrities)
|
|
|
|
@face_track = args[:face_track] if args.key?(:face_track)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# A generic detected attribute represented by name in string format.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1DetectedAttribute
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Detected attribute confidence. Range [0, 1].
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
|
|
|
|
# A full list of supported type names will be provided in the document.
|
|
|
|
# Corresponds to the JSON property `name`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :name
|
|
|
|
|
|
|
|
# Text value of the detection result. For example, the value for "HairColor"
|
|
|
|
# can be "black", "blonde", etc.
|
|
|
|
# Corresponds to the JSON property `value`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :value
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@name = args[:name] if args.key?(:name)
|
|
|
|
@value = args[:value] if args.key?(:value)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2020-01-30 00:37:15 +00:00
|
|
|
# A generic detected landmark represented by name in string format and a 2D
|
|
|
|
# location.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1DetectedLandmark
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# The confidence score of the detected landmark. Range [0, 1].
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# The name of this landmark, i.e. left_hand, right_shoulder.
|
|
|
|
# Corresponds to the JSON property `name`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :name
|
|
|
|
|
|
|
|
# A vertex represents a 2D point in the image.
|
|
|
|
# NOTE: the normalized vertex coordinates are relative to the original image
|
|
|
|
# and range from 0 to 1.
|
|
|
|
# Corresponds to the JSON property `point`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex]
|
|
|
|
attr_accessor :point
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@name = args[:name] if args.key?(:name)
|
|
|
|
@point = args[:point] if args.key?(:point)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Detected entity from video analysis.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1Entity
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Textual description, e.g. `Fixed-gear bicycle`.
|
|
|
|
# Corresponds to the JSON property `description`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :description
|
|
|
|
|
|
|
|
# Opaque entity ID. Some IDs may be available in
|
|
|
|
# [Google Knowledge Graph Search
|
|
|
|
# API](https://developers.google.com/knowledge-graph/).
|
|
|
|
# Corresponds to the JSON property `entityId`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :entity_id
|
|
|
|
|
|
|
|
# Language code for `description` in BCP-47 format.
|
|
|
|
# Corresponds to the JSON property `languageCode`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :language_code
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@description = args[:description] if args.key?(:description)
|
|
|
|
@entity_id = args[:entity_id] if args.key?(:entity_id)
|
|
|
|
@language_code = args[:language_code] if args.key?(:language_code)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Explicit content annotation (based on per-frame visual signals only).
|
|
|
|
# If no explicit content has been detected in a frame, no annotations are
|
|
|
|
# present for that frame.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# All video frames where explicit content was detected.
|
|
|
|
# Corresponds to the JSON property `frames`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentFrame>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :frames
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@frames = args[:frames] if args.key?(:frames)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video frame level annotation results for explicit content.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1ExplicitContentFrame
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Likelihood of the pornography content..
|
|
|
|
# Corresponds to the JSON property `pornographyLikelihood`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :pornography_likelihood
|
|
|
|
|
|
|
|
# Time-offset, relative to the beginning of the video, corresponding to the
|
|
|
|
# video frame for this location.
|
|
|
|
# Corresponds to the JSON property `timeOffset`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :time_offset
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood)
|
|
|
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2020-01-23 00:37:29 +00:00
|
|
|
# Face detection annotation.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# The thumbnail of a person's face.
|
|
|
|
# Corresponds to the JSON property `thumbnail`
|
|
|
|
# NOTE: Values are automatically base64 encoded/decoded in the client library.
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :thumbnail
|
|
|
|
|
|
|
|
# The face tracks with attributes.
|
|
|
|
# Corresponds to the JSON property `tracks`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
|
|
|
|
attr_accessor :tracks
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@thumbnail = args[:thumbnail] if args.key?(:thumbnail)
|
|
|
|
@tracks = args[:tracks] if args.key?(:tracks)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Label annotation.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1LabelAnnotation
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Common categories for the detected entity.
|
|
|
|
# E.g. when the label is `Terrier` the category is likely `dog`. And in some
|
|
|
|
# cases there might be more than one categories e.g. `Terrier` could also be
|
|
|
|
# a `pet`.
|
|
|
|
# Corresponds to the JSON property `categoryEntities`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :category_entities
|
|
|
|
|
|
|
|
# Detected entity from video analysis.
|
|
|
|
# Corresponds to the JSON property `entity`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :entity
|
|
|
|
|
|
|
|
# All video frames where a label was detected.
|
|
|
|
# Corresponds to the JSON property `frames`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelFrame>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :frames
|
|
|
|
|
|
|
|
# All video segments where a label was detected.
|
|
|
|
# Corresponds to the JSON property `segments`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelSegment>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segments
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@category_entities = args[:category_entities] if args.key?(:category_entities)
|
|
|
|
@entity = args[:entity] if args.key?(:entity)
|
|
|
|
@frames = args[:frames] if args.key?(:frames)
|
|
|
|
@segments = args[:segments] if args.key?(:segments)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video frame level annotation results for label detection.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1LabelFrame
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Confidence that the label is accurate. Range: [0, 1].
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Time-offset, relative to the beginning of the video, corresponding to the
|
|
|
|
# video frame for this location.
|
|
|
|
# Corresponds to the JSON property `timeOffset`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :time_offset
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video segment level annotation results for label detection.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1LabelSegment
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Confidence that the label is accurate. Range: [0, 1].
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segment
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@segment = args[:segment] if args.key?(:segment)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# Annotation corresponding to one detected, tracked and recognized logo class.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1LogoRecognitionAnnotation
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Detected entity from video analysis.
|
|
|
|
# Corresponds to the JSON property `entity`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
|
|
|
|
attr_accessor :entity
|
|
|
|
|
|
|
|
# All video segments where the recognized logo appears. There might be
|
|
|
|
# multiple instances of the same logo class appearing in one VideoSegment.
|
|
|
|
# Corresponds to the JSON property `segments`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
|
|
|
|
attr_accessor :segments
|
|
|
|
|
|
|
|
# All logo tracks where the recognized logo appears. Each track corresponds
|
|
|
|
# to one logo instance appearing in consecutive frames.
|
|
|
|
# Corresponds to the JSON property `tracks`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
|
|
|
|
attr_accessor :tracks
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@entity = args[:entity] if args.key?(:entity)
|
|
|
|
@segments = args[:segments] if args.key?(:segments)
|
|
|
|
@tracks = args[:tracks] if args.key?(:tracks)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Normalized bounding box.
|
|
|
|
# The normalized vertex coordinates are relative to the original image.
|
|
|
|
# Range: [0, 1].
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Bottom Y coordinate.
|
|
|
|
# Corresponds to the JSON property `bottom`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :bottom
|
|
|
|
|
|
|
|
# Left X coordinate.
|
|
|
|
# Corresponds to the JSON property `left`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :left
|
|
|
|
|
|
|
|
# Right X coordinate.
|
|
|
|
# Corresponds to the JSON property `right`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :right
|
|
|
|
|
|
|
|
# Top Y coordinate.
|
|
|
|
# Corresponds to the JSON property `top`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :top
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@bottom = args[:bottom] if args.key?(:bottom)
|
|
|
|
@left = args[:left] if args.key?(:left)
|
|
|
|
@right = args[:right] if args.key?(:right)
|
|
|
|
@top = args[:top] if args.key?(:top)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
|
|
|
# Contains list of the corner points in clockwise order starting from
|
|
|
|
# top-left corner. For example, for a rectangular bounding box:
|
|
|
|
# When the text is horizontal it might look like:
|
|
|
|
# 0----1
|
|
|
|
# | |
|
|
|
|
# 3----2
|
|
|
|
# When it's clockwise rotated 180 degrees around the top-left corner it
|
|
|
|
# becomes:
|
|
|
|
# 2----3
|
|
|
|
# | |
|
|
|
|
# 1----0
|
|
|
|
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
|
|
|
# than 0, or greater than 1 due to trignometric calculations for location of
|
|
|
|
# the box.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Normalized vertices of the bounding polygon.
|
|
|
|
# Corresponds to the JSON property `vertices`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedVertex>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :vertices
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@vertices = args[:vertices] if args.key?(:vertices)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# A vertex represents a 2D point in the image.
|
|
|
|
# NOTE: the normalized vertex coordinates are relative to the original image
|
|
|
|
# and range from 0 to 1.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1NormalizedVertex
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# X coordinate.
|
|
|
|
# Corresponds to the JSON property `x`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :x
|
|
|
|
|
|
|
|
# Y coordinate.
|
|
|
|
# Corresponds to the JSON property `y`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :y
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@x = args[:x] if args.key?(:x)
|
|
|
|
@y = args[:y] if args.key?(:y)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Annotations corresponding to one tracked object.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingAnnotation
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Object category's labeling confidence of this track.
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Detected entity from video analysis.
|
|
|
|
# Corresponds to the JSON property `entity`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Entity]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :entity
|
|
|
|
|
|
|
|
# Information corresponding to all frames where this object track appears.
|
|
|
|
# Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
|
|
|
|
# messages in frames.
|
|
|
|
# Streaming mode: it can only be one ObjectTrackingFrame message in frames.
|
|
|
|
# Corresponds to the JSON property `frames`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :frames
|
|
|
|
|
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segment
|
|
|
|
|
|
|
|
# Streaming mode ONLY.
|
|
|
|
# In streaming mode, we do not know the end time of a tracked object
|
|
|
|
# before it is completed. Hence, there is no VideoSegment info returned.
|
|
|
|
# Instead, we provide a unique identifiable integer track_id so that
|
|
|
|
# the customers can correlate the results of the ongoing
|
|
|
|
# ObjectTrackAnnotation of the same track_id over time.
|
|
|
|
# Corresponds to the JSON property `trackId`
|
|
|
|
# @return [Fixnum]
|
|
|
|
attr_accessor :track_id
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@entity = args[:entity] if args.key?(:entity)
|
|
|
|
@frames = args[:frames] if args.key?(:frames)
|
|
|
|
@segment = args[:segment] if args.key?(:segment)
|
|
|
|
@track_id = args[:track_id] if args.key?(:track_id)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video frame level annotations for object detection and tracking. This field
|
|
|
|
# stores per frame location, time offset, and confidence.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1ObjectTrackingFrame
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Normalized bounding box.
|
|
|
|
# The normalized vertex coordinates are relative to the original image.
|
|
|
|
# Range: [0, 1].
|
|
|
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :normalized_bounding_box
|
|
|
|
|
|
|
|
# The timestamp of the frame in microseconds.
|
|
|
|
# Corresponds to the JSON property `timeOffset`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :time_offset
|
2019-01-31 00:36:50 +00:00
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
2019-02-22 00:36:49 +00:00
|
|
|
@normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
|
|
|
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
2019-01-31 00:36:50 +00:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2020-01-30 00:37:15 +00:00
|
|
|
# Person detection annotation per video.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1PersonDetectionAnnotation
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# The trackes that a person is detected.
|
|
|
|
# Corresponds to the JSON property `tracks`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Track>]
|
|
|
|
attr_accessor :tracks
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@tracks = args[:tracks] if args.key?(:tracks)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-11-12 00:37:27 +00:00
|
|
|
# The recognized celebrity with confidence score.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1RecognizedCelebrity
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Celebrity definition.
|
|
|
|
# Corresponds to the JSON property `celebrity`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1Celebrity]
|
|
|
|
attr_accessor :celebrity
|
|
|
|
|
|
|
|
# Recognition confidence. Range [0, 1].
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@celebrity = args[:celebrity] if args.key?(:celebrity)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Alternative hypotheses (a.k.a. n-best list).
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-06-22 00:37:44 +00:00
|
|
|
# Output only. The confidence estimate between 0.0 and 1.0. A higher number
|
2019-01-31 00:36:50 +00:00
|
|
|
# indicates an estimated greater likelihood that the recognized words are
|
2019-06-22 00:37:44 +00:00
|
|
|
# correct. This field is set only for the top alternative.
|
|
|
|
# This field is not guaranteed to be accurate and users should not rely on it
|
|
|
|
# to be always provided.
|
2019-01-31 00:36:50 +00:00
|
|
|
# The default of 0.0 is a sentinel value indicating `confidence` was not set.
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Transcript text representing the words that the user spoke.
|
|
|
|
# Corresponds to the JSON property `transcript`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :transcript
|
|
|
|
|
2019-06-22 00:37:44 +00:00
|
|
|
# Output only. A list of word-specific information for each recognized word.
|
|
|
|
# Note: When `enable_speaker_diarization` is true, you will see all the words
|
|
|
|
# from the beginning of the audio.
|
2019-01-31 00:36:50 +00:00
|
|
|
# Corresponds to the JSON property `words`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1WordInfo>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :words
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@transcript = args[:transcript] if args.key?(:transcript)
|
|
|
|
@words = args[:words] if args.key?(:words)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# A speech recognition result corresponding to a portion of the audio.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1SpeechTranscription
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# May contain one or more recognition hypotheses (up to the maximum specified
|
|
|
|
# in `max_alternatives`). These alternatives are ordered in terms of
|
|
|
|
# accuracy, with the top (first) alternative being the most probable, as
|
|
|
|
# ranked by the recognizer.
|
|
|
|
# Corresponds to the JSON property `alternatives`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1SpeechRecognitionAlternative>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :alternatives
|
|
|
|
|
2019-10-22 00:37:07 +00:00
|
|
|
# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
|
|
|
|
# language tag of
|
|
|
|
# the language in this result. This language code was detected to have the
|
|
|
|
# most likelihood of being spoken in the audio.
|
2019-01-31 00:36:50 +00:00
|
|
|
# Corresponds to the JSON property `languageCode`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :language_code
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@alternatives = args[:alternatives] if args.key?(:alternatives)
|
|
|
|
@language_code = args[:language_code] if args.key?(:language_code)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# `StreamingAnnotateVideoResponse` is the only message returned to the client
|
|
|
|
# by `StreamingAnnotateVideo`. A series of zero or more
|
|
|
|
# `StreamingAnnotateVideoResponse` messages are streamed back to the client.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1StreamingAnnotateVideoResponse
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Streaming annotation results corresponding to a portion of the video
|
|
|
|
# that is currently being processed.
|
|
|
|
# Corresponds to the JSON property `annotationResults`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :annotation_results
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# GCS URI that stores annotation results of one streaming session.
|
|
|
|
# It is a directory that can hold multiple files in JSON format.
|
|
|
|
# Example uri format:
|
|
|
|
# gs://bucket_id/object_id/cloud_project_name-session_id
|
|
|
|
# Corresponds to the JSON property `annotationResultsUri`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :annotation_results_uri
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-03-13 00:37:11 +00:00
|
|
|
# The `Status` type defines a logical error model that is suitable for
|
|
|
|
# different programming environments, including REST APIs and RPC APIs. It is
|
2019-06-12 00:37:34 +00:00
|
|
|
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
|
|
|
|
# three pieces of data: error code, error message, and error details.
|
|
|
|
# You can find out more about this error model and how to work with it in the
|
|
|
|
# [API Design Guide](https://cloud.google.com/apis/design/errors).
|
2019-02-22 00:36:49 +00:00
|
|
|
# Corresponds to the JSON property `error`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
|
|
|
|
attr_accessor :error
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@annotation_results = args[:annotation_results] if args.key?(:annotation_results)
|
|
|
|
@annotation_results_uri = args[:annotation_results_uri] if args.key?(:annotation_results_uri)
|
|
|
|
@error = args[:error] if args.key?(:error)
|
|
|
|
end
|
|
|
|
end
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Streaming annotation results corresponding to a portion of the video
|
|
|
|
# that is currently being processed.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1StreamingVideoAnnotationResults
|
2019-02-22 00:36:49 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Explicit content annotation (based on per-frame visual signals only).
|
|
|
|
# If no explicit content has been detected in a frame, no annotations are
|
|
|
|
# present for that frame.
|
|
|
|
# Corresponds to the JSON property `explicitAnnotation`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :explicit_annotation
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Label annotation results.
|
|
|
|
# Corresponds to the JSON property `labelAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :label_annotations
|
2019-01-31 00:36:50 +00:00
|
|
|
|
2019-02-22 00:36:49 +00:00
|
|
|
# Object tracking results.
|
|
|
|
# Corresponds to the JSON property `objectAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingAnnotation>]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :object_annotations
|
|
|
|
|
|
|
|
# Shot annotation results. Each shot is represented as a video segment.
|
|
|
|
# Corresponds to the JSON property `shotAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
|
2019-02-22 00:36:49 +00:00
|
|
|
attr_accessor :shot_annotations
|
2019-01-31 00:36:50 +00:00
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
2019-02-22 00:36:49 +00:00
|
|
|
@explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
|
|
|
|
@label_annotations = args[:label_annotations] if args.key?(:label_annotations)
|
|
|
|
@object_annotations = args[:object_annotations] if args.key?(:object_annotations)
|
|
|
|
@shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
|
2019-01-31 00:36:50 +00:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Annotations related to one detected OCR text snippet. This will contain the
|
|
|
|
# corresponding text, confidence value, and frame level information for each
|
|
|
|
# detection.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1TextAnnotation
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# All video segments where OCR detected text appears.
|
|
|
|
# Corresponds to the JSON property `segments`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1TextSegment>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segments
|
|
|
|
|
|
|
|
# The detected text.
|
|
|
|
# Corresponds to the JSON property `text`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :text
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@segments = args[:segments] if args.key?(:segments)
|
|
|
|
@text = args[:text] if args.key?(:text)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video frame level annotation results for text annotation (OCR).
|
|
|
|
# Contains information regarding timestamp and bounding box locations for the
|
|
|
|
# frames containing detected OCR text snippets.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1TextFrame
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Normalized bounding polygon for text (that might not be aligned with axis).
|
|
|
|
# Contains list of the corner points in clockwise order starting from
|
|
|
|
# top-left corner. For example, for a rectangular bounding box:
|
|
|
|
# When the text is horizontal it might look like:
|
|
|
|
# 0----1
|
|
|
|
# | |
|
|
|
|
# 3----2
|
|
|
|
# When it's clockwise rotated 180 degrees around the top-left corner it
|
|
|
|
# becomes:
|
|
|
|
# 2----3
|
|
|
|
# | |
|
|
|
|
# 1----0
|
|
|
|
# and the vertex order will still be (0, 1, 2, 3). Note that values can be less
|
|
|
|
# than 0, or greater than 1 due to trignometric calculations for location of
|
|
|
|
# the box.
|
|
|
|
# Corresponds to the JSON property `rotatedBoundingBox`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingPoly]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :rotated_bounding_box
|
|
|
|
|
|
|
|
# Timestamp of this frame.
|
|
|
|
# Corresponds to the JSON property `timeOffset`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :time_offset
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
|
|
|
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Video segment level annotation results for text detection.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1TextSegment
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Confidence for the track of detected text. It is calculated as the highest
|
|
|
|
# over all frames where OCR detected text appears.
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Information related to the frames where OCR detected text appears.
|
|
|
|
# Corresponds to the JSON property `frames`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1TextFrame>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :frames
|
|
|
|
|
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segment
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@frames = args[:frames] if args.key?(:frames)
|
|
|
|
@segment = args[:segment] if args.key?(:segment)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-11-01 00:37:15 +00:00
|
|
|
# For tracking related features.
|
2019-06-12 00:37:34 +00:00
|
|
|
# An object at time_offset with attributes, and located with
|
|
|
|
# normalized_bounding_box.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1TimestampedObject
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Optional. The attributes of the object in the bounding box.
|
|
|
|
# Corresponds to the JSON property `attributes`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1DetectedAttribute>]
|
|
|
|
attr_accessor :attributes
|
|
|
|
|
2020-01-30 00:37:15 +00:00
|
|
|
# Optional. The detected landmarks.
|
|
|
|
# Corresponds to the JSON property `landmarks`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1DetectedLandmark>]
|
|
|
|
attr_accessor :landmarks
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# Normalized bounding box.
|
|
|
|
# The normalized vertex coordinates are relative to the original image.
|
|
|
|
# Range: [0, 1].
|
|
|
|
# Corresponds to the JSON property `normalizedBoundingBox`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1NormalizedBoundingBox]
|
|
|
|
attr_accessor :normalized_bounding_box
|
|
|
|
|
|
|
|
# Time-offset, relative to the beginning of the video,
|
|
|
|
# corresponding to the video frame for this object.
|
|
|
|
# Corresponds to the JSON property `timeOffset`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :time_offset
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@attributes = args[:attributes] if args.key?(:attributes)
|
2020-01-30 00:37:15 +00:00
|
|
|
@landmarks = args[:landmarks] if args.key?(:landmarks)
|
2019-06-12 00:37:34 +00:00
|
|
|
@normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box)
|
|
|
|
@time_offset = args[:time_offset] if args.key?(:time_offset)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# A track of an object instance.
|
|
|
|
class GoogleCloudVideointelligenceV1p3beta1Track
|
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
|
|
|
# Optional. Attributes in the track level.
|
|
|
|
# Corresponds to the JSON property `attributes`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1DetectedAttribute>]
|
|
|
|
attr_accessor :attributes
|
|
|
|
|
|
|
|
# Optional. The confidence score of the tracked object.
|
|
|
|
# Corresponds to the JSON property `confidence`
|
|
|
|
# @return [Float]
|
|
|
|
attr_accessor :confidence
|
|
|
|
|
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
|
|
|
attr_accessor :segment
|
|
|
|
|
|
|
|
# The object with timestamp and attributes per frame in the track.
|
|
|
|
# Corresponds to the JSON property `timestampedObjects`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1TimestampedObject>]
|
|
|
|
attr_accessor :timestamped_objects
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
|
|
|
@attributes = args[:attributes] if args.key?(:attributes)
|
|
|
|
@confidence = args[:confidence] if args.key?(:confidence)
|
|
|
|
@segment = args[:segment] if args.key?(:segment)
|
|
|
|
@timestamped_objects = args[:timestamped_objects] if args.key?(:timestamped_objects)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Annotation progress for a single video.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationProgress
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# Specifies which feature is being tracked if the request contains more than
|
|
|
|
# one features.
|
|
|
|
# Corresponds to the JSON property `feature`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :feature
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Video file location in
|
|
|
|
# [Google Cloud Storage](https://cloud.google.com/storage/).
|
|
|
|
# Corresponds to the JSON property `inputUri`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :input_uri
|
|
|
|
|
|
|
|
# Approximate percentage processed thus far. Guaranteed to be
|
|
|
|
# 100 when fully processed.
|
|
|
|
# Corresponds to the JSON property `progressPercent`
|
|
|
|
# @return [Fixnum]
|
|
|
|
attr_accessor :progress_percent
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
|
|
|
attr_accessor :segment
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Time when the request was received.
|
|
|
|
# Corresponds to the JSON property `startTime`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :start_time
|
|
|
|
|
|
|
|
# Time of the most recent update.
|
|
|
|
# Corresponds to the JSON property `updateTime`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :update_time
|
|
|
|
|
|
|
|
def initialize(**args)
|
|
|
|
update!(**args)
|
|
|
|
end
|
|
|
|
|
|
|
|
# Update properties of this object
|
|
|
|
def update!(**args)
|
2019-06-12 00:37:34 +00:00
|
|
|
@feature = args[:feature] if args.key?(:feature)
|
2019-01-31 00:36:50 +00:00
|
|
|
@input_uri = args[:input_uri] if args.key?(:input_uri)
|
|
|
|
@progress_percent = args[:progress_percent] if args.key?(:progress_percent)
|
2019-06-12 00:37:34 +00:00
|
|
|
@segment = args[:segment] if args.key?(:segment)
|
2019-01-31 00:36:50 +00:00
|
|
|
@start_time = args[:start_time] if args.key?(:start_time)
|
|
|
|
@update_time = args[:update_time] if args.key?(:update_time)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
# Annotation results for a single video.
|
2019-03-13 00:37:11 +00:00
|
|
|
class GoogleCloudVideointelligenceV1p3beta1VideoAnnotationResults
|
2019-01-31 00:36:50 +00:00
|
|
|
include Google::Apis::Core::Hashable
|
|
|
|
|
2019-11-12 00:37:27 +00:00
|
|
|
# Celebrity recognition annotation per video.
|
|
|
|
# Corresponds to the JSON property `celebrityRecognitionAnnotations`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1CelebrityRecognitionAnnotation]
|
|
|
|
attr_accessor :celebrity_recognition_annotations
|
|
|
|
|
2019-03-13 00:37:11 +00:00
|
|
|
# The `Status` type defines a logical error model that is suitable for
|
|
|
|
# different programming environments, including REST APIs and RPC APIs. It is
|
2019-06-12 00:37:34 +00:00
|
|
|
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
|
|
|
|
# three pieces of data: error code, error message, and error details.
|
|
|
|
# You can find out more about this error model and how to work with it in the
|
|
|
|
# [API Design Guide](https://cloud.google.com/apis/design/errors).
|
2019-01-31 00:36:50 +00:00
|
|
|
# Corresponds to the JSON property `error`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
|
|
|
|
attr_accessor :error
|
|
|
|
|
|
|
|
# Explicit content annotation (based on per-frame visual signals only).
|
|
|
|
# If no explicit content has been detected in a frame, no annotations are
|
|
|
|
# present for that frame.
|
|
|
|
# Corresponds to the JSON property `explicitAnnotation`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ExplicitContentAnnotation]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :explicit_annotation
|
|
|
|
|
2020-01-23 00:37:29 +00:00
|
|
|
# Face detection annotations.
|
|
|
|
# Corresponds to the JSON property `faceDetectionAnnotations`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1FaceDetectionAnnotation>]
|
|
|
|
attr_accessor :face_detection_annotations
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Label annotations on frame level.
|
|
|
|
# There is exactly one element for each unique label.
|
|
|
|
# Corresponds to the JSON property `frameLabelAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :frame_label_annotations
|
|
|
|
|
|
|
|
# Video file location in
|
|
|
|
# [Google Cloud Storage](https://cloud.google.com/storage/).
|
|
|
|
# Corresponds to the JSON property `inputUri`
|
|
|
|
# @return [String]
|
|
|
|
attr_accessor :input_uri
|
|
|
|
|
2019-06-12 00:37:34 +00:00
|
|
|
# Annotations for list of logos detected, tracked and recognized in video.
|
|
|
|
# Corresponds to the JSON property `logoRecognitionAnnotations`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LogoRecognitionAnnotation>]
|
|
|
|
attr_accessor :logo_recognition_annotations
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Annotations for list of objects detected and tracked in video.
|
|
|
|
# Corresponds to the JSON property `objectAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1ObjectTrackingAnnotation>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :object_annotations
|
|
|
|
|
2020-01-30 00:37:15 +00:00
|
|
|
# Person detection annotations.
|
|
|
|
# Corresponds to the JSON property `personDetectionAnnotations`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1PersonDetectionAnnotation>]
|
|
|
|
attr_accessor :person_detection_annotations
|
|
|
|
|
2019-07-16 00:37:29 +00:00
|
|
|
# Video segment.
|
|
|
|
# Corresponds to the JSON property `segment`
|
|
|
|
# @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment]
|
|
|
|
attr_accessor :segment
|
|
|
|
|
2019-06-22 00:37:44 +00:00
|
|
|
# Topical label annotations on video level or user specified segment level.
|
2019-01-31 00:36:50 +00:00
|
|
|
# There is exactly one element for each unique label.
|
|
|
|
# Corresponds to the JSON property `segmentLabelAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :segment_label_annotations
|
|
|
|
|
2019-09-12 00:38:48 +00:00
|
|
|
# Presence label annotations on video level or user specified segment level.
|
2019-09-23 00:37:58 +00:00
|
|
|
# There is exactly one element for each unique label. Compared to the
|
|
|
|
# existing topical `segment_label_annotations`, this field presents more
|
|
|
|
# fine-grained, segment-level labels detected in video content and is made
|
|
|
|
# available only when the client sets `LabelDetectionConfig.model` to
|
|
|
|
# "builtin/latest" in the request.
|
2019-09-12 00:38:48 +00:00
|
|
|
# Corresponds to the JSON property `segmentPresenceLabelAnnotations`
|
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
|
|
|
|
attr_accessor :segment_presence_label_annotations
|
|
|
|
|
2019-01-31 00:36:50 +00:00
|
|
|
# Shot annotations. Each shot is represented as a video segment.
|
|
|
|
# Corresponds to the JSON property `shotAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1VideoSegment>]
|
2019-01-31 00:36:50 +00:00
|
|
|
attr_accessor :shot_annotations
|
|
|
|
|
2019-06-22 00:37:44 +00:00
|
|
|
# Topical label annotations on shot level.
|
2019-01-31 00:36:50 +00:00
|
|
|
# There is exactly one element for each unique label.
|
|
|
|
# Corresponds to the JSON property `shotLabelAnnotations`
|
2019-03-13 00:37:11 +00:00
|
|
|
# @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
|
2019-01-31 00:36:50 +00:00
|
|
|
        attr_accessor :shot_label_annotations

        # Presence label annotations on shot level. There is exactly one element for
        # each unique label. Compared to the existing topical
        # `shot_label_annotations`, this field presents more fine-grained, shot-level
        # labels detected in video content and is made available only when the client
        # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
        # Corresponds to the JSON property `shotPresenceLabelAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1LabelAnnotation>]
        attr_accessor :shot_presence_label_annotations

        # Speech transcription.
        # Corresponds to the JSON property `speechTranscriptions`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1SpeechTranscription>]
        attr_accessor :speech_transcriptions

        # OCR text detection and tracking.
        # Annotations for list of detected text snippets. Each will have list of
        # frame information associated with it.
        # Corresponds to the JSON property `textAnnotations`
        # @return [Array<Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p3beta1TextAnnotation>]
        attr_accessor :text_annotations

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @celebrity_recognition_annotations = args[:celebrity_recognition_annotations] if args.key?(:celebrity_recognition_annotations)
          @error = args[:error] if args.key?(:error)
          @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation)
          @face_detection_annotations = args[:face_detection_annotations] if args.key?(:face_detection_annotations)
          @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations)
          @input_uri = args[:input_uri] if args.key?(:input_uri)
          @logo_recognition_annotations = args[:logo_recognition_annotations] if args.key?(:logo_recognition_annotations)
          @object_annotations = args[:object_annotations] if args.key?(:object_annotations)
          @person_detection_annotations = args[:person_detection_annotations] if args.key?(:person_detection_annotations)
          @segment = args[:segment] if args.key?(:segment)
          @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations)
          @segment_presence_label_annotations = args[:segment_presence_label_annotations] if args.key?(:segment_presence_label_annotations)
          @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations)
          @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations)
          @shot_presence_label_annotations = args[:shot_presence_label_annotations] if args.key?(:shot_presence_label_annotations)
          @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions)
          @text_annotations = args[:text_annotations] if args.key?(:text_annotations)
        end
      end
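
      # Illustrative usage sketch (not part of the generated surface): given
      # `results`, an instance of the annotation results class above, fields are
      # set via snake_case keyword arguments and `update!` only assigns keys
      # that are actually present in `args`. The URI below is a hypothetical
      # placeholder.
      #
      #   results.update!(
      #     input_uri: 'gs://example-bucket/example-video.mp4',
      #     text_annotations: []
      #   )
      #   results.update!(speech_transcriptions: [])  # leaves input_uri untouched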

      # Video segment.
      class GoogleCloudVideointelligenceV1p3beta1VideoSegment
        include Google::Apis::Core::Hashable

        # Time-offset, relative to the beginning of the video,
        # corresponding to the end of the segment (inclusive).
        # Corresponds to the JSON property `endTimeOffset`
        # @return [String]
        attr_accessor :end_time_offset

        # Time-offset, relative to the beginning of the video,
        # corresponding to the start of the segment (inclusive).
        # Corresponds to the JSON property `startTimeOffset`
        # @return [String]
        attr_accessor :start_time_offset

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset)
          @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset)
        end
      end
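
      # Illustrative usage sketch (not generated code): a segment covering the
      # first two minutes of a video. Duration-style strings such as "0s" and
      # "120s" follow the JSON encoding of protobuf durations and are an
      # assumption here.
      #
      #   segment = GoogleCloudVideointelligenceV1p3beta1VideoSegment.new(
      #     start_time_offset: '0s',
      #     end_time_offset: '120s'
      #   )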

      # Word-specific information for recognized words. Word information is only
      # included in the response when certain request parameters are set, such
      # as `enable_word_time_offsets`.
      class GoogleCloudVideointelligenceV1p3beta1WordInfo
        include Google::Apis::Core::Hashable

        # Output only. The confidence estimate between 0.0 and 1.0. A higher number
        # indicates an estimated greater likelihood that the recognized words are
        # correct. This field is set only for the top alternative.
        # This field is not guaranteed to be accurate and users should not rely on it
        # to be always provided.
        # The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # Corresponds to the JSON property `confidence`
        # @return [Float]
        attr_accessor :confidence

        # Time offset relative to the beginning of the audio, and
        # corresponding to the end of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `endTime`
        # @return [String]
        attr_accessor :end_time

        # Output only. A distinct integer value is assigned for every speaker within
        # the audio. This field specifies which one of those speakers was detected to
        # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
        # and is only set if speaker diarization is enabled.
        # Corresponds to the JSON property `speakerTag`
        # @return [Fixnum]
        attr_accessor :speaker_tag

        # Time offset relative to the beginning of the audio, and
        # corresponding to the start of the spoken word. This field is only set if
        # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        # experimental feature and the accuracy of the time offset can vary.
        # Corresponds to the JSON property `startTime`
        # @return [String]
        attr_accessor :start_time

        # The word corresponding to this set of information.
        # Corresponds to the JSON property `word`
        # @return [String]
        attr_accessor :word

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @confidence = args[:confidence] if args.key?(:confidence)
          @end_time = args[:end_time] if args.key?(:end_time)
          @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag)
          @start_time = args[:start_time] if args.key?(:start_time)
          @word = args[:word] if args.key?(:word)
        end
      end
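
      # Illustrative usage sketch (not generated code): printing per-word timing
      # for a hypothetical array of WordInfo objects named `words`; offsets are
      # duration-style strings as documented above.
      #
      #   words.each do |info|
      #     puts "#{info.word}: #{info.start_time} -> #{info.end_time} " \
      #          "(speaker #{info.speaker_tag}, confidence #{info.confidence})"
      #   end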

      # This resource represents a long-running operation that is the result of a
      # network API call.
      class GoogleLongrunningOperation
        include Google::Apis::Core::Hashable

        # If the value is `false`, it means the operation is still in progress.
        # If `true`, the operation is completed, and either `error` or `response` is
        # available.
        # Corresponds to the JSON property `done`
        # @return [Boolean]
        attr_accessor :done
        alias_method :done?, :done

        # The `Status` type defines a logical error model that is suitable for
        # different programming environments, including REST APIs and RPC APIs. It is
        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
        # three pieces of data: error code, error message, and error details.
        # You can find out more about this error model and how to work with it in the
        # [API Design Guide](https://cloud.google.com/apis/design/errors).
        # Corresponds to the JSON property `error`
        # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus]
        attr_accessor :error

        # Service-specific metadata associated with the operation. It typically
        # contains progress information and common metadata such as create time.
        # Some services might not provide such metadata. Any method that returns a
        # long-running operation should document the metadata type, if any.
        # Corresponds to the JSON property `metadata`
        # @return [Hash<String,Object>]
        attr_accessor :metadata

        # The server-assigned name, which is only unique within the same service that
        # originally returns it. If you use the default HTTP mapping, the
        # `name` should be a resource name ending with `operations/`unique_id``.
        # Corresponds to the JSON property `name`
        # @return [String]
        attr_accessor :name

        # The normal response of the operation in case of success. If the original
        # method returns no data on success, such as `Delete`, the response is
        # `google.protobuf.Empty`. If the original method is standard
        # `Get`/`Create`/`Update`, the response should be the resource. For other
        # methods, the response should have the type `XxxResponse`, where `Xxx`
        # is the original method name. For example, if the original method name
        # is `TakeSnapshot()`, the inferred response type is
        # `TakeSnapshotResponse`.
        # Corresponds to the JSON property `response`
        # @return [Hash<String,Object>]
        attr_accessor :response

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @done = args[:done] if args.key?(:done)
          @error = args[:error] if args.key?(:error)
          @metadata = args[:metadata] if args.key?(:metadata)
          @name = args[:name] if args.key?(:name)
          @response = args[:response] if args.key?(:response)
        end
      end
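
      # Illustrative usage sketch (not generated code): inspecting a hypothetical
      # `operation` instance of the class above once it has been fetched.
      #
      #   if operation.done?
      #     if operation.error
      #       warn "Failed: #{operation.error.code} #{operation.error.message}"
      #     else
      #       result = operation.response  # Hash of response fields
      #     end
      #   end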

      # The `Status` type defines a logical error model that is suitable for
      # different programming environments, including REST APIs and RPC APIs. It is
      # used by [gRPC](https://github.com/grpc). Each `Status` message contains
      # three pieces of data: error code, error message, and error details.
      # You can find out more about this error model and how to work with it in the
      # [API Design Guide](https://cloud.google.com/apis/design/errors).
      class GoogleRpcStatus
        include Google::Apis::Core::Hashable

        # The status code, which should be an enum value of google.rpc.Code.
        # Corresponds to the JSON property `code`
        # @return [Fixnum]
        attr_accessor :code

        # A list of messages that carry the error details. There is a common set of
        # message types for APIs to use.
        # Corresponds to the JSON property `details`
        # @return [Array<Hash<String,Object>>]
        attr_accessor :details

        # A developer-facing error message, which should be in English. Any
        # user-facing error message should be localized and sent in the
        # google.rpc.Status.details field, or localized by the client.
        # Corresponds to the JSON property `message`
        # @return [String]
        attr_accessor :message

        def initialize(**args)
          update!(**args)
        end

        # Update properties of this object
        def update!(**args)
          @code = args[:code] if args.key?(:code)
          @details = args[:details] if args.key?(:details)
          @message = args[:message] if args.key?(:message)
        end
      end
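
      # Illustrative usage sketch (not generated code): a Status as it might be
      # populated on a failed request. The code value 3 maps to INVALID_ARGUMENT
      # in google.rpc.Code; the message text is hypothetical.
      #
      #   status = GoogleRpcStatus.new(
      #     code: 3,
      #     message: 'Unsupported input file format.',
      #     details: []
      #   )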
    end
  end
end