From 131275fc04332b00984084edef3b93ed102a28fb Mon Sep 17 00:00:00 2001 From: Google APIs Date: Fri, 22 Feb 2019 00:36:49 +0000 Subject: [PATCH] Autogenerated update (2019-02-22) Update: - appengine_v1 - appengine_v1alpha - appengine_v1beta - cloudfunctions_v1 - cloudfunctions_v1beta2 - cloudresourcemanager_v1 - cloudresourcemanager_v1beta1 - cloudsearch_v1 - container_v1 - dialogflow_v2 - iam_v1 - spanner_v1 - tagmanager_v1 - tagmanager_v2 - testing_v1 - videointelligence_v1 - videointelligence_v1beta2 - videointelligence_v1p1beta1 - videointelligence_v1p2beta1 --- api_names_out.yaml | 976 +++++++++ generated/google/apis/appengine_v1.rb | 2 +- generated/google/apis/appengine_v1/classes.rb | 99 +- .../apis/appengine_v1/representations.rb | 35 - generated/google/apis/appengine_v1alpha.rb | 2 +- .../google/apis/appengine_v1alpha/classes.rb | 97 - .../apis/appengine_v1alpha/representations.rb | 35 - generated/google/apis/appengine_v1beta.rb | 2 +- .../google/apis/appengine_v1beta/classes.rb | 99 +- .../apis/appengine_v1beta/representations.rb | 35 - generated/google/apis/cloudfunctions_v1.rb | 2 +- .../google/apis/cloudfunctions_v1/classes.rb | 3 +- .../google/apis/cloudfunctions_v1/service.rb | 10 +- .../google/apis/cloudfunctions_v1beta2.rb | 2 +- .../apis/cloudfunctions_v1beta2/classes.rb | 3 +- .../apis/cloudfunctions_v1beta2/service.rb | 6 + .../google/apis/cloudresourcemanager_v1.rb | 2 +- .../apis/cloudresourcemanager_v1/classes.rb | 4 +- .../apis/cloudresourcemanager_v1beta1.rb | 2 +- .../cloudresourcemanager_v1beta1/classes.rb | 4 +- generated/google/apis/cloudsearch_v1.rb | 2 +- .../google/apis/cloudsearch_v1/classes.rb | 45 +- .../apis/cloudsearch_v1/representations.rb | 15 + generated/google/apis/container_v1.rb | 2 +- generated/google/apis/container_v1/classes.rb | 1 + generated/google/apis/container_v1/service.rb | 8 +- generated/google/apis/dialogflow_v2.rb | 2 +- .../google/apis/dialogflow_v2/classes.rb | 68 +- generated/google/apis/iam_v1.rb | 2 +- 
generated/google/apis/iam_v1/classes.rb | 51 + .../google/apis/iam_v1/representations.rb | 27 + generated/google/apis/iam_v1/service.rb | 36 + generated/google/apis/spanner_v1.rb | 2 +- generated/google/apis/spanner_v1/service.rb | 4 +- generated/google/apis/tagmanager_v1.rb | 2 +- .../google/apis/tagmanager_v1/service.rb | 46 - generated/google/apis/tagmanager_v2.rb | 2 +- .../google/apis/tagmanager_v2/classes.rb | 489 ++--- .../apis/tagmanager_v2/representations.rb | 173 +- .../google/apis/tagmanager_v2/service.rb | 400 ++-- generated/google/apis/testing_v1.rb | 2 +- generated/google/apis/videointelligence_v1.rb | 5 +- .../apis/videointelligence_v1/classes.rb | 1821 +++++++++++++++++ .../videointelligence_v1/representations.rb | 797 ++++++++ .../apis/videointelligence_v1/service.rb | 3 +- .../google/apis/videointelligence_v1beta2.rb | 5 +- .../apis/videointelligence_v1beta2/classes.rb | 1821 +++++++++++++++++ .../representations.rb | 797 ++++++++ .../apis/videointelligence_v1beta2/service.rb | 3 +- .../apis/videointelligence_v1p1beta1.rb | 5 +- .../videointelligence_v1p1beta1/classes.rb | 1821 +++++++++++++++++ .../representations.rb | 797 ++++++++ .../videointelligence_v1p1beta1/service.rb | 3 +- .../apis/videointelligence_v1p2beta1.rb | 5 +- .../videointelligence_v1p2beta1/classes.rb | 1793 ++++++++++++++++ .../representations.rb | 782 +++++++ .../videointelligence_v1p2beta1/service.rb | 3 +- 57 files changed, 12164 insertions(+), 1096 deletions(-) diff --git a/api_names_out.yaml b/api_names_out.yaml index 0dd7a361a..14451e822 100644 --- a/api_names_out.yaml +++ b/api_names_out.yaml @@ -17771,9 +17771,12 @@ "/cloudsearch:v1/HtmlValues": html_values "/cloudsearch:v1/HtmlValues/values": values "/cloudsearch:v1/HtmlValues/values/value": value +"/cloudsearch:v1/IndexItemOptions": index_item_options +"/cloudsearch:v1/IndexItemOptions/allowUnknownGsuitePrincipals": allow_unknown_gsuite_principals "/cloudsearch:v1/IndexItemRequest": index_item_request 
"/cloudsearch:v1/IndexItemRequest/connectorName": connector_name "/cloudsearch:v1/IndexItemRequest/debugOptions": debug_options +"/cloudsearch:v1/IndexItemRequest/indexItemOptions": index_item_options "/cloudsearch:v1/IndexItemRequest/item": item "/cloudsearch:v1/IndexItemRequest/mode": mode "/cloudsearch:v1/IntegerOperatorOptions": integer_operator_options @@ -78472,6 +78475,7 @@ "/iam:v1/Role/stage": stage "/iam:v1/Role/title": title "/iam:v1/ServiceAccount": service_account +"/iam:v1/ServiceAccount/disabled": disabled "/iam:v1/ServiceAccount/displayName": display_name "/iam:v1/ServiceAccount/email": email "/iam:v1/ServiceAccount/etag": etag @@ -78508,6 +78512,9 @@ "/iam:v1/TestIamPermissionsResponse/permissions/permission": permission "/iam:v1/UndeleteRoleRequest": undelete_role_request "/iam:v1/UndeleteRoleRequest/etag": etag +"/iam:v1/UndeleteServiceAccountRequest": undelete_service_account_request +"/iam:v1/UndeleteServiceAccountResponse": undelete_service_account_response +"/iam:v1/UndeleteServiceAccountResponse/restoredAccount": restored_account "/iam:v1/fields": fields "/iam:v1/iam.iamPolicies.lintPolicy": lint_iam_policy_policy "/iam:v1/iam.iamPolicies.queryAuditableServices": query_iam_policy_auditable_services @@ -78580,6 +78587,8 @@ "/iam:v1/iam.projects.serviceAccounts.signJwt/name": name "/iam:v1/iam.projects.serviceAccounts.testIamPermissions": test_service_account_iam_permissions "/iam:v1/iam.projects.serviceAccounts.testIamPermissions/resource": resource +"/iam:v1/iam.projects.serviceAccounts.undelete": undelete_service_account +"/iam:v1/iam.projects.serviceAccounts.undelete/name": name "/iam:v1/iam.projects.serviceAccounts.update": update_project_service_account "/iam:v1/iam.projects.serviceAccounts.update/name": name "/iam:v1/iam.roles.get": get_role @@ -99060,6 +99069,8 @@ "/tagmanager:v2/ContainerVersion/container": container "/tagmanager:v2/ContainerVersion/containerId": container_id "/tagmanager:v2/ContainerVersion/containerVersionId": 
container_version_id +"/tagmanager:v2/ContainerVersion/customTemplate": custom_template +"/tagmanager:v2/ContainerVersion/customTemplate/custom_template": custom_template "/tagmanager:v2/ContainerVersion/deleted": deleted "/tagmanager:v2/ContainerVersion/description": description "/tagmanager:v2/ContainerVersion/fingerprint": fingerprint @@ -99082,6 +99093,7 @@ "/tagmanager:v2/ContainerVersionHeader/containerVersionId": container_version_id "/tagmanager:v2/ContainerVersionHeader/deleted": deleted "/tagmanager:v2/ContainerVersionHeader/name": name +"/tagmanager:v2/ContainerVersionHeader/numCustomTemplates": num_custom_templates "/tagmanager:v2/ContainerVersionHeader/numMacros": num_macros "/tagmanager:v2/ContainerVersionHeader/numRules": num_rules "/tagmanager:v2/ContainerVersionHeader/numTags": num_tags @@ -99104,6 +99116,16 @@ "/tagmanager:v2/CreateWorkspaceProposalRequest/initialComment": initial_comment "/tagmanager:v2/CreateWorkspaceProposalRequest/reviewers": reviewers "/tagmanager:v2/CreateWorkspaceProposalRequest/reviewers/reviewer": reviewer +"/tagmanager:v2/CustomTemplate": custom_template +"/tagmanager:v2/CustomTemplate/accountId": account_id +"/tagmanager:v2/CustomTemplate/containerId": container_id +"/tagmanager:v2/CustomTemplate/fingerprint": fingerprint +"/tagmanager:v2/CustomTemplate/name": name +"/tagmanager:v2/CustomTemplate/path": path +"/tagmanager:v2/CustomTemplate/tagManagerUrl": tag_manager_url +"/tagmanager:v2/CustomTemplate/templateData": template_data +"/tagmanager:v2/CustomTemplate/templateId": template_id +"/tagmanager:v2/CustomTemplate/workspaceId": workspace_id "/tagmanager:v2/Entity": entity "/tagmanager:v2/Entity/changeStatus": change_status "/tagmanager:v2/Entity/folder": folder @@ -99193,6 +99215,10 @@ "/tagmanager:v2/ListWorkspacesResponse/nextPageToken": next_page_token "/tagmanager:v2/ListWorkspacesResponse/workspace": workspace "/tagmanager:v2/ListWorkspacesResponse/workspace/workspace": workspace 
+"/tagmanager:v2/ListZonesResponse": list_zones_response +"/tagmanager:v2/ListZonesResponse/nextPageToken": next_page_token +"/tagmanager:v2/ListZonesResponse/zone": zone +"/tagmanager:v2/ListZonesResponse/zone/zone": zone "/tagmanager:v2/MergeConflict": merge_conflict "/tagmanager:v2/MergeConflict/entityInBaseVersion": entity_in_base_version "/tagmanager:v2/MergeConflict/entityInWorkspace": entity_in_workspace @@ -99221,6 +99247,8 @@ "/tagmanager:v2/RevertTriggerResponse/trigger": trigger "/tagmanager:v2/RevertVariableResponse": revert_variable_response "/tagmanager:v2/RevertVariableResponse/variable": variable +"/tagmanager:v2/RevertZoneResponse": revert_zone_response +"/tagmanager:v2/RevertZoneResponse/zone": zone "/tagmanager:v2/SetupTag": setup_tag "/tagmanager:v2/SetupTag/stopOnSetupFailure": stop_on_setup_failure "/tagmanager:v2/SetupTag/tagName": tag_name @@ -99327,6 +99355,7 @@ "/tagmanager:v2/Variable/enablingTriggerId": enabling_trigger_id "/tagmanager:v2/Variable/enablingTriggerId/enabling_trigger_id": enabling_trigger_id "/tagmanager:v2/Variable/fingerprint": fingerprint +"/tagmanager:v2/Variable/formatValue": format_value "/tagmanager:v2/Variable/name": name "/tagmanager:v2/Variable/notes": notes "/tagmanager:v2/Variable/parameter": parameter @@ -99339,6 +99368,12 @@ "/tagmanager:v2/Variable/type": type "/tagmanager:v2/Variable/variableId": variable_id "/tagmanager:v2/Variable/workspaceId": workspace_id +"/tagmanager:v2/VariableFormatValue": variable_format_value +"/tagmanager:v2/VariableFormatValue/caseConversionType": case_conversion_type +"/tagmanager:v2/VariableFormatValue/convertFalseToValue": convert_false_to_value +"/tagmanager:v2/VariableFormatValue/convertNullToValue": convert_null_to_value +"/tagmanager:v2/VariableFormatValue/convertTrueToValue": convert_true_to_value +"/tagmanager:v2/VariableFormatValue/convertUndefinedToValue": convert_undefined_to_value "/tagmanager:v2/Workspace": workspace "/tagmanager:v2/Workspace/accountId": account_id 
"/tagmanager:v2/Workspace/containerId": container_id @@ -99564,6 +99599,21 @@ "/tagmanager:v2/tagmanager.accounts.containers.workspaces.variables.update": update_account_container_workspace_variable "/tagmanager:v2/tagmanager.accounts.containers.workspaces.variables.update/fingerprint": fingerprint "/tagmanager:v2/tagmanager.accounts.containers.workspaces.variables.update/path": path +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.create": create_account_container_workspace_zone +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.create/parent": parent +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.delete": delete_account_container_workspace_zone +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.delete/path": path +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.get": get_account_container_workspace_zone +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.get/path": path +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.list": list_account_container_workspace_zones +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.list/pageToken": page_token +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.list/parent": parent +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.revert": revert_account_container_workspace_zone +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.revert/fingerprint": fingerprint +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.revert/path": path +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.update": update_account_container_workspace_zone +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.update/fingerprint": fingerprint +"/tagmanager:v2/tagmanager.accounts.containers.workspaces.zones.update/path": path "/tagmanager:v2/tagmanager.accounts.get": get_account "/tagmanager:v2/tagmanager.accounts.get/path": path 
"/tagmanager:v2/tagmanager.accounts.list": list_accounts @@ -101636,6 +101686,27 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1_LabelSegment": google_cloud_videointelligence_v1_label_segment "/videointelligence:v1/GoogleCloudVideointelligenceV1_LabelSegment/confidence": confidence "/videointelligence:v1/GoogleCloudVideointelligenceV1_LabelSegment/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox": google_cloud_videointelligence_v1_normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/left": left +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/right": right +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/top": top +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly": google_cloud_videointelligence_v1_normalized_bounding_poly +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedVertex": google_cloud_videointelligence_v1_normalized_vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedVertex/x": x +"/videointelligence:v1/GoogleCloudVideointelligenceV1_NormalizedVertex/y": y +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1_object_tracking_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames": frames 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame": google_cloud_videointelligence_v1_object_tracking_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1_ShotChangeDetectionConfig": google_cloud_videointelligence_v1_shot_change_detection_config "/videointelligence:v1/GoogleCloudVideointelligenceV1_ShotChangeDetectionConfig/model": model "/videointelligence:v1/GoogleCloudVideointelligenceV1_SpeechContext": google_cloud_videointelligence_v1_speech_context @@ -101662,6 +101733,21 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1_SpeechTranscriptionConfig/maxAlternatives": max_alternatives "/videointelligence:v1/GoogleCloudVideointelligenceV1_SpeechTranscriptionConfig/speechContexts": speech_contexts "/videointelligence:v1/GoogleCloudVideointelligenceV1_SpeechTranscriptionConfig/speechContexts/speech_context": speech_context +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextAnnotation": google_cloud_videointelligence_v1_text_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextAnnotation/segments": segments +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextAnnotation/segments/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextAnnotation/text": text +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextDetectionConfig": google_cloud_videointelligence_v1_text_detection_config 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextDetectionConfig/languageHints": language_hints +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextDetectionConfig/languageHints/language_hint": language_hint +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextFrame": google_cloud_videointelligence_v1_text_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextSegment": google_cloud_videointelligence_v1_text_segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextSegment/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextSegment/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextSegment/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1_TextSegment/segment": segment "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress": google_cloud_videointelligence_v1_video_annotation_progress "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/progressPercent": progress_percent @@ -101673,6 +101759,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation 
"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation": segment_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/shotAnnotations": shot_annotations @@ -101681,6 +101769,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext": google_cloud_videointelligence_v1_video_context "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/explicitContentDetectionConfig": explicit_content_detection_config "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/labelDetectionConfig": label_detection_config @@ -101688,6 +101778,7 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/segments/segment": segment "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/shotChangeDetectionConfig": shot_change_detection_config "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/speechTranscriptionConfig": speech_transcription_config +"/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoContext/textDetectionConfig": text_detection_config "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoSegment": 
google_cloud_videointelligence_v1_video_segment "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1_VideoSegment/startTimeOffset": start_time_offset @@ -101766,6 +101857,27 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_LabelSegment": google_cloud_videointelligence_v1beta2_label_segment "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_LabelSegment/confidence": confidence "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_LabelSegment/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": google_cloud_videointelligence_v1beta2_normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/left": left +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/right": right +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/top": top +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly": google_cloud_videointelligence_v1beta2_normalized_bounding_poly +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex": google_cloud_videointelligence_v1beta2_normalized_vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/x": x +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/y": y +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation": google_cloud_videointelligence_v1beta2_object_tracking_annotation 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame": google_cloud_videointelligence_v1beta2_object_tracking_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative": google_cloud_videointelligence_v1beta2_speech_recognition_alternative "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/transcript": transcript @@ -101775,6 +101887,18 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives": alternatives "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/languageCode": language_code +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextAnnotation": google_cloud_videointelligence_v1beta2_text_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments": segments 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/text": text +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextFrame": google_cloud_videointelligence_v1beta2_text_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextSegment": google_cloud_videointelligence_v1beta2_text_segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextSegment/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_TextSegment/segment": segment "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": google_cloud_videointelligence_v1beta2_video_annotation_progress "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/progressPercent": progress_percent @@ -101786,6 +101910,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations": object_annotations 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -101795,6 +101921,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoSegment": google_cloud_videointelligence_v1beta2_video_segment "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1beta2_VideoSegment/startTimeOffset": start_time_offset @@ -101857,6 +101985,22 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/left": left "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/right": right "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v1p1beta1_normalized_bounding_poly 
+"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex": google_cloud_videointelligence_v1p1beta1_normalized_vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/x": x +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/y": y +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1p1beta1_object_tracking_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame": google_cloud_videointelligence_v1p1beta1_object_tracking_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1p1beta1_speech_recognition_alternative "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/confidence": confidence 
"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/transcript": transcript @@ -101866,6 +102010,18 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation": google_cloud_videointelligence_v1p1beta1_text_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments": segments +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/text": text +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextFrame": google_cloud_videointelligence_v1p1beta1_text_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextSegment": google_cloud_videointelligence_v1p1beta1_text_segment +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/segment": segment "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": google_cloud_videointelligence_v1p1beta1_video_annotation_progress "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/inputUri": 
input_uri "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/progressPercent": progress_percent @@ -101880,6 +102036,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -101889,6 +102047,8 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment": google_cloud_videointelligence_v1p1beta1_video_segment 
"/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/startTimeOffset": start_time_offset @@ -102006,6 +102166,123 @@ "/videointelligence:v1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/speakerTag": speaker_tag "/videointelligence:v1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/startTime": start_time "/videointelligence:v1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/word": word +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress": google_cloud_videointelligence_v2beta1_annotate_video_progress +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress": annotation_progress +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress/annotation_progress": annotation_progress +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse": google_cloud_videointelligence_v2beta1_annotate_video_response +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults/annotation_result": annotation_result +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_Entity": google_cloud_videointelligence_v2beta1_entity +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_Entity/description": description +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_Entity/entityId": entity_id +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_Entity/languageCode": language_code +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation": google_cloud_videointelligence_v2beta1_explicit_content_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames": frames 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame": google_cloud_videointelligence_v2beta1_explicit_content_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/pornographyLikelihood": pornography_likelihood +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation": google_cloud_videointelligence_v2beta1_label_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities": category_entities +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities/category_entity": category_entity +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/entity": entity +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments": segments +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelFrame": google_cloud_videointelligence_v2beta1_label_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelFrame/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelSegment": google_cloud_videointelligence_v2beta1_label_segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelSegment/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_LabelSegment/segment": segment 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox": google_cloud_videointelligence_v2beta1_normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/right": right +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v2beta1_normalized_bounding_poly +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex": google_cloud_videointelligence_v2beta1_normalized_vertex +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/x": x +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/y": y +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v2beta1_object_tracking_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/trackId": track_id 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame": google_cloud_videointelligence_v2beta1_object_tracking_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v2beta1_speech_recognition_alternative +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/transcript": transcript +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words": words +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words/word": word +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription": google_cloud_videointelligence_v2beta1_speech_transcription +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives": alternatives +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives/alternative": alternative +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse": google_cloud_videointelligence_v2beta1_streaming_annotate_video_response +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResultsUri": annotation_results_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/error": error 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults": google_cloud_videointelligence_v2beta1_streaming_video_annotation_results +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations": label_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations/label_annotation": label_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations/object_annotation": object_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextAnnotation": google_cloud_videointelligence_v2beta1_text_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments": segments +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/text": text +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextFrame": google_cloud_videointelligence_v2beta1_text_frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextSegment": google_cloud_videointelligence_v2beta1_text_segment 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextSegment/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames": frames +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames/frame": frame +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_TextSegment/segment": segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress": google_cloud_videointelligence_v2beta1_video_annotation_progress +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/inputUri": input_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/progressPercent": progress_percent +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/startTime": start_time +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/updateTime": update_time +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults": google_cloud_videointelligence_v2beta1_video_annotation_results +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/error": error +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations +? "/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" +: segment_label_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations": shot_label_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoSegment": google_cloud_videointelligence_v2beta1_video_segment +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoSegment/endTimeOffset": end_time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_VideoSegment/startTimeOffset": start_time_offset +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo": google_cloud_videointelligence_v2beta1_word_info +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo/confidence": confidence +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo/endTime": end_time 
+"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo/speakerTag": speaker_tag +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo/startTime": start_time +"/videointelligence:v1/GoogleCloudVideointelligenceV2beta1_WordInfo/word": word "/videointelligence:v1/GoogleLongrunning_CancelOperationRequest": google_longrunning_cancel_operation_request "/videointelligence:v1/GoogleLongrunning_ListOperationsResponse": google_longrunning_list_operations_response "/videointelligence:v1/GoogleLongrunning_ListOperationsResponse/nextPageToken": next_page_token @@ -102359,6 +102636,27 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_LabelSegment": google_cloud_videointelligence_v1_label_segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_LabelSegment/confidence": confidence "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_LabelSegment/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingBox": google_cloud_videointelligence_v1_normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/left": left +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/right": right +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/top": top +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly": google_cloud_videointelligence_v1_normalized_bounding_poly +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedVertex": google_cloud_videointelligence_v1_normalized_vertex 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedVertex/x": x +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_NormalizedVertex/y": y +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1_object_tracking_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingFrame": google_cloud_videointelligence_v1_object_tracking_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1_speech_recognition_alternative "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/transcript": transcript @@ -102368,6 +102666,18 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives/alternative": alternative 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextAnnotation": google_cloud_videointelligence_v1_text_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextAnnotation/segments": segments +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextAnnotation/segments/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextAnnotation/text": text +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextFrame": google_cloud_videointelligence_v1_text_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextSegment": google_cloud_videointelligence_v1_text_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextSegment/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextSegment/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextSegment/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_TextSegment/segment": segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationProgress": google_cloud_videointelligence_v1_video_annotation_progress "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/progressPercent": progress_percent @@ -102379,6 +102689,8 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": 
frame_label_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -102388,6 +102700,8 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoSegment": google_cloud_videointelligence_v1_video_segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1_VideoSegment/startTimeOffset": start_time_offset @@ -102481,6 +102795,27 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_LabelSegment": google_cloud_videointelligence_v1beta2_label_segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_LabelSegment/confidence": confidence 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_LabelSegment/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": google_cloud_videointelligence_v1beta2_normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/left": left +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/right": right +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/top": top +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly": google_cloud_videointelligence_v1beta2_normalized_bounding_poly +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedVertex": google_cloud_videointelligence_v1beta2_normalized_vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/x": x +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/y": y +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation": google_cloud_videointelligence_v1beta2_object_tracking_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames/frame": frame 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame": google_cloud_videointelligence_v1beta2_object_tracking_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ShotChangeDetectionConfig": google_cloud_videointelligence_v1beta2_shot_change_detection_config "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_ShotChangeDetectionConfig/model": model "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_SpeechContext": google_cloud_videointelligence_v1beta2_speech_context @@ -102507,6 +102842,21 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig/maxAlternatives": max_alternatives "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig/speechContexts": speech_contexts "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig/speechContexts/speech_context": speech_context +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextAnnotation": google_cloud_videointelligence_v1beta2_text_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments": segments +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextAnnotation/text": text +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextDetectionConfig": 
google_cloud_videointelligence_v1beta2_text_detection_config +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextDetectionConfig/languageHints": language_hints +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextDetectionConfig/languageHints/language_hint": language_hint +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextFrame": google_cloud_videointelligence_v1beta2_text_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextSegment": google_cloud_videointelligence_v1beta2_text_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextSegment/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextSegment/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextSegment/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_TextSegment/segment": segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": google_cloud_videointelligence_v1beta2_video_annotation_progress "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/progressPercent": progress_percent @@ -102519,6 +102869,8 @@ ? 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -102528,6 +102880,8 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext": google_cloud_videointelligence_v1beta2_video_context "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/explicitContentDetectionConfig": explicit_content_detection_config "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/labelDetectionConfig": label_detection_config @@ -102535,6 +102889,7 @@ 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/segments/segment": segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/shotChangeDetectionConfig": shot_change_detection_config "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/speechTranscriptionConfig": speech_transcription_config +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoContext/textDetectionConfig": text_detection_config "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoSegment": google_cloud_videointelligence_v1beta2_video_segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1beta2_VideoSegment/startTimeOffset": start_time_offset @@ -102597,6 +102952,22 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/left": left "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/right": right "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v1p1beta1_normalized_bounding_poly +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex": google_cloud_videointelligence_v1p1beta1_normalized_vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/x": x +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/y": y +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation": 
google_cloud_videointelligence_v1p1beta1_object_tracking_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame": google_cloud_videointelligence_v1p1beta1_object_tracking_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1p1beta1_speech_recognition_alternative "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/transcript": transcript @@ -102606,6 +102977,18 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/languageCode": language_code 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation": google_cloud_videointelligence_v1p1beta1_text_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments": segments +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/text": text +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextFrame": google_cloud_videointelligence_v1p1beta1_text_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextSegment": google_cloud_videointelligence_v1p1beta1_text_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextSegment/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_TextSegment/segment": segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": google_cloud_videointelligence_v1p1beta1_video_annotation_progress "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/progressPercent": progress_percent @@ -102621,6 +103004,8 @@ ? 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -102632,6 +103017,8 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions ? 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription" : speech_transcription +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoSegment": google_cloud_videointelligence_v1p1beta1_video_segment "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/startTimeOffset": start_time_offset @@ -102752,6 +103139,125 @@ "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p2beta1_WordInfo/speakerTag": speaker_tag "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p2beta1_WordInfo/startTime": start_time "/videointelligence:v1beta2/GoogleCloudVideointelligenceV1p2beta1_WordInfo/word": word +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress": google_cloud_videointelligence_v2beta1_annotate_video_progress +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress": annotation_progress +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress/annotation_progress": annotation_progress +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse": google_cloud_videointelligence_v2beta1_annotate_video_response +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults/annotation_result": annotation_result 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_Entity": google_cloud_videointelligence_v2beta1_entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_Entity/description": description +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_Entity/entityId": entity_id +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_Entity/languageCode": language_code +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation": google_cloud_videointelligence_v2beta1_explicit_content_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame": google_cloud_videointelligence_v2beta1_explicit_content_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/pornographyLikelihood": pornography_likelihood +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation": google_cloud_videointelligence_v2beta1_label_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities": category_entities +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities/category_entity": category_entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/entity": entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments": segments 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelFrame": google_cloud_videointelligence_v2beta1_label_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelFrame/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelSegment": google_cloud_videointelligence_v2beta1_label_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelSegment/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_LabelSegment/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox": google_cloud_videointelligence_v2beta1_normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/right": right +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v2beta1_normalized_bounding_poly +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedVertex": google_cloud_videointelligence_v2beta1_normalized_vertex +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/x": x 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/y": y +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v2beta1_object_tracking_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame": google_cloud_videointelligence_v2beta1_object_tracking_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v2beta1_speech_recognition_alternative +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/transcript": transcript +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words": words +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words/word": word +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechTranscription": 
google_cloud_videointelligence_v2beta1_speech_transcription +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives": alternatives +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives/alternative": alternative +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse": google_cloud_videointelligence_v2beta1_streaming_annotate_video_response +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResultsUri": annotation_results_uri +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/error": error +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults": google_cloud_videointelligence_v2beta1_streaming_video_annotation_results +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations": label_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations/label_annotation": label_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations": object_annotations +? 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations/object_annotation" +: object_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextAnnotation": google_cloud_videointelligence_v2beta1_text_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments": segments +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextAnnotation/text": text +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextFrame": google_cloud_videointelligence_v2beta1_text_frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextSegment": google_cloud_videointelligence_v2beta1_text_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextSegment/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextSegment/frames": frames +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextSegment/frames/frame": frame +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_TextSegment/segment": segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress": google_cloud_videointelligence_v2beta1_video_annotation_progress +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/inputUri": input_uri 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/progressPercent": progress_percent +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/startTime": start_time +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/updateTime": update_time +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults": google_cloud_videointelligence_v2beta1_video_annotation_results +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/error": error +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations +? "/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" +: frame_label_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations +? 
"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" +: segment_label_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations": shot_label_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoSegment": google_cloud_videointelligence_v2beta1_video_segment +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoSegment/endTimeOffset": end_time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_VideoSegment/startTimeOffset": start_time_offset +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo": google_cloud_videointelligence_v2beta1_word_info +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo/confidence": confidence +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo/endTime": end_time 
+"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo/speakerTag": speaker_tag +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo/startTime": start_time +"/videointelligence:v1beta2/GoogleCloudVideointelligenceV2beta1_WordInfo/word": word "/videointelligence:v1beta2/GoogleLongrunning_Operation": google_longrunning_operation "/videointelligence:v1beta2/GoogleLongrunning_Operation/done": done "/videointelligence:v1beta2/GoogleLongrunning_Operation/error": error @@ -102800,6 +103306,27 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_LabelSegment": google_cloud_videointelligence_v1_label_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_LabelSegment/confidence": confidence "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_LabelSegment/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox": google_cloud_videointelligence_v1_normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/left": left +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/right": right +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/top": top +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly": google_cloud_videointelligence_v1_normalized_bounding_poly +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedVertex": google_cloud_videointelligence_v1_normalized_vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedVertex/x": x 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_NormalizedVertex/y": y +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1_object_tracking_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame": google_cloud_videointelligence_v1_object_tracking_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1_speech_recognition_alternative "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/transcript": transcript @@ -102809,6 +103336,18 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/languageCode": language_code 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextAnnotation": google_cloud_videointelligence_v1_text_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextAnnotation/segments": segments +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextAnnotation/text": text +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextFrame": google_cloud_videointelligence_v1_text_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextSegment": google_cloud_videointelligence_v1_text_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextSegment/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextSegment/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextSegment/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_TextSegment/segment": segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress": google_cloud_videointelligence_v1_video_annotation_progress "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/progressPercent": progress_percent @@ -102820,6 +103359,8 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -102829,6 +103370,8 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoSegment": google_cloud_videointelligence_v1_video_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1_VideoSegment/startTimeOffset": start_time_offset @@ -102868,6 +103411,27 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment": google_cloud_videointelligence_v1beta2_label_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment/confidence": confidence 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": google_cloud_videointelligence_v1beta2_normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/left": left +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/right": right +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/top": top +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly": google_cloud_videointelligence_v1beta2_normalized_bounding_poly +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex": google_cloud_videointelligence_v1beta2_normalized_vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/x": x +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/y": y +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation": google_cloud_videointelligence_v1beta2_object_tracking_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames/frame": frame 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame": google_cloud_videointelligence_v1beta2_object_tracking_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative": google_cloud_videointelligence_v1beta2_speech_recognition_alternative "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/transcript": transcript @@ -102877,6 +103441,18 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives": alternatives "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/languageCode": language_code +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation": google_cloud_videointelligence_v1beta2_text_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments": segments +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/text": text +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextFrame": google_cloud_videointelligence_v1beta2_text_frame 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextSegment": google_cloud_videointelligence_v1beta2_text_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/segment": segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": google_cloud_videointelligence_v1beta2_video_annotation_progress "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/progressPercent": progress_percent @@ -102889,6 +103465,8 @@ ? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -102900,6 +103478,8 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions": speech_transcriptions ? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions/speech_transcription" : speech_transcription +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment": google_cloud_videointelligence_v1beta2_video_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment/startTimeOffset": start_time_offset @@ -102953,6 +103533,27 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment": google_cloud_videointelligence_v1p1beta1_label_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment/confidence": confidence "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox": google_cloud_videointelligence_v1p1beta1_normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/right": right 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v1p1beta1_normalized_bounding_poly +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex": google_cloud_videointelligence_v1p1beta1_normalized_vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/x": x +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/y": y +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1p1beta1_object_tracking_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame": google_cloud_videointelligence_v1p1beta1_object_tracking_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ShotChangeDetectionConfig": google_cloud_videointelligence_v1p1beta1_shot_change_detection_config "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_ShotChangeDetectionConfig/model": model "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechContext": google_cloud_videointelligence_v1p1beta1_speech_context @@ -102982,6 +103583,21 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscriptionConfig/maxAlternatives": max_alternatives "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscriptionConfig/speechContexts": speech_contexts "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscriptionConfig/speechContexts/speech_context": speech_context +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation": google_cloud_videointelligence_v1p1beta1_text_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments": segments +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/text": text +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextDetectionConfig": google_cloud_videointelligence_v1p1beta1_text_detection_config +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextDetectionConfig/languageHints": language_hints +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextDetectionConfig/languageHints/language_hint": language_hint +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame": google_cloud_videointelligence_v1p1beta1_text_frame 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment": google_cloud_videointelligence_v1p1beta1_text_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/segment": segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": google_cloud_videointelligence_v1p1beta1_video_annotation_progress "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/progressPercent": progress_percent @@ -102994,6 +103610,8 @@ ? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -103005,6 +103623,8 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions ? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription" : speech_transcription +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext": google_cloud_videointelligence_v1p1beta1_video_context "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/explicitContentDetectionConfig": explicit_content_detection_config "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/labelDetectionConfig": label_detection_config @@ -103012,6 +103632,7 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/segments/segment": segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/shotChangeDetectionConfig": shot_change_detection_config "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/speechTranscriptionConfig": speech_transcription_config +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoContext/textDetectionConfig": text_detection_config "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment": google_cloud_videointelligence_v1p1beta1_video_segment "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/endTimeOffset": end_time_offset 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/startTimeOffset": start_time_offset @@ -103132,6 +103753,128 @@ "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/speakerTag": speaker_tag "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/startTime": start_time "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/word": word +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress": google_cloud_videointelligence_v2beta1_annotate_video_progress +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress": annotation_progress +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress/annotation_progress": annotation_progress +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse": google_cloud_videointelligence_v2beta1_annotate_video_response +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults/annotation_result": annotation_result +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_Entity": google_cloud_videointelligence_v2beta1_entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_Entity/description": description +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_Entity/entityId": entity_id +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_Entity/languageCode": language_code +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation": google_cloud_videointelligence_v2beta1_explicit_content_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames": frames 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame": google_cloud_videointelligence_v2beta1_explicit_content_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/pornographyLikelihood": pornography_likelihood +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation": google_cloud_videointelligence_v2beta1_label_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities": category_entities +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities/category_entity": category_entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/entity": entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments": segments +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame": google_cloud_videointelligence_v2beta1_label_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment": google_cloud_videointelligence_v2beta1_label_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment/confidence": confidence 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox": google_cloud_videointelligence_v2beta1_normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/right": right +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v2beta1_normalized_bounding_poly +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex": google_cloud_videointelligence_v2beta1_normalized_vertex +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/x": x +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/y": y +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v2beta1_object_tracking_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames/frame": frame 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame": google_cloud_videointelligence_v2beta1_object_tracking_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v2beta1_speech_recognition_alternative +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/transcript": transcript +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words": words +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words/word": word +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription": google_cloud_videointelligence_v2beta1_speech_transcription +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives": alternatives +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives/alternative": alternative +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse": google_cloud_videointelligence_v2beta1_streaming_annotate_video_response 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResultsUri": annotation_results_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/error": error +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults": google_cloud_videointelligence_v2beta1_streaming_video_annotation_results +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations": label_annotations +? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations/label_annotation" +: label_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations": object_annotations +? 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations/object_annotation" +: object_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation": google_cloud_videointelligence_v2beta1_text_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments": segments +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/text": text +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextFrame": google_cloud_videointelligence_v2beta1_text_frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextSegment": google_cloud_videointelligence_v2beta1_text_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames": frames +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames/frame": frame +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/segment": segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress": google_cloud_videointelligence_v2beta1_video_annotation_progress 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/inputUri": input_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/progressPercent": progress_percent +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/startTime": start_time +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/updateTime": update_time +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults": google_cloud_videointelligence_v2beta1_video_annotation_results +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/error": error +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations +? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" +: frame_label_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations +? 
"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" +: segment_label_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations": shot_label_annotations +? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation" +: shot_label_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions +? "/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription" +: speech_transcription +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment": google_cloud_videointelligence_v2beta1_video_segment +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment/endTimeOffset": end_time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment/startTimeOffset": start_time_offset +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo": google_cloud_videointelligence_v2beta1_word_info +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/confidence": confidence +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/endTime": end_time 
+"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/speakerTag": speaker_tag +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/startTime": start_time +"/videointelligence:v1p1beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/word": word "/videointelligence:v1p1beta1/GoogleLongrunning_Operation": google_longrunning_operation "/videointelligence:v1p1beta1/GoogleLongrunning_Operation/done": done "/videointelligence:v1p1beta1/GoogleLongrunning_Operation/error": error @@ -103180,6 +103923,27 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_LabelSegment": google_cloud_videointelligence_v1_label_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_LabelSegment/confidence": confidence "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_LabelSegment/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox": google_cloud_videointelligence_v1_normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/left": left +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/right": right +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingBox/top": top +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly": google_cloud_videointelligence_v1_normalized_bounding_poly +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedVertex": google_cloud_videointelligence_v1_normalized_vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedVertex/x": x 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_NormalizedVertex/y": y +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1_object_tracking_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame": google_cloud_videointelligence_v1_object_tracking_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1_speech_recognition_alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechRecognitionAlternative/transcript": transcript @@ -103189,6 +103953,18 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_SpeechTranscription/languageCode": language_code 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextAnnotation": google_cloud_videointelligence_v1_text_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextAnnotation/segments": segments +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextAnnotation/text": text +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextFrame": google_cloud_videointelligence_v1_text_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextSegment": google_cloud_videointelligence_v1_text_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextSegment/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextSegment/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextSegment/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_TextSegment/segment": segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress": google_cloud_videointelligence_v1_video_annotation_progress "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationProgress/progressPercent": progress_percent @@ -103200,6 +103976,8 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation": frame_label_annotation 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -103209,6 +103987,8 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation": shot_label_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/speechTranscriptions/speech_transcription": speech_transcription +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoSegment": google_cloud_videointelligence_v1_video_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1_VideoSegment/startTimeOffset": start_time_offset @@ -103248,6 +104028,27 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment": google_cloud_videointelligence_v1beta2_label_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment/confidence": confidence 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_LabelSegment/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": google_cloud_videointelligence_v1beta2_normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/left": left +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/right": right +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox/top": top +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly": google_cloud_videointelligence_v1beta2_normalized_bounding_poly +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex": google_cloud_videointelligence_v1beta2_normalized_vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/x": x +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_NormalizedVertex/y": y +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation": google_cloud_videointelligence_v1beta2_object_tracking_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/frames/frame": frame 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame": google_cloud_videointelligence_v1beta2_object_tracking_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative": google_cloud_videointelligence_v1beta2_speech_recognition_alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative/transcript": transcript @@ -103257,6 +104058,18 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives": alternatives "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_SpeechTranscription/languageCode": language_code +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation": google_cloud_videointelligence_v1beta2_text_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments": segments +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/segments/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextAnnotation/text": text +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextFrame": google_cloud_videointelligence_v1beta2_text_frame 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextSegment": google_cloud_videointelligence_v1beta2_text_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_TextSegment/segment": segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": google_cloud_videointelligence_v1beta2_video_annotation_progress "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress/progressPercent": progress_percent @@ -103269,6 +104082,8 @@ ? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -103280,6 +104095,8 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions": speech_transcriptions ? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/speechTranscriptions/speech_transcription" : speech_transcription +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment": google_cloud_videointelligence_v1beta2_video_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1beta2_VideoSegment/startTimeOffset": start_time_offset @@ -103319,6 +104136,27 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment": google_cloud_videointelligence_v1p1beta1_label_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment/confidence": confidence "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_LabelSegment/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox": google_cloud_videointelligence_v1p1beta1_normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/right": right 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v1p1beta1_normalized_bounding_poly +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex": google_cloud_videointelligence_v1p1beta1_normalized_vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/x": x +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex/y": y +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v1p1beta1_object_tracking_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame": google_cloud_videointelligence_v1p1beta1_object_tracking_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame/timeOffset": time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v1p1beta1_speech_recognition_alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/confidence": confidence "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative/transcript": transcript @@ -103328,6 +104166,18 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives": alternatives "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/alternatives/alternative": alternative "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation": google_cloud_videointelligence_v1p1beta1_text_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments": segments +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextAnnotation/text": text +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame": google_cloud_videointelligence_v1p1beta1_text_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment": google_cloud_videointelligence_v1p1beta1_text_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/confidence": confidence 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_TextSegment/segment": segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": google_cloud_videointelligence_v1p1beta1_video_annotation_progress "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/inputUri": input_uri "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress/progressPercent": progress_percent @@ -103340,6 +104190,8 @@ ? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" : frame_label_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations ? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" : segment_label_annotation @@ -103351,6 +104203,8 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions ? 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription" : speech_transcription +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment": google_cloud_videointelligence_v1p1beta1_video_segment "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/endTimeOffset": end_time_offset "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p1beta1_VideoSegment/startTimeOffset": start_time_offset @@ -103510,6 +104364,128 @@ "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/speakerTag": speaker_tag "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/startTime": start_time "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV1p2beta1_WordInfo/word": word +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress": google_cloud_videointelligence_v2beta1_annotate_video_progress +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress": annotation_progress +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoProgress/annotationProgress/annotation_progress": annotation_progress +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse": google_cloud_videointelligence_v2beta1_annotate_video_response +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_AnnotateVideoResponse/annotationResults/annotation_result": annotation_result 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_Entity": google_cloud_videointelligence_v2beta1_entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_Entity/description": description +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_Entity/entityId": entity_id +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_Entity/languageCode": language_code +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation": google_cloud_videointelligence_v2beta1_explicit_content_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentAnnotation/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame": google_cloud_videointelligence_v2beta1_explicit_content_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/pornographyLikelihood": pornography_likelihood +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ExplicitContentFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation": google_cloud_videointelligence_v2beta1_label_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities": category_entities +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/categoryEntities/category_entity": category_entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/entity": entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments": 
segments +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelAnnotation/segments/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame": google_cloud_videointelligence_v2beta1_label_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment": google_cloud_videointelligence_v2beta1_label_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_LabelSegment/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox": google_cloud_videointelligence_v2beta1_normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/bottom": bottom +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/left": left +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/right": right +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingBox/top": top +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly": google_cloud_videointelligence_v2beta1_normalized_bounding_poly +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices": vertices +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedBoundingPoly/vertices/vertex": vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex": google_cloud_videointelligence_v2beta1_normalized_vertex +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/x": x 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_NormalizedVertex/y": y +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation": google_cloud_videointelligence_v2beta1_object_tracking_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/entity": entity +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingAnnotation/trackId": track_id +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame": google_cloud_videointelligence_v2beta1_object_tracking_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/normalizedBoundingBox": normalized_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_ObjectTrackingFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative": google_cloud_videointelligence_v2beta1_speech_recognition_alternative +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/transcript": transcript +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words": words +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechRecognitionAlternative/words/word": word 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription": google_cloud_videointelligence_v2beta1_speech_transcription +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives": alternatives +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/alternatives/alternative": alternative +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_SpeechTranscription/languageCode": language_code +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse": google_cloud_videointelligence_v2beta1_streaming_annotate_video_response +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResults": annotation_results +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/annotationResultsUri": annotation_results_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingAnnotateVideoResponse/error": error +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults": google_cloud_videointelligence_v2beta1_streaming_video_annotation_results +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations": label_annotations +? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/labelAnnotations/label_annotation" +: label_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations": object_annotations +? 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/objectAnnotations/object_annotation" +: object_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_StreamingVideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation": google_cloud_videointelligence_v2beta1_text_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments": segments +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/segments/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextAnnotation/text": text +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextFrame": google_cloud_videointelligence_v2beta1_text_frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextFrame/rotatedBoundingBox": rotated_bounding_box +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextFrame/timeOffset": time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextSegment": google_cloud_videointelligence_v2beta1_text_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames": frames +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/frames/frame": frame +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_TextSegment/segment": segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress": google_cloud_videointelligence_v2beta1_video_annotation_progress 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/inputUri": input_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/progressPercent": progress_percent +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/startTime": start_time +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationProgress/updateTime": update_time +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults": google_cloud_videointelligence_v2beta1_video_annotation_results +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/error": error +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/explicitAnnotation": explicit_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations": frame_label_annotations +? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/frameLabelAnnotations/frame_label_annotation" +: frame_label_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/inputUri": input_uri +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations": object_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/objectAnnotations/object_annotation": object_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations": segment_label_annotations +? 
"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/segmentLabelAnnotations/segment_label_annotation" +: segment_label_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations": shot_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotAnnotations/shot_annotation": shot_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations": shot_label_annotations +? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/shotLabelAnnotations/shot_label_annotation" +: shot_label_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions": speech_transcriptions +? "/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/speechTranscriptions/speech_transcription" +: speech_transcription +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations": text_annotations +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoAnnotationResults/textAnnotations/text_annotation": text_annotation +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment": google_cloud_videointelligence_v2beta1_video_segment +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment/endTimeOffset": end_time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_VideoSegment/startTimeOffset": start_time_offset +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo": google_cloud_videointelligence_v2beta1_word_info +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/confidence": confidence +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/endTime": end_time 
+"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/speakerTag": speaker_tag +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/startTime": start_time +"/videointelligence:v1p2beta1/GoogleCloudVideointelligenceV2beta1_WordInfo/word": word "/videointelligence:v1p2beta1/GoogleLongrunning_Operation": google_longrunning_operation "/videointelligence:v1p2beta1/GoogleLongrunning_Operation/done": done "/videointelligence:v1p2beta1/GoogleLongrunning_Operation/error": error diff --git a/generated/google/apis/appengine_v1.rb b/generated/google/apis/appengine_v1.rb index fc34a3d0d..d93e5372b 100644 --- a/generated/google/apis/appengine_v1.rb +++ b/generated/google/apis/appengine_v1.rb @@ -25,7 +25,7 @@ module Google # @see https://cloud.google.com/appengine/docs/admin-api/ module AppengineV1 VERSION = 'V1' - REVISION = '20190201' + REVISION = '20190214' # View and manage your applications deployed on Google App Engine AUTH_APPENGINE_ADMIN = 'https://www.googleapis.com/auth/appengine.admin' diff --git a/generated/google/apis/appengine_v1/classes.rb b/generated/google/apis/appengine_v1/classes.rb index d8e2948de..d08975463 100644 --- a/generated/google/apis/appengine_v1/classes.rb +++ b/generated/google/apis/appengine_v1/classes.rb @@ -1864,58 +1864,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadata - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method that initiated this operation. Example: google.appengine.v1beta4. 
- # Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Type of this operation. Deprecated, use method field instead. Example: " - # create_version".@OutputOnly - # Corresponds to the JSON property `operationType` - # @return [String] - attr_accessor :operation_type - - # Name of the resource that this operation is acting on. Example: apps/myapp/ - # modules/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @operation_type = args[:operation_type] if args.key?(:operation_type) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # Metadata for the given google.longrunning.Operation. class OperationMetadataV1 include Google::Apis::Core::Hashable @@ -2111,51 +2059,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadataV1Beta5 - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method name that initiated this operation. Example: google.appengine. 
- # v1beta5.Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Name of the resource that this operation is acting on. Example: apps/myapp/ - # services/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # Readiness checking configuration for VM instances. Unhealthy instances are # removed from traffic rotation. class ReadinessCheck @@ -2962,7 +2865,7 @@ module Google alias_method :vm?, :vm # The Google Compute Engine zones that are supported by this version in the App - # Engine flexible environment. + # Engine flexible environment. Deprecated. 
# Corresponds to the JSON property `zones` # @return [Array] attr_accessor :zones diff --git a/generated/google/apis/appengine_v1/representations.rb b/generated/google/apis/appengine_v1/representations.rb index f70c6264a..297774f38 100644 --- a/generated/google/apis/appengine_v1/representations.rb +++ b/generated/google/apis/appengine_v1/representations.rb @@ -310,12 +310,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadata - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class OperationMetadataV1 class Representation < Google::Apis::Core::JsonRepresentation; end @@ -334,12 +328,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadataV1Beta5 - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class ReadinessCheck class Representation < Google::Apis::Core::JsonRepresentation; end @@ -922,18 +910,6 @@ module Google end end - class OperationMetadata - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property :operation_type, as: 'operationType' - property :target, as: 'target' - property :user, as: 'user' - end - end - class OperationMetadataV1 # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -979,17 +955,6 @@ module Google end end - class OperationMetadataV1Beta5 - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property :target, as: 'target' - property :user, as: 'user' - end - end - class ReadinessCheck # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git 
a/generated/google/apis/appengine_v1alpha.rb b/generated/google/apis/appengine_v1alpha.rb index 6d3bb3a40..a97a413b3 100644 --- a/generated/google/apis/appengine_v1alpha.rb +++ b/generated/google/apis/appengine_v1alpha.rb @@ -25,7 +25,7 @@ module Google # @see https://cloud.google.com/appengine/docs/admin-api/ module AppengineV1alpha VERSION = 'V1alpha' - REVISION = '20181020' + REVISION = '20190214' # View and manage your applications deployed on Google App Engine AUTH_APPENGINE_ADMIN = 'https://www.googleapis.com/auth/appengine.admin' diff --git a/generated/google/apis/appengine_v1alpha/classes.rb b/generated/google/apis/appengine_v1alpha/classes.rb index cd18f21ce..9d1f0c9cc 100644 --- a/generated/google/apis/appengine_v1alpha/classes.rb +++ b/generated/google/apis/appengine_v1alpha/classes.rb @@ -604,58 +604,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadata - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method that initiated this operation. Example: google.appengine.v1beta4. - # Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Type of this operation. Deprecated, use method field instead. Example: " - # create_version".@OutputOnly - # Corresponds to the JSON property `operationType` - # @return [String] - attr_accessor :operation_type - - # Name of the resource that this operation is acting on. 
Example: apps/myapp/ - # modules/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @operation_type = args[:operation_type] if args.key?(:operation_type) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # Metadata for the given google.longrunning.Operation. class OperationMetadataV1 include Google::Apis::Core::Hashable @@ -851,51 +799,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadataV1Beta5 - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method name that initiated this operation. Example: google.appengine. - # v1beta5.Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Name of the resource that this operation is acting on. 
Example: apps/myapp/ - # services/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # A DNS resource record. class ResourceRecord include Google::Apis::Core::Hashable diff --git a/generated/google/apis/appengine_v1alpha/representations.rb b/generated/google/apis/appengine_v1alpha/representations.rb index 32d4dd0c7..e4268c14b 100644 --- a/generated/google/apis/appengine_v1alpha/representations.rb +++ b/generated/google/apis/appengine_v1alpha/representations.rb @@ -124,12 +124,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadata - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class OperationMetadataV1 class Representation < Google::Apis::Core::JsonRepresentation; end @@ -148,12 +142,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadataV1Beta5 - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class ResourceRecord class Representation < Google::Apis::Core::JsonRepresentation; end @@ -328,18 +316,6 @@ module Google end end - class OperationMetadata - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property 
:operation_type, as: 'operationType' - property :target, as: 'target' - property :user, as: 'user' - end - end - class OperationMetadataV1 # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -385,17 +361,6 @@ module Google end end - class OperationMetadataV1Beta5 - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property :target, as: 'target' - property :user, as: 'user' - end - end - class ResourceRecord # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/appengine_v1beta.rb b/generated/google/apis/appengine_v1beta.rb index 131bd15a6..2a8e52b87 100644 --- a/generated/google/apis/appengine_v1beta.rb +++ b/generated/google/apis/appengine_v1beta.rb @@ -25,7 +25,7 @@ module Google # @see https://cloud.google.com/appengine/docs/admin-api/ module AppengineV1beta VERSION = 'V1beta' - REVISION = '20190201' + REVISION = '20190214' # View and manage your applications deployed on Google App Engine AUTH_APPENGINE_ADMIN = 'https://www.googleapis.com/auth/appengine.admin' diff --git a/generated/google/apis/appengine_v1beta/classes.rb b/generated/google/apis/appengine_v1beta/classes.rb index bd2bdf6b7..a5e32dbfc 100644 --- a/generated/google/apis/appengine_v1beta/classes.rb +++ b/generated/google/apis/appengine_v1beta/classes.rb @@ -1988,58 +1988,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadata - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method that initiated this operation. 
Example: google.appengine.v1beta4. - # Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Type of this operation. Deprecated, use method field instead. Example: " - # create_version".@OutputOnly - # Corresponds to the JSON property `operationType` - # @return [String] - attr_accessor :operation_type - - # Name of the resource that this operation is acting on. Example: apps/myapp/ - # modules/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @operation_type = args[:operation_type] if args.key?(:operation_type) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # Metadata for the given google.longrunning.Operation. class OperationMetadataV1 include Google::Apis::Core::Hashable @@ -2235,51 +2183,6 @@ module Google end end - # Metadata for the given google.longrunning.Operation. - class OperationMetadataV1Beta5 - include Google::Apis::Core::Hashable - - # Timestamp that this operation completed.@OutputOnly - # Corresponds to the JSON property `endTime` - # @return [String] - attr_accessor :end_time - - # Timestamp that this operation was created.@OutputOnly - # Corresponds to the JSON property `insertTime` - # @return [String] - attr_accessor :insert_time - - # API method name that initiated this operation. Example: google.appengine. 
- # v1beta5.Version.CreateVersion.@OutputOnly - # Corresponds to the JSON property `method` - # @return [String] - attr_accessor :method_prop - - # Name of the resource that this operation is acting on. Example: apps/myapp/ - # services/default.@OutputOnly - # Corresponds to the JSON property `target` - # @return [String] - attr_accessor :target - - # User who requested this operation.@OutputOnly - # Corresponds to the JSON property `user` - # @return [String] - attr_accessor :user - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @end_time = args[:end_time] if args.key?(:end_time) - @insert_time = args[:insert_time] if args.key?(:insert_time) - @method_prop = args[:method_prop] if args.key?(:method_prop) - @target = args[:target] if args.key?(:target) - @user = args[:user] if args.key?(:user) - end - end - # Readiness checking configuration for VM instances. Unhealthy instances are # removed from traffic rotation. class ReadinessCheck @@ -3091,7 +2994,7 @@ module Google attr_accessor :vpc_access_connector # The Google Compute Engine zones that are supported by this version in the App - # Engine flexible environment. + # Engine flexible environment. Deprecated. 
# Corresponds to the JSON property `zones` # @return [Array] attr_accessor :zones diff --git a/generated/google/apis/appengine_v1beta/representations.rb b/generated/google/apis/appengine_v1beta/representations.rb index 34106e2ab..e6637f299 100644 --- a/generated/google/apis/appengine_v1beta/representations.rb +++ b/generated/google/apis/appengine_v1beta/representations.rb @@ -328,12 +328,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadata - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class OperationMetadataV1 class Representation < Google::Apis::Core::JsonRepresentation; end @@ -352,12 +346,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class OperationMetadataV1Beta5 - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class ReadinessCheck class Representation < Google::Apis::Core::JsonRepresentation; end @@ -980,18 +968,6 @@ module Google end end - class OperationMetadata - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property :operation_type, as: 'operationType' - property :target, as: 'target' - property :user, as: 'user' - end - end - class OperationMetadataV1 # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -1037,17 +1013,6 @@ module Google end end - class OperationMetadataV1Beta5 - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :end_time, as: 'endTime' - property :insert_time, as: 'insertTime' - property :method_prop, as: 'method' - property :target, as: 'target' - property :user, as: 'user' - end - end - class ReadinessCheck # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git 
a/generated/google/apis/cloudfunctions_v1.rb b/generated/google/apis/cloudfunctions_v1.rb index a9b931f68..8deb87dac 100644 --- a/generated/google/apis/cloudfunctions_v1.rb +++ b/generated/google/apis/cloudfunctions_v1.rb @@ -25,7 +25,7 @@ module Google # @see https://cloud.google.com/functions module CloudfunctionsV1 VERSION = 'V1' - REVISION = '20190122' + REVISION = '20190214' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/cloudfunctions_v1/classes.rb b/generated/google/apis/cloudfunctions_v1/classes.rb index 5c42175d4..bc2de371b 100644 --- a/generated/google/apis/cloudfunctions_v1/classes.rb +++ b/generated/google/apis/cloudfunctions_v1/classes.rb @@ -322,7 +322,8 @@ module Google # @return [String] attr_accessor :runtime - # Output only. The email of the function's service account. + # The email of the function's service account. If empty, defaults to + # `project_id`@appspot.gserviceaccount.com. # Corresponds to the JSON property `serviceAccountEmail` # @return [String] attr_accessor :service_account_email diff --git a/generated/google/apis/cloudfunctions_v1/service.rb b/generated/google/apis/cloudfunctions_v1/service.rb index 2378982f6..dfd8514a0 100644 --- a/generated/google/apis/cloudfunctions_v1/service.rb +++ b/generated/google/apis/cloudfunctions_v1/service.rb @@ -170,8 +170,8 @@ module Google # Synchronously invokes a deployed Cloud Function. To be used for testing # purposes as very limited traffic is allowed. For more information on - # the actual limits refer to [API Calls]( - # https://cloud.google.com/functions/quotas#rate_limits). + # the actual limits, refer to + # [Rate Limits](https://cloud.google.com/functions/quotas#rate_limits). # @param [String] name # The name of the function to be called. 
# @param [Google::Apis::CloudfunctionsV1::CallFunctionRequest] call_function_request_object @@ -320,9 +320,15 @@ module Google # these restrictions: # * Source file type should be a zip file. # * Source file size should not exceed 100MB limit. + # * No credentials should be attached - the signed URLs provide access to the + # target bucket using internal service identity; if credentials were + # attached, the identity from the credentials would be used, but that + # identity does not have permissions to upload files to the URL. # When making a HTTP PUT request, these two headers need to be specified: # * `content-type: application/zip` # * `x-goog-content-length-range: 0,104857600` + # And this header SHOULD NOT be specified: + # * `Authorization: Bearer YOUR_TOKEN` # @param [String] parent # The project and location in which the Google Cloud Storage signed URL # should be generated, specified in the format `projects/*/locations/*`. diff --git a/generated/google/apis/cloudfunctions_v1beta2.rb b/generated/google/apis/cloudfunctions_v1beta2.rb index 8f3d18838..93aa51bed 100644 --- a/generated/google/apis/cloudfunctions_v1beta2.rb +++ b/generated/google/apis/cloudfunctions_v1beta2.rb @@ -25,7 +25,7 @@ module Google # @see https://cloud.google.com/functions module CloudfunctionsV1beta2 VERSION = 'V1beta2' - REVISION = '20181212' + REVISION = '20190214' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/cloudfunctions_v1beta2/classes.rb b/generated/google/apis/cloudfunctions_v1beta2/classes.rb index 556aa5359..2798cc182 100644 --- a/generated/google/apis/cloudfunctions_v1beta2/classes.rb +++ b/generated/google/apis/cloudfunctions_v1beta2/classes.rb @@ -159,7 +159,8 @@ module Google # @return [String] attr_accessor :runtime - # Output only. The service account of the function. + # The email of the function's service account. 
If empty, defaults to + # `project_id`@appspot.gserviceaccount.com. # Corresponds to the JSON property `serviceAccount` # @return [String] attr_accessor :service_account diff --git a/generated/google/apis/cloudfunctions_v1beta2/service.rb b/generated/google/apis/cloudfunctions_v1beta2/service.rb index 9d15c23e2..4cfc6ab9e 100644 --- a/generated/google/apis/cloudfunctions_v1beta2/service.rb +++ b/generated/google/apis/cloudfunctions_v1beta2/service.rb @@ -320,9 +320,15 @@ module Google # these restrictions: # * Source file type should be a zip file. # * Source file size should not exceed 100MB limit. + # * No credentials should be attached - the signed URLs provide access to the + # target bucket using internal service identity; if credentials were + # attached, the identity from the credentials would be used, but that + # identity does not have permissions to upload files to the URL. # When making a HTTP PUT request, these two headers need to be specified: # * `content-type: application/zip` # * `x-goog-content-length-range: 0,104857600` + # And this header SHOULD NOT be specified: + # * `Authorization: Bearer YOUR_TOKEN` # @param [String] parent # The project and location in which the Google Cloud Storage signed URL # should be generated, specified in the format `projects/*/locations/*`. 
diff --git a/generated/google/apis/cloudresourcemanager_v1.rb b/generated/google/apis/cloudresourcemanager_v1.rb index e924f9de1..f7c90a161 100644 --- a/generated/google/apis/cloudresourcemanager_v1.rb +++ b/generated/google/apis/cloudresourcemanager_v1.rb @@ -26,7 +26,7 @@ module Google # @see https://cloud.google.com/resource-manager module CloudresourcemanagerV1 VERSION = 'V1' - REVISION = '20190128' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/cloudresourcemanager_v1/classes.rb b/generated/google/apis/cloudresourcemanager_v1/classes.rb index 38e853d2c..addb86bd4 100644 --- a/generated/google/apis/cloudresourcemanager_v1/classes.rb +++ b/generated/google/apis/cloudresourcemanager_v1/classes.rb @@ -1410,8 +1410,8 @@ module Google # @return [String] attr_accessor :lifecycle_state - # The user-assigned display name of the Project. - # It must be 4 to 30 characters. + # The optional user-assigned display name of the Project. + # When present it must be between 4 to 30 characters. # Allowed characters are: lowercase and uppercase letters, numbers, # hyphen, single-quote, double-quote, space, and exclamation point. 
# Example: My Project diff --git a/generated/google/apis/cloudresourcemanager_v1beta1.rb b/generated/google/apis/cloudresourcemanager_v1beta1.rb index c5c3ee7f1..a639bdeaa 100644 --- a/generated/google/apis/cloudresourcemanager_v1beta1.rb +++ b/generated/google/apis/cloudresourcemanager_v1beta1.rb @@ -26,7 +26,7 @@ module Google # @see https://cloud.google.com/resource-manager module CloudresourcemanagerV1beta1 VERSION = 'V1beta1' - REVISION = '20190128' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb b/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb index 284399d3c..8c77beeaa 100644 --- a/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb +++ b/generated/google/apis/cloudresourcemanager_v1beta1/classes.rb @@ -649,8 +649,8 @@ module Google # @return [String] attr_accessor :lifecycle_state - # The user-assigned display name of the Project. - # It must be 4 to 30 characters. + # The optional user-assigned display name of the Project. + # When present it must be between 4 to 30 characters. # Allowed characters are: lowercase and uppercase letters, numbers, # hyphen, single-quote, double-quote, space, and exclamation point. 
# Example: My Project diff --git a/generated/google/apis/cloudsearch_v1.rb b/generated/google/apis/cloudsearch_v1.rb index e1fa3618a..b4b367a97 100644 --- a/generated/google/apis/cloudsearch_v1.rb +++ b/generated/google/apis/cloudsearch_v1.rb @@ -26,7 +26,7 @@ module Google # @see https://gsuite.google.com/products/cloud-search/ module CloudsearchV1 VERSION = 'V1' - REVISION = '20190212' + REVISION = '20190221' # Index and serve your organization's data with Cloud Search AUTH_CLOUD_SEARCH = 'https://www.googleapis.com/auth/cloud_search' diff --git a/generated/google/apis/cloudsearch_v1/classes.rb b/generated/google/apis/cloudsearch_v1/classes.rb index aaff823f7..0aafce80d 100644 --- a/generated/google/apis/cloudsearch_v1/classes.rb +++ b/generated/google/apis/cloudsearch_v1/classes.rb @@ -1347,6 +1347,27 @@ module Google end end + # + class IndexItemOptions + include Google::Apis::Core::Hashable + + # Specifies if the index request should allow gsuite principals that do not + # exist or are deleted in the index request. + # Corresponds to the JSON property `allowUnknownGsuitePrincipals` + # @return [Boolean] + attr_accessor :allow_unknown_gsuite_principals + alias_method :allow_unknown_gsuite_principals?, :allow_unknown_gsuite_principals + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @allow_unknown_gsuite_principals = args[:allow_unknown_gsuite_principals] if args.key?(:allow_unknown_gsuite_principals) + end + end + # class IndexItemRequest include Google::Apis::Core::Hashable @@ -1362,6 +1383,11 @@ module Google # @return [Google::Apis::CloudsearchV1::DebugOptions] attr_accessor :debug_options + # + # Corresponds to the JSON property `indexItemOptions` + # @return [Google::Apis::CloudsearchV1::IndexItemOptions] + attr_accessor :index_item_options + # Represents a single object that is an item in the search index, such as a # file, folder, or a database record. 
# Corresponds to the JSON property `item` @@ -1381,6 +1407,7 @@ module Google def update!(**args) @connector_name = args[:connector_name] if args.key?(:connector_name) @debug_options = args[:debug_options] if args.key?(:debug_options) + @index_item_options = args[:index_item_options] if args.key?(:index_item_options) @item = args[:item] if args.key?(:item) @mode = args[:mode] if args.key?(:mode) end @@ -2862,13 +2889,13 @@ module Google attr_accessor :is_repeatable alias_method :is_repeatable?, :is_repeatable - # Indicates that the property identifies data that should be returned in search - # results via the Query API. If set to *true*, indicates that Query API - # users can use matching property fields in results. However, storing fields - # requires more space allocation and uses more bandwidth for search queries, - # which impacts performance over large datasets. Set to *true* here only if - # the field is needed for search results. Cannot be true for properties - # whose type is an object. + # Indicates that the property identifies data that should be returned in + # search results via the Query API. If set to *true*, indicates that Query + # API users can use matching property fields in results. However, storing + # fields requires more space allocation and uses more bandwidth for search + # queries, which impacts performance over large datasets. Set to *true* here + # only if the field is needed for search results. Cannot be true for + # properties whose type is an object. # Corresponds to the JSON property `isReturnable` # @return [Boolean] attr_accessor :is_returnable @@ -3696,8 +3723,8 @@ module Google # @return [String] attr_accessor :name - # IDs of the Long Running Operations (LROs) currently running for this schema. - # Output only field. + # IDs of the Long Running Operations (LROs) currently running for this + # schema. Output only field. 
# Corresponds to the JSON property `operationIds` # @return [Array] attr_accessor :operation_ids diff --git a/generated/google/apis/cloudsearch_v1/representations.rb b/generated/google/apis/cloudsearch_v1/representations.rb index 6eab4a910..1a102606c 100644 --- a/generated/google/apis/cloudsearch_v1/representations.rb +++ b/generated/google/apis/cloudsearch_v1/representations.rb @@ -304,6 +304,12 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class IndexItemOptions + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class IndexItemRequest class Representation < Google::Apis::Core::JsonRepresentation; end @@ -1264,12 +1270,21 @@ module Google end end + class IndexItemOptions + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :allow_unknown_gsuite_principals, as: 'allowUnknownGsuitePrincipals' + end + end + class IndexItemRequest # @private class Representation < Google::Apis::Core::JsonRepresentation property :connector_name, as: 'connectorName' property :debug_options, as: 'debugOptions', class: Google::Apis::CloudsearchV1::DebugOptions, decorator: Google::Apis::CloudsearchV1::DebugOptions::Representation + property :index_item_options, as: 'indexItemOptions', class: Google::Apis::CloudsearchV1::IndexItemOptions, decorator: Google::Apis::CloudsearchV1::IndexItemOptions::Representation + property :item, as: 'item', class: Google::Apis::CloudsearchV1::Item, decorator: Google::Apis::CloudsearchV1::Item::Representation property :mode, as: 'mode' diff --git a/generated/google/apis/container_v1.rb b/generated/google/apis/container_v1.rb index 5f9486204..80c581692 100644 --- a/generated/google/apis/container_v1.rb +++ b/generated/google/apis/container_v1.rb @@ -26,7 +26,7 @@ module Google # @see https://cloud.google.com/container-engine/ module ContainerV1 VERSION = 'V1' - REVISION = '20181211' + REVISION = '20190206' # View and manage your 
data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/container_v1/classes.rb b/generated/google/apis/container_v1/classes.rb index 1239be814..769cbd9ab 100644 --- a/generated/google/apis/container_v1/classes.rb +++ b/generated/google/apis/container_v1/classes.rb @@ -1535,6 +1535,7 @@ module Google # "cluster-name" # "cluster-uid" # "configure-sh" + # "containerd-configure-sh" # "enable-os-login" # "gci-update-strategy" # "gci-ensure-gke-docker" diff --git a/generated/google/apis/container_v1/service.rb b/generated/google/apis/container_v1/service.rb index b5dba9c34..4fc815a93 100644 --- a/generated/google/apis/container_v1/service.rb +++ b/generated/google/apis/container_v1/service.rb @@ -50,8 +50,8 @@ module Google # Returns configuration info about the Kubernetes Engine service. # @param [String] name - # The name (project and location) of the server config to get - # Specified in the format 'projects/*/locations/*'. + # The name (project and location) of the server config to get, + # specified in the format 'projects/*/locations/*'. # @param [String] project_id # Deprecated. The Google Developers Console [project ID or project # number](https://support.google.com/cloud/answer/6158840). @@ -1279,8 +1279,8 @@ module Google # [zone](/compute/docs/zones#available) to return operations for. # This field has been deprecated and replaced by the name field. # @param [String] name - # The name (project and location) of the server config to get - # Specified in the format 'projects/*/locations/*'. + # The name (project and location) of the server config to get, + # specified in the format 'projects/*/locations/*'. # @param [String] fields # Selector specifying which fields to include in a partial response. 
# @param [String] quota_user diff --git a/generated/google/apis/dialogflow_v2.rb b/generated/google/apis/dialogflow_v2.rb index 9238a5b04..223d065e4 100644 --- a/generated/google/apis/dialogflow_v2.rb +++ b/generated/google/apis/dialogflow_v2.rb @@ -26,7 +26,7 @@ module Google # @see https://cloud.google.com/dialogflow-enterprise/ module DialogflowV2 VERSION = 'V2' - REVISION = '20190209' + REVISION = '20190219' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/dialogflow_v2/classes.rb b/generated/google/apis/dialogflow_v2/classes.rb index 80f9b5904..44b2fed53 100644 --- a/generated/google/apis/dialogflow_v2/classes.rb +++ b/generated/google/apis/dialogflow_v2/classes.rb @@ -36,7 +36,7 @@ module Google # Optional. To filter out false positive results and still get variety in # matched natural language inputs for your agent, you can tune the machine # learning classification threshold. If the returned score value is less than - # the threshold value, then a fallback intent is be triggered or, if there + # the threshold value, then a fallback intent will be triggered or, if there # are no fallback intents defined, no intent will be triggered. The score # values range from 0.0 (completely uncertain) to 1.0 (completely certain). # If set to 0.0, the default of 0.3 is used. @@ -1690,9 +1690,20 @@ module Google # @return [String] attr_accessor :name - # Required. The collection of training phrase parts (can be annotated). - # Fields: `entity_type`, `alias` and `user_defined` should be populated - # only for the annotated parts of the training phrase. + # Required. The ordered list of training phrase parts. + # The parts are concatenated in order to form the training phrase. + # Note: The API does not automatically annotate training phrases like the + # Dialogflow Console does. 
+ # Note: Do not forget to include whitespace at part boundaries, + # so the training phrase is well formatted when the parts are concatenated. + # If the training phrase does not need to be annotated with parameters, + # you just need a single part with only the Part.text field set. + # If you want to annotate the training phrase, you must create multiple + # parts, where the fields of each part are populated in one of two ways: + # - `Part.text` is set to a part of the phrase that has no parameters. + # - `Part.text` is set to a part of the phrase that you want to annotate, + # and the `entity_type`, `alias`, and `user_defined` fields are all + # set. # Corresponds to the JSON property `parts` # @return [Array] attr_accessor :parts @@ -1728,26 +1739,26 @@ module Google # Optional. The parameter name for the value extracted from the # annotated part of the example. + # This field is required for annotated parts of the training phrase. # Corresponds to the JSON property `alias` # @return [String] attr_accessor :alias - # Optional. The entity type name prefixed with `@`. This field is - # required for the annotated part of the text and applies only to - # examples. + # Optional. The entity type name prefixed with `@`. + # This field is required for annotated parts of the training phrase. # Corresponds to the JSON property `entityType` # @return [String] attr_accessor :entity_type - # Required. The text corresponding to the example, - # if there are no annotations. For - # annotated examples, it is the text for one of the example's parts. + # Required. The text for this part. # Corresponds to the JSON property `text` # @return [String] attr_accessor :text - # Optional. Indicates whether the text was manually annotated by the - # developer. + # Optional. Indicates whether the text was manually annotated. + # This field is set to true when the Dialogflow Console is used to + # manually annotate the part. 
When creating an annotated part with the + # API, you must set this to true. # Corresponds to the JSON property `userDefined` # @return [Boolean] attr_accessor :user_defined @@ -3642,9 +3653,20 @@ module Google # @return [String] attr_accessor :name - # Required. The collection of training phrase parts (can be annotated). - # Fields: `entity_type`, `alias` and `user_defined` should be populated - # only for the annotated parts of the training phrase. + # Required. The ordered list of training phrase parts. + # The parts are concatenated in order to form the training phrase. + # Note: The API does not automatically annotate training phrases like the + # Dialogflow Console does. + # Note: Do not forget to include whitespace at part boundaries, + # so the training phrase is well formatted when the parts are concatenated. + # If the training phrase does not need to be annotated with parameters, + # you just need a single part with only the Part.text field set. + # If you want to annotate the training phrase, you must create multiple + # parts, where the fields of each part are populated in one of two ways: + # - `Part.text` is set to a part of the phrase that has no parameters. + # - `Part.text` is set to a part of the phrase that you want to annotate, + # and the `entity_type`, `alias`, and `user_defined` fields are all + # set. # Corresponds to the JSON property `parts` # @return [Array] attr_accessor :parts @@ -3680,26 +3702,26 @@ module Google # Optional. The parameter name for the value extracted from the # annotated part of the example. + # This field is required for annotated parts of the training phrase. # Corresponds to the JSON property `alias` # @return [String] attr_accessor :alias - # Optional. The entity type name prefixed with `@`. This field is - # required for the annotated part of the text and applies only to - # examples. + # Optional. The entity type name prefixed with `@`. + # This field is required for annotated parts of the training phrase. 
# Corresponds to the JSON property `entityType` # @return [String] attr_accessor :entity_type - # Required. The text corresponding to the example, - # if there are no annotations. For - # annotated examples, it is the text for one of the example's parts. + # Required. The text for this part. # Corresponds to the JSON property `text` # @return [String] attr_accessor :text - # Optional. Indicates whether the text was manually annotated by the - # developer. + # Optional. Indicates whether the text was manually annotated. + # This field is set to true when the Dialogflow Console is used to + # manually annotate the part. When creating an annotated part with the + # API, you must set this to true. # Corresponds to the JSON property `userDefined` # @return [Boolean] attr_accessor :user_defined diff --git a/generated/google/apis/iam_v1.rb b/generated/google/apis/iam_v1.rb index fcbe61c70..5c46c41be 100644 --- a/generated/google/apis/iam_v1.rb +++ b/generated/google/apis/iam_v1.rb @@ -27,7 +27,7 @@ module Google # @see https://cloud.google.com/iam/ module IamV1 VERSION = 'V1' - REVISION = '20190201' + REVISION = '20190215' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/iam_v1/classes.rb b/generated/google/apis/iam_v1/classes.rb index 8e56763d5..535b82262 100644 --- a/generated/google/apis/iam_v1/classes.rb +++ b/generated/google/apis/iam_v1/classes.rb @@ -1106,6 +1106,12 @@ module Google class ServiceAccount include Google::Apis::Core::Hashable + # @OutputOnly A bool indicate if the service account is disabled. + # Corresponds to the JSON property `disabled` + # @return [Boolean] + attr_accessor :disabled + alias_method :disabled?, :disabled + # Optional. A user-specified name for the service account. # Must be less than or equal to 100 UTF-8 bytes. 
# Corresponds to the JSON property `displayName` @@ -1158,6 +1164,7 @@ module Google # Update properties of this object def update!(**args) + @disabled = args[:disabled] if args.key?(:disabled) @display_name = args[:display_name] if args.key?(:display_name) @email = args[:email] if args.key?(:email) @etag = args[:etag] if args.key?(:etag) @@ -1465,6 +1472,50 @@ module Google @etag = args[:etag] if args.key?(:etag) end end + + # The service account undelete request. + class UndeleteServiceAccountRequest + include Google::Apis::Core::Hashable + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + end + end + + # + class UndeleteServiceAccountResponse + include Google::Apis::Core::Hashable + + # A service account in the Identity and Access Management API. + # To create a service account, specify the `project_id` and the `account_id` + # for the account. The `account_id` is unique within the project, and is used + # to generate the service account email address and a stable + # `unique_id`. + # If the account already exists, the account's resource name is returned + # in the format of projects/`PROJECT_ID`/serviceAccounts/`ACCOUNT`. The caller + # can use the name in other methods to access the account. + # All other methods can identify the service account using the format + # `projects/`PROJECT_ID`/serviceAccounts/`ACCOUNT``. + # Using `-` as a wildcard for the `PROJECT_ID` will infer the project from + # the account. The `ACCOUNT` value can be the `email` address or the + # `unique_id` of the service account. 
+ # Corresponds to the JSON property `restoredAccount` + # @return [Google::Apis::IamV1::ServiceAccount] + attr_accessor :restored_account + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @restored_account = args[:restored_account] if args.key?(:restored_account) + end + end end end end diff --git a/generated/google/apis/iam_v1/representations.rb b/generated/google/apis/iam_v1/representations.rb index 222a46c19..8ceb9151a 100644 --- a/generated/google/apis/iam_v1/representations.rb +++ b/generated/google/apis/iam_v1/representations.rb @@ -244,6 +244,18 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class UndeleteServiceAccountRequest + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class UndeleteServiceAccountResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class AuditConfig # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -501,6 +513,7 @@ module Google class ServiceAccount # @private class Representation < Google::Apis::Core::JsonRepresentation + property :disabled, as: 'disabled' property :display_name, as: 'displayName' property :email, as: 'email' property :etag, :base64 => true, as: 'etag' @@ -583,6 +596,20 @@ module Google property :etag, :base64 => true, as: 'etag' end end + + class UndeleteServiceAccountRequest + # @private + class Representation < Google::Apis::Core::JsonRepresentation + end + end + + class UndeleteServiceAccountResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :restored_account, as: 'restoredAccount', class: Google::Apis::IamV1::ServiceAccount, decorator: Google::Apis::IamV1::ServiceAccount::Representation + + end + end end end end diff --git a/generated/google/apis/iam_v1/service.rb 
b/generated/google/apis/iam_v1/service.rb index ace396813..7b00c46fb 100644 --- a/generated/google/apis/iam_v1/service.rb +++ b/generated/google/apis/iam_v1/service.rb @@ -947,6 +947,42 @@ module Google execute_or_queue_command(command, &block) end + # Restores a deleted ServiceAccount. + # @param [String] name + # The resource name of the service account in the following format: + # `projects/`PROJECT_ID`/serviceAccounts/`ACCOUNT_UNIQUE_ID`'. + # Using `-` as a wildcard for the `PROJECT_ID` will infer the project from + # the account. + # @param [Google::Apis::IamV1::UndeleteServiceAccountRequest] undelete_service_account_request_object + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # Available to use for quota purposes for server-side applications. Can be any + # arbitrary string assigned to a user, but should not exceed 40 characters. + # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::IamV1::UndeleteServiceAccountResponse] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::IamV1::UndeleteServiceAccountResponse] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def undelete_service_account(name, undelete_service_account_request_object = nil, fields: nil, quota_user: nil, options: nil, &block) + command = make_simple_command(:post, 'v1/{+name}:undelete', options) + command.request_representation = Google::Apis::IamV1::UndeleteServiceAccountRequest::Representation + command.request_object = undelete_service_account_request_object + 
command.response_representation = Google::Apis::IamV1::UndeleteServiceAccountResponse::Representation + command.response_class = Google::Apis::IamV1::UndeleteServiceAccountResponse + command.params['name'] = name unless name.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + execute_or_queue_command(command, &block) + end + # Updates a ServiceAccount. # Currently, only the following fields are updatable: # `display_name` . diff --git a/generated/google/apis/spanner_v1.rb b/generated/google/apis/spanner_v1.rb index 32746509f..a610b9621 100644 --- a/generated/google/apis/spanner_v1.rb +++ b/generated/google/apis/spanner_v1.rb @@ -26,7 +26,7 @@ module Google # @see https://cloud.google.com/spanner/ module SpannerV1 VERSION = 'V1' - REVISION = '20180920' + REVISION = '20190212' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/spanner_v1/service.rb b/generated/google/apis/spanner_v1/service.rb index dae699d81..8bdba3026 100644 --- a/generated/google/apis/spanner_v1/service.rb +++ b/generated/google/apis/spanner_v1/service.rb @@ -1097,7 +1097,9 @@ module Google execute_or_queue_command(command, &block) end - # Ends a session, releasing server resources associated with it. + # Ends a session, releasing server resources associated with it. This will + # asynchronously trigger cancellation of any operations that are running with + # this session. # @param [String] name # Required. The name of the session to delete. 
# @param [String] fields diff --git a/generated/google/apis/tagmanager_v1.rb b/generated/google/apis/tagmanager_v1.rb index 25eba28ab..e63b612d4 100644 --- a/generated/google/apis/tagmanager_v1.rb +++ b/generated/google/apis/tagmanager_v1.rb @@ -25,7 +25,7 @@ module Google # @see https://developers.google.com/tag-manager/api/v1/ module TagmanagerV1 VERSION = 'V1' - REVISION = '20171108' + REVISION = '20190220' # Delete your Google Tag Manager containers AUTH_TAGMANAGER_DELETE_CONTAINERS = 'https://www.googleapis.com/auth/tagmanager.delete.containers' diff --git a/generated/google/apis/tagmanager_v1/service.rb b/generated/google/apis/tagmanager_v1/service.rb index a6d9e505c..18695f760 100644 --- a/generated/google/apis/tagmanager_v1/service.rb +++ b/generated/google/apis/tagmanager_v1/service.rb @@ -487,52 +487,6 @@ module Google execute_or_queue_command(command, &block) end - # Updates a GTM Environment. This method supports patch semantics. - # @param [String] account_id - # The GTM Account ID. - # @param [String] container_id - # The GTM Container ID. - # @param [String] environment_id - # The GTM Environment ID. - # @param [Google::Apis::TagmanagerV1::Environment] environment_object - # @param [String] fingerprint - # When provided, this fingerprint must match the fingerprint of the environment - # in storage. - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. 
- # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [Google::Apis::TagmanagerV1::Environment] parsed result object - # @yieldparam err [StandardError] error object if request failed - # - # @return [Google::Apis::TagmanagerV1::Environment] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def patch_account_container_environment(account_id, container_id, environment_id, environment_object = nil, fingerprint: nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:patch, 'accounts/{accountId}/containers/{containerId}/environments/{environmentId}', options) - command.request_representation = Google::Apis::TagmanagerV1::Environment::Representation - command.request_object = environment_object - command.response_representation = Google::Apis::TagmanagerV1::Environment::Representation - command.response_class = Google::Apis::TagmanagerV1::Environment - command.params['accountId'] = account_id unless account_id.nil? - command.params['containerId'] = container_id unless container_id.nil? - command.params['environmentId'] = environment_id unless environment_id.nil? - command.query['fingerprint'] = fingerprint unless fingerprint.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - # Updates a GTM Environment. # @param [String] account_id # The GTM Account ID. 
diff --git a/generated/google/apis/tagmanager_v2.rb b/generated/google/apis/tagmanager_v2.rb index da7852b20..56664ba4a 100644 --- a/generated/google/apis/tagmanager_v2.rb +++ b/generated/google/apis/tagmanager_v2.rb @@ -25,7 +25,7 @@ module Google # @see https://developers.google.com/tag-manager/api/v2/ module TagmanagerV2 VERSION = 'V2' - REVISION = '20171108' + REVISION = '20190220' # Delete your Google Tag Manager containers AUTH_TAGMANAGER_DELETE_CONTAINERS = 'https://www.googleapis.com/auth/tagmanager.delete.containers' diff --git a/generated/google/apis/tagmanager_v2/classes.rb b/generated/google/apis/tagmanager_v2/classes.rb index 84005cb9a..148f3ffdc 100644 --- a/generated/google/apis/tagmanager_v2/classes.rb +++ b/generated/google/apis/tagmanager_v2/classes.rb @@ -313,6 +313,11 @@ module Google # @return [String] attr_accessor :container_version_id + # The custom templates in the container that this version was taken from. + # Corresponds to the JSON property `customTemplate` + # @return [Array] + attr_accessor :custom_template + # A value of true indicates this container version has been deleted. # Corresponds to the JSON property `deleted` # @return [Boolean] @@ -381,6 +386,7 @@ module Google @container = args[:container] if args.key?(:container) @container_id = args[:container_id] if args.key?(:container_id) @container_version_id = args[:container_version_id] if args.key?(:container_version_id) + @custom_template = args[:custom_template] if args.key?(:custom_template) @deleted = args[:deleted] if args.key?(:deleted) @description = args[:description] if args.key?(:description) @fingerprint = args[:fingerprint] if args.key?(:fingerprint) @@ -425,6 +431,11 @@ module Google # @return [String] attr_accessor :name + # Number of custom templates in the container version. + # Corresponds to the JSON property `numCustomTemplates` + # @return [String] + attr_accessor :num_custom_templates + # Number of macros in the container version. 
# Corresponds to the JSON property `numMacros` # @return [String] @@ -471,6 +482,7 @@ module Google @container_version_id = args[:container_version_id] if args.key?(:container_version_id) @deleted = args[:deleted] if args.key?(:deleted) @name = args[:name] if args.key?(:name) + @num_custom_templates = args[:num_custom_templates] if args.key?(:num_custom_templates) @num_macros = args[:num_macros] if args.key?(:num_macros) @num_rules = args[:num_rules] if args.key?(:num_rules) @num_tags = args[:num_tags] if args.key?(:num_tags) @@ -564,19 +576,55 @@ module Google end end - # Creates a workspace proposal to start a review of a workspace. - class CreateWorkspaceProposalRequest + # Represents a Google Tag Manager Custom Template's contents. + class CustomTemplate include Google::Apis::Core::Hashable - # A comment from the reviewer or author. - # Corresponds to the JSON property `initialComment` - # @return [Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment] - attr_accessor :initial_comment + # GTM Account ID. + # Corresponds to the JSON property `accountId` + # @return [String] + attr_accessor :account_id - # List of users to review the workspace proposal. - # Corresponds to the JSON property `reviewers` - # @return [Array] - attr_accessor :reviewers + # GTM Container ID. + # Corresponds to the JSON property `containerId` + # @return [String] + attr_accessor :container_id + + # The fingerprint of the GTM Custom Template as computed at storage time. This + # value is recomputed whenever the template is modified. + # Corresponds to the JSON property `fingerprint` + # @return [String] + attr_accessor :fingerprint + + # Custom Template display name. + # Corresponds to the JSON property `name` + # @return [String] + attr_accessor :name + + # GTM Custom Template's API relative path. 
+ # Corresponds to the JSON property `path` + # @return [String] + attr_accessor :path + + # Auto generated link to the tag manager UI + # Corresponds to the JSON property `tagManagerUrl` + # @return [String] + attr_accessor :tag_manager_url + + # The custom template in text format. + # Corresponds to the JSON property `templateData` + # @return [String] + attr_accessor :template_data + + # The Custom Template ID uniquely identifies the GTM custom template. + # Corresponds to the JSON property `templateId` + # @return [String] + attr_accessor :template_id + + # GTM Workspace ID. + # Corresponds to the JSON property `workspaceId` + # @return [String] + attr_accessor :workspace_id def initialize(**args) update!(**args) @@ -584,8 +632,15 @@ module Google # Update properties of this object def update!(**args) - @initial_comment = args[:initial_comment] if args.key?(:initial_comment) - @reviewers = args[:reviewers] if args.key?(:reviewers) + @account_id = args[:account_id] if args.key?(:account_id) + @container_id = args[:container_id] if args.key?(:container_id) + @fingerprint = args[:fingerprint] if args.key?(:fingerprint) + @name = args[:name] if args.key?(:name) + @path = args[:path] if args.key?(:path) + @tag_manager_url = args[:tag_manager_url] if args.key?(:tag_manager_url) + @template_data = args[:template_data] if args.key?(:template_data) + @template_id = args[:template_id] if args.key?(:template_id) + @workspace_id = args[:workspace_id] if args.key?(:workspace_id) end end @@ -649,16 +704,17 @@ module Google # @return [String] attr_accessor :authorization_code - # A Timestamp represents a point in time independent of any time zone or - # calendar, represented as seconds and fractions of seconds at nanosecond - # resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian - # Calendar which extends the Gregorian calendar backwards to year one. It is - # encoded assuming all minutes are 60 seconds long, i.e. 
leap seconds are " - # smeared" so that no leap second table is needed for interpretation. Range is - # from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to - # that range, we ensure that we can convert to and from RFC 3339 date strings. - # See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339. - # txt). + # A Timestamp represents a point in time independent of any time zone or local + # calendar, encoded as a count of seconds and fractions of seconds at nanosecond + # resolution. The count is relative to an epoch at UTC midnight on January 1, + # 1970, in the proleptic Gregorian calendar which extends the Gregorian calendar + # backwards to year one. + # All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap + # second table is needed for interpretation, using a [24-hour linear smear]( + # https://developers.google.com/time/smear). + # The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By + # restricting to that range, we ensure that we can convert to and from [RFC 3339] + # (https://www.ietf.org/rfc/rfc3339.txt) date strings. # # Examples # Example 1: Compute Timestamp from POSIX `time()`. # Timestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0); @@ -686,18 +742,20 @@ module Google # expressed using four digits while `month`, `day`, `hour`, `min`, and `sec` are # zero-padded to two digits each. The fractional seconds, which can go up to 9 # digits (i.e. up to 1 nanosecond resolution), are optional. The "Z" suffix - # indicates the timezone ("UTC"); the timezone is required, though only UTC (as - # indicated by "Z") is presently supported. + # indicates the timezone ("UTC"); the timezone is required. A proto3 JSON + # serializer should always use UTC (as indicated by "Z") when printing the + # Timestamp type and a proto3 JSON parser should be able to accept both UTC and + # other timezones (as indicated by an offset). 
# For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past 01:30 UTC on # January 15, 2017. # In JavaScript, one can convert a Date object to this format using the standard # [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/ - # Reference/Global_Objects/Date/toISOString] method. In Python, a standard ` + # Reference/Global_Objects/Date/toISOString) method. In Python, a standard ` # datetime.datetime` object can be converted to this format using [`strftime`]( # https://docs.python.org/2/library/time.html#time.strftime) with the time # format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use the Joda - # Time's [`ISODateTimeFormat.dateTime()`]( http://joda-time.sourceforge.net/ - # apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) to obtain a + # Time's [`ISODateTimeFormat.dateTime()`]( http://www.joda.org/joda-time/apidocs/ + # org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D ) to obtain a # formatter capable of generating timestamps in this format. # Corresponds to the JSON property `authorizationTimestamp` # @return [Google::Apis::TagmanagerV2::Timestamp] @@ -1197,6 +1255,31 @@ module Google end end + # + class ListZonesResponse + include Google::Apis::Core::Hashable + + # Continuation token for fetching the next page of results. + # Corresponds to the JSON property `nextPageToken` + # @return [String] + attr_accessor :next_page_token + + # All GTM Zones of a GTM Container. + # Corresponds to the JSON property `zone` + # @return [Array] + attr_accessor :zone + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @next_page_token = args[:next_page_token] if args.key?(:next_page_token) + @zone = args[:zone] if args.key?(:zone) + end + end + # Represents a merge conflict. class MergeConflict include Google::Apis::Core::Hashable @@ -1429,6 +1512,25 @@ module Google end end + # The result of reverting a zone in a workspace. 
+ class RevertZoneResponse + include Google::Apis::Core::Hashable + + # Represents a Google Tag Manager Zone's contents. + # Corresponds to the JSON property `zone` + # @return [Google::Apis::TagmanagerV2::Zone] + attr_accessor :zone + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @zone = args[:zone] if args.key?(:zone) + end + end + # Represents a reference to atag that fires before another tag in order to set # up dependencies. class SetupTag @@ -1704,16 +1806,17 @@ module Google end end - # A Timestamp represents a point in time independent of any time zone or - # calendar, represented as seconds and fractions of seconds at nanosecond - # resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian - # Calendar which extends the Gregorian calendar backwards to year one. It is - # encoded assuming all minutes are 60 seconds long, i.e. leap seconds are " - # smeared" so that no leap second table is needed for interpretation. Range is - # from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to - # that range, we ensure that we can convert to and from RFC 3339 date strings. - # See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339. - # txt). + # A Timestamp represents a point in time independent of any time zone or local + # calendar, encoded as a count of seconds and fractions of seconds at nanosecond + # resolution. The count is relative to an epoch at UTC midnight on January 1, + # 1970, in the proleptic Gregorian calendar which extends the Gregorian calendar + # backwards to year one. + # All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap + # second table is needed for interpretation, using a [24-hour linear smear]( + # https://developers.google.com/time/smear). + # The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By + # restricting to that range, we ensure that we can convert to and from [RFC 3339] + # (https://www.ietf.org/rfc/rfc3339.txt) date strings. # # Examples # Example 1: Compute Timestamp from POSIX `time()`. # Timestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0); @@ -1741,18 +1844,20 @@ module Google # expressed using four digits while `month`, `day`, `hour`, `min`, and `sec` are # zero-padded to two digits each. The fractional seconds, which can go up to 9 # digits (i.e. up to 1 nanosecond resolution), are optional. The "Z" suffix - # indicates the timezone ("UTC"); the timezone is required, though only UTC (as - # indicated by "Z") is presently supported. + # indicates the timezone ("UTC"); the timezone is required. A proto3 JSON + # serializer should always use UTC (as indicated by "Z") when printing the + # Timestamp type and a proto3 JSON parser should be able to accept both UTC and + # other timezones (as indicated by an offset). # For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past 01:30 UTC on # January 15, 2017. # In JavaScript, one can convert a Date object to this format using the standard # [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/ - # Reference/Global_Objects/Date/toISOString] method. In Python, a standard ` + # Reference/Global_Objects/Date/toISOString) method. In Python, a standard ` # datetime.datetime` object can be converted to this format using [`strftime`]( # https://docs.python.org/2/library/time.html#time.strftime) with the time # format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one can use the Joda - # Time's [`ISODateTimeFormat.dateTime()`]( http://joda-time.sourceforge.net/ - # apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) to obtain a + # Time's [`ISODateTimeFormat.dateTime()`]( http://www.joda.org/joda-time/apidocs/ + # org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D ) to obtain a # formatter capable of generating timestamps in this format. class Timestamp include Google::Apis::Core::Hashable @@ -1987,44 +2092,6 @@ module Google end end - # Updates a workspace proposal with patch-like semantics. - class UpdateWorkspaceProposalRequest - include Google::Apis::Core::Hashable - - # When provided, this fingerprint must match the fingerprint of the proposal in - # storage. - # Corresponds to the JSON property `fingerprint` - # @return [String] - attr_accessor :fingerprint - - # A comment from the reviewer or author. - # Corresponds to the JSON property `newComment` - # @return [Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment] - attr_accessor :new_comment - - # If present, the list of reviewers of the workspace proposal is updated. - # Corresponds to the JSON property `reviewers` - # @return [Array] - attr_accessor :reviewers - - # If present, the status of the workspace proposal is updated. - # Corresponds to the JSON property `status` - # @return [String] - attr_accessor :status - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @fingerprint = args[:fingerprint] if args.key?(:fingerprint) - @new_comment = args[:new_comment] if args.key?(:new_comment) - @reviewers = args[:reviewers] if args.key?(:reviewers) - @status = args[:status] if args.key?(:status) - end - end - # Represents a user's permissions to an account and its container. 
class UserPermission include Google::Apis::Core::Hashable @@ -2102,6 +2169,11 @@ module Google # @return [String] attr_accessor :fingerprint + # Option to convert a variable value to other value. + # Corresponds to the JSON property `formatValue` + # @return [Google::Apis::TagmanagerV2::VariableFormatValue] + attr_accessor :format_value + # Variable display name. # Corresponds to the JSON property `name` # @return [String] @@ -2168,6 +2240,7 @@ module Google @disabling_trigger_id = args[:disabling_trigger_id] if args.key?(:disabling_trigger_id) @enabling_trigger_id = args[:enabling_trigger_id] if args.key?(:enabling_trigger_id) @fingerprint = args[:fingerprint] if args.key?(:fingerprint) + @format_value = args[:format_value] if args.key?(:format_value) @name = args[:name] if args.key?(:name) @notes = args[:notes] if args.key?(:notes) @parameter = args[:parameter] if args.key?(:parameter) @@ -2182,6 +2255,50 @@ module Google end end + # + class VariableFormatValue + include Google::Apis::Core::Hashable + + # The option to convert a string-type variable value to either lowercase or + # uppercase. + # Corresponds to the JSON property `caseConversionType` + # @return [String] + attr_accessor :case_conversion_type + + # Represents a Google Tag Manager Parameter. + # Corresponds to the JSON property `convertFalseToValue` + # @return [Google::Apis::TagmanagerV2::Parameter] + attr_accessor :convert_false_to_value + + # Represents a Google Tag Manager Parameter. + # Corresponds to the JSON property `convertNullToValue` + # @return [Google::Apis::TagmanagerV2::Parameter] + attr_accessor :convert_null_to_value + + # Represents a Google Tag Manager Parameter. + # Corresponds to the JSON property `convertTrueToValue` + # @return [Google::Apis::TagmanagerV2::Parameter] + attr_accessor :convert_true_to_value + + # Represents a Google Tag Manager Parameter. 
+ # Corresponds to the JSON property `convertUndefinedToValue` + # @return [Google::Apis::TagmanagerV2::Parameter] + attr_accessor :convert_undefined_to_value + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @case_conversion_type = args[:case_conversion_type] if args.key?(:case_conversion_type) + @convert_false_to_value = args[:convert_false_to_value] if args.key?(:convert_false_to_value) + @convert_null_to_value = args[:convert_null_to_value] if args.key?(:convert_null_to_value) + @convert_true_to_value = args[:convert_true_to_value] if args.key?(:convert_true_to_value) + @convert_undefined_to_value = args[:convert_undefined_to_value] if args.key?(:convert_undefined_to_value) + end + end + # Represents a Google Tag Manager Container Workspace. class Workspace include Google::Apis::Core::Hashable @@ -2244,218 +2361,6 @@ module Google end end - # A workspace proposal represents an ongoing review of workspace changes in an - # effort to gain approval for container version creation. - class WorkspaceProposal - include Google::Apis::Core::Hashable - - # List of authors for the workspace proposal. - # Corresponds to the JSON property `authors` - # @return [Array] - attr_accessor :authors - - # The fingerprint of the GTM workspace proposal as computed at storage time. - # This value is recomputed whenever the proposal is modified. - # Corresponds to the JSON property `fingerprint` - # @return [String] - attr_accessor :fingerprint - - # Records the history of comments and status changes. - # Corresponds to the JSON property `history` - # @return [Array] - attr_accessor :history - - # GTM workspace proposal's relative path. - # Corresponds to the JSON property `path` - # @return [String] - attr_accessor :path - - # Lists of reviewers for the workspace proposal. 
- # Corresponds to the JSON property `reviewers` - # @return [Array] - attr_accessor :reviewers - - # The status of the workspace proposal as it goes through review. - # Corresponds to the JSON property `status` - # @return [String] - attr_accessor :status - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @authors = args[:authors] if args.key?(:authors) - @fingerprint = args[:fingerprint] if args.key?(:fingerprint) - @history = args[:history] if args.key?(:history) - @path = args[:path] if args.key?(:path) - @reviewers = args[:reviewers] if args.key?(:reviewers) - @status = args[:status] if args.key?(:status) - end - end - - # A history event that represents a comment or status change in the proposal. - class WorkspaceProposalHistory - include Google::Apis::Core::Hashable - - # A comment from the reviewer or author. - # Corresponds to the JSON property `comment` - # @return [Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment] - attr_accessor :comment - - # Represents an external user or internal Google Tag Manager system. - # Corresponds to the JSON property `createdBy` - # @return [Google::Apis::TagmanagerV2::WorkspaceProposalUser] - attr_accessor :created_by - - # A Timestamp represents a point in time independent of any time zone or - # calendar, represented as seconds and fractions of seconds at nanosecond - # resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian - # Calendar which extends the Gregorian calendar backwards to year one. It is - # encoded assuming all minutes are 60 seconds long, i.e. leap seconds are " - # smeared" so that no leap second table is needed for interpretation. Range is - # from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to - # that range, we ensure that we can convert to and from RFC 3339 date strings. - # See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339. - # txt). 
- # # Examples - # Example 1: Compute Timestamp from POSIX `time()`. - # Timestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0); - # Example 2: Compute Timestamp from POSIX `gettimeofday()`. - # struct timeval tv; gettimeofday(&tv, NULL); - # Timestamp timestamp; timestamp.set_seconds(tv.tv_sec); timestamp.set_nanos(tv. - # tv_usec * 1000); - # Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. - # FILETIME ft; GetSystemTimeAsFileTime(&ft); UINT64 ticks = (((UINT64)ft. - # dwHighDateTime) << 32) | ft.dwLowDateTime; - # // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // is - # 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. Timestamp - # timestamp; timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); - # timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); - # Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. - # long millis = System.currentTimeMillis(); - # Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) . - # setNanos((int) ((millis % 1000) * 1000000)).build(); - # Example 5: Compute Timestamp from current time in Python. - # timestamp = Timestamp() timestamp.GetCurrentTime() - # # JSON Mapping - # In JSON format, the Timestamp type is encoded as a string in the [RFC 3339]( - # https://www.ietf.org/rfc/rfc3339.txt) format. That is, the format is "`year`-` - # month`-`day`T`hour`:`min`:`sec`[.`frac_sec`]Z" where `year` is always - # expressed using four digits while `month`, `day`, `hour`, `min`, and `sec` are - # zero-padded to two digits each. The fractional seconds, which can go up to 9 - # digits (i.e. up to 1 nanosecond resolution), are optional. The "Z" suffix - # indicates the timezone ("UTC"); the timezone is required, though only UTC (as - # indicated by "Z") is presently supported. - # For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past 01:30 UTC on - # January 15, 2017. 
- # In JavaScript, one can convert a Date object to this format using the standard - # [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/ - # Reference/Global_Objects/Date/toISOString] method. In Python, a standard ` - # datetime.datetime` object can be converted to this format using [`strftime`]( - # https://docs.python.org/2/library/time.html#time.strftime) with the time - # format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use the Joda - # Time's [`ISODateTimeFormat.dateTime()`]( http://joda-time.sourceforge.net/ - # apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) to obtain a - # formatter capable of generating timestamps in this format. - # Corresponds to the JSON property `createdTimestamp` - # @return [Google::Apis::TagmanagerV2::Timestamp] - attr_accessor :created_timestamp - - # A change in the proposal's status. - # Corresponds to the JSON property `statusChange` - # @return [Google::Apis::TagmanagerV2::WorkspaceProposalHistoryStatusChange] - attr_accessor :status_change - - # The history type distinguishing between comments and status changes. - # Corresponds to the JSON property `type` - # @return [String] - attr_accessor :type - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @comment = args[:comment] if args.key?(:comment) - @created_by = args[:created_by] if args.key?(:created_by) - @created_timestamp = args[:created_timestamp] if args.key?(:created_timestamp) - @status_change = args[:status_change] if args.key?(:status_change) - @type = args[:type] if args.key?(:type) - end - end - - # A comment from the reviewer or author. - class WorkspaceProposalHistoryComment - include Google::Apis::Core::Hashable - - # The contents of the reviewer or author comment. 
- # Corresponds to the JSON property `content` - # @return [String] - attr_accessor :content - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @content = args[:content] if args.key?(:content) - end - end - - # A change in the proposal's status. - class WorkspaceProposalHistoryStatusChange - include Google::Apis::Core::Hashable - - # The new proposal status after that status change. - # Corresponds to the JSON property `newStatus` - # @return [String] - attr_accessor :new_status - - # The old proposal status before the status change. - # Corresponds to the JSON property `oldStatus` - # @return [String] - attr_accessor :old_status - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @new_status = args[:new_status] if args.key?(:new_status) - @old_status = args[:old_status] if args.key?(:old_status) - end - end - - # Represents an external user or internal Google Tag Manager system. - class WorkspaceProposalUser - include Google::Apis::Core::Hashable - - # Gaia id associated with a user, absent for the Google Tag Manager system. - # Corresponds to the JSON property `gaiaId` - # @return [Fixnum] - attr_accessor :gaia_id - - # User type distinguishes between a user and the Google Tag Manager system. - # Corresponds to the JSON property `type` - # @return [String] - attr_accessor :type - - def initialize(**args) - update!(**args) - end - - # Update properties of this object - def update!(**args) - @gaia_id = args[:gaia_id] if args.key?(:gaia_id) - @type = args[:type] if args.key?(:type) - end - end - # Represents a Google Tag Manager Zone's contents. 
class Zone include Google::Apis::Core::Hashable diff --git a/generated/google/apis/tagmanager_v2/representations.rb b/generated/google/apis/tagmanager_v2/representations.rb index 32ff978ae..40cc631b3 100644 --- a/generated/google/apis/tagmanager_v2/representations.rb +++ b/generated/google/apis/tagmanager_v2/representations.rb @@ -88,7 +88,7 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class CreateWorkspaceProposalRequest + class CustomTemplate class Representation < Google::Apis::Core::JsonRepresentation; end include Google::Apis::Core::JsonObjectSupport @@ -190,6 +190,12 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class ListZonesResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class MergeConflict class Representation < Google::Apis::Core::JsonRepresentation; end @@ -244,6 +250,12 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class RevertZoneResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class SetupTag class Representation < Google::Apis::Core::JsonRepresentation; end @@ -286,12 +298,6 @@ module Google include Google::Apis::Core::JsonObjectSupport end - class UpdateWorkspaceProposalRequest - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class UserPermission class Representation < Google::Apis::Core::JsonRepresentation; end @@ -304,42 +310,18 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class VariableFormatValue + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class Workspace class Representation < Google::Apis::Core::JsonRepresentation; end include Google::Apis::Core::JsonObjectSupport end - class WorkspaceProposal - class Representation < 
Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - - class WorkspaceProposalHistory - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - - class WorkspaceProposalHistoryComment - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - - class WorkspaceProposalHistoryStatusChange - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - - class WorkspaceProposalUser - class Representation < Google::Apis::Core::JsonRepresentation; end - - include Google::Apis::Core::JsonObjectSupport - end - class Zone class Representation < Google::Apis::Core::JsonRepresentation; end @@ -438,6 +420,8 @@ module Google property :container_id, as: 'containerId' property :container_version_id, as: 'containerVersionId' + collection :custom_template, as: 'customTemplate', class: Google::Apis::TagmanagerV2::CustomTemplate, decorator: Google::Apis::TagmanagerV2::CustomTemplate::Representation + property :deleted, as: 'deleted' property :description, as: 'description' property :fingerprint, as: 'fingerprint' @@ -465,6 +449,7 @@ module Google property :container_version_id, as: 'containerVersionId' property :deleted, as: 'deleted' property :name, as: 'name' + property :num_custom_templates, as: 'numCustomTemplates' property :num_macros, as: 'numMacros' property :num_rules, as: 'numRules' property :num_tags, as: 'numTags' @@ -503,13 +488,18 @@ module Google end end - class CreateWorkspaceProposalRequest + class CustomTemplate # @private class Representation < Google::Apis::Core::JsonRepresentation - property :initial_comment, as: 'initialComment', class: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment::Representation - - collection :reviewers, as: 
'reviewers', class: Google::Apis::TagmanagerV2::WorkspaceProposalUser, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalUser::Representation - + property :account_id, as: 'accountId' + property :container_id, as: 'containerId' + property :fingerprint, as: 'fingerprint' + property :name, as: 'name' + property :path, as: 'path' + property :tag_manager_url, as: 'tagManagerUrl' + property :template_data, as: 'templateData' + property :template_id, as: 'templateId' + property :workspace_id, as: 'workspaceId' end end @@ -687,6 +677,15 @@ module Google end end + class ListZonesResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :next_page_token, as: 'nextPageToken' + collection :zone, as: 'zone', class: Google::Apis::TagmanagerV2::Zone, decorator: Google::Apis::TagmanagerV2::Zone::Representation + + end + end + class MergeConflict # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -769,6 +768,14 @@ module Google end end + class RevertZoneResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :zone, as: 'zone', class: Google::Apis::TagmanagerV2::Zone, decorator: Google::Apis::TagmanagerV2::Zone::Representation + + end + end + class SetupTag # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -904,18 +911,6 @@ module Google end end - class UpdateWorkspaceProposalRequest - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :fingerprint, as: 'fingerprint' - property :new_comment, as: 'newComment', class: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment::Representation - - collection :reviewers, as: 'reviewers', class: Google::Apis::TagmanagerV2::WorkspaceProposalUser, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalUser::Representation - - property :status, as: 'status' - end - end - class UserPermission # 
@private class Representation < Google::Apis::Core::JsonRepresentation @@ -937,6 +932,8 @@ module Google collection :disabling_trigger_id, as: 'disablingTriggerId' collection :enabling_trigger_id, as: 'enablingTriggerId' property :fingerprint, as: 'fingerprint' + property :format_value, as: 'formatValue', class: Google::Apis::TagmanagerV2::VariableFormatValue, decorator: Google::Apis::TagmanagerV2::VariableFormatValue::Representation + property :name, as: 'name' property :notes, as: 'notes' collection :parameter, as: 'parameter', class: Google::Apis::TagmanagerV2::Parameter, decorator: Google::Apis::TagmanagerV2::Parameter::Representation @@ -952,6 +949,21 @@ module Google end end + class VariableFormatValue + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :case_conversion_type, as: 'caseConversionType' + property :convert_false_to_value, as: 'convertFalseToValue', class: Google::Apis::TagmanagerV2::Parameter, decorator: Google::Apis::TagmanagerV2::Parameter::Representation + + property :convert_null_to_value, as: 'convertNullToValue', class: Google::Apis::TagmanagerV2::Parameter, decorator: Google::Apis::TagmanagerV2::Parameter::Representation + + property :convert_true_to_value, as: 'convertTrueToValue', class: Google::Apis::TagmanagerV2::Parameter, decorator: Google::Apis::TagmanagerV2::Parameter::Representation + + property :convert_undefined_to_value, as: 'convertUndefinedToValue', class: Google::Apis::TagmanagerV2::Parameter, decorator: Google::Apis::TagmanagerV2::Parameter::Representation + + end + end + class Workspace # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -966,59 +978,6 @@ module Google end end - class WorkspaceProposal - # @private - class Representation < Google::Apis::Core::JsonRepresentation - collection :authors, as: 'authors', class: Google::Apis::TagmanagerV2::WorkspaceProposalUser, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalUser::Representation - - property 
:fingerprint, as: 'fingerprint' - collection :history, as: 'history', class: Google::Apis::TagmanagerV2::WorkspaceProposalHistory, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalHistory::Representation - - property :path, as: 'path' - collection :reviewers, as: 'reviewers', class: Google::Apis::TagmanagerV2::WorkspaceProposalUser, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalUser::Representation - - property :status, as: 'status' - end - end - - class WorkspaceProposalHistory - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :comment, as: 'comment', class: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryComment::Representation - - property :created_by, as: 'createdBy', class: Google::Apis::TagmanagerV2::WorkspaceProposalUser, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalUser::Representation - - property :created_timestamp, as: 'createdTimestamp', class: Google::Apis::TagmanagerV2::Timestamp, decorator: Google::Apis::TagmanagerV2::Timestamp::Representation - - property :status_change, as: 'statusChange', class: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryStatusChange, decorator: Google::Apis::TagmanagerV2::WorkspaceProposalHistoryStatusChange::Representation - - property :type, as: 'type' - end - end - - class WorkspaceProposalHistoryComment - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :content, as: 'content' - end - end - - class WorkspaceProposalHistoryStatusChange - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :new_status, as: 'newStatus' - property :old_status, as: 'oldStatus' - end - end - - class WorkspaceProposalUser - # @private - class Representation < Google::Apis::Core::JsonRepresentation - property :gaia_id, :numeric_string => true, as: 'gaiaId' - property :type, as: 'type' - end - end - class Zone # @private class 
Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/tagmanager_v2/service.rb b/generated/google/apis/tagmanager_v2/service.rb index 2e6c1fa98..ca9a618b9 100644 --- a/generated/google/apis/tagmanager_v2/service.rb +++ b/generated/google/apis/tagmanager_v2/service.rb @@ -476,47 +476,6 @@ module Google execute_or_queue_command(command, &block) end - # Updates a GTM Environment. This method supports patch semantics. - # @param [String] path - # GTM Environment's API relative path. Example: accounts/`account_id`/containers/ - # `container_id`/environments/`environment_id` - # @param [Google::Apis::TagmanagerV2::Environment] environment_object - # @param [String] fingerprint - # When provided, this fingerprint must match the fingerprint of the environment - # in storage. - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. 
- # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [Google::Apis::TagmanagerV2::Environment] parsed result object - # @yieldparam err [StandardError] error object if request failed - # - # @return [Google::Apis::TagmanagerV2::Environment] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def patch_account_container_environment(path, environment_object = nil, fingerprint: nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:patch, '{+path}', options) - command.request_representation = Google::Apis::TagmanagerV2::Environment::Representation - command.request_object = environment_object - command.response_representation = Google::Apis::TagmanagerV2::Environment::Representation - command.response_class = Google::Apis::TagmanagerV2::Environment - command.params['path'] = path unless path.nil? - command.query['fingerprint'] = fingerprint unless fingerprint.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - # Re-generates the authorization code for a GTM Environment. # @param [String] path # GTM Environment's API relative path. Example: accounts/`account_id`/containers/ @@ -1063,40 +1022,6 @@ module Google execute_or_queue_command(command, &block) end - # Gets a GTM Workspace Proposal. 
- # @param [String] path - # GTM workspace proposal's relative path: Example: accounts/`aid`/containers/` - # cid`/workspace/`wid`/workspace_proposal - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. - # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [Google::Apis::TagmanagerV2::WorkspaceProposal] parsed result object - # @yieldparam err [StandardError] error object if request failed - # - # @return [Google::Apis::TagmanagerV2::WorkspaceProposal] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def get_account_container_workspace_proposal(path, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:get, '{+path}', options) - command.response_representation = Google::Apis::TagmanagerV2::WorkspaceProposal::Representation - command.response_class = Google::Apis::TagmanagerV2::WorkspaceProposal - command.params['path'] = path unless path.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - # Finds conflicting and modified entities in the workspace. # @param [String] path # GTM Workspace's API relative path. 
Example: accounts/`account_id`/containers/` @@ -1319,43 +1244,6 @@ module Google execute_or_queue_command(command, &block) end - # Updates a GTM Workspace Proposal. - # @param [String] path - # GTM workspace proposal's relative path: Example: accounts/`aid`/containers/` - # cid`/workspace/`wid`/workspace_proposal - # @param [Google::Apis::TagmanagerV2::UpdateWorkspaceProposalRequest] update_workspace_proposal_request_object - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. - # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [Google::Apis::TagmanagerV2::WorkspaceProposal] parsed result object - # @yieldparam err [StandardError] error object if request failed - # - # @return [Google::Apis::TagmanagerV2::WorkspaceProposal] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def update_account_container_workspace_proposal(path, update_workspace_proposal_request_object = nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:put, '{+path}', options) - command.request_representation = Google::Apis::TagmanagerV2::UpdateWorkspaceProposalRequest::Representation - command.request_object = update_workspace_proposal_request_object - command.response_representation = Google::Apis::TagmanagerV2::WorkspaceProposal::Representation - command.response_class = Google::Apis::TagmanagerV2::WorkspaceProposal - command.params['path'] = path unless 
path.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - # Creates one or more GTM Built-In Variables. # @param [String] parent # GTM Workspace's API relative path. Example: accounts/`account_id`/containers/` @@ -1802,75 +1690,6 @@ module Google execute_or_queue_command(command, &block) end - # Creates a GTM Workspace Proposal. - # @param [String] parent - # GTM Workspace's API relative path. Example: accounts/`aid`/containers/`cid`/ - # workspace/`wid` - # @param [Google::Apis::TagmanagerV2::CreateWorkspaceProposalRequest] create_workspace_proposal_request_object - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. 
- # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [Google::Apis::TagmanagerV2::WorkspaceProposal] parsed result object - # @yieldparam err [StandardError] error object if request failed - # - # @return [Google::Apis::TagmanagerV2::WorkspaceProposal] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def create_workspace_proposal(parent, create_workspace_proposal_request_object = nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:post, '{+parent}/proposal', options) - command.request_representation = Google::Apis::TagmanagerV2::CreateWorkspaceProposalRequest::Representation - command.request_object = create_workspace_proposal_request_object - command.response_representation = Google::Apis::TagmanagerV2::WorkspaceProposal::Representation - command.response_class = Google::Apis::TagmanagerV2::WorkspaceProposal - command.params['parent'] = parent unless parent.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - - # Deletes a GTM Workspace Proposal. - # @param [String] path - # GTM workspace proposal's relative path: Example: accounts/`aid`/containers/` - # cid`/workspace/`wid`/workspace_proposal - # @param [String] fields - # Selector specifying which fields to include in a partial response. - # @param [String] quota_user - # An opaque string that represents a user for quota purposes. Must not exceed 40 - # characters. - # @param [String] user_ip - # Deprecated. Please use quotaUser instead. 
- # @param [Google::Apis::RequestOptions] options - # Request-specific options - # - # @yield [result, err] Result & error if block supplied - # @yieldparam result [NilClass] No result returned for this method - # @yieldparam err [StandardError] error object if request failed - # - # @return [void] - # - # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried - # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification - # @raise [Google::Apis::AuthorizationError] Authorization is required - def delete_account_container_workspace_proposal(path, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) - command = make_simple_command(:delete, '{+path}', options) - command.params['path'] = path unless path.nil? - command.query['fields'] = fields unless fields.nil? - command.query['quotaUser'] = quota_user unless quota_user.nil? - command.query['userIp'] = user_ip unless user_ip.nil? - execute_or_queue_command(command, &block) - end - # Creates a GTM Tag. # @param [String] parent # GTM Workspace's API relative path. Example: accounts/`account_id`/containers/` @@ -2528,6 +2347,225 @@ module Google execute_or_queue_command(command, &block) end + # Creates a GTM Zone. + # @param [String] parent + # GTM Workspace's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id` + # @param [Google::Apis::TagmanagerV2::Zone] zone_object + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. + # @param [String] user_ip + # Deprecated. Please use quotaUser instead. 
+ # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::TagmanagerV2::Zone] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::TagmanagerV2::Zone] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def create_account_container_workspace_zone(parent, zone_object = nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:post, '{+parent}/zones', options) + command.request_representation = Google::Apis::TagmanagerV2::Zone::Representation + command.request_object = zone_object + command.response_representation = Google::Apis::TagmanagerV2::Zone::Representation + command.response_class = Google::Apis::TagmanagerV2::Zone + command.params['parent'] = parent unless parent.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + + # Deletes a GTM Zone. + # @param [String] path + # GTM Zone's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id`/zones/`zone_id` + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. + # @param [String] user_ip + # Deprecated. Please use quotaUser instead. 
+ # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [NilClass] No result returned for this method + # @yieldparam err [StandardError] error object if request failed + # + # @return [void] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def delete_account_container_workspace_zone(path, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:delete, '{+path}', options) + command.params['path'] = path unless path.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + + # Gets a GTM Zone. + # @param [String] path + # GTM Zone's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id`/zones/`zone_id` + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. + # @param [String] user_ip + # Deprecated. Please use quotaUser instead. 
+ # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::TagmanagerV2::Zone] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::TagmanagerV2::Zone] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def get_account_container_workspace_zone(path, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:get, '{+path}', options) + command.response_representation = Google::Apis::TagmanagerV2::Zone::Representation + command.response_class = Google::Apis::TagmanagerV2::Zone + command.params['path'] = path unless path.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + + # Lists all GTM Zones of a GTM container workspace. + # @param [String] parent + # GTM Workspace's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id` + # @param [String] page_token + # Continuation token for fetching the next page of results. + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. + # @param [String] user_ip + # Deprecated. Please use quotaUser instead. 
+ # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::TagmanagerV2::ListZonesResponse] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::TagmanagerV2::ListZonesResponse] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def list_account_container_workspace_zones(parent, page_token: nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:get, '{+parent}/zones', options) + command.response_representation = Google::Apis::TagmanagerV2::ListZonesResponse::Representation + command.response_class = Google::Apis::TagmanagerV2::ListZonesResponse + command.params['parent'] = parent unless parent.nil? + command.query['pageToken'] = page_token unless page_token.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + + # Reverts changes to a GTM Zone in a GTM Workspace. + # @param [String] path + # GTM Zone's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id`/zones/`zone_id` + # @param [String] fingerprint + # When provided, this fingerprint must match the fingerprint of the zone in + # storage. + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. + # @param [String] user_ip + # Deprecated. 
Please use quotaUser instead. + # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::TagmanagerV2::RevertZoneResponse] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::TagmanagerV2::RevertZoneResponse] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def revert_account_container_workspace_zone(path, fingerprint: nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:post, '{+path}:revert', options) + command.response_representation = Google::Apis::TagmanagerV2::RevertZoneResponse::Representation + command.response_class = Google::Apis::TagmanagerV2::RevertZoneResponse + command.params['path'] = path unless path.nil? + command.query['fingerprint'] = fingerprint unless fingerprint.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + + # Updates a GTM Zone. + # @param [String] path + # GTM Zone's API relative path. Example: accounts/`account_id`/containers/` + # container_id`/workspaces/`workspace_id`/zones/`zone_id` + # @param [Google::Apis::TagmanagerV2::Zone] zone_object + # @param [String] fingerprint + # When provided, this fingerprint must match the fingerprint of the zone in + # storage. + # @param [String] fields + # Selector specifying which fields to include in a partial response. + # @param [String] quota_user + # An opaque string that represents a user for quota purposes. Must not exceed 40 + # characters. 
+ # @param [String] user_ip + # Deprecated. Please use quotaUser instead. + # @param [Google::Apis::RequestOptions] options + # Request-specific options + # + # @yield [result, err] Result & error if block supplied + # @yieldparam result [Google::Apis::TagmanagerV2::Zone] parsed result object + # @yieldparam err [StandardError] error object if request failed + # + # @return [Google::Apis::TagmanagerV2::Zone] + # + # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried + # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification + # @raise [Google::Apis::AuthorizationError] Authorization is required + def update_account_container_workspace_zone(path, zone_object = nil, fingerprint: nil, fields: nil, quota_user: nil, user_ip: nil, options: nil, &block) + command = make_simple_command(:put, '{+path}', options) + command.request_representation = Google::Apis::TagmanagerV2::Zone::Representation + command.request_object = zone_object + command.response_representation = Google::Apis::TagmanagerV2::Zone::Representation + command.response_class = Google::Apis::TagmanagerV2::Zone + command.params['path'] = path unless path.nil? + command.query['fingerprint'] = fingerprint unless fingerprint.nil? + command.query['fields'] = fields unless fields.nil? + command.query['quotaUser'] = quota_user unless quota_user.nil? + command.query['userIp'] = user_ip unless user_ip.nil? + execute_or_queue_command(command, &block) + end + # Creates a user's Account & Container access. # @param [String] parent # GTM Account's API relative path. 
Example: accounts/`account_id` diff --git a/generated/google/apis/testing_v1.rb b/generated/google/apis/testing_v1.rb index 43a0f61c1..7c1b3c379 100644 --- a/generated/google/apis/testing_v1.rb +++ b/generated/google/apis/testing_v1.rb @@ -26,7 +26,7 @@ module Google # @see https://developers.google.com/cloud-test-lab/ module TestingV1 VERSION = 'V1' - REVISION = '20190219' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/videointelligence_v1.rb b/generated/google/apis/videointelligence_v1.rb index a86b661d9..cba9081cd 100644 --- a/generated/google/apis/videointelligence_v1.rb +++ b/generated/google/apis/videointelligence_v1.rb @@ -21,12 +21,13 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @see https://cloud.google.com/video-intelligence/docs/ module VideointelligenceV1 VERSION = 'V1' - REVISION = '20190112' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/videointelligence_v1/classes.rb b/generated/google/apis/videointelligence_v1/classes.rb index d30aa4ea3..e26359c16 100644 --- a/generated/google/apis/videointelligence_v1/classes.rb +++ b/generated/google/apis/videointelligence_v1/classes.rb @@ -362,6 +362,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. 
+ # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. + # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. 
+ class GoogleCloudVideointelligenceV1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. + # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Config for SHOT_CHANGE_DETECTION. class GoogleCloudVideointelligenceV1ShotChangeDetectionConfig include Google::Apis::Core::Hashable @@ -574,6 +752,128 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. 
+ # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Config for TEXT_DETECTION. + class GoogleCloudVideointelligenceV1TextDetectionConfig + include Google::Apis::Core::Hashable + + # Language hint can be specified if the language to be detected is known a + # priori. It can increase the accuracy of the detection. Language hint must + # be language code in BCP-47 format. + # Automatic language detection is performed if no hint is provided. + # Corresponds to the JSON property `languageHints` + # @return [Array] + attr_accessor :language_hints + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @language_hints = args[:language_hints] if args.key?(:language_hints) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). 
Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. 
class GoogleCloudVideointelligenceV1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -679,6 +979,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. # Corresponds to the JSON property `segmentLabelAnnotations` @@ -701,6 +1006,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -711,10 +1023,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -749,6 +1063,11 @@ module Google # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscriptionConfig] attr_accessor :speech_transcription_config + # Config for TEXT_DETECTION. 
+ # Corresponds to the JSON property `textDetectionConfig` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextDetectionConfig] + attr_accessor :text_detection_config + def initialize(**args) update!(**args) end @@ -760,6 +1079,7 @@ module Google @segments = args[:segments] if args.key?(:segments) @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config) @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config) + @text_detection_config = args[:text_detection_config] if args.key?(:text_detection_config) end end @@ -1062,6 +1382,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. + # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. + # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. 
+ # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. + # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. 
+ # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -1129,6 +1627,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV1beta2TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1beta2TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). 
+ # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1beta2TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1234,6 +1832,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. # Corresponds to the JSON property `segmentLabelAnnotations` @@ -1256,6 +1859,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. 
+ # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1266,10 +1876,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -1572,6 +2184,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -1639,6 +2429,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1p1beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1p1beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1744,6 +2634,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -1766,6 +2661,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1776,10 +2678,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -2671,6 +3575,923 @@ module Google end end + # Video annotation progress. Included in the `metadata` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + include Google::Apis::Core::Hashable + + # Progress metadata for all videos specified in `AnnotateVideoRequest`. 
+ # Corresponds to the JSON property `annotationProgress` + # @return [Array] + attr_accessor :annotation_progress + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress) + end + end + + # Video annotation response. Included in the `response` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Annotation results for all videos specified in `AnnotateVideoRequest`. + # Corresponds to the JSON property `annotationResults` + # @return [Array] + attr_accessor :annotation_results + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + end + end + + # Detected entity from video analysis. + class GoogleCloudVideointelligenceV2beta1Entity + include Google::Apis::Core::Hashable + + # Textual description, e.g. `Fixed-gear bicycle`. + # Corresponds to the JSON property `description` + # @return [String] + attr_accessor :description + + # Opaque entity ID. Some IDs may be available in + # [Google Knowledge Graph Search + # API](https://developers.google.com/knowledge-graph/). + # Corresponds to the JSON property `entityId` + # @return [String] + attr_accessor :entity_id + + # Language code for `description` in BCP-47 format. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @description = args[:description] if args.key?(:description) + @entity_id = args[:entity_id] if args.key?(:entity_id) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + include Google::Apis::Core::Hashable + + # All video frames where explicit content was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @frames = args[:frames] if args.key?(:frames) + end + end + + # Video frame level annotation results for explicit content. + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + include Google::Apis::Core::Hashable + + # Likelihood of the pornography content.. + # Corresponds to the JSON property `pornographyLikelihood` + # @return [String] + attr_accessor :pornography_likelihood + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Label annotation. 
+ class GoogleCloudVideointelligenceV2beta1LabelAnnotation + include Google::Apis::Core::Hashable + + # Common categories for the detected entity. + # E.g. when the label is `Terrier` the category is likely `dog`. And in some + # cases there might be more than one categories e.g. `Terrier` could also be + # a `pet`. + # Corresponds to the JSON property `categoryEntities` + # @return [Array] + attr_accessor :category_entities + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # All video frames where a label was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # All video segments where a label was detected. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @category_entities = args[:category_entities] if args.key?(:category_entities) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segments = args[:segments] if args.key?(:segments) + end + end + + # Video frame level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelFrame + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelSegment + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Alternative hypotheses (a.k.a. n-best list). + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + include Google::Apis::Core::Hashable + + # The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is typically provided only for the top hypothesis, and + # only for `is_final=true` results. 
Clients should not rely on the + # `confidence` field as it is not guaranteed to be accurate or consistent. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Transcript text representing the words that the user spoke. + # Corresponds to the JSON property `transcript` + # @return [String] + attr_accessor :transcript + + # A list of word-specific information for each recognized word. + # Corresponds to the JSON property `words` + # @return [Array] + attr_accessor :words + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @transcript = args[:transcript] if args.key?(:transcript) + @words = args[:words] if args.key?(:words) + end + end + + # A speech recognition result corresponding to a portion of the audio. + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + include Google::Apis::Core::Hashable + + # May contain one or more recognition hypotheses (up to the maximum specified + # in `max_alternatives`). These alternatives are ordered in terms of + # accuracy, with the top (first) alternative being the most probable, as + # ranked by the recognizer. + # Corresponds to the JSON property `alternatives` + # @return [Array] + attr_accessor :alternatives + + # Output only. The + # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + # language in this result. This language code was detected to have the most + # likelihood of being spoken in the audio. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @alternatives = args[:alternatives] if args.key?(:alternatives) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # `StreamingAnnotateVideoResponse` is the only message returned to the client + # by `StreamingAnnotateVideo`. A series of zero or more + # `StreamingAnnotateVideoResponse` messages are streamed back to the client. + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + # Corresponds to the JSON property `annotationResults` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults] + attr_accessor :annotation_results + + # GCS URI that stores annotation results of one streaming session. + # It is a directory that can hold multiple files in JSON format. + # Example uri format: + # gs://bucket_id/object_id/cloud_project_name-session_id + # Corresponds to the JSON property `annotationResultsUri` + # @return [String] + attr_accessor :annotation_results_uri + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. 
The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. 
+ # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus] + attr_accessor :error + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + @annotation_results_uri = args[:annotation_results_uri] if args.key?(:annotation_results_uri) + @error = args[:error] if args.key?(:error) + end + end + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + include Google::Apis::Core::Hashable + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotation results. + # Corresponds to the JSON property `labelAnnotations` + # @return [Array] + attr_accessor :label_annotations + + # Object tracking results. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Shot annotation results. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @label_annotations = args[:label_annotations] if args.key?(:label_annotations) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + end + end + + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV2beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV2beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV2beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Annotation progress for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + include Google::Apis::Core::Hashable + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Approximate percentage processed thus far. Guaranteed to be + # 100 when fully processed. + # Corresponds to the JSON property `progressPercent` + # @return [Fixnum] + attr_accessor :progress_percent + + # Time when the request was received. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # Time of the most recent update. + # Corresponds to the JSON property `updateTime` + # @return [String] + attr_accessor :update_time + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @progress_percent = args[:progress_percent] if args.key?(:progress_percent) + @start_time = args[:start_time] if args.key?(:start_time) + @update_time = args[:update_time] if args.key?(:update_time) + end + end + + # Annotation results for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + include Google::Apis::Core::Hashable + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. 
It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. 
If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. + # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1::GoogleRpcStatus] + attr_accessor :error + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotations on frame level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `frameLabelAnnotations` + # @return [Array] + attr_accessor :frame_label_annotations + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Label annotations on video level or user specified segment level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `segmentLabelAnnotations` + # @return [Array] + attr_accessor :segment_label_annotations + + # Shot annotations. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + # Label annotations on shot level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `shotLabelAnnotations` + # @return [Array] + attr_accessor :shot_label_annotations + + # Speech transcription. + # Corresponds to the JSON property `speechTranscriptions` + # @return [Array] + attr_accessor :speech_transcriptions + + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @error = args[:error] if args.key?(:error) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) + @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) + end + end + + # Video segment. + class GoogleCloudVideointelligenceV2beta1VideoSegment + include Google::Apis::Core::Hashable + + # Time-offset, relative to the beginning of the video, + # corresponding to the end of the segment (inclusive). 
+ # Corresponds to the JSON property `endTimeOffset` + # @return [String] + attr_accessor :end_time_offset + + # Time-offset, relative to the beginning of the video, + # corresponding to the start of the segment (inclusive). + # Corresponds to the JSON property `startTimeOffset` + # @return [String] + attr_accessor :start_time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset) + @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset) + end + end + + # Word-specific information for recognized words. Word information is only + # included in the response when certain request parameters are set, such + # as `enable_word_time_offsets`. + class GoogleCloudVideointelligenceV2beta1WordInfo + include Google::Apis::Core::Hashable + + # Output only. The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is set only for the top alternative. + # This field is not guaranteed to be accurate and users should not rely on it + # to be always provided. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time offset relative to the beginning of the audio, and + # corresponding to the end of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `endTime` + # @return [String] + attr_accessor :end_time + + # Output only. A distinct integer value is assigned for every speaker within + # the audio. This field specifies which one of those speakers was detected to + # have spoken this word. 
Value ranges from 1 up to diarization_speaker_count, + # and is only set if speaker diarization is enabled. + # Corresponds to the JSON property `speakerTag` + # @return [Fixnum] + attr_accessor :speaker_tag + + # Time offset relative to the beginning of the audio, and + # corresponding to the start of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # The word corresponding to this set of information. + # Corresponds to the JSON property `word` + # @return [String] + attr_accessor :word + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @end_time = args[:end_time] if args.key?(:end_time) + @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag) + @start_time = args[:start_time] if args.key?(:start_time) + @word = args[:word] if args.key?(:word) + end + end + # The request message for Operations.CancelOperation. 
class GoogleLongrunningCancelOperationRequest include Google::Apis::Core::Hashable diff --git a/generated/google/apis/videointelligence_v1/representations.rb b/generated/google/apis/videointelligence_v1/representations.rb index ad77b1899..badc87c46 100644 --- a/generated/google/apis/videointelligence_v1/representations.rb +++ b/generated/google/apis/videointelligence_v1/representations.rb @@ -88,6 +88,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1ShotChangeDetectionConfig class Representation < Google::Apis::Core::JsonRepresentation; end @@ -118,6 +148,30 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextDetectionConfig + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -196,6 +250,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -208,6 +292,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + class Representation < 
Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -280,6 +382,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -292,6 +424,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; 
end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -448,6 +598,150 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1Entity + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + class 
Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleLongrunningCancelOperationRequest class Representation < Google::Apis::Core::JsonRepresentation; end @@ -579,6 +873,55 @@ module Google end end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + # @private + class Representation < 
Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1ShotChangeDetectionConfig # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -628,6 +971,42 @@ module Google end end + class GoogleCloudVideointelligenceV1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextSegment, decorator: 
Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1TextDetectionConfig + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :language_hints, as: 'languageHints' + end + end + + class GoogleCloudVideointelligenceV1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -648,6 +1027,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: 
Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1VideoSegment::Representation @@ -656,6 +1037,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextAnnotation::Representation + end end @@ -672,6 +1055,8 @@ module Google property :speech_transcription_config, as: 'speechTranscriptionConfig', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscriptionConfig, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1SpeechTranscriptionConfig::Representation + property :text_detection_config, as: 'textDetectionConfig', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextDetectionConfig, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1TextDetectionConfig::Representation + end end @@ -766,6 +1151,55 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + # 
@private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedVertex, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', 
class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -785,6 +1219,35 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + 
end + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -805,6 +1268,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation @@ -813,6 +1278,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechTranscription, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1beta2TextAnnotation::Representation + end end @@ -907,6 +1374,55 @@ module Google end end + class 
GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + # @private + class Representation < 
Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -926,6 +1442,35 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextFrame::Representation + + property :segment, as: 'segment', class: 
Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -946,6 +1491,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation @@ -954,6 +1501,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation, 
decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation::Representation + end end @@ -1199,6 +1748,254 @@ module Google end end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_progress, as: 'annotationProgress', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1Entity + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :description, as: 'description' + property :entity_id, as: 'entityId' + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :pornography_likelihood, as: 'pornographyLikelihood' + property :time_offset, as: 'timeOffset' + end + end + + 
class GoogleCloudVideointelligenceV2beta1LabelAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :category_entities, as: 'categoryEntities', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelFrame::Representation + + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property 
:top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox, decorator: 
Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :transcript, as: 'transcript' + collection :words, as: 'words', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1WordInfo, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1WordInfo::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :alternatives, as: 'alternatives', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative::Representation + + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults::Representation + + property :annotation_results_uri, as: 'annotationResultsUri' + property :error, as: 'error', class: Google::Apis::VideointelligenceV1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1::GoogleRpcStatus::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property 
:explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :label_annotations, as: 'labelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class 
GoogleCloudVideointelligenceV2beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextFrame, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :input_uri, as: 'inputUri' + property :progress_percent, as: 'progressPercent' + property :start_time, as: 'startTime' + property :update_time, as: 'updateTime' + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :error, as: 'error', class: Google::Apis::VideointelligenceV1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1::GoogleRpcStatus::Representation + + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: 
Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + collection :shot_label_annotations, as: 'shotLabelAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1SpeechTranscription::Representation + + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1::GoogleCloudVideointelligenceV2beta1TextAnnotation::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :end_time_offset, as: 'endTimeOffset' + property :start_time_offset, as: 'startTimeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 
'confidence' + property :end_time, as: 'endTime' + property :speaker_tag, as: 'speakerTag' + property :start_time, as: 'startTime' + property :word, as: 'word' + end + end + class GoogleLongrunningCancelOperationRequest # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/videointelligence_v1/service.rb b/generated/google/apis/videointelligence_v1/service.rb index 9818d8710..cc062c74a 100644 --- a/generated/google/apis/videointelligence_v1/service.rb +++ b/generated/google/apis/videointelligence_v1/service.rb @@ -23,7 +23,8 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @example # require 'google/apis/videointelligence_v1' diff --git a/generated/google/apis/videointelligence_v1beta2.rb b/generated/google/apis/videointelligence_v1beta2.rb index 1beb341cb..7d29f4f5e 100644 --- a/generated/google/apis/videointelligence_v1beta2.rb +++ b/generated/google/apis/videointelligence_v1beta2.rb @@ -21,12 +21,13 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. 
# # @see https://cloud.google.com/video-intelligence/docs/ module VideointelligenceV1beta2 VERSION = 'V1beta2' - REVISION = '20190112' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/videointelligence_v1beta2/classes.rb b/generated/google/apis/videointelligence_v1beta2/classes.rb index ba0413377..15ec38f9e 100644 --- a/generated/google/apis/videointelligence_v1beta2/classes.rb +++ b/generated/google/apis/videointelligence_v1beta2/classes.rb @@ -235,6 +235,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. + # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. + # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. 
+ # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. + # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. 
+ # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -302,6 +480,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). 
+ # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -407,6 +685,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. # Corresponds to the JSON property `segmentLabelAnnotations` @@ -429,6 +712,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. 
+ # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -439,10 +729,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -872,6 +1164,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Config for SHOT_CHANGE_DETECTION. class GoogleCloudVideointelligenceV1beta2ShotChangeDetectionConfig include Google::Apis::Core::Hashable @@ -1084,6 +1554,128 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1beta2TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Config for TEXT_DETECTION. + class GoogleCloudVideointelligenceV1beta2TextDetectionConfig + include Google::Apis::Core::Hashable + + # Language hint can be specified if the language to be detected is known a + # priori. It can increase the accuracy of the detection. Language hint must + # be language code in BCP-47 format. + # Automatic language detection is performed if no hint is provided. + # Corresponds to the JSON property `languageHints` + # @return [Array] + attr_accessor :language_hints + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @language_hints = args[:language_hints] if args.key?(:language_hints) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1beta2TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1beta2TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1189,6 +1781,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. # Corresponds to the JSON property `segmentLabelAnnotations` @@ -1211,6 +1808,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. 
+ # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1221,10 +1825,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -1259,6 +1865,11 @@ module Google # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscriptionConfig] attr_accessor :speech_transcription_config + # Config for TEXT_DETECTION. + # Corresponds to the JSON property `textDetectionConfig` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextDetectionConfig] + attr_accessor :text_detection_config + def initialize(**args) update!(**args) end @@ -1270,6 +1881,7 @@ module Google @segments = args[:segments] if args.key?(:segments) @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config) @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config) + @text_detection_config = args[:text_detection_config] if args.key?(:text_detection_config) end end @@ -1572,6 +2184,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. 
+ # Range: [0, 1]. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. + # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. 
+ # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. + # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. 
+ # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. + # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -1639,6 +2429,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. 
This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1p1beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1p1beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1744,6 +2634,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -1766,6 +2661,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1776,10 +2678,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -2671,6 +3575,923 @@ module Google end end + # Video annotation progress. Included in the `metadata` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + include Google::Apis::Core::Hashable + + # Progress metadata for all videos specified in `AnnotateVideoRequest`. 
+ # Corresponds to the JSON property `annotationProgress` + # @return [Array] + attr_accessor :annotation_progress + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress) + end + end + + # Video annotation response. Included in the `response` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Annotation results for all videos specified in `AnnotateVideoRequest`. + # Corresponds to the JSON property `annotationResults` + # @return [Array] + attr_accessor :annotation_results + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + end + end + + # Detected entity from video analysis. + class GoogleCloudVideointelligenceV2beta1Entity + include Google::Apis::Core::Hashable + + # Textual description, e.g. `Fixed-gear bicycle`. + # Corresponds to the JSON property `description` + # @return [String] + attr_accessor :description + + # Opaque entity ID. Some IDs may be available in + # [Google Knowledge Graph Search + # API](https://developers.google.com/knowledge-graph/). + # Corresponds to the JSON property `entityId` + # @return [String] + attr_accessor :entity_id + + # Language code for `description` in BCP-47 format. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @description = args[:description] if args.key?(:description) + @entity_id = args[:entity_id] if args.key?(:entity_id) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + include Google::Apis::Core::Hashable + + # All video frames where explicit content was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @frames = args[:frames] if args.key?(:frames) + end + end + + # Video frame level annotation results for explicit content. + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + include Google::Apis::Core::Hashable + + # Likelihood of the pornography content.. + # Corresponds to the JSON property `pornographyLikelihood` + # @return [String] + attr_accessor :pornography_likelihood + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Label annotation. 
+ class GoogleCloudVideointelligenceV2beta1LabelAnnotation
+ include Google::Apis::Core::Hashable
+
+ # Common categories for the detected entity.
+ # E.g. when the label is `Terrier` the category is likely `dog`. And in some
+ # cases there might be more than one category, e.g. `Terrier` could also be
+ # a `pet`.
+ # Corresponds to the JSON property `categoryEntities`
+ # @return [Array]
+ attr_accessor :category_entities
+
+ # Detected entity from video analysis.
+ # Corresponds to the JSON property `entity`
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity]
+ attr_accessor :entity
+
+ # All video frames where a label was detected.
+ # Corresponds to the JSON property `frames`
+ # @return [Array]
+ attr_accessor :frames
+
+ # All video segments where a label was detected.
+ # Corresponds to the JSON property `segments`
+ # @return [Array]
+ attr_accessor :segments
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @category_entities = args[:category_entities] if args.key?(:category_entities)
+ @entity = args[:entity] if args.key?(:entity)
+ @frames = args[:frames] if args.key?(:frames)
+ @segments = args[:segments] if args.key?(:segments)
+ end
+ end
+
+ # Video frame level annotation results for label detection.
+ class GoogleCloudVideointelligenceV2beta1LabelFrame
+ include Google::Apis::Core::Hashable
+
+ # Confidence that the label is accurate. Range: [0, 1].
+ # Corresponds to the JSON property `confidence`
+ # @return [Float]
+ attr_accessor :confidence
+
+ # Time-offset, relative to the beginning of the video, corresponding to the
+ # video frame for this location. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelSegment + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top`
+ # @return [Float]
+ attr_accessor :top
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @bottom = args[:bottom] if args.key?(:bottom)
+ @left = args[:left] if args.key?(:left)
+ @right = args[:right] if args.key?(:right)
+ @top = args[:top] if args.key?(:top)
+ end
+ end
+
+ # Normalized bounding polygon for text (that might not be aligned with axis).
+ # Contains list of the corner points in clockwise order starting from
+ # top-left corner. For example, for a rectangular bounding box:
+ # When the text is horizontal it might look like:
+ # 0----1
+ # | |
+ # 3----2
+ # When it's clockwise rotated 180 degrees around the top-left corner it
+ # becomes:
+ # 2----3
+ # | |
+ # 1----0
+ # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+ # than 0, or greater than 1 due to trigonometric calculations for location of
+ # the box.
+ class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly
+ include Google::Apis::Core::Hashable
+
+ # Normalized vertices of the bounding polygon.
+ # Corresponds to the JSON property `vertices`
+ # @return [Array]
+ attr_accessor :vertices
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @vertices = args[:vertices] if args.key?(:vertices)
+ end
+ end
+
+ # A vertex represents a 2D point in the image.
+ # NOTE: the normalized vertex coordinates are relative to the original image
+ # and range from 0 to 1.
+ class GoogleCloudVideointelligenceV2beta1NormalizedVertex
+ include Google::Apis::Core::Hashable
+
+ # X coordinate.
+ # Corresponds to the JSON property `x`
+ # @return [Float]
+ attr_accessor :x
+
+ # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Alternative hypotheses (a.k.a. n-best list). + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + include Google::Apis::Core::Hashable + + # The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is typically provided only for the top hypothesis, and + # only for `is_final=true` results. 
Clients should not rely on the + # `confidence` field as it is not guaranteed to be accurate or consistent. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Transcript text representing the words that the user spoke. + # Corresponds to the JSON property `transcript` + # @return [String] + attr_accessor :transcript + + # A list of word-specific information for each recognized word. + # Corresponds to the JSON property `words` + # @return [Array] + attr_accessor :words + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @transcript = args[:transcript] if args.key?(:transcript) + @words = args[:words] if args.key?(:words) + end + end + + # A speech recognition result corresponding to a portion of the audio. + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + include Google::Apis::Core::Hashable + + # May contain one or more recognition hypotheses (up to the maximum specified + # in `max_alternatives`). These alternatives are ordered in terms of + # accuracy, with the top (first) alternative being the most probable, as + # ranked by the recognizer. + # Corresponds to the JSON property `alternatives` + # @return [Array] + attr_accessor :alternatives + + # Output only. The + # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + # language in this result. This language code was detected to have the most + # likelihood of being spoken in the audio. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @alternatives = args[:alternatives] if args.key?(:alternatives) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # `StreamingAnnotateVideoResponse` is the only message returned to the client + # by `StreamingAnnotateVideo`. A series of zero or more + # `StreamingAnnotateVideoResponse` messages are streamed back to the client. + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + # Corresponds to the JSON property `annotationResults` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults] + attr_accessor :annotation_results + + # GCS URI that stores annotation results of one streaming session. + # It is a directory that can hold multiple files in JSON format. + # Example uri format: + # gs://bucket_id/object_id/cloud_project_name-session_id + # Corresponds to the JSON property `annotationResultsUri` + # @return [String] + attr_accessor :annotation_results_uri + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. 
The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. 
+ # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus] + attr_accessor :error + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + @annotation_results_uri = args[:annotation_results_uri] if args.key?(:annotation_results_uri) + @error = args[:error] if args.key?(:error) + end + end + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + include Google::Apis::Core::Hashable + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotation results. + # Corresponds to the JSON property `labelAnnotations` + # @return [Array] + attr_accessor :label_annotations + + # Object tracking results. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Shot annotation results. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @label_annotations = args[:label_annotations] if args.key?(:label_annotations) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + end + end + + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV2beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV2beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box:
+ # When the text is horizontal it might look like:
+ # 0----1
+ # | |
+ # 3----2
+ # When it's clockwise rotated 180 degrees around the top-left corner it
+ # becomes:
+ # 2----3
+ # | |
+ # 1----0
+ # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+ # than 0, or greater than 1 due to trigonometric calculations for location of
+ # the box.
+ # Corresponds to the JSON property `rotatedBoundingBox`
+ # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly]
+ attr_accessor :rotated_bounding_box
+
+ # Timestamp of this frame.
+ # Corresponds to the JSON property `timeOffset`
+ # @return [String]
+ attr_accessor :time_offset
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box)
+ @time_offset = args[:time_offset] if args.key?(:time_offset)
+ end
+ end
+
+ # Video segment level annotation results for text detection.
+ class GoogleCloudVideointelligenceV2beta1TextSegment
+ include Google::Apis::Core::Hashable
+
+ # Confidence for the track of detected text. It is calculated as the highest
+ # over all frames where OCR detected text appears.
+ # Corresponds to the JSON property `confidence`
+ # @return [Float]
+ attr_accessor :confidence
+
+ # Information related to the frames where OCR detected text appears.
+ # Corresponds to the JSON property `frames`
+ # @return [Array]
+ attr_accessor :frames
+
+ # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Annotation progress for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + include Google::Apis::Core::Hashable + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Approximate percentage processed thus far. Guaranteed to be + # 100 when fully processed. + # Corresponds to the JSON property `progressPercent` + # @return [Fixnum] + attr_accessor :progress_percent + + # Time when the request was received. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # Time of the most recent update. + # Corresponds to the JSON property `updateTime` + # @return [String] + attr_accessor :update_time + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @progress_percent = args[:progress_percent] if args.key?(:progress_percent) + @start_time = args[:start_time] if args.key?(:start_time) + @update_time = args[:update_time] if args.key?(:update_time) + end + end + + # Annotation results for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + include Google::Apis::Core::Hashable + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. 
It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. 
If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. + # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus] + attr_accessor :error + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotations on frame level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `frameLabelAnnotations` + # @return [Array] + attr_accessor :frame_label_annotations + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Label annotations on video level or user specified segment level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `segmentLabelAnnotations` + # @return [Array] + attr_accessor :segment_label_annotations + + # Shot annotations. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + # Label annotations on shot level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `shotLabelAnnotations` + # @return [Array] + attr_accessor :shot_label_annotations + + # Speech transcription. + # Corresponds to the JSON property `speechTranscriptions` + # @return [Array] + attr_accessor :speech_transcriptions + + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @error = args[:error] if args.key?(:error) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) + @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) + end + end + + # Video segment. + class GoogleCloudVideointelligenceV2beta1VideoSegment + include Google::Apis::Core::Hashable + + # Time-offset, relative to the beginning of the video, + # corresponding to the end of the segment (inclusive). 
+ # Corresponds to the JSON property `endTimeOffset` + # @return [String] + attr_accessor :end_time_offset + + # Time-offset, relative to the beginning of the video, + # corresponding to the start of the segment (inclusive). + # Corresponds to the JSON property `startTimeOffset` + # @return [String] + attr_accessor :start_time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset) + @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset) + end + end + + # Word-specific information for recognized words. Word information is only + # included in the response when certain request parameters are set, such + # as `enable_word_time_offsets`. + class GoogleCloudVideointelligenceV2beta1WordInfo + include Google::Apis::Core::Hashable + + # Output only. The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is set only for the top alternative. + # This field is not guaranteed to be accurate and users should not rely on it + # to be always provided. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time offset relative to the beginning of the audio, and + # corresponding to the end of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `endTime` + # @return [String] + attr_accessor :end_time + + # Output only. A distinct integer value is assigned for every speaker within + # the audio. This field specifies which one of those speakers was detected to + # have spoken this word. 
Value ranges from 1 up to diarization_speaker_count, + # and is only set if speaker diarization is enabled. + # Corresponds to the JSON property `speakerTag` + # @return [Fixnum] + attr_accessor :speaker_tag + + # Time offset relative to the beginning of the audio, and + # corresponding to the start of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # The word corresponding to this set of information. + # Corresponds to the JSON property `word` + # @return [String] + attr_accessor :word + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @end_time = args[:end_time] if args.key?(:end_time) + @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag) + @start_time = args[:start_time] if args.key?(:start_time) + @word = args[:word] if args.key?(:word) + end + end + # This resource represents a long-running operation that is the result of a # network API call. 
class GoogleLongrunningOperation diff --git a/generated/google/apis/videointelligence_v1beta2/representations.rb b/generated/google/apis/videointelligence_v1beta2/representations.rb index 6f52de376..50f288997 100644 --- a/generated/google/apis/videointelligence_v1beta2/representations.rb +++ b/generated/google/apis/videointelligence_v1beta2/representations.rb @@ -70,6 +70,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -82,6 +112,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextSegment + 
class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -172,6 +220,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2ShotChangeDetectionConfig class Representation < Google::Apis::Core::JsonRepresentation; end @@ -202,6 +280,30 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextDetectionConfig + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + class Representation < 
Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -280,6 +382,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -292,6 +424,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; 
end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -448,6 +598,150 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1Entity + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + class 
Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleLongrunningOperation class Representation < Google::Apis::Core::JsonRepresentation; end @@ -532,6 +826,55 @@ module Google end end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation 
+ property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -551,6 +894,35 @@ module Google end end + class GoogleCloudVideointelligenceV1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextSegment, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -571,6 +943,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1VideoSegment::Representation @@ -579,6 +953,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1TextAnnotation::Representation + end end @@ -702,6 +1078,55 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedVertex, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedVertex::Representation + + end + end + + class 
GoogleCloudVideointelligenceV1beta2NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1beta2ShotChangeDetectionConfig # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -751,6 +1176,42 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + # @private + class Representation < 
Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1beta2TextDetectionConfig + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :language_hints, as: 'languageHints' + end + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -771,6 +1232,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation @@ -779,6 +1242,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscription, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextAnnotation::Representation + end end @@ -795,6 +1260,8 @@ module Google property :speech_transcription_config, as: 'speechTranscriptionConfig', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscriptionConfig, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2SpeechTranscriptionConfig::Representation + property :text_detection_config, as: 'textDetectionConfig', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextDetectionConfig, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1beta2TextDetectionConfig::Representation + end end @@ -889,6 +1356,55 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -908,6 +1424,35 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly::Representation + + 
property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -928,6 +1473,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation @@ -936,6 +1483,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV1p1beta1TextAnnotation::Representation + end end @@ -1181,6 +1730,254 @@ module Google end end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_progress, as: 'annotationProgress', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1Entity + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :description, as: 'description' + property :entity_id, as: 'entityId' + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation 
+ # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :pornography_likelihood, as: 'pornographyLikelihood' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :category_entities, as: 'categoryEntities', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity::Representation + + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelFrame::Representation + + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :time_offset, as: 'timeOffset' + end + end + + class 
GoogleCloudVideointelligenceV2beta1LabelSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame, decorator: 
Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :transcript, as: 'transcript' + collection :words, as: 'words', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1WordInfo, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1WordInfo::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :alternatives, as: 'alternatives', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative::Representation + + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + # @private + 
class Representation < Google::Apis::Core::JsonRepresentation + property :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults::Representation + + property :annotation_results_uri, as: 'annotationResultsUri' + property :error, as: 'error', class: Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :label_annotations, as: 'labelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + # 
@private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextFrame, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :input_uri, as: 'inputUri' + property :progress_percent, as: 'progressPercent' + property :start_time, as: 'startTime' + property :update_time, as: 'updateTime' + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + 
property :error, as: 'error', class: Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1beta2::GoogleRpcStatus::Representation + + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + collection :shot_label_annotations, as: 'shotLabelAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :speech_transcriptions, as: 
'speechTranscriptions', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1SpeechTranscription::Representation + + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1beta2::GoogleCloudVideointelligenceV2beta1TextAnnotation::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :end_time_offset, as: 'endTimeOffset' + property :start_time_offset, as: 'startTimeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :end_time, as: 'endTime' + property :speaker_tag, as: 'speakerTag' + property :start_time, as: 'startTime' + property :word, as: 'word' + end + end + class GoogleLongrunningOperation # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/videointelligence_v1beta2/service.rb b/generated/google/apis/videointelligence_v1beta2/service.rb index 3801435fa..443db02ad 100644 --- a/generated/google/apis/videointelligence_v1beta2/service.rb +++ b/generated/google/apis/videointelligence_v1beta2/service.rb @@ -23,7 +23,8 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. 
# # @example # require 'google/apis/videointelligence_v1beta2' diff --git a/generated/google/apis/videointelligence_v1p1beta1.rb b/generated/google/apis/videointelligence_v1p1beta1.rb index a86c67a39..1dd4a475e 100644 --- a/generated/google/apis/videointelligence_v1p1beta1.rb +++ b/generated/google/apis/videointelligence_v1p1beta1.rb @@ -21,12 +21,13 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @see https://cloud.google.com/video-intelligence/docs/ module VideointelligenceV1p1beta1 VERSION = 'V1p1beta1' - REVISION = '20190112' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/videointelligence_v1p1beta1/classes.rb b/generated/google/apis/videointelligence_v1p1beta1/classes.rb index 81b9bdf3d..59dd16592 100644 --- a/generated/google/apis/videointelligence_v1p1beta1/classes.rb +++ b/generated/google/apis/videointelligence_v1p1beta1/classes.rb @@ -235,6 +235,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -302,6 +480,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -407,6 +685,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -429,6 +712,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -439,10 +729,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -745,6 +1037,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -812,6 +1282,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1beta2TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1beta2TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1beta2TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -917,6 +1487,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -939,6 +1514,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -949,10 +1531,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -1382,6 +1966,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Config for SHOT_CHANGE_DETECTION. class GoogleCloudVideointelligenceV1p1beta1ShotChangeDetectionConfig include Google::Apis::Core::Hashable @@ -1594,6 +2356,128 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Config for TEXT_DETECTION. + class GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig + include Google::Apis::Core::Hashable + + # Language hint can be specified if the language to be detected is known a + # priori. It can increase the accuracy of the detection. Language hint must + # be language code in BCP-47 format. + # Automatic language detection is performed if no hint is provided. + # Corresponds to the JSON property `languageHints` + # @return [Array] + attr_accessor :language_hints + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @language_hints = args[:language_hints] if args.key?(:language_hints) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1p1beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1p1beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1699,6 +2583,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. # Corresponds to the JSON property `segmentLabelAnnotations` @@ -1721,6 +2610,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. 
+ # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1731,10 +2627,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -1769,6 +2667,11 @@ module Google # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig] attr_accessor :speech_transcription_config + # Config for TEXT_DETECTION. + # Corresponds to the JSON property `textDetectionConfig` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig] + attr_accessor :text_detection_config + def initialize(**args) update!(**args) end @@ -1780,6 +2683,7 @@ module Google @segments = args[:segments] if args.key?(:segments) @shot_change_detection_config = args[:shot_change_detection_config] if args.key?(:shot_change_detection_config) @speech_transcription_config = args[:speech_transcription_config] if args.key?(:speech_transcription_config) + @text_detection_config = args[:text_detection_config] if args.key?(:text_detection_config) end end @@ -2671,6 +3575,923 @@ module Google end end + # Video annotation progress. 
Included in the `metadata` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + include Google::Apis::Core::Hashable + + # Progress metadata for all videos specified in `AnnotateVideoRequest`. + # Corresponds to the JSON property `annotationProgress` + # @return [Array] + attr_accessor :annotation_progress + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress) + end + end + + # Video annotation response. Included in the `response` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Annotation results for all videos specified in `AnnotateVideoRequest`. + # Corresponds to the JSON property `annotationResults` + # @return [Array] + attr_accessor :annotation_results + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + end + end + + # Detected entity from video analysis. + class GoogleCloudVideointelligenceV2beta1Entity + include Google::Apis::Core::Hashable + + # Textual description, e.g. `Fixed-gear bicycle`. + # Corresponds to the JSON property `description` + # @return [String] + attr_accessor :description + + # Opaque entity ID. Some IDs may be available in + # [Google Knowledge Graph Search + # API](https://developers.google.com/knowledge-graph/). + # Corresponds to the JSON property `entityId` + # @return [String] + attr_accessor :entity_id + + # Language code for `description` in BCP-47 format. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @description = args[:description] if args.key?(:description) + @entity_id = args[:entity_id] if args.key?(:entity_id) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + include Google::Apis::Core::Hashable + + # All video frames where explicit content was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @frames = args[:frames] if args.key?(:frames) + end + end + + # Video frame level annotation results for explicit content. + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + include Google::Apis::Core::Hashable + + # Likelihood of the pornography content.. + # Corresponds to the JSON property `pornographyLikelihood` + # @return [String] + attr_accessor :pornography_likelihood + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Label annotation. 
+ class GoogleCloudVideointelligenceV2beta1LabelAnnotation + include Google::Apis::Core::Hashable + + # Common categories for the detected entity. + # E.g. when the label is `Terrier` the category is likely `dog`. And in some + # cases there might be more than one categories e.g. `Terrier` could also be + # a `pet`. + # Corresponds to the JSON property `categoryEntities` + # @return [Array] + attr_accessor :category_entities + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # All video frames where a label was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # All video segments where a label was detected. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @category_entities = args[:category_entities] if args.key?(:category_entities) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segments = args[:segments] if args.key?(:segments) + end + end + + # Video frame level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelFrame + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelSegment + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Alternative hypotheses (a.k.a. n-best list). + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + include Google::Apis::Core::Hashable + + # The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is typically provided only for the top hypothesis, and + # only for `is_final=true` results. 
Clients should not rely on the + # `confidence` field as it is not guaranteed to be accurate or consistent. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Transcript text representing the words that the user spoke. + # Corresponds to the JSON property `transcript` + # @return [String] + attr_accessor :transcript + + # A list of word-specific information for each recognized word. + # Corresponds to the JSON property `words` + # @return [Array] + attr_accessor :words + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @transcript = args[:transcript] if args.key?(:transcript) + @words = args[:words] if args.key?(:words) + end + end + + # A speech recognition result corresponding to a portion of the audio. + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + include Google::Apis::Core::Hashable + + # May contain one or more recognition hypotheses (up to the maximum specified + # in `max_alternatives`). These alternatives are ordered in terms of + # accuracy, with the top (first) alternative being the most probable, as + # ranked by the recognizer. + # Corresponds to the JSON property `alternatives` + # @return [Array] + attr_accessor :alternatives + + # Output only. The + # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + # language in this result. This language code was detected to have the most + # likelihood of being spoken in the audio. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @alternatives = args[:alternatives] if args.key?(:alternatives) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # `StreamingAnnotateVideoResponse` is the only message returned to the client + # by `StreamingAnnotateVideo`. A series of zero or more + # `StreamingAnnotateVideoResponse` messages are streamed back to the client. + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + # Corresponds to the JSON property `annotationResults` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults] + attr_accessor :annotation_results + + # GCS URI that stores annotation results of one streaming session. + # It is a directory that can hold multiple files in JSON format. + # Example uri format: + # gs://bucket_id/object_id/cloud_project_name-session_id + # Corresponds to the JSON property `annotationResultsUri` + # @return [String] + attr_accessor :annotation_results_uri + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. 
The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. 
+ # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus] + attr_accessor :error + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + @annotation_results_uri = args[:annotation_results_uri] if args.key?(:annotation_results_uri) + @error = args[:error] if args.key?(:error) + end + end + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + include Google::Apis::Core::Hashable + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotation results. + # Corresponds to the JSON property `labelAnnotations` + # @return [Array] + attr_accessor :label_annotations + + # Object tracking results. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Shot annotation results. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @label_annotations = args[:label_annotations] if args.key?(:label_annotations) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + end + end + + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV2beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV2beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV2beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Annotation progress for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + include Google::Apis::Core::Hashable + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Approximate percentage processed thus far. Guaranteed to be + # 100 when fully processed. + # Corresponds to the JSON property `progressPercent` + # @return [Fixnum] + attr_accessor :progress_percent + + # Time when the request was received. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # Time of the most recent update. + # Corresponds to the JSON property `updateTime` + # @return [String] + attr_accessor :update_time + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @progress_percent = args[:progress_percent] if args.key?(:progress_percent) + @start_time = args[:start_time] if args.key?(:start_time) + @update_time = args[:update_time] if args.key?(:update_time) + end + end + + # Annotation results for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + include Google::Apis::Core::Hashable + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. 
It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. 
If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. + # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus] + attr_accessor :error + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotations on frame level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `frameLabelAnnotations` + # @return [Array] + attr_accessor :frame_label_annotations + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Label annotations on video level or user specified segment level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `segmentLabelAnnotations` + # @return [Array] + attr_accessor :segment_label_annotations + + # Shot annotations. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + # Label annotations on shot level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `shotLabelAnnotations` + # @return [Array] + attr_accessor :shot_label_annotations + + # Speech transcription. + # Corresponds to the JSON property `speechTranscriptions` + # @return [Array] + attr_accessor :speech_transcriptions + + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @error = args[:error] if args.key?(:error) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) + @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) + end + end + + # Video segment. + class GoogleCloudVideointelligenceV2beta1VideoSegment + include Google::Apis::Core::Hashable + + # Time-offset, relative to the beginning of the video, + # corresponding to the end of the segment (inclusive). 
+ # Corresponds to the JSON property `endTimeOffset` + # @return [String] + attr_accessor :end_time_offset + + # Time-offset, relative to the beginning of the video, + # corresponding to the start of the segment (inclusive). + # Corresponds to the JSON property `startTimeOffset` + # @return [String] + attr_accessor :start_time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset) + @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset) + end + end + + # Word-specific information for recognized words. Word information is only + # included in the response when certain request parameters are set, such + # as `enable_word_time_offsets`. + class GoogleCloudVideointelligenceV2beta1WordInfo + include Google::Apis::Core::Hashable + + # Output only. The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is set only for the top alternative. + # This field is not guaranteed to be accurate and users should not rely on it + # to be always provided. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time offset relative to the beginning of the audio, and + # corresponding to the end of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `endTime` + # @return [String] + attr_accessor :end_time + + # Output only. A distinct integer value is assigned for every speaker within + # the audio. This field specifies which one of those speakers was detected to + # have spoken this word. 
Value ranges from 1 up to diarization_speaker_count, + # and is only set if speaker diarization is enabled. + # Corresponds to the JSON property `speakerTag` + # @return [Fixnum] + attr_accessor :speaker_tag + + # Time offset relative to the beginning of the audio, and + # corresponding to the start of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # The word corresponding to this set of information. + # Corresponds to the JSON property `word` + # @return [String] + attr_accessor :word + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @end_time = args[:end_time] if args.key?(:end_time) + @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag) + @start_time = args[:start_time] if args.key?(:start_time) + @word = args[:word] if args.key?(:word) + end + end + # This resource represents a long-running operation that is the result of a # network API call. 
class GoogleLongrunningOperation diff --git a/generated/google/apis/videointelligence_v1p1beta1/representations.rb b/generated/google/apis/videointelligence_v1p1beta1/representations.rb index 835d8cba8..a4414cefa 100644 --- a/generated/google/apis/videointelligence_v1p1beta1/representations.rb +++ b/generated/google/apis/videointelligence_v1p1beta1/representations.rb @@ -70,6 +70,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -82,6 +112,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -154,6 +202,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -166,6 +244,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + class 
Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -256,6 +352,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1ShotChangeDetectionConfig class Representation < Google::Apis::Core::JsonRepresentation; end @@ -286,6 +412,30 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + class Representation < 
Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -448,6 +598,150 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1Entity + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + 
end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + 
class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleLongrunningOperation class Representation < Google::Apis::Core::JsonRepresentation; end @@ -532,6 +826,55 @@ module Google end end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + # @private + class Representation < 
Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -551,6 +894,35 @@ module Google end end + class GoogleCloudVideointelligenceV1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -571,6 +943,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation @@ -579,6 +953,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1TextAnnotation::Representation + end end @@ -673,6 +1049,55 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex, decorator: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -692,6 
+1117,35 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -712,6 +1166,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation @@ -720,6 +1176,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation::Representation + end end @@ -843,6 +1301,55 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class 
GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox, decorator: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1p1beta1ShotChangeDetectionConfig # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -892,6 +1399,42 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :language_hints, as: 'languageHints' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextFrame::Representation + + property :segment, as: 'segment', class: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -912,6 +1455,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation @@ -920,6 +1465,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation::Representation + end end @@ -936,6 +1483,8 @@ module Google property :speech_transcription_config, as: 'speechTranscriptionConfig', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscriptionConfig::Representation + property :text_detection_config, as: 'textDetectionConfig', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV1p1beta1TextDetectionConfig::Representation + end end @@ -1181,6 +1730,254 @@ module Google end end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_progress, as: 'annotationProgress', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1Entity + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :description, as: 
'description' + property :entity_id, as: 'entityId' + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :pornography_likelihood, as: 'pornographyLikelihood' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :category_entities, as: 'categoryEntities', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelFrame::Representation + + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelSegment::Representation + + end + end + + class 
GoogleCloudVideointelligenceV2beta1LabelFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection 
:frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :transcript, as: 'transcript' + collection :words, as: 'words', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1WordInfo, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1WordInfo::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :alternatives, as: 'alternatives', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative, decorator: 
Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative::Representation + + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults::Representation + + property :annotation_results_uri, as: 'annotationResultsUri' + property :error, as: 'error', class: Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :label_annotations, as: 'labelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', 
class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextFrame, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property 
:input_uri, as: 'inputUri' + property :progress_percent, as: 'progressPercent' + property :start_time, as: 'startTime' + property :update_time, as: 'updateTime' + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :error, as: 'error', class: Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleRpcStatus::Representation + + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation 
+ + collection :shot_label_annotations, as: 'shotLabelAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1SpeechTranscription::Representation + + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p1beta1::GoogleCloudVideointelligenceV2beta1TextAnnotation::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :end_time_offset, as: 'endTimeOffset' + property :start_time_offset, as: 'startTimeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :end_time, as: 'endTime' + property :speaker_tag, as: 'speakerTag' + property :start_time, as: 'startTime' + property :word, as: 'word' + end + end + class GoogleLongrunningOperation # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/videointelligence_v1p1beta1/service.rb b/generated/google/apis/videointelligence_v1p1beta1/service.rb index f6107508d..9d8bd0418 100644 --- a/generated/google/apis/videointelligence_v1p1beta1/service.rb +++ b/generated/google/apis/videointelligence_v1p1beta1/service.rb @@ -23,7 +23,8 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. 
It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @example # require 'google/apis/videointelligence_v1p1beta1' diff --git a/generated/google/apis/videointelligence_v1p2beta1.rb b/generated/google/apis/videointelligence_v1p2beta1.rb index d9789a9a9..60f0adcc5 100644 --- a/generated/google/apis/videointelligence_v1p2beta1.rb +++ b/generated/google/apis/videointelligence_v1p2beta1.rb @@ -21,12 +21,13 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @see https://cloud.google.com/video-intelligence/docs/ module VideointelligenceV1p2beta1 VERSION = 'V1p2beta1' - REVISION = '20190122' + REVISION = '20190220' # View and manage your data across Google Cloud Platform services AUTH_CLOUD_PLATFORM = 'https://www.googleapis.com/auth/cloud-platform' diff --git a/generated/google/apis/videointelligence_v1p2beta1/classes.rb b/generated/google/apis/videointelligence_v1p2beta1/classes.rb index 938c5492f..15f2a4cde 100644 --- a/generated/google/apis/videointelligence_v1p2beta1/classes.rb +++ b/generated/google/apis/videointelligence_v1p2beta1/classes.rb @@ -235,6 +235,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. 
+ # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. + # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -302,6 +480,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -407,6 +685,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -429,6 +712,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -439,10 +729,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -745,6 +1037,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -812,6 +1282,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1beta2TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1beta2TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1beta2TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -917,6 +1487,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -939,6 +1514,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -949,10 +1531,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -1255,6 +1839,184 @@ module Google end end + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + # Alternative hypotheses (a.k.a. n-best list). class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative include Google::Apis::Core::Hashable @@ -1322,6 +2084,106 @@ module Google end end + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. 
+ class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV1p1beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV1p1beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + # Annotation progress for a single video. class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress include Google::Apis::Core::Hashable @@ -1427,6 +2289,11 @@ module Google # @return [String] attr_accessor :input_uri + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + # Label annotations on video level or user specified segment level. # There is exactly one element for each unique label. 
# Corresponds to the JSON property `segmentLabelAnnotations` @@ -1449,6 +2316,13 @@ module Google # @return [Array] attr_accessor :speech_transcriptions + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + def initialize(**args) update!(**args) end @@ -1459,10 +2333,12 @@ module Google @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) end end @@ -2699,6 +3575,923 @@ module Google end end + # Video annotation progress. Included in the `metadata` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + include Google::Apis::Core::Hashable + + # Progress metadata for all videos specified in `AnnotateVideoRequest`. 
+ # Corresponds to the JSON property `annotationProgress` + # @return [Array] + attr_accessor :annotation_progress + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_progress = args[:annotation_progress] if args.key?(:annotation_progress) + end + end + + # Video annotation response. Included in the `response` + # field of the `Operation` returned by the `GetOperation` + # call of the `google::longrunning::Operations` service. + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Annotation results for all videos specified in `AnnotateVideoRequest`. + # Corresponds to the JSON property `annotationResults` + # @return [Array] + attr_accessor :annotation_results + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + end + end + + # Detected entity from video analysis. + class GoogleCloudVideointelligenceV2beta1Entity + include Google::Apis::Core::Hashable + + # Textual description, e.g. `Fixed-gear bicycle`. + # Corresponds to the JSON property `description` + # @return [String] + attr_accessor :description + + # Opaque entity ID. Some IDs may be available in + # [Google Knowledge Graph Search + # API](https://developers.google.com/knowledge-graph/). + # Corresponds to the JSON property `entityId` + # @return [String] + attr_accessor :entity_id + + # Language code for `description` in BCP-47 format. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @description = args[:description] if args.key?(:description) + @entity_id = args[:entity_id] if args.key?(:entity_id) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + include Google::Apis::Core::Hashable + + # All video frames where explicit content was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @frames = args[:frames] if args.key?(:frames) + end + end + + # Video frame level annotation results for explicit content. + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + include Google::Apis::Core::Hashable + + # Likelihood of the pornography content.. + # Corresponds to the JSON property `pornographyLikelihood` + # @return [String] + attr_accessor :pornography_likelihood + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @pornography_likelihood = args[:pornography_likelihood] if args.key?(:pornography_likelihood) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Label annotation. 
+ class GoogleCloudVideointelligenceV2beta1LabelAnnotation + include Google::Apis::Core::Hashable + + # Common categories for the detected entity. + # E.g. when the label is `Terrier` the category is likely `dog`. And in some + # cases there might be more than one categories e.g. `Terrier` could also be + # a `pet`. + # Corresponds to the JSON property `categoryEntities` + # @return [Array] + attr_accessor :category_entities + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # All video frames where a label was detected. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # All video segments where a label was detected. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @category_entities = args[:category_entities] if args.key?(:category_entities) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segments = args[:segments] if args.key?(:segments) + end + end + + # Video frame level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelFrame + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time-offset, relative to the beginning of the video, corresponding to the + # video frame for this location. 
+ # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for label detection. + class GoogleCloudVideointelligenceV2beta1LabelSegment + include Google::Apis::Core::Hashable + + # Confidence that the label is accurate. Range: [0, 1]. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + include Google::Apis::Core::Hashable + + # Bottom Y coordinate. + # Corresponds to the JSON property `bottom` + # @return [Float] + attr_accessor :bottom + + # Left X coordinate. + # Corresponds to the JSON property `left` + # @return [Float] + attr_accessor :left + + # Right X coordinate. + # Corresponds to the JSON property `right` + # @return [Float] + attr_accessor :right + + # Top Y coordinate. 
+ # Corresponds to the JSON property `top` + # @return [Float] + attr_accessor :top + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @bottom = args[:bottom] if args.key?(:bottom) + @left = args[:left] if args.key?(:left) + @right = args[:right] if args.key?(:right) + @top = args[:top] if args.key?(:top) + end + end + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + include Google::Apis::Core::Hashable + + # Normalized vertices of the bounding polygon. + # Corresponds to the JSON property `vertices` + # @return [Array] + attr_accessor :vertices + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @vertices = args[:vertices] if args.key?(:vertices) + end + end + + # A vertex represents a 2D point in the image. + # NOTE: the normalized vertex coordinates are relative to the original image + # and range from 0 to 1. + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + include Google::Apis::Core::Hashable + + # X coordinate. + # Corresponds to the JSON property `x` + # @return [Float] + attr_accessor :x + + # Y coordinate. 
+ # Corresponds to the JSON property `y` + # @return [Float] + attr_accessor :y + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @x = args[:x] if args.key?(:x) + @y = args[:y] if args.key?(:y) + end + end + + # Annotations corresponding to one tracked object. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + include Google::Apis::Core::Hashable + + # Object category's labeling confidence of this track. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Detected entity from video analysis. + # Corresponds to the JSON property `entity` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity] + attr_accessor :entity + + # Information corresponding to all frames where this object track appears. + # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + # messages in frames. + # Streaming mode: it can only be one ObjectTrackingFrame message in frames. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. + # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + # Streaming mode ONLY. + # In streaming mode, we do not know the end time of a tracked object + # before it is completed. Hence, there is no VideoSegment info returned. + # Instead, we provide a unique identifiable integer track_id so that + # the customers can correlate the results of the ongoing + # ObjectTrackAnnotation of the same track_id over time. 
+ # Corresponds to the JSON property `trackId` + # @return [Fixnum] + attr_accessor :track_id + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @entity = args[:entity] if args.key?(:entity) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + @track_id = args[:track_id] if args.key?(:track_id) + end + end + + # Video frame level annotations for object detection and tracking. This field + # stores per frame location, time offset, and confidence. + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + include Google::Apis::Core::Hashable + + # Normalized bounding box. + # The normalized vertex coordinates are relative to the original image. + # Range: [0, 1]. + # Corresponds to the JSON property `normalizedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox] + attr_accessor :normalized_bounding_box + + # The timestamp of the frame in microseconds. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @normalized_bounding_box = args[:normalized_bounding_box] if args.key?(:normalized_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Alternative hypotheses (a.k.a. n-best list). + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + include Google::Apis::Core::Hashable + + # The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is typically provided only for the top hypothesis, and + # only for `is_final=true` results. 
Clients should not rely on the + # `confidence` field as it is not guaranteed to be accurate or consistent. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Transcript text representing the words that the user spoke. + # Corresponds to the JSON property `transcript` + # @return [String] + attr_accessor :transcript + + # A list of word-specific information for each recognized word. + # Corresponds to the JSON property `words` + # @return [Array] + attr_accessor :words + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @transcript = args[:transcript] if args.key?(:transcript) + @words = args[:words] if args.key?(:words) + end + end + + # A speech recognition result corresponding to a portion of the audio. + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + include Google::Apis::Core::Hashable + + # May contain one or more recognition hypotheses (up to the maximum specified + # in `max_alternatives`). These alternatives are ordered in terms of + # accuracy, with the top (first) alternative being the most probable, as + # ranked by the recognizer. + # Corresponds to the JSON property `alternatives` + # @return [Array] + attr_accessor :alternatives + + # Output only. The + # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + # language in this result. This language code was detected to have the most + # likelihood of being spoken in the audio. 
+ # Corresponds to the JSON property `languageCode` + # @return [String] + attr_accessor :language_code + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @alternatives = args[:alternatives] if args.key?(:alternatives) + @language_code = args[:language_code] if args.key?(:language_code) + end + end + + # `StreamingAnnotateVideoResponse` is the only message returned to the client + # by `StreamingAnnotateVideo`. A series of zero or more + # `StreamingAnnotateVideoResponse` messages are streamed back to the client. + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + include Google::Apis::Core::Hashable + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + # Corresponds to the JSON property `annotationResults` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults] + attr_accessor :annotation_results + + # GCS URI that stores annotation results of one streaming session. + # It is a directory that can hold multiple files in JSON format. + # Example uri format: + # gs://bucket_id/object_id/cloud_project_name-session_id + # Corresponds to the JSON property `annotationResultsUri` + # @return [String] + attr_accessor :annotation_results_uri + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. 
The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. 
+ # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus] + attr_accessor :error + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @annotation_results = args[:annotation_results] if args.key?(:annotation_results) + @annotation_results_uri = args[:annotation_results_uri] if args.key?(:annotation_results_uri) + @error = args[:error] if args.key?(:error) + end + end + + # Streaming annotation results corresponding to a portion of the video + # that is currently being processed. + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + include Google::Apis::Core::Hashable + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotation results. + # Corresponds to the JSON property `labelAnnotations` + # @return [Array] + attr_accessor :label_annotations + + # Object tracking results. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Shot annotation results. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @label_annotations = args[:label_annotations] if args.key?(:label_annotations) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + end + end + + # Annotations related to one detected OCR text snippet. This will contain the + # corresponding text, confidence value, and frame level information for each + # detection. + class GoogleCloudVideointelligenceV2beta1TextAnnotation + include Google::Apis::Core::Hashable + + # All video segments where OCR detected text appears. + # Corresponds to the JSON property `segments` + # @return [Array] + attr_accessor :segments + + # The detected text. + # Corresponds to the JSON property `text` + # @return [String] + attr_accessor :text + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @segments = args[:segments] if args.key?(:segments) + @text = args[:text] if args.key?(:text) + end + end + + # Video frame level annotation results for text annotation (OCR). + # Contains information regarding timestamp and bounding box locations for the + # frames containing detected OCR text snippets. + class GoogleCloudVideointelligenceV2beta1TextFrame + include Google::Apis::Core::Hashable + + # Normalized bounding polygon for text (that might not be aligned with axis). + # Contains list of the corner points in clockwise order starting from + # top-left corner. 
For example, for a rectangular bounding box: + # When the text is horizontal it might look like: + # 0----1 + # | | + # 3----2 + # When it's clockwise rotated 180 degrees around the top-left corner it + # becomes: + # 2----3 + # | | + # 1----0 + # and the vertex order will still be (0, 1, 2, 3). Note that values can be less + # than 0, or greater than 1 due to trignometric calculations for location of + # the box. + # Corresponds to the JSON property `rotatedBoundingBox` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly] + attr_accessor :rotated_bounding_box + + # Timestamp of this frame. + # Corresponds to the JSON property `timeOffset` + # @return [String] + attr_accessor :time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @rotated_bounding_box = args[:rotated_bounding_box] if args.key?(:rotated_bounding_box) + @time_offset = args[:time_offset] if args.key?(:time_offset) + end + end + + # Video segment level annotation results for text detection. + class GoogleCloudVideointelligenceV2beta1TextSegment + include Google::Apis::Core::Hashable + + # Confidence for the track of detected text. It is calculated as the highest + # over all frames where OCR detected text appears. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Information related to the frames where OCR detected text appears. + # Corresponds to the JSON property `frames` + # @return [Array] + attr_accessor :frames + + # Video segment. 
+ # Corresponds to the JSON property `segment` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment] + attr_accessor :segment + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @frames = args[:frames] if args.key?(:frames) + @segment = args[:segment] if args.key?(:segment) + end + end + + # Annotation progress for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + include Google::Apis::Core::Hashable + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Approximate percentage processed thus far. Guaranteed to be + # 100 when fully processed. + # Corresponds to the JSON property `progressPercent` + # @return [Fixnum] + attr_accessor :progress_percent + + # Time when the request was received. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # Time of the most recent update. + # Corresponds to the JSON property `updateTime` + # @return [String] + attr_accessor :update_time + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @progress_percent = args[:progress_percent] if args.key?(:progress_percent) + @start_time = args[:start_time] if args.key?(:start_time) + @update_time = args[:update_time] if args.key?(:update_time) + end + end + + # Annotation results for a single video. + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + include Google::Apis::Core::Hashable + + # The `Status` type defines a logical error model that is suitable for different + # programming environments, including REST APIs and RPC APIs. 
It is used by + # [gRPC](https://github.com/grpc). The error model is designed to be: + # - Simple to use and understand for most users + # - Flexible enough to meet unexpected needs + # # Overview + # The `Status` message contains three pieces of data: error code, error message, + # and error details. The error code should be an enum value of + # google.rpc.Code, but it may accept additional error codes if needed. The + # error message should be a developer-facing English message that helps + # developers *understand* and *resolve* the error. If a localized user-facing + # error message is needed, put the localized message in the error details or + # localize it in the client. The optional error details may contain arbitrary + # information about the error. There is a predefined set of error detail types + # in the package `google.rpc` that can be used for common error conditions. + # # Language mapping + # The `Status` message is the logical representation of the error model, but it + # is not necessarily the actual wire format. When the `Status` message is + # exposed in different client libraries and different wire protocols, it can be + # mapped differently. For example, it will likely be mapped to some exceptions + # in Java, but more likely mapped to some error codes in C. + # # Other uses + # The error model and the `Status` message can be used in a variety of + # environments, either with or without APIs, to provide a + # consistent developer experience across different environments. + # Example uses of this error model include: + # - Partial errors. If a service needs to return partial errors to the client, + # it may embed the `Status` in the normal response to indicate the partial + # errors. + # - Workflow errors. A typical workflow has multiple steps. Each step may + # have a `Status` message for error reporting. + # - Batch operations. 
If a client uses batch request and batch response, the + # `Status` message should be used directly inside batch response, one for + # each error sub-response. + # - Asynchronous operations. If an API call embeds asynchronous operation + # results in its response, the status of those operations should be + # represented directly using the `Status` message. + # - Logging. If some API errors are stored in logs, the message `Status` could + # be used directly after any stripping needed for security/privacy reasons. + # Corresponds to the JSON property `error` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus] + attr_accessor :error + + # Explicit content annotation (based on per-frame visual signals only). + # If no explicit content has been detected in a frame, no annotations are + # present for that frame. + # Corresponds to the JSON property `explicitAnnotation` + # @return [Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation] + attr_accessor :explicit_annotation + + # Label annotations on frame level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `frameLabelAnnotations` + # @return [Array] + attr_accessor :frame_label_annotations + + # Video file location in + # [Google Cloud Storage](https://cloud.google.com/storage/). + # Corresponds to the JSON property `inputUri` + # @return [String] + attr_accessor :input_uri + + # Annotations for list of objects detected and tracked in video. + # Corresponds to the JSON property `objectAnnotations` + # @return [Array] + attr_accessor :object_annotations + + # Label annotations on video level or user specified segment level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `segmentLabelAnnotations` + # @return [Array] + attr_accessor :segment_label_annotations + + # Shot annotations. Each shot is represented as a video segment. 
+ # Corresponds to the JSON property `shotAnnotations` + # @return [Array] + attr_accessor :shot_annotations + + # Label annotations on shot level. + # There is exactly one element for each unique label. + # Corresponds to the JSON property `shotLabelAnnotations` + # @return [Array] + attr_accessor :shot_label_annotations + + # Speech transcription. + # Corresponds to the JSON property `speechTranscriptions` + # @return [Array] + attr_accessor :speech_transcriptions + + # OCR text detection and tracking. + # Annotations for list of detected text snippets. Each will have list of + # frame information associated with it. + # Corresponds to the JSON property `textAnnotations` + # @return [Array] + attr_accessor :text_annotations + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @error = args[:error] if args.key?(:error) + @explicit_annotation = args[:explicit_annotation] if args.key?(:explicit_annotation) + @frame_label_annotations = args[:frame_label_annotations] if args.key?(:frame_label_annotations) + @input_uri = args[:input_uri] if args.key?(:input_uri) + @object_annotations = args[:object_annotations] if args.key?(:object_annotations) + @segment_label_annotations = args[:segment_label_annotations] if args.key?(:segment_label_annotations) + @shot_annotations = args[:shot_annotations] if args.key?(:shot_annotations) + @shot_label_annotations = args[:shot_label_annotations] if args.key?(:shot_label_annotations) + @speech_transcriptions = args[:speech_transcriptions] if args.key?(:speech_transcriptions) + @text_annotations = args[:text_annotations] if args.key?(:text_annotations) + end + end + + # Video segment. + class GoogleCloudVideointelligenceV2beta1VideoSegment + include Google::Apis::Core::Hashable + + # Time-offset, relative to the beginning of the video, + # corresponding to the end of the segment (inclusive). 
+ # Corresponds to the JSON property `endTimeOffset` + # @return [String] + attr_accessor :end_time_offset + + # Time-offset, relative to the beginning of the video, + # corresponding to the start of the segment (inclusive). + # Corresponds to the JSON property `startTimeOffset` + # @return [String] + attr_accessor :start_time_offset + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @end_time_offset = args[:end_time_offset] if args.key?(:end_time_offset) + @start_time_offset = args[:start_time_offset] if args.key?(:start_time_offset) + end + end + + # Word-specific information for recognized words. Word information is only + # included in the response when certain request parameters are set, such + # as `enable_word_time_offsets`. + class GoogleCloudVideointelligenceV2beta1WordInfo + include Google::Apis::Core::Hashable + + # Output only. The confidence estimate between 0.0 and 1.0. A higher number + # indicates an estimated greater likelihood that the recognized words are + # correct. This field is set only for the top alternative. + # This field is not guaranteed to be accurate and users should not rely on it + # to be always provided. + # The default of 0.0 is a sentinel value indicating `confidence` was not set. + # Corresponds to the JSON property `confidence` + # @return [Float] + attr_accessor :confidence + + # Time offset relative to the beginning of the audio, and + # corresponding to the end of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `endTime` + # @return [String] + attr_accessor :end_time + + # Output only. A distinct integer value is assigned for every speaker within + # the audio. This field specifies which one of those speakers was detected to + # have spoken this word. 
Value ranges from 1 up to diarization_speaker_count, + # and is only set if speaker diarization is enabled. + # Corresponds to the JSON property `speakerTag` + # @return [Fixnum] + attr_accessor :speaker_tag + + # Time offset relative to the beginning of the audio, and + # corresponding to the start of the spoken word. This field is only set if + # `enable_word_time_offsets=true` and only in the top hypothesis. This is an + # experimental feature and the accuracy of the time offset can vary. + # Corresponds to the JSON property `startTime` + # @return [String] + attr_accessor :start_time + + # The word corresponding to this set of information. + # Corresponds to the JSON property `word` + # @return [String] + attr_accessor :word + + def initialize(**args) + update!(**args) + end + + # Update properties of this object + def update!(**args) + @confidence = args[:confidence] if args.key?(:confidence) + @end_time = args[:end_time] if args.key?(:end_time) + @speaker_tag = args[:speaker_tag] if args.key?(:speaker_tag) + @start_time = args[:start_time] if args.key?(:start_time) + @word = args[:word] if args.key?(:word) + end + end + # This resource represents a long-running operation that is the result of a # network API call. 
class GoogleLongrunningOperation diff --git a/generated/google/apis/videointelligence_v1p2beta1/representations.rb b/generated/google/apis/videointelligence_v1p2beta1/representations.rb index 047480a9c..a6563ae82 100644 --- a/generated/google/apis/videointelligence_v1p2beta1/representations.rb +++ b/generated/google/apis/videointelligence_v1p2beta1/representations.rb @@ -70,6 +70,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -82,6 +112,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -154,6 +202,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -166,6 +244,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + class 
Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -238,6 +334,36 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative class Representation < Google::Apis::Core::JsonRepresentation; end @@ -250,6 +376,24 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + class Representation < 
Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress class Representation < Google::Apis::Core::JsonRepresentation; end @@ -454,6 +598,150 @@ module Google include Google::Apis::Core::JsonObjectSupport end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1Entity + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + class Representation < Google::Apis::Core::JsonRepresentation; end + + include 
Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class 
GoogleCloudVideointelligenceV2beta1TextSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + + class GoogleCloudVideointelligenceV2beta1WordInfo + class Representation < Google::Apis::Core::JsonRepresentation; end + + include Google::Apis::Core::JsonObjectSupport + end + class GoogleLongrunningOperation class Representation < Google::Apis::Core::JsonRepresentation; end @@ -538,6 +826,55 @@ module Google end end + class GoogleCloudVideointelligenceV1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingAnnotation 
+ # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -557,6 +894,35 @@ module Google end end + class GoogleCloudVideointelligenceV1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextSegment::Representation + + property :text, as: 'text' + end + 
end + + class GoogleCloudVideointelligenceV1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -577,6 +943,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1VideoSegment::Representation @@ -585,6 +953,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1TextAnnotation::Representation + end end @@ -679,6 +1049,55 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1beta2NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class 
GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1beta2ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1beta2SpeechRecognitionAlternative # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -698,6 +1117,35 @@ module Google end end + class GoogleCloudVideointelligenceV1beta2TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextSegment, decorator: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1beta2TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1beta2TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -718,6 +1166,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation, decorator: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2VideoSegment::Representation @@ -726,6 +1176,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1beta2TextAnnotation::Representation + end end @@ -820,6 +1272,55 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex, decorator: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV1p1beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV1p1beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + class GoogleCloudVideointelligenceV1p1beta1SpeechRecognitionAlternative # @private class Representation < 
Google::Apis::Core::JsonRepresentation @@ -839,6 +1340,35 @@ module Google end end + class GoogleCloudVideointelligenceV1p1beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV1p1beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation + + end + end + class GoogleCloudVideointelligenceV1p1beta1VideoAnnotationProgress # @private class Representation < Google::Apis::Core::JsonRepresentation @@ -859,6 +1389,8 @@ module Google collection :frame_label_annotations, as: 'frameLabelAnnotations', class: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation property :input_uri, as: 'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1ObjectTrackingAnnotation::Representation + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1LabelAnnotation::Representation collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1VideoSegment::Representation @@ -867,6 +1399,8 @@ module Google collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1SpeechTranscription::Representation + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV1p1beta1TextAnnotation::Representation + end end @@ -1196,6 +1730,254 @@ module Google end end + class GoogleCloudVideointelligenceV2beta1AnnotateVideoProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_progress, as: 'annotationProgress', class: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1AnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoAnnotationResults::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1Entity + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :description, as: 'description' + property :entity_id, as: 'entityId' + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentFrame::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1ExplicitContentFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :pornography_likelihood, as: 'pornographyLikelihood' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :category_entities, as: 'categoryEntities', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelFrame::Representation + + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1LabelFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :time_offset, as: 'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1LabelSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :bottom, as: 'bottom' + property :left, as: 'left' + property :right, as: 'right' + property :top, as: 'top' + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :vertices, as: 'vertices', class: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedVertex, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedVertex::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1NormalizedVertex + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :x, as: 'x' + property :y, as: 'y' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :entity, as: 'entity', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1Entity::Representation + + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + property :track_id, :numeric_string => true, as: 'trackId' + end + end + + class GoogleCloudVideointelligenceV2beta1ObjectTrackingFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :normalized_bounding_box, as: 'normalizedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingBox::Representation + + property :time_offset, as: 'timeOffset' + end + end + + class 
GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :transcript, as: 'transcript' + collection :words, as: 'words', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1WordInfo, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1WordInfo::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1SpeechTranscription + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :alternatives, as: 'alternatives', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1SpeechRecognitionAlternative::Representation + + property :language_code, as: 'languageCode' + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingAnnotateVideoResponse + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :annotation_results, as: 'annotationResults', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults::Representation + + property :annotation_results_uri, as: 'annotationResultsUri' + property :error, as: 'error', class: Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1StreamingVideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :explicit_annotation, as: 'explicitAnnotation', class: 
Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :label_annotations, as: 'labelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1TextAnnotation + # @private + class Representation < Google::Apis::Core::JsonRepresentation + collection :segments, as: 'segments', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextSegment::Representation + + property :text, as: 'text' + end + end + + class GoogleCloudVideointelligenceV2beta1TextFrame + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :rotated_bounding_box, as: 'rotatedBoundingBox', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1NormalizedBoundingPoly::Representation + + property :time_offset, as: 
'timeOffset' + end + end + + class GoogleCloudVideointelligenceV2beta1TextSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + collection :frames, as: 'frames', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextFrame, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextFrame::Representation + + property :segment, as: 'segment', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationProgress + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :input_uri, as: 'inputUri' + property :progress_percent, as: 'progressPercent' + property :start_time, as: 'startTime' + property :update_time, as: 'updateTime' + end + end + + class GoogleCloudVideointelligenceV2beta1VideoAnnotationResults + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :error, as: 'error', class: Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleRpcStatus::Representation + + property :explicit_annotation, as: 'explicitAnnotation', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ExplicitContentAnnotation::Representation + + collection :frame_label_annotations, as: 'frameLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + property :input_uri, as: 
'inputUri' + collection :object_annotations, as: 'objectAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1ObjectTrackingAnnotation::Representation + + collection :segment_label_annotations, as: 'segmentLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :shot_annotations, as: 'shotAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1VideoSegment::Representation + + collection :shot_label_annotations, as: 'shotLabelAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1LabelAnnotation::Representation + + collection :speech_transcriptions, as: 'speechTranscriptions', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1SpeechTranscription, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1SpeechTranscription::Representation + + collection :text_annotations, as: 'textAnnotations', class: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextAnnotation, decorator: Google::Apis::VideointelligenceV1p2beta1::GoogleCloudVideointelligenceV2beta1TextAnnotation::Representation + + end + end + + class GoogleCloudVideointelligenceV2beta1VideoSegment + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :end_time_offset, as: 'endTimeOffset' + property :start_time_offset, as: 'startTimeOffset' + end + end 
+ + class GoogleCloudVideointelligenceV2beta1WordInfo + # @private + class Representation < Google::Apis::Core::JsonRepresentation + property :confidence, as: 'confidence' + property :end_time, as: 'endTime' + property :speaker_tag, as: 'speakerTag' + property :start_time, as: 'startTime' + property :word, as: 'word' + end + end + class GoogleLongrunningOperation # @private class Representation < Google::Apis::Core::JsonRepresentation diff --git a/generated/google/apis/videointelligence_v1p2beta1/service.rb b/generated/google/apis/videointelligence_v1p2beta1/service.rb index b6b3ea8fb..27aa746bd 100644 --- a/generated/google/apis/videointelligence_v1p2beta1/service.rb +++ b/generated/google/apis/videointelligence_v1p2beta1/service.rb @@ -23,7 +23,8 @@ module Google # Cloud Video Intelligence API # # Detects objects, explicit content, and scene changes in videos. It also - # specifies the region for annotation and transcribes speech to text. + # specifies the region for annotation and transcribes speech to text. Supports + # both asynchronous API and streaming API. # # @example # require 'google/apis/videointelligence_v1p2beta1'