feat: Automated regeneration of dataproc v1beta2 client (#2232)

This commit is contained in:
Yoshi Automation Bot 2021-01-07 10:11:21 -08:00 committed by GitHub
parent 304470a852
commit 9c0b7f6d4a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 20 additions and 18 deletions

View File

@@ -1,7 +1,7 @@
# Release history for google-apis-dataproc_v1beta2 # Release history for google-apis-dataproc_v1beta2
### v0.1.0 (2021-01-01) ### v0.1.0 (2021-01-07)
* Regenerated from discovery document revision 20201210 * Regenerated from discovery document revision 20201229
* Regenerated using generator version 0.1.0 * Regenerated using generator version 0.1.1

View File

@@ -394,7 +394,8 @@ module Google
# cluster's staging bucket according to the Compute Engine zone where your # cluster's staging bucket according to the Compute Engine zone where your
# cluster is deployed, and then create and manage this project-level, per- # cluster is deployed, and then create and manage this project-level, per-
# location bucket (see Dataproc staging bucket (https://cloud.google.com/ # location bucket (see Dataproc staging bucket (https://cloud.google.com/
# dataproc/docs/concepts/configuring-clusters/staging-bucket)). # dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field
# requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.
# Corresponds to the JSON property `configBucket` # Corresponds to the JSON property `configBucket`
# @return [String] # @return [String]
attr_accessor :config_bucket attr_accessor :config_bucket
@@ -469,7 +470,8 @@ module Google
# your cluster's temp bucket according to the Compute Engine zone where your # your cluster's temp bucket according to the Compute Engine zone where your
# cluster is deployed, and then create and manage this project-level, per- # cluster is deployed, and then create and manage this project-level, per-
# location bucket. The default bucket has a TTL of 90 days, but you can use any # location bucket. The default bucket has a TTL of 90 days, but you can use any
# TTL (or none) if you specify a bucket. # TTL (or none) if you specify a bucket. This field requires a Cloud Storage
# bucket name, not a URI to a Cloud Storage bucket.
# Corresponds to the JSON property `tempBucket` # Corresponds to the JSON property `tempBucket`
# @return [String] # @return [String]
attr_accessor :temp_bucket attr_accessor :temp_bucket
@@ -3496,9 +3498,9 @@ module Google
# @return [String] # @return [String]
attr_accessor :dag_start_time attr_accessor :dag_start_time
# Output only. The timeout duration for the DAG of jobs. Minimum timeout # Output only. The timeout duration for the DAG of jobs, expressed in seconds (
# duration is 10 minutes and maximum is 24 hours, expressed as a google.protobuf. # see JSON representation of duration (https://developers.google.com/protocol-
# Duration. For example, "1800" = 1800 seconds/30 minutes duration. # buffers/docs/proto3#json)).
# Corresponds to the JSON property `dagTimeout` # Corresponds to the JSON property `dagTimeout`
# @return [String] # @return [String]
attr_accessor :dag_timeout attr_accessor :dag_timeout
@@ -3624,13 +3626,13 @@ module Google
# @return [String] # @return [String]
attr_accessor :create_time attr_accessor :create_time
# Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON
# "d" suffixes for second, minute, hour, and day duration values, respectively. # representation of duration (https://developers.google.com/protocol-buffers/
# The timeout duration must be from 10 minutes ("10m") to 24 hours ("24h" or "1d" # docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to
# ). The timer begins when the first job is submitted. If the workflow is # 24 hours ("86400s"). The timer begins when the first job is submitted. If the
# running at the end of the timeout period, any remaining jobs are cancelled, # workflow is running at the end of the timeout period, any remaining jobs are
# the workflow is ended, and if the workflow was running on a managed cluster, # cancelled, the workflow is ended, and if the workflow was running on a managed
# the cluster is deleted. # cluster, the cluster is deleted.
# Corresponds to the JSON property `dagTimeout` # Corresponds to the JSON property `dagTimeout`
# @return [String] # @return [String]
attr_accessor :dag_timeout attr_accessor :dag_timeout

View File

@@ -19,10 +19,10 @@ module Google
GEM_VERSION = "0.1.0" GEM_VERSION = "0.1.0"
# Version of the code generator used to generate this client # Version of the code generator used to generate this client
GENERATOR_VERSION = "0.1.0" GENERATOR_VERSION = "0.1.1"
# Revision of the discovery document this client was generated from # Revision of the discovery document this client was generated from
REVISION = "20201210" REVISION = "20201229"
end end
end end
end end

View File

@@ -4,7 +4,7 @@
"git": { "git": {
"name": ".", "name": ".",
"remote": "https://github.com/googleapis/google-api-ruby-client.git", "remote": "https://github.com/googleapis/google-api-ruby-client.git",
"sha": "033efab58aeef5d2b0ba4e8d75d0caf227dfbd5e" "sha": "6de8b4ee653db67c0e789203767a89b32fcf8a51"
} }
} }
] ]