From 7f910bcc4fc4704947ccfd3ceed015d16b9e00c2 Mon Sep 17 00:00:00 2001
From: Google APIs
Date: Fri, 21 Feb 2020 09:37:03 -0800
Subject: [PATCH] Update Dataproc v1beta2 client.

PiperOrigin-RevId: 296451205

---
 .../v1beta2/autoscaling_policies.proto | 5 +-
 google/cloud/dataproc/v1beta2/clusters.proto | 110 +++++-----
 google/cloud/dataproc/v1beta2/jobs.proto | 194 +++++++++---------
 .../cloud/dataproc/v1beta2/operations.proto | 3 +-
 google/cloud/dataproc/v1beta2/shared.proto | 3 +-
 .../dataproc/v1beta2/workflow_templates.proto | 27 +--
 6 files changed, 164 insertions(+), 178 deletions(-)

diff --git a/google/cloud/dataproc/v1beta2/autoscaling_policies.proto b/google/cloud/dataproc/v1beta2/autoscaling_policies.proto
index 36d507c8..e5d16fd9 100644
--- a/google/cloud/dataproc/v1beta2/autoscaling_policies.proto
+++ b/google/cloud/dataproc/v1beta2/autoscaling_policies.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -30,7 +29,7 @@ option java_outer_classname = "AutoscalingPoliciesProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
 // The API interface for managing autoscaling policies in the
-// Google Cloud Dataproc API.
+// Cloud Dataproc API.
 service AutoscalingPolicyService {
   option (google.api.default_host) = "dataproc.googleapis.com";
   option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
diff --git a/google/cloud/dataproc/v1beta2/clusters.proto b/google/cloud/dataproc/v1beta2/clusters.proto
index 4b2ee649..0b277e23 100644
--- a/google/cloud/dataproc/v1beta2/clusters.proto
+++ b/google/cloud/dataproc/v1beta2/clusters.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -20,7 +19,7 @@ package google.cloud.dataproc.v1beta2;
 import "google/api/annotations.proto";
 import "google/api/client.proto";
 import "google/api/field_behavior.proto";
-import "google/cloud/dataproc/v1beta2/operations.proto";
+import "google/api/resource.proto";
 import "google/cloud/dataproc/v1beta2/shared.proto";
 import "google/longrunning/operations.proto";
 import "google/protobuf/duration.proto";
@@ -40,7 +39,7 @@ service ClusterController {
 
   // Creates a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
   rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1beta2/projects/{project_id}/regions/{region}/clusters"
@@ -55,7 +54,7 @@
 
   // Updates a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
   rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       patch: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
@@ -70,7 +69,7 @@
 
   // Deletes a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
   rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       delete: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
@@ -101,7 +100,7 @@
 
   // Gets cluster diagnostic information. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
   // After the operation completes,
   // [Operation.response][google.longrunning.Operation.response]
   // contains
@@ -129,7 +128,7 @@ message Cluster {
   // unique. Names of deleted clusters can be reused.
   string cluster_name = 2 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The cluster config. Note that Cloud Dataproc may set
+  // Required. The cluster config. Note that Dataproc may set
   // default values, and values may change when clusters are updated.
   ClusterConfig config = 3 [(google.api.field_behavior) = REQUIRED];
 
@@ -148,7 +147,7 @@ message Cluster {
   // Output only. The previous cluster status.
   repeated ClusterStatus status_history = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
 
-  // Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
+  // Output only. A cluster UUID (Unique Universal Identifier). Dataproc
   // generates this value when it creates the cluster.
   string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
 
@@ -161,14 +160,14 @@ message Cluster {
 
 // The cluster config.
 message ClusterConfig {
-  // Optional. A Google Cloud Storage bucket used to stage job
+  // Optional. A Cloud Storage bucket used to stage job
   // dependencies, config files, and job driver console output.
   // If you do not specify a staging bucket, Cloud
   // Dataproc will determine a Cloud Storage location (US,
-  // ASIA, or EU) for your cluster's staging bucket according to the Google
+  // ASIA, or EU) for your cluster's staging bucket according to the
   // Compute Engine zone where your cluster is deployed, and then create
   // and manage this project-level, per-location bucket (see
-  // [Cloud Dataproc staging
+  // [Dataproc staging
   // bucket](/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL]; @@ -244,7 +243,7 @@ message AutoscalingConfig { // * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` // * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` // - // Note that the policy must be in the same project and Cloud Dataproc region. + // Note that the policy must be in the same project and Dataproc region. string policy_uri = 1 [(google.api.field_behavior) = OPTIONAL]; } @@ -260,7 +259,7 @@ message EncryptionConfig { message GceClusterConfig { // Optional. The zone where the Compute Engine cluster will be located. // On a create request, it is required in the "global" region. If omitted - // in a non-global Cloud Dataproc region, the service will pick a zone in the + // in a non-global Dataproc region, the service will pick a zone in the // corresponding Compute Engine region. On a get request, zone will always be // present. // @@ -302,17 +301,17 @@ message GceClusterConfig { // configured to be accessible without external IP addresses. bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The service account of the instances. Defaults to the default - // Compute Engine service account. Custom service accounts need - // permissions equivalent to the following IAM roles: + // Optional. The [Dataproc service + // account](/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc) + // (also see [VM Data Plane + // identity](/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) + // used by Dataproc cluster VM instances to access Google Cloud Platform + // services. // - // * roles/logging.logWriter - // * roles/storage.objectAdmin - // - // (see - // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts - // for more information). - // Example: `[account_id]@[project_id].iam.gserviceaccount.com` + // If not specified, the + // [Compute Engine default service + // account](/compute/docs/access/service-accounts#default_service_account) + // is used. string service_account = 8 [(google.api.field_behavior) = OPTIONAL]; // Optional. The URIs of service account scopes to be included in @@ -351,7 +350,7 @@ message InstanceGroupConfig { // For master instance groups, must be set to 1. int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL]; - // Output only. The list of instance names. Cloud Dataproc derives the names + // Output only. The list of instance names. Dataproc derives the names // from `cluster_name`, `num_instances`, and the instance group. repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -368,7 +367,7 @@ message InstanceGroupConfig { // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` // * `n1-standard-2` // - // **Auto Zone Exception**: If you are using the Cloud Dataproc + // **Auto Zone Exception**: If you are using the Dataproc // [Auto Zone // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) // feature, you must use the short name of the machine type @@ -392,7 +391,7 @@ message InstanceGroupConfig { repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL]; // Specifies the minimum cpu platform for the Instance Group. 
-  // See [Cloud Dataproc→Minimum CPU Platform]
+  // See [Dataproc→Minimum CPU Platform]
   // (/dataproc/docs/concepts/compute/dataproc-min-cpu).
   string min_cpu_platform = 9;
 }
@@ -420,7 +419,7 @@ message AcceleratorConfig {
   // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
   // * `nvidia-tesla-k80`
   //
-  // **Auto Zone Exception**: If you are using the Cloud Dataproc
+  // **Auto Zone Exception**: If you are using the Dataproc
   // [Auto Zone
   // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
   // feature, you must use the short name of the accelerator type
@@ -452,29 +451,31 @@ message DiskConfig {
 
 // Specifies the cluster auto-delete schedule configuration.
 message LifecycleConfig {
-  // Optional. The duration to keep the cluster alive while idling.
-  // Passing this threshold will cause the cluster to be
-  // deleted. Valid range: **[10m, 14d]**.
-  //
-  // Example: **"10m"**, the minimum value, to delete the
-  // cluster when it has had no jobs running for 10 minutes.
+  // Optional. The duration to keep the cluster alive while idling (when no jobs
+  // are running). Passing this threshold will cause the cluster to be
+  // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+  // representation of
+  // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
   google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL];
 
   // Either the exact time the cluster should be deleted at or
   // the cluster maximum age.
   oneof ttl {
-    // Optional. The time when cluster will be auto-deleted.
-    google.protobuf.Timestamp auto_delete_time = 2;
+    // Optional. The time when cluster will be auto-deleted (see JSON representation of
+    // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+    google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL];
 
     // Optional. The lifetime duration of cluster. The cluster will be
-    // auto-deleted at the end of this period. Valid range: **[10m, 14d]**.
-    //
-    // Example: **"1d"**, to delete the cluster 1 day after its creation..
-    google.protobuf.Duration auto_delete_ttl = 3;
+    // auto-deleted at the end of this period. Minimum value is 10 minutes;
+    // maximum value is 14 days (see JSON representation of
+    // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+    google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL];
   }
 
   // Output only. The time when cluster became idle (most recent job finished)
-  // and became eligible for deletion due to idleness.
+  // and became eligible for deletion due to idleness (see JSON representation
+  // of
+  // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
   google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
@@ -560,7 +561,10 @@ message NodeInitializationAction {
   string executable_file = 1 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. Amount of time executable has to complete. Default is
-  // 10 minutes. Cluster creation fails with an explanatory error message (the
+  // 10 minutes (see JSON representation of
+  // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+  //
+  // Cluster creation fails with an explanatory error message (the
   // name of the executable that caused the error and the exceeded timeout
   // period) if the executable is not completed at end of the timeout period.
google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL]; @@ -602,7 +606,7 @@ message ClusterStatus { UNHEALTHY = 1; // The agent-reported status is out of date (may occur if - // Cloud Dataproc loses communication with Agent). + // Dataproc loses communication with Agent). // // Applies to RUNNING state. STALE_STATUS = 2; @@ -614,7 +618,8 @@ message ClusterStatus { // Output only. Optional details of cluster's state. string detail = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. Time when this state was entered. + // Output only. Time when this state was entered (see JSON representation of + // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Additional state information that includes @@ -625,7 +630,7 @@ message ClusterStatus { // Specifies the selection and config of software inside the cluster. message SoftwareConfig { // Optional. The version of software inside the cluster. It must be one of the - // supported [Cloud Dataproc + // supported [Dataproc // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions), // such as "1.2" (including a subminor version, such as "1.2.29"), or the // ["preview" @@ -675,7 +680,7 @@ message CreateClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster to create. @@ -701,7 +706,7 @@ message UpdateClusterRequest { // cluster belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 5 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -715,7 +720,8 @@ message UpdateClusterRequest { // interrupting jobs in progress. Timeout specifies how long to wait for jobs // in progress to finish before forcefully removing nodes (and potentially // interrupting jobs). Default timeout is 0 (for forceful decommission), and - // the maximum allowed timeout is 1 day. + // the maximum allowed timeout is 1 day (see JSON representation of + // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). // // Only supported on Dataproc image versions 1.2 and higher. google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL]; @@ -802,7 +808,7 @@ message DeleteClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -832,7 +838,7 @@ message GetClusterRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The cluster name. @@ -845,7 +851,7 @@ message ListClustersRequest { // belongs to. 
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 4 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. A filter constraining the clusters to list. Filters are
@@ -893,7 +899,7 @@ message DiagnoseClusterRequest {
   // belongs to.
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Required. The Cloud Dataproc region in which to handle the request.
+  // Required. The Dataproc region in which to handle the request.
   string region = 3 [(google.api.field_behavior) = REQUIRED];
 
   // Required. The cluster name.
diff --git a/google/cloud/dataproc/v1beta2/jobs.proto b/google/cloud/dataproc/v1beta2/jobs.proto
index c1e643c9..3208822f 100644
--- a/google/cloud/dataproc/v1beta2/jobs.proto
+++ b/google/cloud/dataproc/v1beta2/jobs.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -20,6 +19,7 @@ package google.cloud.dataproc.v1beta2;
 import "google/api/annotations.proto";
 import "google/api/client.proto";
 import "google/api/field_behavior.proto";
+import "google/longrunning/operations.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
@@ -70,9 +70,9 @@ service JobController {
 
   // Starts a job cancellation request. To access the job resource
   // after cancellation, call
-  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+  // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
   // or
-  // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+  // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
   rpc CancelJob(CancelJobRequest) returns (Job) {
     option (google.api.http) = {
       post: "/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel"
@@ -132,7 +132,7 @@ message LoggingConfig {
   map<string, Level> driver_log_levels = 2;
 }
 
-// A Cloud Dataproc job for running
+// A Dataproc job for running
 // [Apache Hadoop
 // MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
 // jobs on [Apache Hadoop
@@ -159,33 +159,33 @@ message HadoopJob {
   // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
   // job properties, since a collision may occur that causes an incorrect job
   // submission.
-  repeated string args = 3;
+  repeated string args = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Jar file URIs to add to the CLASSPATHs of the
   // Hadoop driver and tasks.
-  repeated string jar_file_uris = 4;
+  repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
   // to the working directory of Hadoop drivers and distributed tasks. Useful
   // for naively parallel tasks.
-  repeated string file_uris = 5;
+  repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of archives to be extracted in the working directory of
   // Hadoop drivers and tasks. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, or .zip.
-  repeated string archive_uris = 6;
+  repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure Hadoop.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in /etc/hadoop/conf/*-site and
   // classes in user code.
-  map<string, string> properties = 7;
+  map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 8;
+  LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
+// A Dataproc job for running [Apache Spark](http://spark.apache.org/)
 // applications on YARN.
 // The specification of the main method to call to drive the job.
 // Specify either the jar file that contains the main class or the main class
 // name. To pass both a main jar and a main class in that jar, add the jar to
 // `CommonJob.jar_file_uris`, and then specify the main class name in
 // `main_class`.
 message SparkJob {
   // Optional. The arguments to pass to the driver. Do not include arguments,
   // such as `--conf`, that can be set as job properties, since a collision may
   // occur that causes an incorrect job submission.
-  repeated string args = 3;
+  repeated string args = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
   // Spark driver and tasks.
-  repeated string jar_file_uris = 4;
+  repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of files to be copied to the working directory of
   // Spark drivers and distributed tasks. Useful for naively parallel tasks.
-  repeated string file_uris = 5;
+  repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of archives to be extracted in the working directory
   // of Spark drivers and tasks. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, and .zip.
-  repeated string archive_uris = 6;
+  repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure Spark.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in
   // /etc/spark/conf/spark-defaults.conf and classes in user code.
-  map<string, string> properties = 7;
+  map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 8;
+  LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// A Cloud Dataproc job for running
+// A Dataproc job for running
 // [Apache
 // PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
 // applications on YARN.
 message PySparkJob {
   // Optional. The arguments to pass to the driver. Do not include arguments,
   // such as `--conf`, that can be set as job properties, since a collision may
   // occur that causes an incorrect job submission.
-  repeated string args = 2;
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS file URIs of Python files to pass to the PySpark
   // framework. Supported file types: .py, .egg, and .zip.
-  repeated string python_file_uris = 3;
+  repeated string python_file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
   // Python driver and tasks.
-  repeated string jar_file_uris = 4;
+  repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of files to be copied to the working directory of
   // Python drivers and distributed tasks. Useful for naively parallel tasks.
-  repeated string file_uris = 5;
+  repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of archives to be extracted in the working directory of
   // Python drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
-  repeated string archive_uris = 6;
+  repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure PySpark.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in
   // /etc/spark/conf/spark-defaults.conf and classes in user code.
-  map<string, string> properties = 7;
+  map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 8;
+  LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A list of queries to run on a cluster.
@@ -289,7 +289,7 @@ message QueryList {
   repeated string queries = 1 [(google.api.field_behavior) = REQUIRED];
 }
 
-// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
+// A Dataproc job for running [Apache Hive](https://hive.apache.org/)
 // queries on YARN.
 message HiveJob {
   // Required. The sequence of Hive queries to execute, specified as either
   // an HCFS file URI or a list of queries.
 
   // Optional. Whether to continue executing queries if a query fails.
   // The default value is `false`. Setting to `true` can be useful when
   // executing independent parallel queries.
-  bool continue_on_failure = 3;
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Mapping of query variable names to values (equivalent to the
   // Hive command: `SET name="value";`).
-  map<string, string> script_variables = 4;
+  map<string, string> script_variables = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names and values, used to configure Hive.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
   // /etc/hive/conf/hive-site.xml, and classes in user code.
-  map<string, string> properties = 5;
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of jar files to add to the CLASSPATH of the
   // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
   // and UDFs.
-  repeated string jar_file_uris = 6;
+  repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// A Cloud Dataproc job for running [Apache Spark
+// A Dataproc job for running [Apache Spark
 // SQL](http://spark.apache.org/sql/) queries.
 message SparkSqlJob {
   // Required. The sequence of Spark SQL queries to execute, specified as
   // either an HCFS file URI or as a list of queries.
 
   // Optional. Mapping of query variable names to values (equivalent to the
   // Spark SQL command: SET `name="value";`).
-  map<string, string> script_variables = 3;
+  map<string, string> script_variables = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure
   // Spark SQL's SparkConf. Properties that conflict with values set by the
-  // Cloud Dataproc API may be overwritten.
-  map<string, string> properties = 4;
+  // Dataproc API may be overwritten.
+  map<string, string> properties = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
-  repeated string jar_file_uris = 56;
+  repeated string jar_file_uris = 56 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 6;
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
+// A Dataproc job for running [Apache Pig](https://pig.apache.org/)
 // queries on YARN.
 message PigJob {
   // Required. The sequence of Pig queries to execute, specified as an HCFS
   // file URI or a list of queries.
 
   // Optional. Whether to continue executing queries if a query fails.
   // The default value is `false`. Setting to `true` can be useful when
   // executing independent parallel queries.
-  bool continue_on_failure = 3;
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Mapping of query variable names to values (equivalent to the Pig
   // command: `name=[value]`).
-  map<string, string> script_variables = 4;
+  map<string, string> script_variables = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure Pig.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
   // /etc/pig/conf/pig.properties, and classes in user code.
-  map<string, string> properties = 5;
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of jar files to add to the CLASSPATH of
   // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
-  repeated string jar_file_uris = 6;
+  repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 7;
+  LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// A Cloud Dataproc job for running
+// A Dataproc job for running
 // [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
 // applications on YARN.
 message SparkRJob {
   // Required. The HCFS URI of the main R file to use as the driver.
   // Must be a .R file.
   string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. The arguments to pass to the driver. Do not include arguments,
   // such as `--conf`, that can be set as job properties, since a collision may
   // occur that causes an incorrect job submission.
-  repeated string args = 2;
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of files to be copied to the working directory of
   // R drivers and distributed tasks. Useful for naively parallel tasks.
-  repeated string file_uris = 3;
+  repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. HCFS URIs of archives to be extracted in the working directory of
   // Spark drivers and tasks. Supported file types:
   // .jar, .tar, .tar.gz, .tgz, and .zip.
-  repeated string archive_uris = 4;
+  repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. A mapping of property names to values, used to configure SparkR.
-  // Properties that conflict with values set by the Cloud Dataproc API may be
+  // Properties that conflict with values set by the Dataproc API may be
   // overwritten. Can include properties set in
   // /etc/spark/conf/spark-defaults.conf and classes in user code.
-  map<string, string> properties = 5;
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. The runtime log config for job execution.
-  LoggingConfig logging_config = 6;
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// Cloud Dataproc job config.
+// Dataproc job config.
 message JobPlacement {
   // Required. The name of the cluster where the job will be submitted.
   string cluster_name = 1 [(google.api.field_behavior) = REQUIRED];
 
-  // Output only. A cluster UUID generated by the Cloud Dataproc service when
+  // Output only. A cluster UUID generated by the Dataproc service when
   // the job is submitted.
-  string cluster_uuid = 2;
+  string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
 
-// Cloud Dataproc job status.
+// Dataproc job status.
 message JobStatus {
   // The job state.
   enum State {
@@ -488,7 +488,7 @@
     QUEUED = 2;
 
     // The agent-reported status is out of date, which may be caused by a
-    // loss of communication between the agent and Cloud Dataproc. If the
+    // loss of communication between the agent and Dataproc. If the
     // agent does not send a timely update, the job will fail.
     //
     // Applies to RUNNING state.
     STALE_STATUS = 3;
   }
 
   // Output only. A state message specifying the overall job state.
-  State state = 1;
+  State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
 
-  // Output only. Optional job state details, such as an error
+  // Output only. Optional Job state details, such as an error
   // description if the state is ERROR.
-  string details = 2;
+  string details = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. The time when this state was entered.
-  google.protobuf.Timestamp state_start_time = 6;
+  google.protobuf.Timestamp state_start_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. Additional state information, which includes
   // status reported by the agent.
-  Substate substate = 7;
+  Substate substate = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
 
 // Encapsulates the full scoping used to reference a job.
@@ -517,12 +517,11 @@ message JobReference {
   string project_id = 1 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. The job ID, which must be unique within the project.
-  //
   // The ID must contain only letters (a-z, A-Z), numbers (0-9),
   // underscores (_), or hyphens (-). The maximum length is 100 characters.
   //
   // If not specified by the caller, the job ID will be provided by the server.
-  string job_id = 2;
+  string job_id = 2 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // A YARN application created by a job. Application information is a subset of
@@ -571,20 +570,20 @@ message YarnApplication {
   // Output only. The numerical progress of the application, from 1 to 100.
   float progress = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
 
-  // Optional. Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or
+  // Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or
   // TimelineServer that provides application-specific information. The URL uses
   // the internal hostname, and requires a proxy server for resolution and,
   // possibly, access.
   string tracking_url = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
 
-// A Cloud Dataproc job resource.
+// A Dataproc job resource.
 message Job {
   // Optional. The fully qualified reference to the job, which can be used to
   // obtain the equivalent REST path of the job resource. If this property
   // is not specified when a job is created, the server generates a
   // job_id.
-  JobReference reference = 1;
+  JobReference reference = 1 [(google.api.field_behavior) = OPTIONAL];
 
   // Required. Job information, including how, when, and where to
   // run the job.
@@ -592,54 +591,47 @@ message Job {
 
   // Required. The application/framework-specific portion of the job.
   oneof type_job {
-    // Job is a Hadoop job.
     HadoopJob hadoop_job = 3;
 
-    // Job is a Spark job.
     SparkJob spark_job = 4;
 
-    // Job is a Pyspark job.
     PySparkJob pyspark_job = 5;
 
-    // Job is a Hive job.
     HiveJob hive_job = 6;
 
-    // Job is a Pig job.
     PigJob pig_job = 7;
 
-    // Job is a SparkR job.
     SparkRJob spark_r_job = 21;
 
-    // Job is a SparkSql job.
     SparkSqlJob spark_sql_job = 12;
   }
 
   // Output only. The job status. Additional application-specific
   // status information may be contained in the type_job
   // and yarn_applications fields.
-  JobStatus status = 8;
+  JobStatus status = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. The previous job status.
-  repeated JobStatus status_history = 13;
+  repeated JobStatus status_history = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. The collection of YARN applications spun up by this job.
   //
   // **Beta** Feature: This report is available for testing purposes only. It
   // may be changed before final release.
-  repeated YarnApplication yarn_applications = 9;
+  repeated YarnApplication yarn_applications = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. The email address of the user submitting the job. For jobs
   // submitted on the cluster, the address is username@hostname.
-  string submitted_by = 10;
+  string submitted_by = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. A URI pointing to the location of the stdout of the job's
   // driver program.
-  string driver_output_resource_uri = 17;
+  string driver_output_resource_uri = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. If present, the location of miscellaneous control files
   // which may be used as part of job setup and handling. If not present,
   // control files may be placed in the same location as `driver_output_uri`.
-  string driver_control_files_uri = 15;
+  string driver_control_files_uri = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Optional. The labels to associate with this job.
   // Label **keys** must contain 1 to 63 characters, and must conform to
   // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
   // Label **values** may be empty, but, if present, must contain 1 to 63
   // characters, and must conform to [RFC
   // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
   // associated with a job.
-  map<string, string> labels = 18;
+  map<string, string> labels = 18 [(google.api.field_behavior) = OPTIONAL];
 
   // Optional. Job scheduling configuration.
-  JobScheduling scheduling = 20;
+  JobScheduling scheduling = 20 [(google.api.field_behavior) = OPTIONAL];
 
   // Output only. A UUID that uniquely identifies a job within the project
   // over time. This is in contrast to a user-settable reference.job_id that
   // may be reused over time.
-  string job_uuid = 22;
+  string job_uuid = 22 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
 
 // Job scheduling options.
@@ -669,7 +661,7 @@ message JobScheduling { // 4 times within 10 minute window. // // Maximum value is 10. - int32 max_failures_per_hour = 1; + int32 max_failures_per_hour = 1 [(google.api.field_behavior) = OPTIONAL]; } // A request to submit a job. @@ -678,7 +670,7 @@ message SubmitJobRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The job resource. @@ -695,7 +687,7 @@ message SubmitJobRequest { // // The id must contain only letters (a-z, A-Z), numbers (0-9), // underscores (_), and hyphens (-). The maximum length is 40 characters. - string request_id = 4; + string request_id = 4 [(google.api.field_behavior) = OPTIONAL]; } // A request to get the resource representation for a job in a project. @@ -704,7 +696,7 @@ message GetJobRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The job ID. @@ -730,25 +722,25 @@ message ListJobsRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 6 [(google.api.field_behavior) = REQUIRED]; // Optional. The number of results to return in each response. - int32 page_size = 2; + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; // Optional. The page token, returned by a previous call, to request the // next page of results. - string page_token = 3; + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; // Optional. If set, the returned jobs list includes only jobs that were // submitted to the named cluster. - string cluster_name = 4; + string cluster_name = 4 [(google.api.field_behavior) = OPTIONAL]; // Optional. Specifies enumerated categories of jobs to list. // (default = match ALL jobs). // // If `filter` is provided, `jobStateMatcher` will be ignored. - JobStateMatcher job_state_matcher = 5; + JobStateMatcher job_state_matcher = 5 [(google.api.field_behavior) = OPTIONAL]; // Optional. A filter constraining the jobs to list. Filters are // case-sensitive and have the following syntax: @@ -764,7 +756,7 @@ message ListJobsRequest { // Example filter: // // status.state = ACTIVE AND labels.env = staging AND labels.starred = * - string filter = 7; + string filter = 7 [(google.api.field_behavior) = OPTIONAL]; } // A request to update a job. @@ -773,7 +765,7 @@ message UpdateJobRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 2 [(google.api.field_behavior) = REQUIRED]; // Required. The job ID. @@ -794,12 +786,12 @@ message UpdateJobRequest { // A list of jobs in a project. message ListJobsResponse { // Output only. Jobs list. - repeated Job jobs = 1; + repeated Job jobs = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // Optional. This token is included in the response if there are more results // to fetch. 
To fetch additional results, provide this value as the // `page_token` in a subsequent ListJobsRequest. - string next_page_token = 2; + string next_page_token = 2 [(google.api.field_behavior) = OPTIONAL]; } // A request to cancel a job. @@ -808,7 +800,7 @@ message CancelJobRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The job ID. @@ -821,7 +813,7 @@ message DeleteJobRequest { // belongs to. string project_id = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. The Cloud Dataproc region in which to handle the request. + // Required. The Dataproc region in which to handle the request. string region = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The job ID. diff --git a/google/cloud/dataproc/v1beta2/operations.proto b/google/cloud/dataproc/v1beta2/operations.proto index 74cbde3c..2e98fb82 100644 --- a/google/cloud/dataproc/v1beta2/operations.proto +++ b/google/cloud/dataproc/v1beta2/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/dataproc/v1beta2/shared.proto b/google/cloud/dataproc/v1beta2/shared.proto index de1130d9..eba80918 100644 --- a/google/cloud/dataproc/v1beta2/shared.proto +++ b/google/cloud/dataproc/v1beta2/shared.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/dataproc/v1beta2/workflow_templates.proto b/google/cloud/dataproc/v1beta2/workflow_templates.proto index 2979593d..b8497e83 100644 --- a/google/cloud/dataproc/v1beta2/workflow_templates.proto +++ b/google/cloud/dataproc/v1beta2/workflow_templates.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -33,7 +32,7 @@ option java_outer_classname = "WorkflowTemplatesProto"; option java_package = "com.google.cloud.dataproc.v1beta2"; // The API interface for managing Workflow Templates in the -// Cloud Dataproc API. +// Dataproc API. service WorkflowTemplateService { option (google.api.default_host) = "dataproc.googleapis.com"; option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; @@ -78,9 +77,9 @@ service WorkflowTemplateService { // clusters to be deleted. 
 //
 // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-// [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
+// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
 // Also see [Using
-// WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
 //
 // On successful completion,
 // [Operation.response][google.longrunning.Operation.response] will be
@@ -119,9 +118,9 @@ service WorkflowTemplateService {
 // clusters to be deleted.
 //
 // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-// [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
+// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
 // Also see [Using
-// WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
 //
 // On successful completion,
 // [Operation.response][google.longrunning.Operation.response] will be
@@ -179,7 +178,7 @@ service WorkflowTemplateService {
   }
 }
 
-// A Cloud Dataproc workflow template resource.
+// A Dataproc workflow template resource.
 message WorkflowTemplate {
   option (google.api.resource) = {
     type: "dataproc.googleapis.com/WorkflowTemplate"
@@ -327,22 +326,16 @@ message OrderedJob {
 
   // Required. The job definition.
   oneof job_type {
-    // Job is a Hadoop job.
     HadoopJob hadoop_job = 2;
 
-    // Job is a Spark job.
     SparkJob spark_job = 3;
 
-    // Job is a Pyspark job.
     PySparkJob pyspark_job = 4;
 
-    // Job is a Hive job.
     HiveJob hive_job = 5;
 
-    // Job is a Pig job.
     PigJob pig_job = 6;
 
-    // Job is a SparkSql job.
     SparkSqlJob spark_sql_job = 7;
   }
 
@@ -465,7 +458,7 @@ message ValueValidation {
   repeated string values = 1;
 }
 
-// A Cloud Dataproc workflow template resource.
+// A Dataproc workflow template resource.
 message WorkflowMetadata {
   // The operation state.
   enum State {
@@ -721,9 +714,7 @@ message UpdateWorkflowTemplateRequest {
   // Required. The updated workflow template.
   //
   // The `template.version` field must match the current version.
-  WorkflowTemplate template = 1 [
-    (google.api.field_behavior) = REQUIRED
-  ];
+  WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED];
 }
 
 // A request to list workflow templates in a project.
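
--
Editorial note (not part of the patch): several comments updated above now point to the
proto3 JSON mapping for google.protobuf.Duration and google.protobuf.Timestamp. In that
mapping, a Duration is a decimal seconds string such as "600s", and a Timestamp is an
RFC 3339 string. A minimal Python sketch of those encodings using only the stock
protobuf well-known types; the field values here are illustrative assumptions, not
defaults taken from the Dataproc API:

    from google.protobuf.duration_pb2 import Duration
    from google.protobuf.timestamp_pb2 import Timestamp

    # LifecycleConfig.idle_delete_ttl: per the comments above, the valid range
    # is 10 minutes to 14 days; "600s" is the 10-minute minimum.
    idle_ttl = Duration()
    idle_ttl.FromJsonString("600s")
    assert idle_ttl.seconds == 600
    print(idle_ttl.ToJsonString())    # -> "600s"

    # LifecycleConfig.auto_delete_time: the JSON form is an RFC 3339 timestamp.
    delete_at = Timestamp()
    delete_at.FromJsonString("2020-03-01T00:00:00Z")
    print(delete_at.ToJsonString())   # -> "2020-03-01T00:00:00Z"

These are the same string forms a REST caller would place in a cluster's JSON request
body for the Duration- and Timestamp-typed fields documented in clusters.proto above.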