From 8c12e2b4dca94e12bff9f538bdac29524ff7ef7a Mon Sep 17 00:00:00 2001
From: Google APIs <noreply@google.com>
Date: Thu, 20 Feb 2020 17:44:02 -0800
Subject: [PATCH] Update Dataproc v1 client.

PiperOrigin-RevId: 296336662
---
 .../dataproc/v1/autoscaling_policies.proto |   4 +-
 google/cloud/dataproc/v1/clusters.proto    |  91 +++++++++++++---
 google/cloud/dataproc/v1/jobs.proto        | 102 +++++++++++++++---
 google/cloud/dataproc/v1/operations.proto  |   3 +-
 google/cloud/dataproc/v1/shared.proto      |   3 +-
 .../dataproc/v1/workflow_templates.proto   |  21 ++--
 6 files changed, 174 insertions(+), 50 deletions(-)

diff --git a/google/cloud/dataproc/v1/autoscaling_policies.proto b/google/cloud/dataproc/v1/autoscaling_policies.proto
index 51fbc87d..53321d89 100644
--- a/google/cloud/dataproc/v1/autoscaling_policies.proto
+++ b/google/cloud/dataproc/v1/autoscaling_policies.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -28,7 +27,6 @@
 option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc";
 option java_multiple_files = true;
 option java_outer_classname = "AutoscalingPoliciesProto";
 option java_package = "com.google.cloud.dataproc.v1";
-
 option (google.api.resource_definition) = {
   type: "dataproc.googleapis.com/Region"
   pattern: "projects/{project}/regions/{region}"
diff --git a/google/cloud/dataproc/v1/clusters.proto b/google/cloud/dataproc/v1/clusters.proto
index bc254589..f8db707e 100644
--- a/google/cloud/dataproc/v1/clusters.proto
+++ b/google/cloud/dataproc/v1/clusters.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -20,7 +19,6 @@ package google.cloud.dataproc.v1;
 import "google/api/annotations.proto";
 import "google/api/client.proto";
 import "google/api/field_behavior.proto";
-import "google/cloud/dataproc/v1/operations.proto";
 import "google/cloud/dataproc/v1/shared.proto";
 import "google/longrunning/operations.proto";
 import "google/protobuf/duration.proto";
@@ -40,7 +38,7 @@ service ClusterController {
   // Creates a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
   rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/projects/{project_id}/regions/{region}/clusters"
@@ -55,7 +53,7 @@ service ClusterController {
   // Updates a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
   rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
@@ -65,12 +63,11 @@ service ClusterController {
       response_type: "Cluster"
       metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata"
     };
-
     option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask";
   }
 
   // Deletes a cluster in a project. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
   rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
@@ -101,11 +98,11 @@ service ClusterController {
   // Gets cluster diagnostic information. The returned
   // [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [ClusterOperationMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
+  // [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
   // After the operation completes,
   // [Operation.response][google.longrunning.Operation.response]
   // contains
-  // [DiagnoseClusterResults](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
+  // [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
   rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose"
@@ -215,6 +212,9 @@ message ClusterConfig {
   // Optional. Security settings for the cluster.
   SecurityConfig security_config = 16 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Lifecycle setting for the cluster.
+  LifecycleConfig lifecycle_config = 17 [(google.api.field_behavior) = OPTIONAL];
 }
 
 // Autoscaling Policy config associated with the cluster.
@@ -322,9 +322,12 @@ message GceClusterConfig {
   // [Project and instance
   // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
   map<string, string> metadata = 5;
+
+  // Optional. Reservation affinity for consuming zonal reservations.
+  ReservationAffinity reservation_affinity = 11 [(google.api.field_behavior) = OPTIONAL];
 }
 
-// Optional. The config settings for Compute Engine resources in
+// The config settings for Compute Engine resources in
 // an instance group, such as a master or worker group.
 message InstanceGroupConfig {
   // Optional. The number of VM instances in the instance group.
@@ -438,7 +441,10 @@ message NodeInitializationAction {
   string executable_file = 1 [(google.api.field_behavior) = REQUIRED];
 
   // Optional. Amount of time executable has to complete. Default is
-  // 10 minutes. Cluster creation fails with an explanatory error message (the
+  // 10 minutes (see JSON representation of
+  // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+  //
+  // Cluster creation fails with an explanatory error message (the
   // name of the executable that caused the error and the exceeded timeout
   // period) if the executable is not completed at end of the timeout period.
   google.protobuf.Duration execution_timeout = 2 [(google.api.field_behavior) = OPTIONAL];
@@ -495,7 +501,8 @@ message ClusterStatus {
     (google.api.field_behavior) = OPTIONAL
   ];
 
-  // Output only. Time when this state was entered.
+  // Output only. Time when this state was entered (see JSON representation of
+  // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
   google.protobuf.Timestamp state_start_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
 
   // Output only. Additional state information that includes
@@ -613,6 +620,36 @@ message SoftwareConfig {
   repeated Component optional_components = 3 [(google.api.field_behavior) = OPTIONAL];
 }
 
+// Specifies the cluster auto-delete schedule configuration.
+message LifecycleConfig {
+  // Optional. The duration to keep the cluster alive while idling (when no jobs
+  // are running). Passing this threshold will cause the cluster to be
+  // deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
+  // representation of
+  // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+  google.protobuf.Duration idle_delete_ttl = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Either the exact time the cluster should be deleted at or
+  // the cluster maximum age.
+  oneof ttl {
+    // Optional. The time when the cluster will be auto-deleted (see JSON
+    // representation of
+    // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+    google.protobuf.Timestamp auto_delete_time = 2 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. The lifetime duration of the cluster. The cluster will be
+    // auto-deleted at the end of this period. Minimum value is 10 minutes;
+    // maximum value is 14 days (see JSON representation of
+    // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+    google.protobuf.Duration auto_delete_ttl = 3 [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Output only. The time when the cluster became idle (most recent job
+  // finished) and became eligible for deletion due to idleness (see JSON
+  // representation of
+  // [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
+  google.protobuf.Timestamp idle_start_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
 // Contains cluster daemon metrics, such as HDFS and YARN stats.
 //
 // **Beta Feature**: This report is available for testing purposes only. It may
@@ -671,7 +708,8 @@ message UpdateClusterRequest {
   // interrupting jobs in progress. Timeout specifies how long to wait for jobs
   // in progress to finish before forcefully removing nodes (and potentially
   // interrupting jobs). Default timeout is 0 (for forceful decommission), and
-  // the maximum allowed timeout is 1 day.
+  // the maximum allowed timeout is 1 day (see JSON representation of
+  // [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
   //
   // Only supported on Dataproc image versions 1.2 and higher.
   google.protobuf.Duration graceful_decommission_timeout = 6 [(google.api.field_behavior) = OPTIONAL];
@@ -854,3 +892,30 @@ message DiagnoseClusterResults {
   // diagnostics.
   string output_uri = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
 }
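For illustration, a minimal sketch of how the new `LifecycleConfig` surfaces through the `CreateCluster` REST binding shown above. It assumes the `google-auth` and `requests` packages; the cluster name and TTL values are hypothetical placeholders, and the `Duration` fields use the proto3 JSON string form (e.g. `"600s"`) referenced in the comments.

```python
# Sketch only: create a cluster that deletes itself after 10 minutes of
# idleness or after one day of lifetime, via the new LifecycleConfig.
import google.auth
from google.auth.transport.requests import AuthorizedSession

credentials, project = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"])
session = AuthorizedSession(credentials)
region = "us-central1"

cluster = {
    "projectId": project,
    "clusterName": "example-cluster",   # hypothetical name
    "config": {
        "lifecycleConfig": {
            # Duration fields use the proto3 JSON form, e.g. "600s".
            "idleDeleteTtl": "600s",    # minimum allowed is 10 minutes
            "autoDeleteTtl": "86400s",  # oneof ttl: a max age, not a fixed time
        },
    },
}

# CreateCluster binding from the diff: POST .../regions/{region}/clusters.
# The response is a google.longrunning.Operation.
resp = session.post(
    f"https://dataproc.googleapis.com/v1/projects/{project}"
    f"/regions/{region}/clusters",
    json=cluster,
)
print(resp.json())
```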
+
+// Reservation affinity for consuming zonal reservations.
+message ReservationAffinity {
+  // Indicates whether to consume capacity from a reservation or not.
+  enum Type {
+    TYPE_UNSPECIFIED = 0;
+
+    // Do not consume from any allocated capacity.
+    NO_RESERVATION = 1;
+
+    // Consume any reservation available.
+    ANY_RESERVATION = 2;
+
+    // Must consume from a specific reservation. The key and values fields
+    // must identify the reservation to consume.
+    SPECIFIC_RESERVATION = 3;
+  }
+
+  // Optional. Type of reservation to consume.
+  Type consume_reservation_type = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Corresponds to the label key of the reservation resource.
+  string key = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Corresponds to the label values of the reservation resource.
+  repeated string values = 3 [(google.api.field_behavior) = OPTIONAL];
+}
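A companion sketch for the new `reservation_affinity` field in `GceClusterConfig` (proto3 JSON form). The zone and reservation name are hypothetical placeholders; for `SPECIFIC_RESERVATION`, Compute Engine exposes reservations under the well-known `compute.googleapis.com/reservation-name` label key.

```python
# Sketch only: a GceClusterConfig fragment that pins the cluster's VMs to a
# specific zonal reservation via the new reservation_affinity field.
gce_cluster_config = {
    "zoneUri": "us-central1-a",
    "reservationAffinity": {
        "consumeReservationType": "SPECIFIC_RESERVATION",
        # Label key under which Compute Engine matches specific reservations.
        "key": "compute.googleapis.com/reservation-name",
        "values": ["example-reservation"],  # hypothetical reservation name
    },
}

# Merged into the CreateCluster body from the previous sketch:
# cluster["config"]["gceClusterConfig"] = gce_cluster_config
```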
diff --git a/google/cloud/dataproc/v1/jobs.proto b/google/cloud/dataproc/v1/jobs.proto
index bcb68fed..85921dc4 100644
--- a/google/cloud/dataproc/v1/jobs.proto
+++ b/google/cloud/dataproc/v1/jobs.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package google.cloud.dataproc.v1;
 import "google/api/annotations.proto";
 import "google/api/client.proto";
 import "google/api/field_behavior.proto";
+import "google/longrunning/operations.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
@@ -69,9 +70,9 @@ service JobController {
   // Starts a job cancellation request. To access the job resource
   // after cancellation, call
-  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+  // [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
   // or
-  // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
+  // [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
   rpc CancelJob(CancelJobRequest) returns (Job) {
     option (google.api.http) = {
       post: "/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel"
@@ -387,6 +388,71 @@ message PigJob {
   LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
 }
 
+// A Dataproc job for running
+// [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html)
+// applications on YARN.
+message SparkRJob {
+  // Required. The HCFS URI of the main R file to use as the driver.
+  // Must be a .R file.
+  string main_r_file_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The arguments to pass to the driver. Do not include arguments,
+  // such as `--conf`, that can be set as job properties, since a collision may
+  // occur that causes an incorrect job submission.
+  repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of files to be copied to the working directory of
+  // R drivers and distributed tasks. Useful for naively parallel tasks.
+  repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. HCFS URIs of archives to be extracted in the working directory of
+  // Spark drivers and tasks. Supported file types:
+  // .jar, .tar, .tar.gz, .tgz, and .zip.
+  repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values, used to configure SparkR.
+  // Properties that conflict with values set by the Dataproc API may be
+  // overwritten. Can include properties set in
+  // /etc/spark/conf/spark-defaults.conf and classes in user code.
+  map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A Dataproc job for running [Presto](https://prestosql.io/) queries.
+message PrestoJob {
+  // Required. The sequence of Presto queries to execute, specified as
+  // either an HCFS file URI or as a list of queries.
+  oneof queries {
+    // The HCFS URI of the script that contains SQL queries.
+    string query_file_uri = 1;
+
+    // A list of queries.
+    QueryList query_list = 2;
+  }
+
+  // Optional. Whether to continue executing queries if a query fails.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
+  bool continue_on_failure = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The format in which query output will be displayed. See the
+  // Presto documentation for supported output formats.
+  string output_format = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Presto client tags to attach to this query.
+  repeated string client_tags = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A mapping of property names to values. Used to set Presto
+  // [session properties](https://prestodb.io/docs/current/sql/set-session.html).
+  // Equivalent to using the --session flag in the Presto CLI.
+  map<string, string> properties = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The runtime log config for job execution.
+  LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
+}
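A sketch of submitting one of the two new job types (here a `PrestoJob`) through the existing `jobs:submit` REST surface. It reuses the `session`, `project`, and `region` from the first sketch; the cluster name, query, and session property are hypothetical placeholders.

```python
# Sketch only: submit a PrestoJob to an existing cluster via jobs:submit.
job_request = {
    "job": {
        "placement": {"clusterName": "example-cluster"},
        "prestoJob": {
            "queryList": {"queries": ["SELECT 1"]},
            "continueOnFailure": False,
            # Presto session properties, like --session on the Presto CLI.
            "properties": {"query_max_run_time": "30m"},
        },
    },
}
# A SparkRJob would swap the payload, e.g.:
# "sparkRJob": {"mainRFileUri": "gs://example-bucket/analysis.R"}

resp = session.post(
    f"https://dataproc.googleapis.com/v1/projects/{project}"
    f"/regions/{region}/jobs:submit",
    json=job_request,
)
print(resp.json())  # the created Job resource, including its status
```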
+
 // Dataproc job config.
 message JobPlacement {
   // Required. The name of the cluster where the job will be submitted.
@@ -562,23 +628,29 @@ message Job {
   // Required. The application/framework-specific portion of the job.
   oneof type_job {
-    // Job is a Hadoop job.
-    HadoopJob hadoop_job = 3;
+    // Optional. Job is a Hadoop job.
+    HadoopJob hadoop_job = 3 [(google.api.field_behavior) = OPTIONAL];
 
-    // Job is a Spark job.
-    SparkJob spark_job = 4;
+    // Optional. Job is a Spark job.
+    SparkJob spark_job = 4 [(google.api.field_behavior) = OPTIONAL];
 
-    // Job is a Pyspark job.
-    PySparkJob pyspark_job = 5;
+    // Optional. Job is a PySpark job.
+    PySparkJob pyspark_job = 5 [(google.api.field_behavior) = OPTIONAL];
 
-    // Job is a Hive job.
-    HiveJob hive_job = 6;
+    // Optional. Job is a Hive job.
+    HiveJob hive_job = 6 [(google.api.field_behavior) = OPTIONAL];
 
-    // Job is a Pig job.
-    PigJob pig_job = 7;
+    // Optional. Job is a Pig job.
+    PigJob pig_job = 7 [(google.api.field_behavior) = OPTIONAL];
 
-    // Job is a SparkSql job.
-    SparkSqlJob spark_sql_job = 12;
+    // Optional. Job is a SparkR job.
+    SparkRJob spark_r_job = 21 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Job is a SparkSql job.
+    SparkSqlJob spark_sql_job = 12 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Job is a Presto job.
+    PrestoJob presto_job = 23 [(google.api.field_behavior) = OPTIONAL];
   }
 
   // Output only. The job status. Additional application-specific
diff --git a/google/cloud/dataproc/v1/operations.proto b/google/cloud/dataproc/v1/operations.proto
index 4af2a5f8..724d2a89 100644
--- a/google/cloud/dataproc/v1/operations.proto
+++ b/google/cloud/dataproc/v1/operations.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
diff --git a/google/cloud/dataproc/v1/shared.proto b/google/cloud/dataproc/v1/shared.proto
index 74bd56a8..c6ff8f28 100644
--- a/google/cloud/dataproc/v1/shared.proto
+++ b/google/cloud/dataproc/v1/shared.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
diff --git a/google/cloud/dataproc/v1/workflow_templates.proto b/google/cloud/dataproc/v1/workflow_templates.proto
index 30b5ced4..2db55798 100644
--- a/google/cloud/dataproc/v1/workflow_templates.proto
+++ b/google/cloud/dataproc/v1/workflow_templates.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//
 
 syntax = "proto3";
 
@@ -78,9 +77,9 @@ service WorkflowTemplateService {
   // clusters to be deleted.
   //
   // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
+  // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
   // Also see [Using
-  // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+  // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
   //
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
@@ -119,9 +118,9 @@ service WorkflowTemplateService {
   // clusters to be deleted.
   //
   // The [Operation.metadata][google.longrunning.Operation.metadata] will be
-  // [WorkflowMetadata](/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
+  // [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
   // Also see [Using
-  // WorkflowMetadata](/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
+  // WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
   //
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
@@ -320,22 +319,16 @@ message OrderedJob {
   // Required. The job definition.
   oneof job_type {
-    // Job is a Hadoop job.
     HadoopJob hadoop_job = 2;
 
-    // Job is a Spark job.
     SparkJob spark_job = 3;
 
-    // Job is a Pyspark job.
     PySparkJob pyspark_job = 4;
 
-    // Job is a Hive job.
     HiveJob hive_job = 5;
 
-    // Job is a Pig job.
     PigJob pig_job = 6;
 
-    // Job is a SparkSql job.
     SparkSqlJob spark_sql_job = 7;
   }
 
@@ -708,9 +701,7 @@ message UpdateWorkflowTemplateRequest {
   // Required. The updated workflow template.
   //
   // The `template.version` field must match the current version.
-  WorkflowTemplate template = 1 [
-    (google.api.field_behavior) = REQUIRED
-  ];
+  WorkflowTemplate template = 1 [(google.api.field_behavior) = REQUIRED];
 }
 
 // A request to list workflow templates in a project.
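Finally, a sketch tying the workflow pieces together: instantiating an inline `WorkflowTemplate` whose `OrderedJob` uses one of the `job_type` variants above. It reuses the `session`, `project`, and `region` from the first sketch; the template id, labels, URIs, and arguments are hypothetical placeholders, and a real template may need additional fields.

```python
# Sketch only: run a one-step workflow on an existing, label-selected cluster
# via the workflowTemplates:instantiateInline binding.
template = {
    "id": "example-workflow",  # hypothetical template id
    "placement": {
        # Select an existing cluster by label instead of creating a
        # managed (workflow-owned) cluster.
        "clusterSelector": {
            "clusterLabels": {"env": "example"},
        },
    },
    "jobs": [
        {
            "stepId": "count-words",
            "hadoopJob": {
                "mainJarFileUri": "file:///usr/lib/hadoop-mapreduce/"
                                  "hadoop-mapreduce-examples.jar",
                "args": ["wordcount", "gs://example-bucket/in/",
                         "gs://example-bucket/out/"],
            },
        },
    ],
}

# Returns a long-running Operation; its metadata is the WorkflowMetadata
# message linked in the comments above.
resp = session.post(
    f"https://dataproc.googleapis.com/v1/projects/{project}"
    f"/regions/{region}/workflowTemplates:instantiateInline",
    json=template,
)
print(resp.json())
```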