feat!: (v1beta2) remove DOCKER/FLINK from Component enum; add HBASE

Update the protobuf definitions for the v1beta2 Dataproc API.

BREAKING CHANGE:

1. The `DOCKER` and `FLINK` values have been removed from the `Component` enum, and an `HBASE` value was added (see the migration sketch below).

Other changes:

1. There is a new `temp_bucket` field in `ClusterConfig`.
2. There is a new `preemptibility` field in `InstanceGroupConfig`.
3. The `project_id` field of `JobReference` is now optional instead of required.
4. There is a new `dag_timeout` field in `WorkflowTemplate`.
5. There are new `dag_timeout`, `dag_start_time`, and `dag_end_time` fields in `WorkflowMetadata`.
6. There are various updates to the doc comments.
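
For the `Component` change, a minimal migration sketch in Python, assuming the client generated from these protos (`google-cloud-dataproc`, `dataproc_v1beta2` module); the project and cluster names are illustrative and not part of this commit:

```python
# Migration sketch (assumes the google-cloud-dataproc v1beta2 client).
from google.cloud import dataproc_v1beta2

# Component.DOCKER and Component.FLINK no longer exist after this change;
# any cluster definition referencing them must drop or replace them.
cluster = dataproc_v1beta2.Cluster(
    project_id="my-project",          # illustrative
    cluster_name="example-cluster",   # illustrative
    config=dataproc_v1beta2.ClusterConfig(
        software_config=dataproc_v1beta2.SoftwareConfig(
            optional_components=[
                dataproc_v1beta2.Component.HBASE,      # newly added value
                dataproc_v1beta2.Component.ZOOKEEPER,  # unaffected value
            ],
        ),
    ),
)
```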

PiperOrigin-RevId: 345127100
Google APIs 2020-12-01 16:46:33 -08:00 committed by Copybara-Service
parent acbc6b1a3a
commit 269083b6ed
6 changed files with 199 additions and 81 deletions

File: google/cloud/dataproc/v1beta2/autoscaling_policies.proto

@@ -36,10 +36,12 @@ option (google.api.resource_definition) = {
// Cloud Dataproc API.
service AutoscalingPolicyService {
option (google.api.default_host) = "dataproc.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates new autoscaling policy.
rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest) returns (AutoscalingPolicy) {
rpc CreateAutoscalingPolicy(CreateAutoscalingPolicyRequest)
returns (AutoscalingPolicy) {
option (google.api.http) = {
post: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies"
body: "policy"
@@ -55,7 +57,8 @@ service AutoscalingPolicyService {
//
// Disabled check for update_mask, because all updates will be full
// replacements.
rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest) returns (AutoscalingPolicy) {
rpc UpdateAutoscalingPolicy(UpdateAutoscalingPolicyRequest)
returns (AutoscalingPolicy) {
option (google.api.http) = {
put: "/v1beta2/{policy.name=projects/*/locations/*/autoscalingPolicies/*}"
body: "policy"
@@ -68,7 +71,8 @@ service AutoscalingPolicyService {
}
// Retrieves autoscaling policy.
rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest) returns (AutoscalingPolicy) {
rpc GetAutoscalingPolicy(GetAutoscalingPolicyRequest)
returns (AutoscalingPolicy) {
option (google.api.http) = {
get: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}"
additional_bindings {
@@ -79,7 +83,8 @@ service AutoscalingPolicyService {
}
// Lists autoscaling policies in the project.
rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest) returns (ListAutoscalingPoliciesResponse) {
rpc ListAutoscalingPolicies(ListAutoscalingPoliciesRequest)
returns (ListAutoscalingPoliciesResponse) {
option (google.api.http) = {
get: "/v1beta2/{parent=projects/*/locations/*}/autoscalingPolicies"
additional_bindings {
@@ -91,7 +96,8 @@ service AutoscalingPolicyService {
// Deletes an autoscaling policy. It is an error to delete an autoscaling
// policy that is in use by one or more clusters.
rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest) returns (google.protobuf.Empty) {
rpc DeleteAutoscalingPolicy(DeleteAutoscalingPolicyRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1beta2/{name=projects/*/locations/*/autoscalingPolicies/*}"
additional_bindings {
@@ -136,22 +142,26 @@ message AutoscalingPolicy {
}
// Required. Describes how the autoscaler will operate for primary workers.
InstanceGroupAutoscalingPolicyConfig worker_config = 4 [(google.api.field_behavior) = REQUIRED];
InstanceGroupAutoscalingPolicyConfig worker_config = 4
[(google.api.field_behavior) = REQUIRED];
// Optional. Describes how the autoscaler will operate for secondary workers.
InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5 [(google.api.field_behavior) = OPTIONAL];
InstanceGroupAutoscalingPolicyConfig secondary_worker_config = 5
[(google.api.field_behavior) = OPTIONAL];
}
// Basic algorithm for autoscaling.
message BasicAutoscalingAlgorithm {
// Required. YARN autoscaling configuration.
BasicYarnAutoscalingConfig yarn_config = 1 [(google.api.field_behavior) = REQUIRED];
BasicYarnAutoscalingConfig yarn_config = 1
[(google.api.field_behavior) = REQUIRED];
// Optional. Duration between scaling events. A scaling period starts after
// the update operation from the previous event has completed.
//
// Bounds: [2m, 1d]. Default: 2m.
google.protobuf.Duration cooldown_period = 2 [(google.api.field_behavior) = OPTIONAL];
google.protobuf.Duration cooldown_period = 2
[(google.api.field_behavior) = OPTIONAL];
}
// Basic autoscaling configurations for YARN.
@@ -162,22 +172,29 @@ message BasicYarnAutoscalingConfig {
// downscaling operations.
//
// Bounds: [0s, 1d].
google.protobuf.Duration graceful_decommission_timeout = 5 [(google.api.field_behavior) = REQUIRED];
google.protobuf.Duration graceful_decommission_timeout = 5
[(google.api.field_behavior) = REQUIRED];
// Required. Fraction of average pending memory in the last cooldown period
// for which to add workers. A scale-up factor of 1.0 will result in scaling
// up so that there is no pending memory remaining after the update (more
// aggressive scaling). A scale-up factor closer to 0 will result in a smaller
// magnitude of scaling up (less aggressive scaling).
// Required. Fraction of average YARN pending memory in the last cooldown
// period for which to add workers. A scale-up factor of 1.0 will result in
// scaling up so that there is no pending memory remaining after the update
// (more aggressive scaling). A scale-up factor closer to 0 will result in a
// smaller magnitude of scaling up (less aggressive scaling). See [How
// autoscaling
// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
// for more information.
//
// Bounds: [0.0, 1.0].
double scale_up_factor = 1 [(google.api.field_behavior) = REQUIRED];
// Required. Fraction of average pending memory in the last cooldown period
// for which to remove workers. A scale-down factor of 1 will result in
// Required. Fraction of average YARN pending memory in the last cooldown
// period for which to remove workers. A scale-down factor of 1 will result in
// scaling down so that there is no available memory remaining after the
// update (more aggressive scaling). A scale-down factor of 0 disables
// removing workers, which can be beneficial for autoscaling a single job.
// See [How autoscaling
// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
// for more information.
//
// Bounds: [0.0, 1.0].
double scale_down_factor = 2 [(google.api.field_behavior) = REQUIRED];
@@ -189,7 +206,8 @@ message BasicYarnAutoscalingConfig {
// on any recommended change.
//
// Bounds: [0.0, 1.0]. Default: 0.0.
double scale_up_min_worker_fraction = 3 [(google.api.field_behavior) = OPTIONAL];
double scale_up_min_worker_fraction = 3
[(google.api.field_behavior) = OPTIONAL];
// Optional. Minimum scale-down threshold as a fraction of total cluster size
// before scaling occurs. For example, in a 20-worker cluster, a threshold of
@@ -198,7 +216,8 @@ message BasicYarnAutoscalingConfig {
// on any recommended change.
//
// Bounds: [0.0, 1.0]. Default: 0.0.
double scale_down_min_worker_fraction = 4 [(google.api.field_behavior) = OPTIONAL];
double scale_down_min_worker_fraction = 4
[(google.api.field_behavior) = OPTIONAL];
}
// Configuration for the size bounds of an instance group, including its
@@ -341,7 +360,8 @@ message ListAutoscalingPoliciesRequest {
// A response to a request to list autoscaling policies in a project.
message ListAutoscalingPoliciesResponse {
// Output only. Autoscaling policies list.
repeated AutoscalingPolicy policies = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated AutoscalingPolicy policies = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. This token is included in the response if there are more
// results to fetch.
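
As a usage illustration for the bounds documented in the comments above, a hedged sketch of a policy construction; the client import and policy id are assumptions:

```python
# Sketch of an autoscaling policy within the documented bounds
# (assumes the google-cloud-dataproc v1beta2 client).
from google.cloud import dataproc_v1beta2

policy = dataproc_v1beta2.AutoscalingPolicy(
    id="example-policy",  # illustrative
    basic_algorithm=dataproc_v1beta2.BasicAutoscalingAlgorithm(
        yarn_config=dataproc_v1beta2.BasicYarnAutoscalingConfig(
            graceful_decommission_timeout={"seconds": 3600},  # bounds [0s, 1d]
            scale_up_factor=0.5,    # bounds [0.0, 1.0]
            scale_down_factor=1.0,  # most aggressive scale-down
            scale_up_min_worker_fraction=0.0,    # default
            scale_down_min_worker_fraction=0.0,  # default
        ),
        cooldown_period={"seconds": 240},  # bounds [2m, 1d]; default 2m
    ),
    worker_config=dataproc_v1beta2.InstanceGroupAutoscalingPolicyConfig(
        max_instances=10,
    ),
)
```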

File: google/cloud/dataproc/v1beta2/clusters.proto

@@ -171,6 +171,17 @@ message ClusterConfig {
// bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL];
// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
// such as Spark and MapReduce history files.
// If you do not specify a temp bucket,
// Dataproc will determine a Cloud Storage location (US,
// ASIA, or EU) for your cluster's temp bucket according to the
// Compute Engine zone where your cluster is deployed, and then create
// and manage this project-level, per-location bucket. The default bucket has
// a TTL of 90 days, but you can use any TTL (or none) if you specify a
// bucket.
string temp_bucket = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. The shared Compute Engine config settings for
// all instances in a cluster.
GceClusterConfig gce_cluster_config = 8 [(google.api.field_behavior) = OPTIONAL];
@@ -330,7 +341,7 @@ message GceClusterConfig {
bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. The [Dataproc service
// account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc)
// account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
// (also see [VM Data Plane
// identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
// used by Dataproc cluster VM instances to access Google Cloud Platform
@@ -374,6 +385,27 @@ message GceClusterConfig {
// The config settings for Compute Engine resources in
// an instance group, such as a master or worker group.
message InstanceGroupConfig {
// Controls the use of
// [preemptible instances]
// (https://cloud.google.com/compute/docs/instances/preemptible)
// within the group.
enum Preemptibility {
// Preemptibility is unspecified, the system will choose the
// appropriate setting for each instance group.
PREEMPTIBILITY_UNSPECIFIED = 0;
// Instances are non-preemptible.
//
// This option is allowed for all instance groups and is the only valid
// value for Master and Worker instance groups.
NON_PREEMPTIBLE = 1;
// Instances are preemptible.
//
// This option is allowed only for secondary worker groups.
PREEMPTIBLE = 2;
}
// Optional. The number of VM instances in the instance group.
// For master instance groups, must be set to 1.
int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL];
@@ -424,6 +456,15 @@ message InstanceGroupConfig {
// instances.
bool is_preemptible = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. Specifies the preemptibility of the instance group.
//
// The default value for master and worker groups is
// `NON_PREEMPTIBLE`. This default cannot be changed.
//
// The default value for secondary instances is
// `PREEMPTIBLE`.
Preemptibility preemptibility = 10 [(google.api.field_behavior) = OPTIONAL];
// Output only. The config for Compute Engine Instance Group
// Manager that manages this group.
// This is only used for preemptible instance groups.
@@ -685,7 +726,7 @@ message ClusterStatus {
message SoftwareConfig {
// Optional. The version of software inside the cluster. It must be one of the
// supported [Dataproc
// Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
// Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions),
// such as "1.2" (including a subminor version, such as "1.2.29"), or the
// ["preview"
// version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
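
The two new cluster fields compose as follows; a hedged sketch assuming the same generated client, with bucket names and group sizes illustrative:

```python
# Sketch of the new temp_bucket and preemptibility fields
# (assumes the google-cloud-dataproc v1beta2 client).
from google.cloud import dataproc_v1beta2

Preemptibility = dataproc_v1beta2.InstanceGroupConfig.Preemptibility

config = dataproc_v1beta2.ClusterConfig(
    config_bucket="my-staging-bucket",  # illustrative
    # New field: omit temp_bucket and Dataproc creates a managed,
    # per-location bucket with a 90-day TTL.
    temp_bucket="my-temp-bucket",       # illustrative
    master_config=dataproc_v1beta2.InstanceGroupConfig(
        num_instances=1,
        # NON_PREEMPTIBLE is the only valid value for master/worker groups.
        preemptibility=Preemptibility.NON_PREEMPTIBLE,
    ),
    secondary_worker_config=dataproc_v1beta2.InstanceGroupConfig(
        num_instances=2,
        # Allowed only for secondary workers; also their default.
        preemptibility=Preemptibility.PREEMPTIBLE,
    ),
)
```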

File: google/cloud/dataproc/v1beta2/dataproc_v1beta2.yaml

@@ -28,14 +28,14 @@ documentation:
Sets the access control policy on the specified resource. Replaces
any existing policy.
Can return Public Errors: NOT_FOUND, INVALID_ARGUMENT and
PERMISSION_DENIED
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED`
errors.
- selector: google.iam.v1.IAMPolicy.TestIamPermissions
description: |-
Returns permissions that a caller has on the specified resource. If the
resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
permissions, not a `NOT_FOUND` error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for authorization
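
To make the documented behavior concrete, a sketch against the generic `google.iam.v1.IAMPolicy` surface named by these selectors. The `iam_policy_pb2*` modules ship in the `grpc-google-iam-v1` package, and channel/credential setup is elided; all of this is assumption, not part of this change:

```python
# Sketch: TestIamPermissions on a missing resource returns an empty
# permission set rather than a NOT_FOUND error.
import grpc
from google.iam.v1 import iam_policy_pb2, iam_policy_pb2_grpc

def granted_permissions(channel: grpc.Channel, resource: str) -> list:
    """Returns the subset of queried permissions the caller holds."""
    stub = iam_policy_pb2_grpc.IAMPolicyStub(channel)
    response = stub.TestIamPermissions(
        iam_policy_pb2.TestIamPermissionsRequest(
            resource=resource,  # e.g. a workflow template resource name
            permissions=["dataproc.workflowTemplates.get"],
        )
    )
    return list(response.permissions)  # [] when the resource does not exist
```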

File: google/cloud/dataproc/v1beta2/jobs.proto

@@ -224,12 +224,12 @@ message SparkJob {
// Spark driver and tasks.
repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of files to be copied to the working directory of
// Spark drivers and distributed tasks. Useful for naively parallel tasks.
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of archives to be extracted in the working directory
// of Spark drivers and tasks. Supported file types:
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
@@ -265,11 +265,12 @@ message PySparkJob {
// Python driver and tasks.
repeated string jar_file_uris = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of files to be copied to the working directory of
// Python drivers and distributed tasks. Useful for naively parallel tasks.
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
repeated string file_uris = 5 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of archives to be extracted in the working directory of
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
@@ -414,12 +415,12 @@ message SparkRJob {
// occur that causes an incorrect job submission.
repeated string args = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of files to be copied to the working directory of
// R drivers and distributed tasks. Useful for naively parallel tasks.
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
repeated string file_uris = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of archives to be extracted in the working directory of
// Spark drivers and tasks. Supported file types:
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
repeated string archive_uris = 4 [(google.api.field_behavior) = OPTIONAL];
@@ -562,9 +563,9 @@ message JobStatus {
// Encapsulates the full scoping used to reference a job.
message JobReference {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Optional. The ID of the Google Cloud Platform project that the job belongs to. If
// specified, must match the request project ID.
string project_id = 1 [(google.api.field_behavior) = OPTIONAL];
// Optional. The job ID, which must be unique within the project.
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
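
A sketch of the relaxed `JobReference`: `project_id` can now be omitted and is taken from the enclosing request. Client import and all names are assumptions:

```python
# Sketch of an optional JobReference.project_id
# (assumes the google-cloud-dataproc v1beta2 client).
from google.cloud import dataproc_v1beta2

job = dataproc_v1beta2.Job(
    reference=dataproc_v1beta2.JobReference(
        # project_id omitted: formerly REQUIRED, now OPTIONAL.
        # If set, it must match the request's project ID.
        job_id="example-job-0001",  # illustrative
    ),
    placement=dataproc_v1beta2.JobPlacement(cluster_name="example-cluster"),
    spark_job=dataproc_v1beta2.SparkJob(
        main_class="org.apache.spark.examples.SparkPi",
        # Per the reworded comments above, file_uris land in each
        # executor's working directory.
    ),
)
```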

File: google/cloud/dataproc/v1beta2/shared.proto

@@ -25,20 +25,17 @@ option java_package = "com.google.cloud.dataproc.v1beta2";
// Cluster components that can be activated.
enum Component {
// Unspecified component.
// Unspecified component. Specifying this will cause Cluster creation to fail.
COMPONENT_UNSPECIFIED = 0;
// The Anaconda python distribution.
ANACONDA = 5;
// Docker
DOCKER = 13;
// The Druid query engine.
DRUID = 9;
// Flink
FLINK = 14;
// HBase.
HBASE = 11;
// The Hive Web HCatalog (the REST service for accessing HCatalog).
HIVE_WEBHCAT = 3;

File: google/cloud/dataproc/v1beta2/workflow_templates.proto

@ -23,6 +23,7 @@ import "google/api/resource.proto";
import "google/cloud/dataproc/v1beta2/clusters.proto";
import "google/cloud/dataproc/v1beta2/jobs.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
@@ -35,10 +36,12 @@ option java_package = "com.google.cloud.dataproc.v1beta2";
// Dataproc API.
service WorkflowTemplateService {
option (google.api.default_host) = "dataproc.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform";
// Creates new workflow template.
rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) {
rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest)
returns (WorkflowTemplate) {
option (google.api.http) = {
post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
body: "template"
@@ -54,7 +57,8 @@ service WorkflowTemplateService {
//
// Can retrieve previously instantiated template by specifying optional
// version parameter.
rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) {
rpc GetWorkflowTemplate(GetWorkflowTemplateRequest)
returns (WorkflowTemplate) {
option (google.api.http) = {
get: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
additional_bindings {
@@ -84,7 +88,8 @@ service WorkflowTemplateService {
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) {
rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate"
body: "*"
@@ -104,7 +109,8 @@ service WorkflowTemplateService {
// Instantiates a template and begins execution.
//
// This method is equivalent to executing the sequence
// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
// [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
// [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
//
// The returned Operation can be used to track execution of
@@ -125,7 +131,9 @@ service WorkflowTemplateService {
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) {
rpc InstantiateInlineWorkflowTemplate(
InstantiateInlineWorkflowTemplateRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline"
body: "template"
@@ -143,7 +151,8 @@ service WorkflowTemplateService {
// Updates (replaces) workflow template. The updated template
// must contain version that matches the current server version.
rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) {
rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest)
returns (WorkflowTemplate) {
option (google.api.http) = {
put: "/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}"
body: "template"
@@ -156,7 +165,8 @@ service WorkflowTemplateService {
}
// Lists workflows that match the specified filter in the request.
rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) {
rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest)
returns (ListWorkflowTemplatesResponse) {
option (google.api.http) = {
get: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
additional_bindings {
@@ -167,7 +177,8 @@ service WorkflowTemplateService {
}
// Deletes a workflow template. It does not cancel in-progress workflows.
rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) {
rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest)
returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
additional_bindings {
@@ -220,10 +231,12 @@ message WorkflowTemplate {
int32 version = 3 [(google.api.field_behavior) = OPTIONAL];
// Output only. The time template was created.
google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The time template was last updated.
google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp update_time = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The labels to associate with this template. These labels
// will be propagated to all jobs and clusters created by the workflow
@@ -248,7 +261,20 @@ message WorkflowTemplate {
// Optional. Template parameters whose values are substituted into the
// template. Values for parameters must be provided when the template is
// instantiated.
repeated TemplateParameter parameters = 9 [(google.api.field_behavior) = OPTIONAL];
repeated TemplateParameter parameters = 9
[(google.api.field_behavior) = OPTIONAL];
// Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h",
// and "d" suffixes for second, minute, hour, and day duration values,
// respectively. The timeout duration must be from 10 minutes ("10m") to 24
// hours ("24h" or "1d"). The timer begins when the first job is submitted. If
// the workflow is running at the end of the timeout period, any remaining
// jobs are cancelled, the workflow is terminated, and if the workflow was
// running on a [managed
// cluster](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
// the cluster is deleted.
google.protobuf.Duration dag_timeout = 10
[(google.api.field_behavior) = OPTIONAL];
}
// Specifies workflow execution target.
@@ -316,8 +342,8 @@ message OrderedJob {
//
// The step id is used as prefix for job id, as job
// `goog-dataproc-workflow-step-id` label, and in
// [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
// steps.
// [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids]
// field from other steps.
//
// The id must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). Cannot begin or end with underscore
@@ -326,23 +352,29 @@ message OrderedJob {
// Required. The job definition.
oneof job_type {
HadoopJob hadoop_job = 2;
// Optional. Job is a Hadoop job.
HadoopJob hadoop_job = 2 [(google.api.field_behavior) = OPTIONAL];
SparkJob spark_job = 3;
// Optional. Job is a Spark job.
SparkJob spark_job = 3 [(google.api.field_behavior) = OPTIONAL];
PySparkJob pyspark_job = 4;
// Optional. Job is a PySpark job.
PySparkJob pyspark_job = 4 [(google.api.field_behavior) = OPTIONAL];
HiveJob hive_job = 5;
// Optional. Job is a Hive job.
HiveJob hive_job = 5 [(google.api.field_behavior) = OPTIONAL];
PigJob pig_job = 6;
// Optional. Job is a Pig job.
PigJob pig_job = 6 [(google.api.field_behavior) = OPTIONAL];
// Spark R job
SparkRJob spark_r_job = 11;
// Optional. Job is a SparkR job.
SparkRJob spark_r_job = 11 [(google.api.field_behavior) = OPTIONAL];
SparkSqlJob spark_sql_job = 7;
// Optional. Job is a SparkSql job.
SparkSqlJob spark_sql_job = 7 [(google.api.field_behavior) = OPTIONAL];
// Presto job
PrestoJob presto_job = 12;
// Optional. Job is a Presto job.
PrestoJob presto_job = 12 [(google.api.field_behavior) = OPTIONAL];
}
// Optional. The labels to associate with this job.
@@ -362,7 +394,8 @@ message OrderedJob {
// Optional. The optional list of prerequisite job step_ids.
// If not specified, the job will start at the beginning of workflow.
repeated string prerequisite_step_ids = 10 [(google.api.field_behavior) = OPTIONAL];
repeated string prerequisite_step_ids = 10
[(google.api.field_behavior) = OPTIONAL];
}
// A configurable parameter that replaces one or more fields in the template.
@@ -388,10 +421,10 @@ message TemplateParameter {
// A field is allowed to appear in at most one parameter's list of field
// paths.
//
// A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
// For example, a field path that references the zone field of a workflow
// template's cluster selector would be specified as
// `placement.clusterSelector.zone`.
// A field path is similar in syntax to a
// [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
// field path that references the zone field of a workflow template's cluster
// selector would be specified as `placement.clusterSelector.zone`.
//
// Also, field paths can reference fields using the following syntax:
//
@@ -498,13 +531,15 @@ message WorkflowMetadata {
int32 version = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The create cluster operation metadata.
ClusterOperation create_cluster = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
ClusterOperation create_cluster = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The workflow graph.
WorkflowGraph graph = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The delete cluster operation metadata.
ClusterOperation delete_cluster = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
ClusterOperation delete_cluster = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The workflow state.
State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -516,13 +551,35 @@ message WorkflowMetadata {
map<string, string> parameters = 8;
// Output only. Workflow start time.
google.protobuf.Timestamp start_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp start_time = 9
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Workflow end time.
google.protobuf.Timestamp end_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
google.protobuf.Timestamp end_time = 10
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The UUID of target cluster.
string cluster_uuid = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The timeout duration for the DAG of jobs.
// Minimum timeout duration is 10 minutes and maximum is 24 hours, expressed
// as a
// [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping].
// For example, "1800" = 1800 seconds/30 minutes duration.
google.protobuf.Duration dag_timeout = 12
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. DAG start time, which is only set for workflows with
// [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout]
// when the DAG begins.
google.protobuf.Timestamp dag_start_time = 13
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. DAG end time, which is only set for workflows with
// [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout]
// when the DAG ends.
google.protobuf.Timestamp dag_end_time = 14
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The cluster operation triggered by a workflow.
@@ -571,7 +628,8 @@ message WorkflowNode {
string step_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Node's prerequisite nodes.
repeated string prerequisite_step_ids = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated string prerequisite_step_ids = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The job id; populated after the node enters RUNNING state.
string job_id = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -753,7 +811,8 @@ message ListWorkflowTemplatesRequest {
// A response to a request to list workflow templates in a project.
message ListWorkflowTemplatesResponse {
// Output only. WorkflowTemplates list.
repeated WorkflowTemplate templates = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
repeated WorkflowTemplate templates = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. This token is included in the response if there are more
// results to fetch. To fetch additional results, provide this value as the
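
Finally, a sketch of the new `dag_timeout` on `WorkflowTemplate`; the template id, labels, and job are illustrative, and the client import is an assumption:

```python
# Sketch of WorkflowTemplate.dag_timeout
# (assumes the google-cloud-dataproc v1beta2 client).
from google.cloud import dataproc_v1beta2

template = dataproc_v1beta2.WorkflowTemplate(
    id="example-template",  # illustrative
    placement=dataproc_v1beta2.WorkflowTemplatePlacement(
        cluster_selector=dataproc_v1beta2.ClusterSelector(
            cluster_labels={"env": "dev"},  # illustrative
        ),
    ),
    jobs=[
        dataproc_v1beta2.OrderedJob(
            step_id="step-1",
            hadoop_job=dataproc_v1beta2.HadoopJob(
                main_class="org.example.Main",  # illustrative
            ),
        ),
    ],
    # Must be between 10 minutes ("10m") and 24 hours ("24h"/"1d");
    # the timer starts when the first job is submitted.
    dag_timeout={"seconds": 1800},  # 30 minutes
)
```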