Add annotations to Dataproc v1. (Also forwarding comment changes from internal source control.)

Committer: @lukesneeringer
PiperOrigin-RevId: 285197557
This commit is contained in:
Google APIs 2019-12-12 08:38:14 -08:00 committed by Copybara-Service
parent 19c4589a3c
commit 9f6eeebf1f
6 changed files with 265 additions and 61 deletions

View File

@@ -3,11 +3,12 @@ common:
api_version: v1
organization_name: google-cloud
proto_deps:
- name: google-common-protos
- name: google-common-protos
src_proto_paths:
- v1
service_yaml: v1/dataproc.yaml
gapic_yaml: v1/dataproc_gapic.yaml
proto_package: google.cloud.dataproc.v1
artifacts:
- name: gapic_config
type: GAPIC_CONFIG

View File

@@ -30,7 +30,7 @@ option java_outer_classname = "AutoscalingPoliciesProto";
option java_package = "com.google.cloud.dataproc.v1";
// The API interface for managing autoscaling policies in the
// Google Cloud Dataproc API.
// Dataproc API.
service AutoscalingPolicyService {
option (google.api.default_host) = "dataproc.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";

View File

@@ -65,6 +65,7 @@ service ClusterController {
response_type: "Cluster"
metadata_type: "google.cloud.dataproc.v1.ClusterOperationMetadata"
};
option (google.api.method_signature) = "project_id,region,cluster_name,cluster,update_mask";
}
// Deletes a cluster in a project. The returned
@@ -122,15 +123,15 @@ service ClusterController {
// a cluster of Compute Engine instances.
message Cluster {
// Required. The Google Cloud Platform project ID that the cluster belongs to.
string project_id = 1;
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster name. Cluster names within a project must be
// unique. Names of deleted clusters can be reused.
string cluster_name = 2;
string cluster_name = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster config. Note that Cloud Dataproc may set
// Required. The cluster config. Note that Dataproc may set
// default values, and values may change when clusters are updated.
ClusterConfig config = 3;
ClusterConfig config = 3 [(google.api.field_behavior) = REQUIRED];
// Optional. The labels to associate with this cluster.
// Label **keys** must contain 1 to 63 characters, and must conform to
@@ -147,7 +148,7 @@ message Cluster {
// Output only. The previous cluster status.
repeated ClusterStatus status_history = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
// Output only. A cluster UUID (Unique Universal Identifier). Dataproc
// generates this value when it creates the cluster.
string cluster_uuid = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -160,14 +161,14 @@ message Cluster {
// The cluster config.
message ClusterConfig {
// Optional. A Google Cloud Storage bucket used to stage job
// Optional. A Cloud Storage bucket used to stage job
// dependencies, config files, and job driver console output.
// If you do not specify a staging bucket, Cloud
// Dataproc will determine a Cloud Storage location (US,
// ASIA, or EU) for your cluster's staging bucket according to the Google
// ASIA, or EU) for your cluster's staging bucket according to the
// Compute Engine zone where your cluster is deployed, and then create
// and manage this project-level, per-location bucket (see
// [Cloud Dataproc staging
// [Dataproc staging
// bucket](/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
string config_bucket = 1 [(google.api.field_behavior) = OPTIONAL];
@@ -226,7 +227,7 @@ message AutoscalingConfig {
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
// * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
//
// Note that the policy must be in the same project and Cloud Dataproc region.
// Note that the policy must be in the same project and Dataproc region.
string policy_uri = 1 [(google.api.field_behavior) = OPTIONAL];
}
@@ -242,7 +243,7 @@ message EncryptionConfig {
message GceClusterConfig {
// Optional. The zone where the Compute Engine cluster will be located.
// On a create request, it is required in the "global" region. If omitted
// in a non-global Cloud Dataproc region, the service will pick a zone in the
// in a non-global Dataproc region, the service will pick a zone in the
// corresponding Compute Engine region. On a get request, zone will
// always be present.
//
@@ -284,17 +285,17 @@ message GceClusterConfig {
// configured to be accessible without external IP addresses.
bool internal_ip_only = 7 [(google.api.field_behavior) = OPTIONAL];
// Optional. The service account of the instances. Defaults to the default
// Compute Engine service account. Custom service accounts need
// permissions equivalent to the following IAM roles:
// Optional. The [Dataproc service
// account](/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc)
// (also see [VM Data Plane
// identity](/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
// used by Dataproc cluster VM instances to access Google Cloud Platform
// services.
//
// * roles/logging.logWriter
// * roles/storage.objectAdmin
//
// (see
// https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
// for more information).
// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
// If not specified, the
// [Compute Engine default service
// account](/compute/docs/access/service-accounts#default_service_account)
// is used.
string service_account = 8 [(google.api.field_behavior) = OPTIONAL];
// Optional. The URIs of service account scopes to be included in
@@ -330,7 +331,7 @@ message InstanceGroupConfig {
// For master instance groups, must be set to 1.
int32 num_instances = 1 [(google.api.field_behavior) = OPTIONAL];
// Output only. The list of instance names. Cloud Dataproc derives the names
// Output only. The list of instance names. Dataproc derives the names
// from `cluster_name`, `num_instances`, and the instance group.
repeated string instance_names = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
@@ -347,7 +348,7 @@ message InstanceGroupConfig {
// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
// * `n1-standard-2`
//
// **Auto Zone Exception**: If you are using the Cloud Dataproc
// **Auto Zone Exception**: If you are using the Dataproc
// [Auto Zone
// Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the machine type
@@ -371,7 +372,7 @@ message InstanceGroupConfig {
repeated AcceleratorConfig accelerators = 8 [(google.api.field_behavior) = OPTIONAL];
// Optional. Specifies the minimum cpu platform for the Instance Group.
// See [Cloud Dataproc→Minimum CPU Platform]
// See [Dataproc→Minimum CPU Platform]
// (/dataproc/docs/concepts/compute/dataproc-min-cpu).
string min_cpu_platform = 9 [(google.api.field_behavior) = OPTIONAL];
}
@@ -400,7 +401,7 @@ message AcceleratorConfig {
// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
// * `nvidia-tesla-k80`
//
// **Auto Zone Exception**: If you are using the Cloud Dataproc
// **Auto Zone Exception**: If you are using the Dataproc
// [Auto Zone
// Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the accelerator type
@@ -479,7 +480,7 @@ message ClusterStatus {
UNHEALTHY = 1;
// The agent-reported status is out of date (may occur if
// Cloud Dataproc loses communication with Agent).
// Dataproc loses communication with Agent).
//
// Applies to RUNNING state.
STALE_STATUS = 2;
@@ -580,7 +581,7 @@ message KerberosConfig {
// Specifies the selection and config of software inside the cluster.
message SoftwareConfig {
// Optional. The version of software inside the cluster. It must be one of the
// supported [Cloud Dataproc
// supported [Dataproc
// Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
// such as "1.2" (including a subminor version, such as "1.2.29"), or the
// ["preview"
@@ -630,7 +631,7 @@ message CreateClusterRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster to create.
@@ -656,7 +657,7 @@ message UpdateClusterRequest {
// cluster belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 5 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster name.
@@ -748,7 +749,7 @@ message DeleteClusterRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster name.
@@ -778,7 +779,7 @@ message GetClusterRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster name.
@@ -791,7 +792,7 @@ message ListClustersRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 4 [(google.api.field_behavior) = REQUIRED];
// Optional. A filter constraining the clusters to list. Filters are
@@ -839,7 +840,7 @@ message DiagnoseClusterRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The cluster name.

View File

@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
@@ -132,7 +131,7 @@ message LoggingConfig {
map<string, Level> driver_log_levels = 2;
}
// A Cloud Dataproc job for running
// A Dataproc job for running
// [Apache Hadoop
// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
// jobs on [Apache Hadoop
@@ -176,7 +175,7 @@ message HadoopJob {
repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. A mapping of property names to values, used to configure Hadoop.
// Properties that conflict with values set by the Cloud Dataproc API may be
// Properties that conflict with values set by the Dataproc API may be
// overwritten. Can include properties set in /etc/hadoop/conf/*-site and
// classes in user code.
map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
@@ -185,7 +184,7 @@ message HadoopJob {
LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
}
// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
// A Dataproc job for running [Apache Spark](http://spark.apache.org/)
// applications on YARN.
message SparkJob {
// Required. The specification of the main method to call to drive the job.
@@ -221,7 +220,7 @@ message SparkJob {
repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. A mapping of property names to values, used to configure Spark.
// Properties that conflict with values set by the Cloud Dataproc API may be
// Properties that conflict with values set by the Dataproc API may be
// overwritten. Can include properties set in
// /etc/spark/conf/spark-defaults.conf and classes in user code.
map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
@@ -230,7 +229,7 @@ message SparkJob {
LoggingConfig logging_config = 8 [(google.api.field_behavior) = OPTIONAL];
}
// A Cloud Dataproc job for running
// A Dataproc job for running
// [Apache
// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
// applications on YARN.
@@ -261,7 +260,7 @@ message PySparkJob {
repeated string archive_uris = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. A mapping of property names to values, used to configure PySpark.
// Properties that conflict with values set by the Cloud Dataproc API may be
// Properties that conflict with values set by the Dataproc API may be
// overwritten. Can include properties set in
// /etc/spark/conf/spark-defaults.conf and classes in user code.
map<string, string> properties = 7 [(google.api.field_behavior) = OPTIONAL];
@@ -289,7 +288,7 @@ message QueryList {
repeated string queries = 1 [(google.api.field_behavior) = REQUIRED];
}
// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
// A Dataproc job for running [Apache Hive](https://hive.apache.org/)
// queries on YARN.
message HiveJob {
// Required. The sequence of Hive queries to execute, specified as either
@@ -312,7 +311,7 @@ message HiveJob {
map<string, string> script_variables = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. A mapping of property names and values, used to configure Hive.
// Properties that conflict with values set by the Cloud Dataproc API may be
// Properties that conflict with values set by the Dataproc API may be
// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
// /etc/hive/conf/hive-site.xml, and classes in user code.
map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
@@ -323,7 +322,7 @@ message HiveJob {
repeated string jar_file_uris = 6 [(google.api.field_behavior) = OPTIONAL];
}
// A Cloud Dataproc job for running [Apache Spark
// A Dataproc job for running [Apache Spark
// SQL](http://spark.apache.org/sql/) queries.
message SparkSqlJob {
// Required. The sequence of Spark SQL queries to execute, specified as
@@ -342,7 +341,7 @@ message SparkSqlJob {
// Optional. A mapping of property names to values, used to configure
// Spark SQL's SparkConf. Properties that conflict with values set by the
// Cloud Dataproc API may be overwritten.
// Dataproc API may be overwritten.
map<string, string> properties = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
@@ -352,7 +351,7 @@ message SparkSqlJob {
LoggingConfig logging_config = 6 [(google.api.field_behavior) = OPTIONAL];
}
// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
// A Dataproc job for running [Apache Pig](https://pig.apache.org/)
// queries on YARN.
message PigJob {
// Required. The sequence of Pig queries to execute, specified as an HCFS
@@ -375,7 +374,7 @@ message PigJob {
map<string, string> script_variables = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. A mapping of property names to values, used to configure Pig.
// Properties that conflict with values set by the Cloud Dataproc API may be
// Properties that conflict with values set by the Dataproc API may be
// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
// /etc/pig/conf/pig.properties, and classes in user code.
map<string, string> properties = 5 [(google.api.field_behavior) = OPTIONAL];
@@ -388,17 +387,17 @@ message PigJob {
LoggingConfig logging_config = 7 [(google.api.field_behavior) = OPTIONAL];
}
// Cloud Dataproc job config.
// Dataproc job config.
message JobPlacement {
// Required. The name of the cluster where the job will be submitted.
string cluster_name = 1 [(google.api.field_behavior) = REQUIRED];
// Output only. A cluster UUID generated by the Cloud Dataproc service when
// Output only. A cluster UUID generated by the Dataproc service when
// the job is submitted.
string cluster_uuid = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Cloud Dataproc job status.
// Dataproc job status.
message JobStatus {
// The job state.
enum State {
@@ -456,7 +455,7 @@ message JobStatus {
QUEUED = 2;
// The agent-reported status is out of date, which may be caused by a
// loss of communication between the agent and Cloud Dataproc. If the
// loss of communication between the agent and Dataproc. If the
// agent does not send a timely update, the job will fail.
//
// Applies to RUNNING state.
@@ -549,7 +548,7 @@ message YarnApplication {
string tracking_url = 4 [(google.api.field_behavior) = OPTIONAL];
}
// A Cloud Dataproc job resource.
// A Dataproc job resource.
message Job {
// Optional. The fully qualified reference to the job, which can be used to
// obtain the equivalent REST path of the job resource. If this property
@@ -642,7 +641,7 @@ message SubmitJobRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The job resource.
@@ -668,7 +667,7 @@ message GetJobRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The job ID.
@@ -694,7 +693,7 @@ message ListJobsRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 6 [(google.api.field_behavior) = REQUIRED];
// Optional. The number of results to return in each response.
@@ -737,7 +736,7 @@ message UpdateJobRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The job ID.
@@ -772,7 +771,7 @@ message CancelJobRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The job ID.
@@ -785,7 +784,7 @@ message DeleteJobRequest {
// belongs to.
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The Cloud Dataproc region in which to handle the request.
// Required. The Dataproc region in which to handle the request.
string region = 3 [(google.api.field_behavior) = REQUIRED];
// Required. The job ID.

View File

@@ -33,7 +33,7 @@ option java_outer_classname = "WorkflowTemplatesProto";
option java_package = "com.google.cloud.dataproc.v1";
// The API interface for managing Workflow Templates in the
// Cloud Dataproc API.
// Dataproc API.
service WorkflowTemplateService {
option (google.api.default_host) = "dataproc.googleapis.com";
option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
@@ -179,7 +179,7 @@ service WorkflowTemplateService {
}
}
// A Cloud Dataproc workflow template resource.
// A Dataproc workflow template resource.
message WorkflowTemplate {
option (google.api.resource) = {
type: "dataproc.googleapis.com/WorkflowTemplate"
@@ -458,7 +458,7 @@ message ValueValidation {
repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
}
// A Cloud Dataproc workflow template resource.
// A Dataproc workflow template resource.
message WorkflowMetadata {
// The operation state.
enum State {

View File

@@ -0,0 +1,203 @@
{
"methodConfig": [
{
"name": [
{
"service": "google.cloud.dataproc.v1.AutoscalingPolicyService",
"method": "CreateAutoscalingPolicy"
},
{
"service": "google.cloud.dataproc.v1.AutoscalingPolicyService",
"method": "DeleteAutoscalingPolicy"
}
],
"timeout": "600s"
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.AutoscalingPolicyService",
"method": "UpdateAutoscalingPolicy"
},
{
"service": "google.cloud.dataproc.v1.AutoscalingPolicyService",
"method": "GetAutoscalingPolicy"
},
{
"service": "google.cloud.dataproc.v1.AutoscalingPolicyService",
"method": "ListAutoscalingPolicies"
}
],
"timeout": "600s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "GetJob"
},
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "ListJobs"
},
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "CancelJob"
}
],
"timeout": "900s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"DEADLINE_EXCEEDED",
"INTERNAL",
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "CreateWorkflowTemplate"
},
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "InstantiateWorkflowTemplate"
},
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "InstantiateInlineWorkflowTemplate"
},
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "UpdateWorkflowTemplate"
},
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "DeleteWorkflowTemplate"
}
],
"timeout": "600s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "GetWorkflowTemplate"
},
{
"service": "google.cloud.dataproc.v1.WorkflowTemplateService",
"method": "ListWorkflowTemplates"
}
],
"timeout": "600s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"DEADLINE_EXCEEDED",
"INTERNAL",
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "CreateCluster"
},
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "UpdateCluster"
},
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "DeleteCluster"
},
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "DiagnoseCluster"
}
],
"timeout": "300s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "GetCluster"
},
{
"service": "google.cloud.dataproc.v1.ClusterController",
"method": "ListClusters"
}
],
"timeout": "300s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"INTERNAL",
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "SubmitJob"
},
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "UpdateJob"
},
{
"service": "google.cloud.dataproc.v1.JobController",
"method": "DeleteJob"
}
],
"timeout": "900s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"UNAVAILABLE"
]
}
}
]
}