Synchronize new proto/yaml changes.

PiperOrigin-RevId: 214624321
This commit is contained in:
Google APIs 2018-09-26 10:16:57 -07:00 committed by Copybara-Service
parent f9dc259418
commit 9d0fbbbc88
7 changed files with 606 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
# Artman build configuration for the BigQuery Storage v1beta1 API:
# identifies the proto sources and lists the client-library artifacts
# to generate.
common:
  api_name: bigquerystorage
  api_version: v1beta1
  organization_name: google-cloud
  # External proto dependencies required to compile the API protos.
  proto_deps:
    - name: google-common-protos
  # Directories (relative to this file) containing the API's .proto sources.
  src_proto_paths:
    - v1beta1
  service_yaml: storage_v1beta1.yaml
  gapic_yaml: v1beta1/bigquerystorage_gapic.yaml
# One artifact entry per generation target: the GAPIC config itself plus a
# generated client for each supported language.
artifacts:
  - name: gapic_config
    type: GAPIC_CONFIG
  - name: java_gapic
    type: GAPIC
    language: JAVA
  - name: python_gapic
    type: GAPIC
    language: PYTHON
  - name: nodejs_gapic
    type: GAPIC
    language: NODEJS
  - name: php_gapic
    type: GAPIC
    language: PHP
  - name: go_gapic
    type: GAPIC
    language: GO
  - name: ruby_gapic
    type: GAPIC
    language: RUBY
  - name: csharp_gapic
    type: GAPIC
    language: CSHARP

View File

@@ -0,0 +1,15 @@
# google.api.Service configuration for the BigQuery Storage API
# (bigquerystorage.googleapis.com).
type: google.api.Service
config_version: 3
name: bigquerystorage.googleapis.com
title: BigQuery Storage API
# The API surfaces exposed by this service.
apis:
- name: google.cloud.bigquery.storage.v1beta1.BigQueryStorage
# OAuth scopes accepted for every method (the '*' selector matches all RPCs).
authentication:
  rules:
  - selector: '*'
    oauth:
      canonical_scopes: |-
        https://www.googleapis.com/auth/bigquery,
        https://www.googleapis.com/auth/cloud-platform

View File

@@ -0,0 +1,39 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta1;
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
option java_outer_classname = "AvroProto";
option java_package = "com.google.cloud.bigquery.storage.v1beta1";
// Avro schema.
message AvroSchema {
  // JSON-serialized schema, as described at
  // https://avro.apache.org/docs/1.8.1/spec.html
  string schema = 1;
}
// Avro rows.
message AvroRows {
  // Binary serialized rows in a block.
  bytes serialized_binary_rows = 1;

  // The count of rows in the returned block.
  int64 row_count = 2;
}

View File

@@ -0,0 +1,188 @@
# GAPIC generator configuration for the BigQuery Storage v1beta1 client
# libraries: per-language package names, retry/backoff policies, and
# per-method generation settings.
type: com.google.api.codegen.ConfigProto
config_schema_version: 1.0.0
# The settings of generated code in a specific language.
language_settings:
  java:
    package_name: com.google.cloud.bigquery.storage.v1beta1
    interface_names:
      google.cloud.bigquery.storage.v1beta1.BigQueryStorage: BaseBigQueryStorage
  python:
    package_name: google.cloud.bigquery.storage_v1beta1.gapic
  go:
    package_name: cloud.google.com/go/bigquery/storage/apiv1beta1
  csharp:
    package_name: Google.Cloud.Bigquery.Storage.V1beta1
  ruby:
    package_name: Google::Cloud::Bigquery::Storage::V1beta1
  php:
    package_name: Google\Cloud\Bigquery\Storage\V1beta1
  nodejs:
    package_name: storage.v1beta1
# The configuration for the license header to put on generated files.
license_header:
  # The file containing the copyright line(s).
  copyright_file: copyright-google.txt
  # The file containing the raw license header without any copyright line(s).
  license_file: license-header-apache-2.0.txt
# A list of API interface configurations.
interfaces:
  # The fully qualified name of the API interface.
- name: google.cloud.bigquery.storage.v1beta1.BigQueryStorage
  # A list of resource collection configurations.
  # Consists of a name_pattern and an entity_name.
  # The name_pattern is a pattern to describe the names of the resources of this
  # collection, using the platform's conventions for URI patterns. A generator
  # may use this to generate methods to compose and decompose such names. The
  # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`;
  # those will be taken as hints for the parameter names of the generated
  # methods. If empty, no name methods are generated.
  # The entity_name is the name to be used as a basis for generated methods and
  # classes.
  collections: []
  # Definition for retryable codes.
  retry_codes_def:
  - name: idempotent
    retry_codes:
    - DEADLINE_EXCEEDED
    - UNAVAILABLE
  - name: non_idempotent
    retry_codes: []
  # Definition for retry/backoff parameters.
  retry_params_def:
  - name: default
    initial_retry_delay_millis: 100
    retry_delay_multiplier: 1.3
    max_retry_delay_millis: 60000
    initial_rpc_timeout_millis: 20000
    rpc_timeout_multiplier: 1
    max_rpc_timeout_millis: 20000
    total_timeout_millis: 600000
  # A list of method configurations.
  # Common properties:
  #
  # name - The simple name of the method.
  #
  # flattening - Specifies the configuration for parameter flattening.
  # Describes the parameter groups for which a generator should produce method
  # overloads which allow a client to directly pass request message fields as
  # method parameters. This information may or may not be used, depending on
  # the target language.
  # Consists of groups, which each represent a list of parameters to be
  # flattened. Each parameter listed must be a field of the request message.
  #
  # required_fields - Fields that are always required for a request to be
  # valid.
  #
  # request_object_method - Turns on or off the generation of a method whose
  # sole parameter is a request object. Not all languages will generate this
  # method.
  #
  # resource_name_treatment - An enum that specifies how to treat the resource
  # name formats defined in the field_name_patterns and
  # response_field_name_patterns fields.
  # UNSET: default value
  # NONE: the collection configs will not be used by the generated code.
  # VALIDATE: string fields will be validated by the client against the
  # specified resource name formats.
  # STATIC_TYPES: the client will use generated types for resource names.
  #
  # page_streaming - Specifies the configuration for paging.
  # Describes information for generating a method which transforms a paging
  # list RPC into a stream of resources.
  # Consists of a request and a response.
  # The request specifies request information of the list method. It defines
  # which fields match the paging pattern in the request. The request consists
  # of a page_size_field and a token_field. The page_size_field is the name of
  # the optional field specifying the maximum number of elements to be
  # returned in the response. The token_field is the name of the field in the
  # request containing the page token.
  # The response specifies response information of the list method. It defines
  # which fields match the paging pattern in the response. The response
  # consists of a token_field and a resources_field. The token_field is the
  # name of the field in the response containing the next page token. The
  # resources_field is the name of the field in the response containing the
  # list of resources belonging to the page.
  #
  # retry_codes_name - Specifies the configuration for retryable codes. The
  # name must be defined in interfaces.retry_codes_def.
  #
  # retry_params_name - Specifies the configuration for retry/backoff
  # parameters. The name must be defined in interfaces.retry_params_def.
  #
  # field_name_patterns - Maps the field name of the request type to
  # entity_name of interfaces.collections.
  # Specifies the string pattern that the field must follow.
  #
  # timeout_millis - Specifies the default timeout for a non-retrying call. If
  # the call is retrying, refer to retry_params_name instead.
  methods:
  - name: CreateReadSession
    flattening:
      groups:
      - parameters:
        - table_reference
        - requested_streams
    required_fields:
    - table_reference
    - requested_streams
    request_object_method: true
    retry_codes_name: idempotent
    retry_params_name: default
    timeout_millis: 60000
    header_request_params:
    - table_reference.project_id
    - table_reference.dataset_id
  - name: ReadRows
    flattening:
      groups:
      - parameters:
        - read_position
    required_fields:
    - read_position
    request_object_method: false
    retry_codes_name: idempotent
    retry_params_name: default
    # 24 hours, in milliseconds — ReadRows is a long-lived streaming call.
    timeout_millis: 86400000
    header_request_params:
    - read_position.stream.name
  - name: BatchCreateReadSessionStreams
    flattening:
      groups:
      - parameters:
        - session
        - requested_streams
    required_fields:
    - session
    - requested_streams
    request_object_method: true
    retry_codes_name: idempotent
    retry_params_name: default
    timeout_millis: 60000
    header_request_params:
    - session.name
  - name: FinalizeStream
    flattening:
      groups:
      - parameters:
        - stream
    required_fields:
    - stream
    request_object_method: false
    retry_codes_name: idempotent
    retry_params_name: default
    timeout_millis: 60000
    header_request_params:
    - stream.name
  - name: SplitReadStream
    flattening:
      groups:
      - parameters:
        - original_stream
    required_fields:
    - original_stream
    request_object_method: true
    retry_codes_name: idempotent
    retry_params_name: default
    timeout_millis: 60000
    header_request_params:
    - original_stream.name

View File

@@ -0,0 +1,39 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta1;
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
option java_package = "com.google.cloud.bigquery.storage.v1beta1";
// Options dictating how we read a table.
message TableReadOptions {
  // Optional. Names of the fields in the table that should be read. If empty,
  // all fields will be read. If the specified field is a nested field, all the
  // sub-fields in the field will be selected. The output field order is
  // unrelated to the order of fields in selected_fields.
  repeated string selected_fields = 1;

  // Optional. SQL text filtering statement, similar to a WHERE clause in
  // a query. Currently, we support combinations of predicates that are
  // a comparison between a column and a constant value in a SQL statement.
  // Aggregates are not supported.
  //
  // Example: "a > DATE '2014-9-27' AND (b > 5 and C LIKE 'date')"
  string row_restriction = 2;
}

View File

@@ -0,0 +1,248 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta1;
import "google/cloud/bigquery/storage/v1beta1/avro.proto";
import "google/cloud/bigquery/storage/v1beta1/read_options.proto";
import "google/cloud/bigquery/storage/v1beta1/table_reference.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
option java_package = "com.google.cloud.bigquery.storage.v1beta1";
// BigQuery storage API.
//
// The BigQuery storage API can be used to read data stored in BigQuery.
service BigQueryStorage {
  // Creates a new read session. A read session divides the contents of a
  // BigQuery table into one or more streams, which can then be used to read
  // data from the table. The read session also specifies properties of the
  // data to be read, such as a list of columns or a push-down filter describing
  // the rows to be returned.
  //
  // A particular row can be read by at most one stream. When the caller has
  // reached the end of each stream in the session, then all the data in the
  // table has been read.
  //
  // Read sessions automatically expire 24 hours after they are created and do
  // not require manual clean-up by the caller.
  rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
  }

  // Reads rows from the table in the format prescribed by the read session.
  // Each response contains one or more table rows, up to a maximum of 10 MiB
  // per response; read requests which attempt to read individual rows larger
  // than this will fail.
  //
  // Each request also returns a set of stream statistics reflecting the
  // estimated total number of rows in the read stream. This number is computed
  // based on the total table size and the number of active streams in the read
  // session, and may change as other streams continue to read data.
  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
  }

  // Creates additional streams for a ReadSession. This API can be used to
  // dynamically adjust the parallelism of a batch processing task upwards by
  // adding additional workers.
  rpc BatchCreateReadSessionStreams(BatchCreateReadSessionStreamsRequest) returns (BatchCreateReadSessionStreamsResponse) {
  }

  // Triggers the graceful termination of a single stream in a ReadSession. This
  // API can be used to dynamically adjust the parallelism of a batch processing
  // task downwards without losing data.
  //
  // This API does not delete the stream -- it remains visible in the
  // ReadSession, and any data processed by the stream is not released to other
  // streams. However, no additional data will be assigned to the stream once
  // this call completes. Callers must continue reading data on the stream until
  // the end of the stream is reached so that data which has already been
  // assigned to the stream will be processed.
  //
  // This method will return an error if there are no other live streams
  // in the Session, or if SplitReadStream() has been called on the given
  // Stream.
  //
  // NOTE(review): returns google.protobuf.Empty; a dedicated
  // FinalizeStreamResponse would allow future extension, but changing it now
  // would break the published API surface.
  rpc FinalizeStream(FinalizeStreamRequest) returns (google.protobuf.Empty) {
  }

  // Splits a given read stream into two Streams. These streams are referred to
  // as the primary and the residual of the split. The original stream can still
  // be read from in the same manner as before. Both of the returned streams can
  // also be read from, and the total rows returned by both child streams will
  // be the same as the rows read from the original stream.
  //
  // Moreover, the two child streams will be allocated back to back in the
  // original Stream. Concretely, it is guaranteed that for streams Original,
  // Primary, and Residual, that Original[0-j] = Primary[0-j] and
  // Original[j-n] = Residual[0-m] once the streams have been read to
  // completion.
  //
  // This method is guaranteed to be idempotent.
  rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
  }
}
// Information about a single data stream within a read session.
message Stream {
  // Name of the stream. In the form
  // `/projects/{project_id}/stream/{stream_id}`
  string name = 1;

  // Rows in the stream.
  int64 row_count = 2;
}
// Expresses a point within a given data stream.
message StreamPosition {
  // The stream this position refers to.
  Stream stream = 1;

  // Position in the stream.
  int64 offset = 2;
}
// Information about the read session, as returned by CreateReadSession.
message ReadSession {
  // Unique identifier for the session. In the form
  // `projects/{project_id}/sessions/{session_id}`
  string name = 1;

  // Time at which the session becomes invalid. After this time, subsequent
  // requests to read this Session will return errors.
  google.protobuf.Timestamp expire_time = 2;

  // The schema for the read. If read_options.selected_fields is set, the
  // schema may be different from the table schema as it will only contain
  // the selected fields.
  oneof schema {
    // Avro schema.
    AvroSchema avro_schema = 5;
  }

  // Streams associated with this session.
  repeated Stream streams = 4;

  // Table that this ReadSession is reading from.
  TableReference table_reference = 7;

  // Any modifiers which are applied when reading from the specified table.
  TableModifiers table_modifiers = 8;
}
// Request message for CreateReadSession.
message CreateReadSessionRequest {
  // Required. Reference to the table to read.
  TableReference table_reference = 1;

  // Required. Project which this ReadSession is associated with. This is the
  // project that will be billed for usage.
  string parent = 6;

  // Optional. Any modifiers to the Table (e.g. snapshot timestamp).
  TableModifiers table_modifiers = 2;

  // Optional. Initial number of streams. If unset or 0, we will
  // provide a value of streams so as to produce reasonable throughput. Must be
  // non-negative. The number of streams may be lower than the requested number,
  // depending on the amount of parallelism that is reasonable for the table and
  // the maximum amount of parallelism allowed by the system.
  //
  // Streams must be read starting from offset 0.
  int32 requested_streams = 3;

  // Optional. Read options for this session (e.g. column selection, filters).
  TableReadOptions read_options = 4;

  // Data output format. Currently default to Avro.
  DataFormat format = 5;
}
// Request message for ReadRows.
message ReadRowsRequest {
  // Required. Identifier of the position in the stream to start reading from.
  // The offset requested must be less than the last row read from ReadRows.
  // Requesting a larger offset is undefined.
  StreamPosition read_position = 1;
}
// Progress information for a given Stream, returned as part of
// ReadRowsResponse.
message StreamStatus {
  // Number of estimated rows in the current stream. May change over time as
  // different readers in the stream progress at rates which are relatively fast
  // or slow.
  int64 estimated_row_count = 1;
}
// Information on if the current connection is being throttled.
message ThrottleStatus {
  // How much this connection is being throttled.
  // 0 is no throttling, 100 is completely throttled.
  int32 throttle_percent = 1;
}
// Response from calling ReadRows. Carries row data plus stream status
// information.
message ReadRowsResponse {
  // Row data is returned in format specified during session creation.
  oneof rows {
    // Serialized row data in AVRO format.
    AvroRows avro_rows = 3;
  }

  // Estimated stream statistics.
  StreamStatus status = 2;

  // Throttling status. If unset, the latest response still describes
  // the current throttling status.
  ThrottleStatus throttle_status = 5;
}
// Request message for BatchCreateReadSessionStreams.
message BatchCreateReadSessionStreamsRequest {
  // Required. Must be a non-expired session obtained from a call to
  // CreateReadSession. Only the name field needs to be set.
  ReadSession session = 1;

  // Required. Number of new streams requested. Must be positive.
  // Number of added streams may be less than this, see CreateReadSessionRequest
  // for more information.
  int32 requested_streams = 2;
}
// Response message for BatchCreateReadSessionStreams.
message BatchCreateReadSessionStreamsResponse {
  // Newly added streams.
  repeated Stream streams = 1;
}
// Request message for FinalizeStream.
//
// NOTE(review): field numbering starts at 2; number 1 is unused here,
// presumably retired upstream — do not reuse it without a `reserved`
// declaration.
message FinalizeStreamRequest {
  // Stream to finalize.
  Stream stream = 2;
}
// Request message for SplitReadStream.
message SplitReadStreamRequest {
  // Stream to split.
  Stream original_stream = 1;
}
// Response message for SplitReadStream.
message SplitReadStreamResponse {
  // Primary stream. Will contain the beginning portion of
  // |original_stream|.
  Stream primary_stream = 1;

  // Remainder stream. Will contain the tail of |original_stream|.
  Stream remainder_stream = 2;
}
// Data format for input or output data.
enum DataFormat {
  // Data format is unspecified. Per proto3 convention the zero value carries
  // no business meaning; CreateReadSession currently defaults to Avro.
  DATA_FORMAT_UNSPECIFIED = 0;

  // Avro is a standard open source row based file format.
  // See https://avro.apache.org/ for more details.
  AVRO = 1;
}

View File

@@ -0,0 +1,43 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta1;
import "google/protobuf/timestamp.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1;storage";
option java_outer_classname = "TableReferenceProto";
option java_package = "com.google.cloud.bigquery.storage.v1beta1";
// Table reference that includes just the 3 strings needed to identify a table.
message TableReference {
  // The assigned project ID of the project.
  string project_id = 1;

  // The ID of the dataset in the above project.
  string dataset_id = 2;

  // The ID of the table in the above dataset.
  string table_id = 3;
}
// All fields in this message are optional.
message TableModifiers {
  // The snapshot time of the table. If not set, interpreted as now.
  google.protobuf.Timestamp snapshot_time = 1;
}