feat: BigQuery Write API V1Beta2 public interface.

PiperOrigin-RevId: 339152802
This commit is contained in:
Google APIs 2020-10-26 17:26:54 -07:00 committed by Copybara-Service
parent 3c8c2d8136
commit b3c2eb02e5
9 changed files with 575 additions and 52 deletions

View File

@ -12,15 +12,21 @@ proto_library(
srcs = [
"arrow.proto",
"avro.proto",
"protobuf.proto",
"storage.proto",
"stream.proto",
"table.proto",
],
deps = [
"//google/api:annotations_proto",
"//google/api:client_proto",
"//google/api:field_behavior_proto",
"//google/rpc:status_proto",
"//google/api:resource_proto",
"@com_google_protobuf//:descriptor_proto",
"@com_google_protobuf//:empty_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:wrappers_proto",
],
)
@ -107,6 +113,7 @@ go_proto_library(
protos = [":storage_proto"],
deps = [
"//google/api:annotations_go_proto",
"//google/rpc:status_go_proto",
],
)
@ -122,10 +129,10 @@ go_gapic_library(
)
go_test(
name = "storage_go_gapic_test",
srcs = [":storage_go_gapic_srcjar_test"],
embed = [":storage_go_gapic"],
importpath = "cloud.google.com/go/bigquery/storage/apiv1beta2",
name = "storage_go_gapic_test",
srcs = [":storage_go_gapic_srcjar_test"],
embed = [":storage_go_gapic"],
importpath = "cloud.google.com/go/bigquery/storage/apiv1beta2",
)
# Open Source Packages
@ -234,46 +241,45 @@ nodejs_gapic_assembly_pkg(
##############################################################################
# Ruby
##############################################################################
load(
"@com_google_googleapis_imports//:imports.bzl",
"ruby_gapic_assembly_pkg",
"ruby_gapic_library",
"ruby_grpc_library",
"ruby_proto_library",
)
# load(
# "@com_google_googleapis_imports//:imports.bzl",
# "ruby_gapic_assembly_pkg",
# "ruby_gapic_library",
# "ruby_grpc_library",
# "ruby_proto_library",
# )
ruby_proto_library(
name = "storage_ruby_proto",
deps = [":storage_proto"],
)
# ruby_proto_library(
# name = "storage_ruby_proto",
# deps = [":storage_proto"],
# )
ruby_grpc_library(
name = "storage_ruby_grpc",
srcs = [":storage_proto"],
deps = [":storage_ruby_proto"],
)
# ruby_grpc_library(
# name = "storage_ruby_grpc",
# srcs = [":storage_proto"],
# deps = [":storage_ruby_proto"],
# )
ruby_gapic_library(
name = "storage_ruby_gapic",
src = ":storage_proto_with_info",
gapic_yaml = "bigquerystorage_gapic.yaml",
package = "google.cloud.bigquery.storage.v1beta2",
service_yaml = "bigquerystorage_v1beta2.yaml",
deps = [
":storage_ruby_grpc",
":storage_ruby_proto",
],
)
# ruby_gapic_library(
# name = "storage_ruby_gapic",
# src = ":storage_proto_with_info",
# gapic_yaml = "bigquerystorage_gapic.yaml",
# package = "google.cloud.bigquery.storage.v1beta2",
# service_yaml = "bigquerystorage_v1beta2.yaml",
# deps = [
# ":storage_ruby_grpc",
# ":storage_ruby_proto",
# ],
# )
# Open Source Packages
ruby_gapic_assembly_pkg(
name = "google-cloud-bigquery-storage-v1beta2-ruby",
deps = [
":storage_ruby_gapic",
":storage_ruby_grpc",
":storage_ruby_proto",
],
)
# ruby_gapic_assembly_pkg(
# name = "google-cloud-bigquery-storage-v1beta2-ruby",
# deps = [
# ":storage_ruby_gapic",
# ":storage_ruby_grpc",
# ":storage_ruby_proto",
# ],
# )
##############################################################################
# C#
@ -300,8 +306,8 @@ csharp_grpc_library(
csharp_gapic_library(
name = "storage_csharp_gapic",
srcs = [":storage_proto_with_info"],
grpc_service_config = "bigquerystorage_grpc_service_config.json",
common_resources_config = "@gax_dotnet//:Google.Api.Gax/ResourceNames/CommonResourcesConfig.json",
grpc_service_config = "bigquerystorage_grpc_service_config.json",
deps = [
":storage_csharp_grpc",
":storage_csharp_proto",

View File

@ -1,4 +1,4 @@
// Copyright 2019 Google LLC.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";

View File

@ -1,4 +1,4 @@
// Copyright 2019 Google LLC.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";

View File

@ -52,6 +52,73 @@
"UNAVAILABLE"
]
}
},
{
"name": [
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "CreateWriteStream"
}
],
"timeout": "600s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE",
"RESOURCE_EXHAUSTED"
]
}
},
{
"name": [
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "AppendRows"
}
],
"timeout": "86400s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"UNAVAILABLE",
"RESOURCE_EXHAUSTED"
]
}
},
{
"name": [
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "BatchCommitWriteStreams"
},
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "FinalizeWriteStream"
},
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "GetWriteStream"
},
{
"service": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite",
"method": "FlushRows"
}
],
"timeout": "600s",
"retryPolicy": {
"initialBackoff": "0.100s",
"maxBackoff": "60s",
"backoffMultiplier": 1.3,
"retryableStatusCodes": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
]
}
}
]
}

View File

@ -5,6 +5,7 @@ title: BigQuery Storage API
apis:
- name: google.cloud.bigquery.storage.v1beta2.BigQueryRead
- name: google.cloud.bigquery.storage.v1beta2.BigQueryWrite
backend:
rules:
@ -14,6 +15,10 @@ backend:
deadline: 21600.0
- selector: google.cloud.bigquery.storage.v1beta2.BigQueryRead.SplitReadStream
deadline: 120.0
- selector: 'google.cloud.bigquery.storage.v1beta2.BigQueryWrite.*'
deadline: 120.0
- selector: google.cloud.bigquery.storage.v1beta2.BigQueryWrite.AppendRows
deadline: 21600.0
authentication:
rules:
@ -23,3 +28,9 @@ authentication:
https://www.googleapis.com/auth/bigquery,
https://www.googleapis.com/auth/bigquery.readonly,
https://www.googleapis.com/auth/cloud-platform
- selector: 'google.cloud.bigquery.storage.v1beta2.BigQueryWrite.*'
oauth:
canonical_scopes: |-
https://www.googleapis.com/auth/bigquery,
https://www.googleapis.com/auth/bigquery.insertdata,
https://www.googleapis.com/auth/cloud-platform

View File

@ -0,0 +1,41 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta2;
import "google/protobuf/descriptor.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage";
option java_multiple_files = true;
option java_outer_classname = "ProtoBufProto";
option java_package = "com.google.cloud.bigquery.storage.v1beta2";
// ProtoSchema is an API presentation of the protocol buffer schema used to
// serialize the row data sent in `AppendRowsRequest`.
message ProtoSchema {
// Descriptor for the input message. The descriptor has to be self contained,
// including all the nested types, except for proto buffer well-known types
// (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf).
google.protobuf.DescriptorProto proto_descriptor = 1;
}
// Rows serialized in protocol buffer format.
message ProtoRows {
// A sequence of rows, each serialized as a Protocol Buffer message using the
// schema given in `ProtoSchema`.
//
// See https://developers.google.com/protocol-buffers/docs/overview for more
// information on deserializing this field.
repeated bytes serialized_rows = 1;
}

View File

@ -1,4 +1,4 @@
// Copyright 2019 Google LLC.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
@ -23,7 +22,12 @@ import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/bigquery/storage/v1beta2/arrow.proto";
import "google/cloud/bigquery/storage/v1beta2/avro.proto";
import "google/cloud/bigquery/storage/v1beta2/protobuf.proto";
import "google/cloud/bigquery/storage/v1beta2/stream.proto";
import "google/cloud/bigquery/storage/v1beta2/table.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage";
option java_multiple_files = true;
@ -33,6 +37,9 @@ option java_package = "com.google.cloud.bigquery.storage.v1beta2";
// BigQuery Read API.
//
// The Read API can be used to read data from BigQuery.
//
// New code should use the v1 Read API going forward, if they don't use Write
// API at the same time.
service BigQueryRead {
option (google.api.default_host) = "bigquerystorage.googleapis.com";
option (google.api.oauth_scopes) =
@ -64,7 +71,8 @@ service BigQueryRead {
post: "/v1beta2/{read_session.table=projects/*/datasets/*/tables/*}"
body: "*"
};
option (google.api.method_signature) = "parent,read_session,max_stream_count";
option (google.api.method_signature) =
"parent,read_session,max_stream_count";
}
// Reads rows from the stream in the format prescribed by the ReadSession.
@ -93,13 +101,113 @@ service BigQueryRead {
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
rpc SplitReadStream(SplitReadStreamRequest)
returns (SplitReadStreamResponse) {
option (google.api.http) = {
get: "/v1beta2/{name=projects/*/locations/*/sessions/*/streams/*}"
};
}
}
// BigQuery Write API.
//
// The Write API can be used to write data to BigQuery.
service BigQueryWrite {
option (google.api.default_host) = "bigquerystorage.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/bigquery,"
"https://www.googleapis.com/auth/bigquery.insertdata,"
"https://www.googleapis.com/auth/cloud-platform";
// Creates a write stream to the given table.
// Additionally, every table has a special COMMITTED stream named '_default'
// to which data can be written. This stream doesn't need to be created using
// CreateWriteStream. It is a stream that can be used simultaneously by any
// number of clients. Data written to this stream is considered committed as
// soon as an acknowledgement is received.
rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
option (google.api.http) = {
post: "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
body: "write_stream"
};
option (google.api.method_signature) = "parent,write_stream";
}
// Appends data to the given stream.
//
// If `offset` is specified, the `offset` is checked against the end of
// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
// attempt is made to append to an offset beyond the current end of the stream
// or `ALREADY_EXISTS` if user provides an `offset` that has already been
// written to. User can retry with adjusted offset within the same RPC
// stream. If `offset` is not specified, append happens at the end of the
// stream.
//
// The response contains the offset at which the append happened. Responses
// are received in the same order in which requests are sent. There will be
// one response for each successful request. If the `offset` is not set in
// response, it means append didn't happen due to some errors. If one request
// fails, all the subsequent requests will also fail until a successful request
// is made again.
//
// If the stream is of `PENDING` type, data will only be available for read
// operations after the stream is committed.
rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
option (google.api.http) = {
post: "/v1beta2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
body: "*"
};
option (google.api.method_signature) = "write_stream";
}
// Gets a write stream.
rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
option (google.api.http) = {
post: "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
body: "*"
};
option (google.api.method_signature) = "name";
}
// Finalize a write stream so that no new data can be appended to the
// stream. Finalize is not supported on the '_default' stream.
rpc FinalizeWriteStream(FinalizeWriteStreamRequest)
returns (FinalizeWriteStreamResponse) {
option (google.api.http) = {
post: "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
body: "*"
};
option (google.api.method_signature) = "name";
}
// Atomically commits a group of `PENDING` streams that belong to the same
// `parent` table.
// Streams must be finalized before commit and cannot be committed multiple
// times. Once a stream is committed, data in the stream becomes available
// for read operations.
rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest)
returns (BatchCommitWriteStreamsResponse) {
option (google.api.http) = {
get: "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
};
option (google.api.method_signature) = "parent";
}
// Flushes rows to a BUFFERED stream.
// If users are appending rows to BUFFERED stream, flush operation is
// required in order for the rows to become available for reading. A
// Flush operation flushes up to any previously flushed offset in a BUFFERED
// stream, to the offset specified in the request.
// Flush is not supported on the _default stream, since it is not BUFFERED.
rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
option (google.api.http) = {
post: "/v1beta2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
body: "*"
};
option (google.api.method_signature) = "write_stream";
}
}
// Request message for `CreateReadSession`.
message CreateReadSessionRequest {
// Required. The request project that owns the session, in the form of
@ -217,7 +325,6 @@ message SplitReadStreamRequest {
double fraction = 2;
}
// Response message for `SplitReadStream`.
message SplitReadStreamResponse {
// Primary stream, which contains the beginning portion of
// |original_stream|. An empty value indicates that the original stream can no
@ -228,3 +335,144 @@ message SplitReadStreamResponse {
// value indicates that the original stream can no longer be split.
ReadStream remainder_stream = 2;
}
// Request message for `CreateWriteStream`.
message CreateWriteStreamRequest {
// Required. Reference to the table to which the stream belongs, in the format
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
];
// Required. Stream to be created.
WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
}
// Request message for `AppendRows`.
message AppendRowsRequest {
// Proto schema and data.
message ProtoData {
// Proto schema used to serialize the data.
ProtoSchema writer_schema = 1;
// Serialized row data in protobuf message format.
ProtoRows rows = 2;
}
// Required. The stream that is the target of the append operation. This value
// must be specified for the initial request. If subsequent requests specify
// the stream name, it must equal to the value provided in the first request.
// To write to the _default stream, populate this field with a string in the
// format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
string write_stream = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigquerystorage.googleapis.com/WriteStream"
}
];
// If present, the write is only performed if the next append offset is same
// as the provided value. If not present, the write is performed at the
// current end of stream. Specifying a value for this field is not allowed
// when calling AppendRows for the '_default' stream.
google.protobuf.Int64Value offset = 2;
// Input rows. The `writer_schema` field must be specified at the initial
// request and currently, it will be ignored if specified in following
// requests. Following requests must have data in the same format as the
// initial request.
oneof rows {
// Rows in proto format.
ProtoData proto_rows = 4;
}
// Only initial request setting is respected. If true, drop unknown input
// fields. Otherwise, the extra fields will cause append to fail. Default
// value is false.
bool ignore_unknown_fields = 5;
}
// Response message for `AppendRows`.
message AppendRowsResponse {
// Exactly one of `offset` or `error` is set per response.
oneof response {
// The row offset at which the last append occurred.
int64 offset = 1;
// Error in case of append failure. If set, it means rows are not accepted
// into the system. Users can retry within the same connection.
google.rpc.Status error = 2;
}
// If the backend detects a schema update, it is passed to the user so that
// the user can use it to input a new type of message. It will be empty when
// there are no schema updates.
TableSchema updated_schema = 3;
}
// Request message for `GetWriteStreamRequest`.
message GetWriteStreamRequest {
// Required. Name of the stream to get, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigquerystorage.googleapis.com/WriteStream"
}
];
}
// Request message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsRequest {
// Required. Parent table that all the streams should belong to, in the form
// of `projects/{project}/datasets/{dataset}/tables/{table}`.
string parent = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The group of streams that will be committed atomically.
repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
}
// Response message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsResponse {
// The time at which streams were committed in microseconds granularity.
google.protobuf.Timestamp commit_time = 1;
}
// Request message for invoking `FinalizeWriteStream`.
message FinalizeWriteStreamRequest {
// Required. Name of the stream to finalize, in the form of
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigquerystorage.googleapis.com/WriteStream"
}
];
}
// Response message for `FinalizeWriteStream`.
message FinalizeWriteStreamResponse {
// Number of rows in the finalized stream.
int64 row_count = 1;
}
// Request message for `FlushRows`.
message FlushRowsRequest {
// Required. The stream that is the target of the flush operation.
string write_stream = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigquerystorage.googleapis.com/WriteStream"
}
];
// Ending offset of the flush operation. Rows before this offset (including
// this offset) will be flushed.
google.protobuf.Int64Value offset = 2;
}
// Response message for `FlushRows`.
message FlushRowsResponse {
// The rows before this offset (including this offset) are flushed.
int64 offset = 1;
}

View File

@ -1,4 +1,4 @@
// Copyright 2019 Google LLC.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
@ -21,6 +20,7 @@ import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/bigquery/storage/v1beta2/arrow.proto";
import "google/cloud/bigquery/storage/v1beta2/avro.proto";
import "google/cloud/bigquery/storage/v1beta2/table.proto";
import "google/protobuf/timestamp.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage";
@ -140,3 +140,53 @@ message ReadStream {
// `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Information about a single stream that gets data inside the storage system.
message WriteStream {
option (google.api.resource) = {
type: "bigquerystorage.googleapis.com/WriteStream"
pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"
};
// Type enum of the stream.
enum Type {
// Unknown type.
TYPE_UNSPECIFIED = 0;
// Data will commit automatically and appear as soon as the write is
// acknowledged.
COMMITTED = 1;
// Data is invisible until the stream is committed.
PENDING = 2;
// Data is only visible up to the offset to which it was flushed.
BUFFERED = 3;
}
// Output only. Name of the stream, in the form
// `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
// Immutable. Type of the stream.
Type type = 2 [(google.api.field_behavior) = IMMUTABLE];
// Output only. Create time of the stream. For the _default stream, this is the
// creation_time of the table.
google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Commit time of the stream.
// If a stream is of `COMMITTED` type, then it will have a commit_time same as
// `create_time`. If the stream is of `PENDING` type, commit_time being empty
// means it is not committed.
google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The schema of the destination table. It is only returned in
// `CreateWriteStream` response. Caller should generate data that's
// compatible with this schema to send in initial `AppendRowsRequest`.
// The table schema could go out of date during the life time of the stream.
TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// ID set by the client to annotate its identity.
string external_id = 6;
}

View File

@ -0,0 +1,102 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.bigquery.storage.v1beta2;
import "google/api/field_behavior.proto";
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage";
option java_multiple_files = true;
option java_outer_classname = "TableProto";
option java_package = "com.google.cloud.bigquery.storage.v1beta2";
// Schema of a table
message TableSchema {
// Describes the fields in a table.
repeated TableFieldSchema fields = 1;
}
// A field in TableSchema
message TableFieldSchema {
enum Type {
// Illegal value
TYPE_UNSPECIFIED = 0;
// 64K, UTF8
STRING = 1;
// 64-bit signed
INT64 = 2;
// 64-bit IEEE floating point
DOUBLE = 3;
// Aggregate type
STRUCT = 4;
// 64K, Binary
BYTES = 5;
// 2-valued
BOOL = 6;
// 64-bit signed usec since UTC epoch
TIMESTAMP = 7;
// Civil date - Year, Month, Day
DATE = 8;
// Civil time - Hour, Minute, Second, Microseconds
TIME = 9;
// Combination of civil date and civil time
DATETIME = 10;
// Geography object
GEOGRAPHY = 11;
// Numeric value
NUMERIC = 12;
}
enum Mode {
// Illegal value
MODE_UNSPECIFIED = 0;
NULLABLE = 1;
REQUIRED = 2;
REPEATED = 3;
}
// Required. The field name. The name must contain only letters (a-z, A-Z),
// numbers (0-9), or underscores (_), and must start with a letter or
// underscore. The maximum length is 128 characters.
string name = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The field data type.
Type type = 2 [(google.api.field_behavior) = REQUIRED];
// Optional. The field mode. The default value is NULLABLE.
Mode mode = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. Describes the nested schema fields if the type property is set to STRUCT.
repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];
// Optional. The field description. The maximum length is 1,024 characters.
string description = 6 [(google.api.field_behavior) = OPTIONAL];
}