BigQuery Storage Write API v1alpha2 clients. The service is enabled by whitelist only.
PiperOrigin-RevId: 287379998
This commit is contained in:
parent
650d7f1f8a
commit
a202fb3b91
|
|
@ -0,0 +1,95 @@
|
|||
# This file was automatically generated by BuildFileGenerator

# This is an API workspace, having public visibility by default makes perfect sense.
package(default_visibility = ["//visibility:public"])

##############################################################################
# Common
##############################################################################
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@com_google_googleapis_imports//:imports.bzl", "proto_library_with_info")

# Proto descriptors for the BigQuery Storage Write API (v1alpha2).
# deps are sorted per buildifier convention (workspace-relative labels first,
# then external @com_google_protobuf labels, each group alphabetical).
proto_library(
    name = "storage_proto",
    srcs = [
        "protobuf.proto",
        "storage.proto",
        "stream.proto",
        "table.proto",
    ],
    deps = [
        "//google/api:annotations_proto",
        "//google/api:client_proto",
        "//google/api:field_behavior_proto",
        "//google/api:resource_proto",
        "//google/rpc:status_proto",
        "@com_google_protobuf//:descriptor_proto",
        "@com_google_protobuf//:empty_proto",
        "@com_google_protobuf//:timestamp_proto",
        "@com_google_protobuf//:wrappers_proto",
    ],
)
|
||||
|
||||
# Bundles :storage_proto with the common resource protos so the GAPIC
# generator (see storage_java_gapic below in this file) has full type info.
proto_library_with_info(
    name = "storage_proto_with_info",
    deps = [
        ":storage_proto",
        "//google/cloud:common_resources_proto",
    ],
)
|
||||
|
||||
##############################################################################
# Java
##############################################################################
load(
    "@com_google_googleapis_imports//:imports.bzl",
    "java_gapic_assembly_gradle_pkg",
    "java_gapic_library",
    "java_gapic_test",
    "java_grpc_library",
    "java_proto_library",
)

# Java message classes generated from :storage_proto.
java_proto_library(
    name = "storage_java_proto",
    deps = [":storage_proto"],
)

# Java gRPC stubs for the service defined in :storage_proto.
java_grpc_library(
    name = "storage_java_grpc",
    srcs = [":storage_proto"],
    deps = [":storage_java_proto"],
)

# GAPIC client library generated from the annotated protos plus the
# GAPIC config and service config YAMLs in this directory.
java_gapic_library(
    name = "storage_java_gapic",
    src = ":storage_proto_with_info",
    gapic_yaml = "bigquerystorage_gapic.yaml",
    package = "google.cloud.bigquery.storage.v1alpha2",
    service_yaml = "bigquerystorage_v1alpha2.yaml",
    test_deps = [
        ":storage_java_grpc",
    ],
    deps = [
        ":storage_java_proto",
    ],
)

# Test suite wrapper for the generated GAPIC client tests.
java_gapic_test(
    name = "storage_java_gapic_test_suite",
    test_classes = [
        "com.google.cloud.bigquery.storage.v1alpha2.BaseBigQueryWriteClientTest",
    ],
    runtime_deps = [":storage_java_gapic_test"],
)

# Open Source Packages
java_gapic_assembly_gradle_pkg(
    name = "google-cloud-bigquery-storage-v1alpha2-java",
    deps = [
        ":storage_java_gapic",
        ":storage_java_grpc",
        ":storage_java_proto",
        ":storage_proto",
    ],
)
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# GAPIC generator configuration for the BigQuery Storage Write API v1alpha2.
type: com.google.api.codegen.ConfigProto
config_schema_version: 1.0.0
# The settings of generated code in a specific language.
language_settings:
  java:
    package_name: com.google.cloud.bigquery.storage.v1alpha2
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
{
|
||||
"methodConfig": [
|
||||
{
|
||||
"name": [
|
||||
{
|
||||
"service": "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
|
||||
"method": "CreateWriteStream"
|
||||
}
|
||||
],
|
||||
"timeout": "600s",
|
||||
"retryPolicy": {
|
||||
"initialBackoff": "0.100s",
|
||||
"maxBackoff": "60s",
|
||||
"backoffMultiplier": 1.3,
|
||||
"retryableStatusCodes": [
|
||||
"DEADLINE_EXCEEDED",
|
||||
"UNAVAILABLE"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": [
|
||||
{
|
||||
"service": "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
|
||||
"method": "AppendRows"
|
||||
}
|
||||
],
|
||||
"timeout": "86400s",
|
||||
"retryPolicy": {
|
||||
"initialBackoff": "0.100s",
|
||||
"maxBackoff": "60s",
|
||||
"backoffMultiplier": 1.3,
|
||||
"retryableStatusCodes": [
|
||||
"UNAVAILABLE"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": [
|
||||
{
|
||||
"service": "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
|
||||
"method": "BatchCommitWriteStreams"
|
||||
},
|
||||
{
|
||||
"service": "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
|
||||
"method": "FinalizeWriteStream"
|
||||
},
|
||||
{
|
||||
"service": "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
|
||||
"method": "GetWriteStream"
|
||||
}
|
||||
],
|
||||
"timeout": "600s",
|
||||
"retryPolicy": {
|
||||
"initialBackoff": "0.100s",
|
||||
"maxBackoff": "60s",
|
||||
"backoffMultiplier": 1.3,
|
||||
"retryableStatusCodes": [
|
||||
"DEADLINE_EXCEEDED",
|
||||
"UNAVAILABLE"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
# google.api.Service configuration for the BigQuery Storage API
# (v1alpha2 write surface).
type: google.api.Service
config_version: 3
name: bigquerystorage.googleapis.com
title: BigQuery Storage API

apis:
- name: google.cloud.bigquery.storage.v1alpha2.BigQueryWrite

authentication:
  rules:
  # Every BigQueryWrite method accepts any one of the OAuth scopes below.
  - selector: 'google.cloud.bigquery.storage.v1alpha2.BigQueryWrite.*'
    oauth:
      canonical_scopes: |-
        https://www.googleapis.com/auth/bigquery,
        https://www.googleapis.com/auth/bigquery.insertdata,
        https://www.googleapis.com/auth/cloud-platform
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2019 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.cloud.bigquery.storage.v1alpha2;
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage";
|
||||
option java_outer_classname = "ProtoBufProto";
|
||||
option java_package = "com.google.cloud.bigquery.storage.v1alpha2";
|
||||
|
||||
// ProtoSchema is the API representation of a protocol buffer schema.
message ProtoSchema {
  // Message descriptor for the data. The descriptor has to be self-contained,
  // i.e. include all nested type definitions, except for protocol buffer
  // well-known types
  // (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
  // and zetasql public protos
  // (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
  google.protobuf.DescriptorProto proto_descriptor = 1;
}
|
||||
|
||||
// Protobuf rows.
message ProtoRows {
  // A sequence of rows serialized as a Protocol Buffer; each `bytes` entry
  // holds one serialized row message.
  //
  // See https://developers.google.com/protocol-buffers/docs/overview for more
  // information on deserializing this field.
  repeated bytes serialized_rows = 1;
}
|
||||
|
|
@ -0,0 +1,191 @@
|
|||
// Copyright 2019 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.cloud.bigquery.storage.v1alpha2;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/api/client.proto";
|
||||
import "google/api/field_behavior.proto";
|
||||
import "google/cloud/bigquery/storage/v1alpha2/protobuf.proto";
|
||||
import "google/cloud/bigquery/storage/v1alpha2/stream.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
import "google/rpc/status.proto";
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage";
|
||||
option java_package = "com.google.cloud.bigquery.storage.v1alpha2";
|
||||
|
||||
// Request message for `CreateWriteStream`.
message CreateWriteStreamRequest {
  // Required. Reference to the table to which the stream belongs, in the format
  // of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
  string parent = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. Stream to be created.
  WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
}
|
||||
|
||||
// Request message for `AppendRows`.
message AppendRowsRequest {
  // Proto-serialized row data together with the schema used to produce it.
  message ProtoData {
    // Proto schema used to serialize the data.
    ProtoSchema writer_schema = 1;

    // Serialized row data in protobuf message format.
    ProtoRows rows = 2;
  }

  // Required. The stream that is the target of the append operation. This value must be
  // specified for the initial request. If subsequent requests specify the
  // stream name, it must equal to the value provided in the first request.
  string write_stream = 1 [(google.api.field_behavior) = REQUIRED];

  // Optional. If present, the write is only performed if the next append offset is same
  // as the provided value. If not present, the write is performed at the
  // current end of stream.
  // (Int64Value wrapper distinguishes "offset 0" from "offset not set".)
  google.protobuf.Int64Value offset = 2 [(google.api.field_behavior) = OPTIONAL];

  // Input rows. The `writer_schema` field must be specified at the initial
  // request and currently, it will be ignored if specified in following
  // requests. Following requests must have data in the same format as the
  // initial request.
  oneof rows {
    ProtoData proto_rows = 4;
  }
}
|
||||
|
||||
// Response message for `AppendRows`.
message AppendRowsResponse {
  // Exactly one of `offset` or `error` is set in each response.
  oneof response {
    // The row offset at which the last append occurred.
    int64 offset = 1;

    // Error in case of append failure. If set, it means rows are not accepted
    // into the system. Users can retry within the same connection.
    google.rpc.Status error = 2;
  }
}
|
||||
|
||||
// Request message for `GetWriteStream`.
message GetWriteStreamRequest {
  // Required. Name of the stream to get, in the form of
  // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
  string name = 1 [(google.api.field_behavior) = REQUIRED];
}
|
||||
|
||||
// Request message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsRequest {
  // Required. Parent table that all the streams should belong to, in the form of
  // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
  string parent = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The group of streams that will be committed atomically.
  // Each entry is a stream resource name.
  repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
}
|
||||
|
||||
// Response message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsResponse {
  // The time at which streams were committed in microseconds granularity.
  google.protobuf.Timestamp commit_time = 1;
}
|
||||
|
||||
// Request message for invoking `FinalizeWriteStream`.
message FinalizeWriteStreamRequest {
  // Required. Name of the stream to finalize, in the form of
  // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
  string name = 1 [(google.api.field_behavior) = REQUIRED];
}
|
||||
|
||||
// Response message for `FinalizeWriteStream`.
message FinalizeWriteStreamResponse {
  // Number of rows in the finalized stream.
  int64 row_count = 1;
}
|
||||
|
||||
// BigQuery Write API.
//
// The Write API can be used to write data to BigQuery.
service BigQueryWrite {
  option (google.api.default_host) = "bigquerystorage.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/bigquery,"
      "https://www.googleapis.com/auth/bigquery.insertdata,"
      "https://www.googleapis.com/auth/cloud-platform";

  // Creates a write stream to the given table.
  rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
    option (google.api.http) = {
      post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
      body: "*"
    };
  }

  // Appends data to the given stream (bidirectional streaming RPC).
  //
  // If `offset` is specified, the `offset` is checked against the end of
  // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
  // attempt is made to append to an offset beyond the current end of the stream
  // or `ALREADY_EXISTS` if user provides an `offset` that has already been
  // written to. User can retry with adjusted offset within the same RPC
  // stream. If `offset` is not specified, append happens at the end of the
  // stream.
  //
  // The response contains the offset at which the append happened. Responses
  // are received in the same order in which requests are sent. There will be
  // one response for each successful request. If the `offset` is not set in
  // response, it means append didn't happen due to some errors. If one request
  // fails, all the subsequent requests will also fail until a success request
  // is made again.
  //
  // If the stream is of `PENDING` type, data will only be available for read
  // operations after the stream is committed.
  rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) {
    option (google.api.http) = {
      post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
  }

  // Gets a write stream.
  rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
    option (google.api.http) = {
      post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
  }

  // Finalize a write stream so that no new data can be appended to the
  // stream.
  rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
    option (google.api.http) = {
      post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
  }

  // Atomically commits a group of `PENDING` streams that belong to the same
  // `parent` table.
  // Streams must be finalized before commit and cannot be committed multiple
  // times. Once a stream is committed, data in the stream becomes available
  // for read operations.
  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
    option (google.api.http) = {
      get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
    };
  }
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
// Copyright 2019 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.cloud.bigquery.storage.v1alpha2;
|
||||
|
||||
import "google/api/field_behavior.proto";
|
||||
import "google/cloud/bigquery/storage/v1alpha2/table.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage";
|
||||
option java_package = "com.google.cloud.bigquery.storage.v1alpha2";
|
||||
|
||||
// Information about a single stream that gets data inside the storage system.
message WriteStream {
  // Commit semantics of a stream.
  enum Type {
    // Unknown type.
    TYPE_UNSPECIFIED = 0;

    // Data will commit automatically and appear as soon as the write is
    // acknowledged.
    COMMITTED = 1;

    // Data is invisible until the stream is committed.
    PENDING = 2;
  }

  // Output only. Name of the stream, in the form
  // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Immutable. Type of the stream; fixed once the stream is created.
  Type type = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. Create time of the stream.
  google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Commit time of the stream.
  // If a stream is of `COMMITTED` type, then it will have a commit_time same as
  // `create_time`. If the stream is of `PENDING` type, commit_time being empty
  // means it is not committed.
  google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The schema of the destination table. It is only returned in
  // `CreateWriteStream` response. Caller should generate data that's
  // compatible with this schema to send in initial `AppendRowsRequest`.
  // The table schema could go out of date during the life time of the stream.
  TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Id set by client to annotate its identity.
  string external_id = 6;
}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
// Copyright 2019 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.cloud.bigquery.storage.v1alpha2;
|
||||
|
||||
import "google/api/field_behavior.proto";
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage";
|
||||
option java_package = "com.google.cloud.bigquery.storage.v1alpha2";
|
||||
|
||||
// Schema of a table.
message TableSchema {
  // Describes the fields in a table.
  repeated TableFieldSchema fields = 1;
}
|
||||
|
||||
// A field in TableSchema.
message TableFieldSchema {
  // Data type of the field.
  enum Type {
    // Illegal value
    TYPE_UNSPECIFIED = 0;

    // 64K, UTF8
    STRING = 1;

    // 64-bit signed
    INT64 = 2;

    // 64-bit IEEE floating point
    DOUBLE = 3;

    // Aggregate type
    STRUCT = 4;

    // 64K, Binary
    BYTES = 5;

    // 2-valued
    BOOL = 6;

    // 64-bit signed usec since UTC epoch
    TIMESTAMP = 7;

    // Civil date - Year, Month, Day
    DATE = 8;

    // Civil time - Hour, Minute, Second, Microseconds
    TIME = 9;

    // Combination of civil date and civil time
    DATETIME = 10;

    // Geography object (go/googlesql_geography)
    GEOGRAPHY = 11;

    // Numeric value (go/googlesql_numeric)
    NUMERIC = 12;
  }

  // Mode of the field: whether it is nullable, required, or repeated.
  enum Mode {
    // Illegal value
    MODE_UNSPECIFIED = 0;

    // The field may hold a single value or be NULL.
    NULLABLE = 1;

    // The field must always be populated.
    REQUIRED = 2;

    // The field holds an ordered list of values.
    REPEATED = 3;
  }

  // Required. The field name. The name must contain only letters (a-z, A-Z),
  // numbers (0-9), or underscores (_), and must start with a letter or
  // underscore. The maximum length is 128 characters.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The field data type.
  Type type = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. The field mode. The default value is NULLABLE.
  Mode mode = 3 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Describes the nested schema fields if the type property is set to STRUCT.
  repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The field description. The maximum length is 1,024 characters.
  // NOTE(review): field number 5 is skipped in this message — confirm it is
  // intentionally reserved/unused before assigning it to a new field.
  string description = 6 [(google.api.field_behavior) = OPTIONAL];
}
|
||||
Loading…
Reference in New Issue