// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.bigquery.storage.v1alpha2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/bigquery/storage/v1alpha2/protobuf.proto";
import "google/cloud/bigquery/storage/v1alpha2/stream.proto";
import "google/cloud/bigquery/storage/v1alpha2/table.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage";
option java_package = "com.google.cloud.bigquery.storage.v1alpha2";

// Request message for `CreateWriteStream`.
message CreateWriteStreamRequest {
  // Required. Reference to the table to which the stream belongs, in the
  // format of `projects/{project}/datasets/{dataset}/tables/{table}`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/Table"
    }
  ];

  // Required. Stream to be created.
  WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED];
}

// BigQuery Write API.
//
// The Write API can be used to write data to BigQuery.
service BigQueryWrite {
  option (google.api.default_host) = "bigquerystorage.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/bigquery,"
      "https://www.googleapis.com/auth/bigquery.insertdata,"
      "https://www.googleapis.com/auth/cloud-platform";

  // Creates a write stream to the given table.
  rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) {
    option (google.api.http) = {
      post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
      body: "write_stream"
    };
    option (google.api.method_signature) = "parent,write_stream";
  }

  // Appends data to the given stream.
  //
  // If `offset` is specified, the `offset` is checked against the end of the
  // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
  // attempt is made to append to an offset beyond the current end of the
  // stream, or `ALREADY_EXISTS` if the user provides an `offset` that has
  // already been written to. The user can retry with an adjusted offset
  // within the same RPC stream. If `offset` is not specified, the append
  // happens at the end of the stream.
  //
  // The response contains the offset at which the append happened. Responses
  // are received in the same order in which requests are sent. There will be
  // one response for each successful request. If the `offset` is not set in
  // the response, the append did not happen due to an error. If one request
  // fails, all subsequent requests will also fail until a successful request
  // is made again.
  //
  // If the stream is of `PENDING` type, data will only be available for read
  // operations after the stream is committed.
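  //
  // An illustrative offset sequence (hypothetical values, not part of the API
  // contract): if a stream already contains rows at offsets 0 through 9, an
  // append of 5 rows with `offset = 10` succeeds and occupies offsets 10
  // through 14, so the next append should use `offset = 15`. Re-sending the
  // same batch with `offset = 10` is then reported as `ALREADY_EXISTS`, and
  // an append with `offset = 20` as `OUT_OF_RANGE`; in both cases the rows
  // can be re-sent at a corrected offset on the same connection.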
  rpc AppendRows(stream AppendRowsRequest)
      returns (stream AppendRowsResponse) {
    option (google.api.http) = {
      post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
    option (google.api.method_signature) = "write_stream";
  }

  // Gets a write stream.
  rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) {
    option (google.api.http) = {
      post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
    option (google.api.method_signature) = "name";
  }

  // Finalizes a write stream so that no new data can be appended to the
  // stream.
  rpc FinalizeWriteStream(FinalizeWriteStreamRequest)
      returns (FinalizeWriteStreamResponse) {
    option (google.api.http) = {
      post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
    option (google.api.method_signature) = "name";
  }

  // Atomically commits a group of `PENDING` streams that belong to the same
  // `parent` table. Streams must be finalized before commit and cannot be
  // committed multiple times. Once a stream is committed, data in the stream
  // becomes available for read operations.
  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest)
      returns (BatchCommitWriteStreamsResponse) {
    option (google.api.http) = {
      get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}"
    };
    option (google.api.method_signature) = "parent";
  }

  // Flushes rows to a BUFFERED stream.
  // If users are appending rows to a BUFFERED stream, a flush operation is
  // required in order for the rows to become available for reading. A flush
  // operation advances the flushed range from any previously flushed offset
  // up to (and including) the offset specified in the request.
  rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) {
    option (google.api.http) = {
      post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}"
      body: "*"
    };
    option (google.api.method_signature) = "write_stream";
  }
}

// Request message for `AppendRows`.
message AppendRowsRequest {
  message ProtoData {
    // Proto schema used to serialize the data.
    ProtoSchema writer_schema = 1;

    // Serialized row data in protobuf message format.
    ProtoRows rows = 2;
  }

  // Required. The stream that is the target of the append operation. This
  // value must be specified for the initial request. If subsequent requests
  // specify the stream name, it must be equal to the value provided in the
  // first request.
  string write_stream = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/WriteStream"
    }
  ];

  // Optional. If present, the write is only performed if the next append
  // offset is the same as the provided value. If not present, the write is
  // performed at the current end of the stream.
  google.protobuf.Int64Value offset = 2
      [(google.api.field_behavior) = OPTIONAL];

  // Input rows. The `writer_schema` field must be specified in the initial
  // request and, currently, will be ignored if specified in subsequent
  // requests. Subsequent requests must carry data in the same format as the
  // initial request.
  oneof rows {
    ProtoData proto_rows = 4;
  }

  // Only the setting in the initial request is respected. If true, unknown
  // input fields are dropped. Otherwise, extra fields cause the append to
  // fail. The default value is false.
  bool ignore_unknown_fields = 5;
}

// Response message for `AppendRows`.
message AppendRowsResponse {
  oneof response {
    // The row offset at which the last append occurred.
    int64 offset = 1;

    // Error in case of append failure. If set, it means the rows were not
    // accepted into the system. Users can retry within the same connection.
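    // For example, an append sent at an offset that has already been written
    // is reported here with an `ALREADY_EXISTS` status, and one sent beyond
    // the current end of the stream with an `OUT_OF_RANGE` status (an
    // illustrative case, not an exhaustive list of possible errors); the
    // rejected rows can be re-sent at a corrected offset on the same
    // connection.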
    google.rpc.Status error = 2;
  }

  // If the backend detects a schema update, it is passed back to the user so
  // that the user can use it to input messages of the new type. It will be
  // empty when there are no schema updates.
  TableSchema updated_schema = 3;
}

// Request message for `GetWriteStream`.
message GetWriteStreamRequest {
  // Required. Name of the stream to get, in the form of
  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/WriteStream"
    }
  ];
}

// Request message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsRequest {
  // Required. Parent table that all the streams should belong to, in the form
  // of `projects/{project}/datasets/{dataset}/tables/{table}`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/Table"
    }
  ];

  // Required. The group of streams that will be committed atomically.
  repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
}

// Response message for `BatchCommitWriteStreams`.
message BatchCommitWriteStreamsResponse {
  // The time at which streams were committed, with microsecond granularity.
  google.protobuf.Timestamp commit_time = 1;
}

// Request message for invoking `FinalizeWriteStream`.
message FinalizeWriteStreamRequest {
  // Required. Name of the stream to finalize, in the form of
  // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/WriteStream"
    }
  ];
}

// Response message for `FinalizeWriteStream`.
message FinalizeWriteStreamResponse {
  // Number of rows in the finalized stream.
  int64 row_count = 1;
}

// Request message for `FlushRows`.
message FlushRowsRequest {
  // Required. The stream that is the target of the flush operation.
  string write_stream = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "bigquerystorage.googleapis.com/WriteStream"
    }
  ];

  // Ending offset of the flush operation. Rows before this offset (including
  // this offset) will be flushed.
  int64 offset = 2;
}

// Response message for `FlushRows`.
message FlushRowsResponse {
  // The rows before this offset (including this offset) are flushed.
  int64 offset = 1;
}
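
// Illustrative flush example (hypothetical values, not part of the API
// contract): if a BUFFERED stream has rows appended at offsets 0 through 99
// and offsets 0 through 49 have already been flushed, a `FlushRowsRequest`
// with `offset = 79` makes rows 50 through 79 available for reading, and the
// corresponding `FlushRowsResponse` reports `offset = 79`.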