// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.bigquery.v2;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/bigquery/v2/dataset_reference.proto";
import "google/cloud/bigquery/v2/model.proto";
import "google/cloud/bigquery/v2/query_parameter.proto";
import "google/cloud/bigquery/v2/routine_reference.proto";
import "google/cloud/bigquery/v2/row_access_policy_reference.proto";
import "google/cloud/bigquery/v2/session_info.proto";
import "google/cloud/bigquery/v2/table_reference.proto";
import "google/cloud/bigquery/v2/table_schema.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
option java_outer_classname = "JobStatsProto";
option java_package = "com.google.cloud.bigquery.v2";
option (google.api.resource_definition) = {
type: "cloudkms.googleapis.com/CryptoKey"
pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}"
};
option (google.api.resource_definition) = {
type: "storage.googleapis.com/Bucket"
pattern: "*"
};
// An operation within a query execution stage.
message ExplainQueryStep {
  // Machine-readable operation type.
  string kind = 1;
  // Human-readable description of the step(s).
  repeated string substeps = 2;
}
// A single stage of query execution.
message ExplainQueryStage {
  // Indicates the type of compute used to process the stage.
  enum ComputeMode {
    // ComputeMode type not specified.
    COMPUTE_MODE_UNSPECIFIED = 0;
    // This stage was processed using BigQuery slots.
    BIGQUERY = 1;
    // This stage was processed using BI Engine compute.
    BI_ENGINE = 2;
  }
  // Human-readable name for the stage.
  string name = 1;
  // Unique ID for the stage within the plan.
  google.protobuf.Int64Value id = 2;
  // Stage start time represented as milliseconds since the epoch.
  int64 start_ms = 3;
  // Stage end time represented as milliseconds since the epoch.
  int64 end_ms = 4;
  // IDs for stages that are inputs to this stage.
  repeated int64 input_stages = 5;
  // Relative amount of time the average shard spent waiting to be
  // scheduled.
  google.protobuf.DoubleValue wait_ratio_avg = 6;
  // Milliseconds the average shard spent waiting to be scheduled.
  google.protobuf.Int64Value wait_ms_avg = 7;
  // Relative amount of time the slowest shard spent waiting to be
  // scheduled.
  google.protobuf.DoubleValue wait_ratio_max = 8;
  // Milliseconds the slowest shard spent waiting to be scheduled.
  google.protobuf.Int64Value wait_ms_max = 9;
  // Relative amount of time the average shard spent reading input.
  google.protobuf.DoubleValue read_ratio_avg = 10;
  // Milliseconds the average shard spent reading input.
  google.protobuf.Int64Value read_ms_avg = 11;
  // Relative amount of time the slowest shard spent reading input.
  google.protobuf.DoubleValue read_ratio_max = 12;
  // Milliseconds the slowest shard spent reading input.
  google.protobuf.Int64Value read_ms_max = 13;
  // Relative amount of time the average shard spent on CPU-bound tasks.
  google.protobuf.DoubleValue compute_ratio_avg = 14;
  // Milliseconds the average shard spent on CPU-bound tasks.
  google.protobuf.Int64Value compute_ms_avg = 15;
  // Relative amount of time the slowest shard spent on CPU-bound tasks.
  google.protobuf.DoubleValue compute_ratio_max = 16;
  // Milliseconds the slowest shard spent on CPU-bound tasks.
  google.protobuf.Int64Value compute_ms_max = 17;
  // Relative amount of time the average shard spent on writing output.
  google.protobuf.DoubleValue write_ratio_avg = 18;
  // Milliseconds the average shard spent on writing output.
  google.protobuf.Int64Value write_ms_avg = 19;
  // Relative amount of time the slowest shard spent on writing output.
  google.protobuf.DoubleValue write_ratio_max = 20;
  // Milliseconds the slowest shard spent on writing output.
  google.protobuf.Int64Value write_ms_max = 21;
  // Total number of bytes written to shuffle.
  google.protobuf.Int64Value shuffle_output_bytes = 22;
  // Total number of bytes written to shuffle and spilled to disk.
  google.protobuf.Int64Value shuffle_output_bytes_spilled = 23;
  // Number of records read into the stage.
  google.protobuf.Int64Value records_read = 24;
  // Number of records written by the stage.
  google.protobuf.Int64Value records_written = 25;
  // Number of parallel input segments to be processed.
  google.protobuf.Int64Value parallel_inputs = 26;
  // Number of parallel input segments completed.
  google.protobuf.Int64Value completed_parallel_inputs = 27;
  // Current status for this stage.
  string status = 28;
  // List of operations within the stage in dependency order (approximately
  // chronological).
  repeated ExplainQueryStep steps = 29;
  // Slot-milliseconds used by the stage.
  google.protobuf.Int64Value slot_ms = 30;
  // Output only. Compute mode for this stage.
  ComputeMode compute_mode = 31 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Summary of the state of query execution at a given time.
message QueryTimelineSample {
  // Milliseconds elapsed since the start of query execution.
  google.protobuf.Int64Value elapsed_ms = 1;
  // Cumulative slot-milliseconds consumed by the query.
  google.protobuf.Int64Value total_slot_ms = 2;
  // Total units of work remaining for the query. This number can be revised
  // (increased or decreased) while the query is running.
  google.protobuf.Int64Value pending_units = 3;
  // Total parallel units of work completed by this query.
  google.protobuf.Int64Value completed_units = 4;
  // Total number of active workers. This does not correspond directly to
  // slot usage. This is the largest value observed since the last sample.
  google.protobuf.Int64Value active_units = 5;
  // Units of work that can be scheduled immediately. Providing additional
  // slots for these units of work will accelerate the query, if no other
  // query in the reservation needs additional slots.
  // NOTE(review): field number 6 is skipped; presumably a removed field --
  // consider a `reserved 6;` declaration to prevent accidental reuse.
  google.protobuf.Int64Value estimated_runnable_units = 7;
}
// The external service cost is a portion of the total cost; these costs are
// not additive with total_bytes_billed. Moreover, this field only tracks
// external service costs that will show up as BigQuery costs (e.g. training
// a BigQuery ML job with the Google Cloud AI Platform or AutoML Tables
// services), not other costs which may be accrued by running the query
// (e.g. reading from Bigtable or Cloud Storage). External service costs with
// a different billing SKU (e.g. a CAIP job is charged based on VM usage) are
// converted to BigQuery billed_bytes and slot_ms with an equivalent amount
// of US dollars. Services may not directly correlate to these metrics, but
// these are the equivalents for billing purposes.
// Output only.
message ExternalServiceCost {
  // External service name.
  string external_service = 1;
  // External service cost in terms of BigQuery bytes processed.
  google.protobuf.Int64Value bytes_processed = 2;
  // External service cost in terms of BigQuery bytes billed.
  google.protobuf.Int64Value bytes_billed = 3;
  // External service cost in terms of BigQuery slot milliseconds.
  google.protobuf.Int64Value slot_ms = 4;
  // Non-preemptable reserved slots used for an external job.
  // For example, reserved slots for a Cloud AI Platform job are the VM
  // usages converted to BigQuery slots with an equivalent amount of money.
  int64 reserved_slot_count = 5;
}
// Statistics for the EXPORT DATA statement as part of a Query Job. EXTRACT
// JOB statistics are populated in JobStatistics4.
message ExportDataStatistics {
  // Number of destination files generated in case of EXPORT DATA
  // statement only.
  google.protobuf.Int64Value file_count = 1;
  // [Alpha] Number of destination rows generated in case of EXPORT DATA
  // statement only.
  google.protobuf.Int64Value row_count = 2;
}
// Reason why BI Engine didn't accelerate the query (or sub-query).
message BiEngineReason {
  // Indicates the high-level reason for no/partial acceleration.
  enum Code {
    // BiEngineReason not specified.
    CODE_UNSPECIFIED = 0;
    // No reservation available for BI Engine acceleration.
    NO_RESERVATION = 1;
    // Not enough memory available for BI Engine acceleration.
    INSUFFICIENT_RESERVATION = 2;
    // This particular SQL text is not supported for acceleration by BI
    // Engine.
    // NOTE(review): value 3 is skipped; presumably a removed value --
    // consider a `reserved 3;` declaration to prevent accidental reuse.
    UNSUPPORTED_SQL_TEXT = 4;
    // Input too large for acceleration by BI Engine.
    INPUT_TOO_LARGE = 5;
    // Catch-all code for all other cases for partial or disabled
    // acceleration.
    OTHER_REASON = 6;
    // One or more tables were not eligible for BI Engine acceleration.
    TABLE_EXCLUDED = 7;
  }
  // Output only. High-level BI Engine reason for partial or disabled
  // acceleration.
  Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Free form human-readable reason for partial or disabled
  // acceleration.
  string message = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a BI Engine specific query.
// Populated as part of JobStatistics2.
message BiEngineStatistics {
  // Indicates the type of BI Engine acceleration.
  enum BiEngineMode {
    // BiEngineMode type not specified.
    ACCELERATION_MODE_UNSPECIFIED = 0;
    // BI Engine disabled the acceleration. bi_engine_reasons
    // specifies a more detailed reason.
    DISABLED = 1;
    // Part of the query was accelerated using BI Engine.
    // See bi_engine_reasons for why parts of the query were not
    // accelerated.
    PARTIAL = 2;
    // All of the query was accelerated using BI Engine.
    FULL = 3;
  }
  // Indicates the type of BI Engine acceleration.
  enum BiEngineAccelerationMode {
    // BiEngineAccelerationMode type not specified.
    BI_ENGINE_ACCELERATION_MODE_UNSPECIFIED = 0;
    // BI Engine acceleration was attempted but disabled. bi_engine_reasons
    // specifies a more detailed reason.
    BI_ENGINE_DISABLED = 1;
    // Some inputs were accelerated using BI Engine.
    // See bi_engine_reasons for why parts of the query were not
    // accelerated.
    PARTIAL_INPUT = 2;
    // All of the query inputs were accelerated using BI Engine.
    FULL_INPUT = 3;
    // All of the query was accelerated using BI Engine.
    FULL_QUERY = 4;
  }
  // Output only. Specifies which mode of BI Engine acceleration was performed
  // (if any).
  BiEngineMode bi_engine_mode = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Specifies which mode of BI Engine acceleration was performed
  // (if any).
  BiEngineAccelerationMode acceleration_mode = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // In case of DISABLED or PARTIAL bi_engine_mode, these contain the
  // explanatory reasons as to why BI Engine could not accelerate.
  // In case the full query was accelerated, this field is not populated.
  repeated BiEngineReason bi_engine_reasons = 2;
}
// Reason about why no search index was used in the search query (or
// sub-query).
message IndexUnusedReason {
  // Indicates the high-level reason for the scenario when no search index was
  // used.
  enum Code {
    // Code not specified.
    CODE_UNSPECIFIED = 0;
    // Indicates the search index configuration has not been created.
    INDEX_CONFIG_NOT_AVAILABLE = 1;
    // Indicates the search index creation has not been completed.
    PENDING_INDEX_CREATION = 2;
    // Indicates the base table has been truncated (rows have been removed
    // from table with TRUNCATE TABLE statement) since the last time the
    // search index was refreshed.
    BASE_TABLE_TRUNCATED = 3;
    // Indicates the search index configuration has been changed since the
    // last time the search index was refreshed.
    INDEX_CONFIG_MODIFIED = 4;
    // Indicates the search query accesses data at a timestamp before the
    // last time the search index was refreshed.
    TIME_TRAVEL_QUERY = 5;
    // Indicates the usage of search index will not contribute to any pruning
    // improvement for the search function, e.g. when the search predicate is
    // in a disjunction with other non-search predicates.
    NO_PRUNING_POWER = 6;
    // Indicates the search index does not cover all fields in the search
    // function.
    UNINDEXED_SEARCH_FIELDS = 7;
    // Indicates the search index does not support the given search query
    // pattern.
    UNSUPPORTED_SEARCH_PATTERN = 8;
    // Indicates the query has been optimized by using a materialized view.
    OPTIMIZED_WITH_MATERIALIZED_VIEW = 9;
    // Indicates the query has been secured by data masking, and thus search
    // indexes are not applicable.
    SECURED_BY_DATA_MASKING = 11;
    // Indicates that the search index and the search function call do not
    // have the same text analyzer.
    MISMATCHED_TEXT_ANALYZER = 12;
    // Indicates the base table is too small (below a certain threshold).
    // The index does not provide noticeable search performance gains
    // when the base table is too small.
    BASE_TABLE_TOO_SMALL = 13;
    // Indicates that the total size of indexed base tables in your
    // organization exceeds your region's limit and the index is not used in
    // the query. To index larger base tables, you can use your own
    // reservation for index-management jobs.
    BASE_TABLE_TOO_LARGE = 14;
    // Indicates that the estimated performance gain from using the search
    // index is too low for the given search query.
    ESTIMATED_PERFORMANCE_GAIN_TOO_LOW = 15;
    // Indicates that search indexes can not be used for search query with
    // STANDARD edition.
    NOT_SUPPORTED_IN_STANDARD_EDITION = 17;
    // Indicates that an option in the search function that cannot make use
    // of the index has been selected.
    INDEX_SUPPRESSED_BY_FUNCTION_OPTION = 18;
    // Indicates that the query was cached, and thus the search index was not
    // used.
    QUERY_CACHE_HIT = 19;
    // The index cannot be used in the search query because it is stale.
    STALE_INDEX = 20;
    // Indicates an internal error that causes the search index to be unused.
    INTERNAL_ERROR = 10;
    // Indicates that the reason search indexes cannot be used in the query
    // is not covered by any of the other IndexUnusedReason options.
    OTHER_REASON = 16;
  }
  // Specifies the high-level reason for the scenario when no search index
  // was used.
  optional Code code = 1;
  // Free form human-readable reason for the scenario when no search index
  // was used.
  optional string message = 2;
  // Specifies the base table involved in the reason that no search index
  // was used.
  optional TableReference base_table = 3;
  // Specifies the name of the unused search index, if available.
  optional string index_name = 4;
}
// Statistics for a search query.
// Populated as part of JobStatistics2.
message SearchStatistics {
  // Indicates the type of search index usage in the entire search query.
  enum IndexUsageMode {
    // Index usage mode not specified.
    INDEX_USAGE_MODE_UNSPECIFIED = 0;
    // No search indexes were used in the search query. See
    // [`indexUnusedReasons`]
    // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason)
    // for detailed reasons.
    UNUSED = 1;
    // Part of the search query used search indexes. See
    // [`indexUnusedReasons`]
    // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason)
    // for why other parts of the query did not use search indexes.
    PARTIALLY_USED = 2;
    // The entire search query used search indexes.
    FULLY_USED = 4;
  }
  // Specifies the index usage mode for the query.
  IndexUsageMode index_usage_mode = 1;
  // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field
  // explains why indexes were not used in all or part of the search query.
  // If `indexUsageMode` is `FULLY_USED`, this field is not populated.
  repeated IndexUnusedReason index_unused_reasons = 2;
}
// Statistics for a vector search query.
// Populated as part of JobStatistics2.
message VectorSearchStatistics {
  // Indicates the type of vector index usage in the entire vector search
  // query.
  enum IndexUsageMode {
    // Index usage mode not specified.
    INDEX_USAGE_MODE_UNSPECIFIED = 0;
    // No vector indexes were used in the vector search query. See
    // [`indexUnusedReasons`]
    // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason)
    // for detailed reasons.
    UNUSED = 1;
    // Part of the vector search query used vector indexes. See
    // [`indexUnusedReasons`]
    // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason)
    // for why other parts of the query did not use vector indexes.
    PARTIALLY_USED = 2;
    // The entire vector search query used vector indexes.
    FULLY_USED = 4;
  }
  // Specifies the index usage mode for the query.
  IndexUsageMode index_usage_mode = 1;
  // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field
  // explains why indexes were not used in all or part of the vector search
  // query. If `indexUsageMode` is `FULLY_USED`, this field is not populated.
  repeated IndexUnusedReason index_unused_reasons = 2;
}
// Query optimization information for a QUERY job.
message QueryInfo {
  // Output only. Information about query optimizations.
  // NOTE(review): field number 1 is skipped; presumably a removed field --
  // consider a `reserved 1;` declaration to prevent accidental reuse.
  google.protobuf.Struct optimization_details = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a LOAD query.
message LoadQueryStatistics {
  // Output only. Number of source files in a LOAD query.
  google.protobuf.Int64Value input_files = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Number of bytes of source data in a LOAD query.
  google.protobuf.Int64Value input_file_bytes = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Number of rows imported in a LOAD query.
  // Note that while a LOAD query is in the running state, this value may
  // change.
  google.protobuf.Int64Value output_rows = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Size of the loaded data in bytes. Note that while a LOAD
  // query is in the running state, this value may change.
  google.protobuf.Int64Value output_bytes = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The number of bad records encountered while processing a
  // LOAD query. Note that if the job has failed because of more bad records
  // encountered than the maximum allowed in the load job configuration, then
  // this number can be less than the total number of bad records present in
  // the input data.
  google.protobuf.Int64Value bad_records = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a query job.
message JobStatistics2 {
  // Output only. Describes execution plan for the query.
  repeated ExplainQueryStage query_plan = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The original estimate of bytes processed for the job.
  google.protobuf.Int64Value estimated_bytes_processed = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Describes a timeline of job execution.
  repeated QueryTimelineSample timeline = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Total number of partitions processed from all partitioned
  // tables referenced in the job.
  google.protobuf.Int64Value total_partitions_processed = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Total bytes processed for the job.
  google.protobuf.Int64Value total_bytes_processed = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. For dry-run jobs, totalBytesProcessed is an estimate and
  // this field specifies the accuracy of the estimate. Possible values can
  // be:
  // UNKNOWN: accuracy of the estimate is unknown.
  // PRECISE: estimate is precise.
  // LOWER_BOUND: estimate is lower bound of what the query would cost.
  // UPPER_BOUND: estimate is upper bound of what the query would cost.
  string total_bytes_processed_accuracy = 21
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. If the project is configured to use on-demand pricing,
  // then this field contains the total bytes billed for the job.
  // If the project is configured to use flat-rate pricing, then you are
  // not billed for bytes and this field is informational only.
  google.protobuf.Int64Value total_bytes_billed = 6
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Billing tier for the job. This is a BigQuery-specific
  // concept which is not related to the Google Cloud notion of "free tier".
  // The value here is a measure of the query's resource consumption relative
  // to the amount of data scanned. For on-demand queries, the limit is 100,
  // and all queries within this limit are billed at the standard on-demand
  // rates. On-demand queries that exceed this limit will fail with a
  // billingTierLimitExceeded error.
  google.protobuf.Int32Value billing_tier = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Slot-milliseconds for the job.
  google.protobuf.Int64Value total_slot_ms = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Whether the query result was fetched from the query cache.
  google.protobuf.BoolValue cache_hit = 9
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Referenced tables for the job. Queries that reference more
  // than 50 tables will not have a complete list.
  repeated TableReference referenced_tables = 10
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Referenced routines for the job.
  repeated RoutineReference referenced_routines = 24
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The schema of the results. Present only for successful dry
  // run of non-legacy SQL queries.
  TableSchema schema = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The number of rows affected by a DML statement. Present
  // only for DML statements INSERT, UPDATE or DELETE.
  google.protobuf.Int64Value num_dml_affected_rows = 12
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Detailed statistics for DML statements INSERT, UPDATE,
  // DELETE, MERGE or TRUNCATE.
  DmlStats dml_stats = 32 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. GoogleSQL only: list of undeclared query
  // parameters detected during a dry run validation.
  repeated QueryParameter undeclared_query_parameters = 13
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The type of query statement, if valid.
  // Possible values:
  //
  // * `SELECT`:
  // [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list)
  // statement.
  // * `ASSERT`:
  // [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert)
  // statement.
  // * `INSERT`:
  // [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement)
  // statement.
  // * `UPDATE`:
  // [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement)
  // statement.
  // * `DELETE`:
  // [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language)
  // statement.
  // * `MERGE`:
  // [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language)
  // statement.
  // * `CREATE_TABLE`: [`CREATE
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement)
  // statement, without `AS SELECT`.
  // * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS
  // SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement)
  // statement.
  // * `CREATE_VIEW`: [`CREATE
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement)
  // statement.
  // * `CREATE_MODEL`: [`CREATE
  // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement)
  // statement.
  // * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement)
  // statement.
  // * `CREATE_FUNCTION`: [`CREATE
  // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement)
  // statement.
  // * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE
  // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement)
  // statement.
  // * `CREATE_PROCEDURE`: [`CREATE
  // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure)
  // statement.
  // * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS
  // POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement)
  // statement.
  // * `CREATE_SCHEMA`: [`CREATE
  // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement)
  // statement.
  // * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement)
  // statement.
  // * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH
  // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement)
  // statement.
  // * `DROP_TABLE`: [`DROP
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement)
  // statement.
  // * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement)
  // statement.
  // * `DROP_VIEW`: [`DROP
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement)
  // statement.
  // * `DROP_MODEL`: [`DROP
  // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model)
  // statement.
  // * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement)
  // statement.
  // * `DROP_FUNCTION` : [`DROP
  // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement)
  // statement.
  // * `DROP_TABLE_FUNCTION` : [`DROP TABLE
  // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function)
  // statement.
  // * `DROP_PROCEDURE`: [`DROP
  // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement)
  // statement.
  // * `DROP_SEARCH_INDEX`: [`DROP SEARCH
  // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index)
  // statement.
  // * `DROP_SCHEMA`: [`DROP
  // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement)
  // statement.
  // * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement)
  // statement.
  // * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS
  // POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement)
  // statement.
  // * `ALTER_TABLE`: [`ALTER
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement)
  // statement.
  // * `ALTER_VIEW`: [`ALTER
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement)
  // statement.
  // * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED
  // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement)
  // statement.
  // * `ALTER_SCHEMA`: [`ALTER
  // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_schema_set_options_statement)
  // statement.
  // * `SCRIPT`:
  // [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language).
  // * `TRUNCATE_TABLE`: [`TRUNCATE
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement)
  // statement.
  // * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL
  // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement)
  // statement.
  // * `EXPORT_DATA`: [`EXPORT
  // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement)
  // statement.
  // * `EXPORT_MODEL`: [`EXPORT
  // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model)
  // statement.
  // * `LOAD_DATA`: [`LOAD
  // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement)
  // statement.
  // * `CALL`:
  // [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call)
  // statement.
  string statement_type = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The DDL operation performed, possibly
  // dependent on the pre-existence of the DDL target.
  string ddl_operation_performed = 15
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The DDL target table. Present only for
  // CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries.
  TableReference ddl_target_table = 16
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The table after rename. Present only for ALTER TABLE RENAME
  // TO query.
  TableReference ddl_destination_table = 31
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The DDL target row access policy. Present only for
  // CREATE/DROP ROW ACCESS POLICY queries.
  RowAccessPolicyReference ddl_target_row_access_policy = 26
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The number of row access policies affected by a DDL
  // statement. Present only for DROP ALL ROW ACCESS POLICIES queries.
  google.protobuf.Int64Value ddl_affected_row_access_policy_count = 27
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. [Beta] The DDL target routine. Present only for
  // CREATE/DROP FUNCTION/PROCEDURE queries.
  RoutineReference ddl_target_routine = 22
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. The DDL target dataset. Present only for CREATE/ALTER/DROP
  // SCHEMA(dataset) queries.
  DatasetReference ddl_target_dataset = 30
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Statistics of a BigQuery ML training job.
  MlStatistics ml_statistics = 23 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Stats for EXPORT DATA statement.
  ExportDataStatistics export_data_statistics = 25
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Job cost breakdown as bigquery internal cost and external
  // service costs.
  repeated ExternalServiceCost external_service_costs = 28
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. BI Engine specific Statistics.
  BiEngineStatistics bi_engine_statistics = 29
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Statistics for a LOAD query.
  LoadQueryStatistics load_query_statistics = 33
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Referenced table for DCL statement.
  TableReference dcl_target_table = 34
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Referenced view for DCL statement.
  TableReference dcl_target_view = 35
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Referenced dataset for DCL statement.
  DatasetReference dcl_target_dataset = 36
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Search query specific statistics.
  SearchStatistics search_statistics = 37
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Vector Search query specific statistics.
  VectorSearchStatistics vector_search_statistics = 44
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Performance insights.
  PerformanceInsights performance_insights = 38
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Query optimization information for a QUERY job.
  QueryInfo query_info = 39 [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Statistics of a Spark procedure job.
  SparkStatistics spark_statistics = 40
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Total bytes transferred for cross-cloud queries such as
  // Cross Cloud Transfer and CREATE TABLE AS SELECT (CTAS).
  google.protobuf.Int64Value transferred_bytes = 41
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Statistics of materialized views of a query job.
  MaterializedViewStatistics materialized_view_statistics = 42
      [(google.api.field_behavior) = OUTPUT_ONLY];
  // Output only. Statistics of metadata cache usage in a query for BigLake
  // tables.
  MetadataCacheStatistics metadata_cache_statistics = 43
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a load job.
// All counters use Int64Value wrappers so "unset" is distinguishable from 0.
message JobStatistics3 {
  // Output only. Number of source files in a load job.
  google.protobuf.Int64Value input_files = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of bytes of source data in a load job.
  google.protobuf.Int64Value input_file_bytes = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of rows imported in a load job.
  // Note that while an import job is in the running state, this
  // value may change.
  google.protobuf.Int64Value output_rows = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Size of the loaded data in bytes. Note
  // that while a load job is in the running state, this value may change.
  google.protobuf.Int64Value output_bytes = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The number of bad records encountered. Note that if the job
  // has failed because of more bad records encountered than the maximum
  // allowed in the load job configuration, then this number can be less than
  // the total number of bad records present in the input data.
  google.protobuf.Int64Value bad_records = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Describes a timeline of job execution.
  repeated QueryTimelineSample timeline = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for an extract job.
message JobStatistics4 {
  // Output only. Number of files per destination URI or URI pattern
  // specified in the extract configuration. These values will be in the same
  // order as the URIs specified in the 'destinationUris' field.
  repeated int64 destination_uri_file_counts = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of user bytes extracted into the result. This is the
  // byte count as computed by BigQuery for billing purposes
  // and doesn't have any relationship with the number of actual
  // result bytes extracted in the desired format.
  google.protobuf.Int64Value input_bytes = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Describes a timeline of job execution.
  repeated QueryTimelineSample timeline = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a copy job.
message CopyJobStatistics {
  // Output only. Number of rows copied to the destination table.
  google.protobuf.Int64Value copied_rows = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of logical bytes copied to the destination table.
  google.protobuf.Int64Value copied_logical_bytes = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Job statistics specific to a BigQuery ML training job.
message MlStatistics {
  // Training type.
  enum TrainingType {
    // Unspecified training type.
    TRAINING_TYPE_UNSPECIFIED = 0;

    // Single training with fixed parameter space.
    SINGLE_TRAINING = 1;

    // [Hyperparameter tuning
    // training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview).
    HPARAM_TUNING = 2;
  }

  // Output only. Maximum number of iterations specified as max_iterations in
  // the 'CREATE MODEL' query. The actual number of iterations may be less than
  // this number due to early stop.
  int64 max_iterations = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Results for all completed iterations.
  // Empty for [hyperparameter tuning
  // jobs](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview).
  repeated Model.TrainingRun.IterationResult iteration_results = 2;

  // Output only. The type of the model that is being trained.
  Model.ModelType model_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Training type of the job.
  TrainingType training_type = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Trials of a [hyperparameter tuning
  // job](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
  // sorted by trial_id.
  repeated Model.HparamTuningTrial hparam_trials = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Job statistics specific to the child job of a script.
message ScriptStatistics {
  // Describes how the job is evaluated.
  enum EvaluationKind {
    // Default value.
    EVALUATION_KIND_UNSPECIFIED = 0;

    // The statement appears directly in the script.
    STATEMENT = 1;

    // The statement evaluates an expression that appears in the script.
    EXPRESSION = 2;
  }

  // Represents the location of the statement/expression being evaluated.
  // Line and column numbers are defined as follows:
  //
  // - Line and column numbers start with one. That is, line 1 column 1 denotes
  //   the start of the script.
  // - When inside a stored procedure, all line/column numbers are relative
  //   to the procedure body, not the script in which the procedure was
  //   defined.
  // - Start/end positions exclude leading/trailing comments and whitespace.
  //   The end position always ends with a ";", when present.
  // - Multi-byte Unicode characters are treated as just one column.
  // - If the original script (or procedure definition) contains TAB
  //   characters, a tab "snaps" the indentation forward to the nearest
  //   multiple of 8 characters, plus 1. For example, a TAB on column 1, 2, 3,
  //   4, 5, 6, 7, or 8 will advance the next character to column 9. A TAB on
  //   column 9, 10, 11, 12, 13, 14, 15, or 16 will advance the next character
  //   to column 17.
  message ScriptStackFrame {
    // Output only. One-based start line.
    int32 start_line = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. One-based start column.
    int32 start_column = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. One-based end line.
    int32 end_line = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. One-based end column.
    int32 end_column = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. Name of the active procedure, empty if in a top-level
    // script.
    string procedure_id = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. Text of the current statement/expression.
    string text = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Whether this child job was a statement or expression.
  EvaluationKind evaluation_kind = 1;

  // Stack trace showing the line/column/procedure name of each frame on the
  // stack at the point where the current evaluation happened. The leaf frame
  // is first, the primary script is last. Never empty.
  repeated ScriptStackFrame stack_frames = 2;
}
// Statistics for row-level security.
message RowLevelSecurityStatistics {
  // Whether any accessed data was protected by row access policies.
  bool row_level_security_applied = 1;
}
// Statistics for data-masking.
message DataMaskingStatistics {
  // Whether any accessed data was protected by the data masking.
  bool data_masking_applied = 1;
}
// Statistics for a single job execution.
// Exactly one of the per-type statistics sub-messages (query, load, extract,
// copy) is expected to be populated, according to the job's type.
message JobStatistics {
  // [Alpha] Information of a multi-statement transaction.
  message TransactionInfo {
    // Output only. [Alpha] Id of the transaction.
    string transaction_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Output only. Creation time of this job, in milliseconds since the epoch.
  // This field will be present on all jobs.
  int64 creation_time = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Start time of this job, in milliseconds since the epoch.
  // This field will be present when the job transitions from the PENDING state
  // to either RUNNING or DONE.
  int64 start_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. End time of this job, in milliseconds since the epoch. This
  // field will be present whenever a job is in the DONE state.
  int64 end_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Total bytes processed for the job.
  google.protobuf.Int64Value total_bytes_processed = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [TrustedTester] Job progress (0.0 -> 1.0) for LOAD and
  // EXTRACT jobs.
  google.protobuf.DoubleValue completion_ratio = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Quotas which delayed this job's start time.
  repeated string quota_deferments = 9
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for a query job.
  JobStatistics2 query = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for a load job.
  JobStatistics3 load = 7 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for an extract job.
  JobStatistics4 extract = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for a copy job.
  CopyJobStatistics copy = 21 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Slot-milliseconds for the job.
  google.protobuf.Int64Value total_slot_ms = 10
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Name of the primary reservation assigned to this job. Note
  // that this could be different than reservations reported in the reservation
  // usage field if parent reservations were used to execute this job.
  string reservation_id = 15 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of child jobs executed.
  int64 num_child_jobs = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. If this is a child job, specifies the job ID of the parent.
  string parent_job_id = 13 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. If this is a child job of a script, specifies information
  // about the context of this job within the script.
  ScriptStatistics script_statistics = 14
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for row-level security. Present only for query and
  // extract jobs.
  RowLevelSecurityStatistics row_level_security_statistics = 16
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Statistics for data-masking. Present only for query and
  // extract jobs.
  DataMaskingStatistics data_masking_statistics = 20
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Alpha] Information of the multi-statement transaction if
  // this job is part of one.
  //
  // This property is only expected on a child job or a job that is in a
  // session. A script parent job is not part of the transaction started in the
  // script.
  TransactionInfo transaction_info = 17
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Information of the session if this job is part of one.
  SessionInfo session_info = 18 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The duration in milliseconds of the execution of the final
  // attempt of this job, as BigQuery may internally re-attempt to execute the
  // job.
  int64 final_execution_duration_ms = 22
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Name of edition corresponding to the reservation for this job
  // at the time of this update.
  ReservationEdition edition = 24 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Detailed statistics for DML statements.
message DmlStats {
  // Output only. Number of inserted Rows. Populated by DML INSERT and MERGE
  // statements.
  google.protobuf.Int64Value inserted_row_count = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of deleted Rows. Populated by DML DELETE, MERGE and
  // TRUNCATE statements.
  google.protobuf.Int64Value deleted_row_count = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Number of updated Rows. Populated by DML UPDATE and MERGE
  // statements.
  google.protobuf.Int64Value updated_row_count = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Performance insights for the job.
message PerformanceInsights {
  // Output only. Average execution ms of previous runs. Indicates the job ran
  // slow compared to previous executions. To find previous executions, use
  // INFORMATION_SCHEMA tables and filter jobs with same query hash.
  int64 avg_previous_execution_ms = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Standalone query stage performance insights, for exploring
  // potential improvements.
  repeated StagePerformanceStandaloneInsight
      stage_performance_standalone_insights = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Query stage performance insights compared to previous runs,
  // for diagnosing performance regression.
  repeated StagePerformanceChangeInsight stage_performance_change_insights = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Performance insights compared to the previous executions for a specific
// stage.
message StagePerformanceChangeInsight {
  // Output only. The stage id that the insight mapped to.
  int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Input data change insight of the query stage.
  optional InputDataChange input_data_change = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Details about the input data change insight.
message InputDataChange {
  // Output only. Records read difference percentage compared to a previous
  // run.
  float records_read_diff_percentage = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Standalone performance insights for a specific stage.
message StagePerformanceStandaloneInsight {
  // Output only. The stage id that the insight mapped to.
  int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. True if the stage has a slot contention issue.
  optional bool slot_contention = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. True if the stage has insufficient shuffle quota.
  optional bool insufficient_shuffle_quota = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. If present, the stage had the following reasons for being
  // disqualified from BI Engine execution.
  repeated BiEngineReason bi_engine_reasons = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. High cardinality joins in the stage.
  repeated HighCardinalityJoin high_cardinality_joins = 6
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Partition skew in the stage.
  optional PartitionSkew partition_skew = 7
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// High cardinality join detailed information.
message HighCardinalityJoin {
  // Output only. Count of left input rows.
  int64 left_rows = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Count of right input rows.
  int64 right_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Count of the output rows.
  int64 output_rows = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The index of the join operator in the ExplainQueryStep lists.
  int32 step_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Partition skew detailed information.
message PartitionSkew {
  // Details about source stages which produce skewed data.
  message SkewSource {
    // Output only. Stage id of the skew source stage.
    int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Output only. Source stages which produce skewed data.
  repeated SkewSource skew_sources = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Statistics for a BigSpark query.
// Populated as part of JobStatistics2.
message SparkStatistics {
  // Spark job logs can be filtered by these fields in Cloud Logging.
  message LoggingInfo {
    // Output only. Resource type used for logging.
    string resource_type = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. Project ID where the Spark logs were written.
    string project_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Output only. Spark job ID if a Spark job is created successfully.
  optional string spark_job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Location where the Spark job is executed.
  // A location is selected by BigQuery for jobs configured to run in a
  // multi-region.
  optional string spark_job_location = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Endpoints returned from Dataproc.
  // Key list:
  //  - history_server_endpoint: A link to Spark job UI.
  //
  // Fix: the field was declared as a bare `map`, which is not valid proto3
  // syntax; map fields require explicit key/value types.
  map<string, string> endpoints = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Logging info is used to generate a link to Cloud Logging.
  optional LoggingInfo logging_info = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The Cloud KMS encryption key that is used to protect the
  // resources created by the Spark job. If the Spark procedure uses the
  // invoker security mode, the Cloud KMS encryption key is either inferred
  // from the provided system variable,
  // `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery
  // job's project (if the CMEK organization policy is enforced). Otherwise,
  // the Cloud KMS key is either inferred from the Spark connection associated
  // with the procedure (if it is provided), or from the default key of the
  // Spark connection's project if the CMEK organization policy is enforced.
  //
  // Example:
  //
  // * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
  optional string kms_key_name = 5 [
    (google.api.field_behavior) = OUTPUT_ONLY,
    (google.api.resource_reference) = {
      type: "cloudkms.googleapis.com/CryptoKey"
    }
  ];

  // Output only. The Google Cloud Storage bucket that is used as the default
  // file system by the Spark application. This field is only filled when the
  // Spark procedure uses the invoker security mode. The `gcsStagingBucket`
  // bucket is inferred from the `@@spark_proc_properties.staging_bucket`
  // system variable (if it is provided). Otherwise, BigQuery creates a default
  // staging bucket for the job and returns the bucket name in this field.
  //
  // Example:
  //
  // * `gs://[bucket_name]`
  optional string gcs_staging_bucket = 6 [
    (google.api.field_behavior) = OUTPUT_ONLY,
    (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" }
  ];
}
// Statistics of materialized views considered in a query job.
message MaterializedViewStatistics {
  // Materialized views considered for the query job. Only certain materialized
  // views are used. For a detailed list, see the child message.
  //
  // If many materialized views are considered, then the list might be
  // incomplete.
  //
  // NOTE(review): the field name is singular despite being repeated; renaming
  // a published field would break the JSON representation, so it is kept.
  repeated MaterializedView materialized_view = 1;
}
// A materialized view considered for a query job.
message MaterializedView {
  // Reason why a materialized view was not chosen for a query. For more
  // information, see [Understand why materialized views were
  // rejected](https://cloud.google.com/bigquery/docs/materialized-views-use#understand-rejected).
  enum RejectedReason {
    // Default unspecified value.
    REJECTED_REASON_UNSPECIFIED = 0;

    // View has no cached data because it has not refreshed yet.
    NO_DATA = 1;

    // The estimated cost of the view is more expensive than another view or
    // the base table.
    //
    // Note: The estimated cost might not match the billed cost.
    COST = 2;

    // View has no cached data because a base table is truncated.
    BASE_TABLE_TRUNCATED = 3;

    // View is invalidated because of a data change in one or more base tables.
    // It could be any recent change if the
    // [`max_staleness`](https://cloud.google.com/bigquery/docs/materialized-views-create#max_staleness)
    // option is not set for the view, or otherwise any change outside of the
    // staleness window.
    BASE_TABLE_DATA_CHANGE = 4;

    // View is invalidated because a base table's partition expiration has
    // changed.
    BASE_TABLE_PARTITION_EXPIRATION_CHANGE = 5;

    // View is invalidated because a base table's partition has expired.
    BASE_TABLE_EXPIRED_PARTITION = 6;

    // View is invalidated because a base table has an incompatible metadata
    // change.
    BASE_TABLE_INCOMPATIBLE_METADATA_CHANGE = 7;

    // View is invalidated because it was refreshed with a time zone other than
    // that of the current job.
    TIME_ZONE = 8;

    // View is outside the time travel window.
    OUT_OF_TIME_TRAVEL_WINDOW = 9;

    // View is inaccessible to the user because of a fine-grained security
    // policy on one of its base tables.
    BASE_TABLE_FINE_GRAINED_SECURITY_POLICY = 10;

    // One of the view's base tables is too stale. For example, the cached
    // metadata of a BigLake external table needs to be updated.
    BASE_TABLE_TOO_STALE = 11;
  }

  // The candidate materialized view.
  optional TableReference table_reference = 1;

  // Whether the materialized view is chosen for the query.
  //
  // A materialized view can be chosen to rewrite multiple parts of the same
  // query. If a materialized view is chosen to rewrite any part of the query,
  // then this field is true, even if the materialized view was not chosen to
  // rewrite other parts.
  optional bool chosen = 2;

  // If present, specifies a best-effort estimation of the bytes saved by using
  // the materialized view rather than its base tables.
  optional int64 estimated_bytes_saved = 3;

  // If present, specifies the reason why the materialized view was not chosen
  // for the query.
  optional RejectedReason rejected_reason = 4;
}
// Table level detail on the usage of metadata caching. Only set for Metadata
// caching eligible tables referenced in the query.
message TableMetadataCacheUsage {
  // Reasons for not using metadata caching.
  enum UnusedReason {
    // Unused reasons not specified.
    UNUSED_REASON_UNSPECIFIED = 0;

    // Metadata cache was outside the table's maxStaleness.
    EXCEEDED_MAX_STALENESS = 1;

    // Metadata caching feature is not enabled. [Update BigLake tables]
    // (/bigquery/docs/create-cloud-storage-table-biglake#update-biglake-tables)
    // to enable the metadata caching.
    //
    // NOTE: declared out of numeric order; the wire value 3 is what matters.
    METADATA_CACHING_NOT_ENABLED = 3;

    // Other unknown reason.
    OTHER_REASON = 2;
  }

  // Metadata caching eligible table referenced in the query.
  optional TableReference table_reference = 1;

  // Reason for not using metadata caching for the table.
  optional UnusedReason unused_reason = 2;

  // Free form human-readable reason metadata caching was unused for
  // the job.
  optional string explanation = 3;

  // Duration since last refresh as of this job for managed tables (indicates
  // metadata cache staleness as seen by this job).
  google.protobuf.Duration staleness = 5;

  // [Table
  // type](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type).
  string table_type = 6;
}
// Statistics for metadata caching in BigLake tables.
message MetadataCacheStatistics {
  // Set for the Metadata caching eligible tables referenced in the query.
  repeated TableMetadataCacheUsage table_metadata_cache_usage = 1;
}
// The type of editions.
// Different features and behaviors are provided to different editions.
// Capacity commitments and reservations are linked to editions.
//
// NOTE(review): values are not prefixed with the enum name (e.g.
// RESERVATION_EDITION_STANDARD); kept as-is because renaming values on a
// published API breaks generated code and JSON.
enum ReservationEdition {
  // Default value, which will be treated as ENTERPRISE.
  RESERVATION_EDITION_UNSPECIFIED = 0;

  // Standard edition.
  STANDARD = 1;

  // Enterprise edition.
  ENTERPRISE = 2;

  // Enterprise plus edition.
  ENTERPRISE_PLUS = 3;
}