// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.bigquery.v2;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/bigquery/v2/clustering.proto";
import "google/cloud/bigquery/v2/dataset_reference.proto";
import "google/cloud/bigquery/v2/decimal_target_types.proto";
import "google/cloud/bigquery/v2/encryption_config.proto";
import "google/cloud/bigquery/v2/external_data_config.proto";
import "google/cloud/bigquery/v2/file_set_specification_type.proto";
import "google/cloud/bigquery/v2/hive_partitioning.proto";
import "google/cloud/bigquery/v2/json_extension.proto";
import "google/cloud/bigquery/v2/model_reference.proto";
import "google/cloud/bigquery/v2/query_parameter.proto";
import "google/cloud/bigquery/v2/range_partitioning.proto";
import "google/cloud/bigquery/v2/system_variable.proto";
import "google/cloud/bigquery/v2/table_reference.proto";
import "google/cloud/bigquery/v2/table_schema.proto";
import "google/cloud/bigquery/v2/time_partitioning.proto";
import "google/cloud/bigquery/v2/udf_resource.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";

option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
option java_outer_classname = "JobConfigProto";
option java_package = "com.google.cloud.bigquery.v2";

// Properties for the destination table.
message DestinationTableProperties {
  // Optional. Friendly name for the destination table. If the table already
  // exists, it should be the same as the existing friendly name.
  google.protobuf.StringValue friendly_name = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The description for the destination table.
  // This will only be used if the destination table is newly created.
  // If the table already exists and a value different from the current
  // description is provided, the job will fail.
  google.protobuf.StringValue description = 2
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The labels associated with this table. You can use these to
  // organize and group your tables. This will only be used if the destination
  // table is newly created. If the table already exists and labels different
  // from the current labels are provided, the job will fail.
  map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
}

// A connection-level property to customize query behavior. Under JDBC, these
// correspond directly to connection properties passed to the DriverManager.
// Under ODBC, these correspond to properties in the connection string.
//
// Currently supported connection properties:
//
// * **dataset_project_id**: represents the default project for datasets that
// are used in the query. Setting the
// system variable `@@dataset_project_id` achieves the same behavior. For
// more information about system variables, see:
// https://cloud.google.com/bigquery/docs/reference/system-variables
//
// * **time_zone**: represents the default timezone used to run the query.
//
// * **session_id**: associates the query with a given session.
//
// * **query_label**: associates the query with a given job label. If set,
// all subsequent queries in a script or session will have this label. For the
// format in which you can specify a query label, see labels
// in the JobConfiguration resource type:
// https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration
//
// * **service_account**: indicates the service account to use to run a
// continuous query. If set, the query job uses the service account to access
// Google Cloud resources. Service account access is bounded by the IAM
// permissions that you have granted to the service account.
//
// Additional properties are allowed, but ignored. Specifying multiple
// connection properties with the same key returns an error.
message ConnectionProperty {
  // The key of the property to set.
  string key = 1;

  // The value of the property to set.
  string value = 2;
}
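// For illustration only (not part of the API surface): in the REST API's
// JSON encoding, a query request might attach connection properties as
// key/value pairs like the following sketch; the session identifier shown
// here is a made-up placeholder.
//
//   "connectionProperties": [
//     { "key": "time_zone", "value": "America/Los_Angeles" },
//     { "key": "session_id", "value": "example-session-id" }
//   ]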
// JobConfigurationQuery configures a BigQuery query job.
message JobConfigurationQuery {
  // [Required] SQL query text to execute. The useLegacySql field can be used
  // to indicate whether the query uses legacy SQL or GoogleSQL.
  string query = 1;

  // Optional. Describes the table where the query results should be stored.
  // This property must be set for large results that exceed the maximum
  // response size. For queries that produce anonymous (cached) results, this
  // field will be populated by BigQuery.
  TableReference destination_table = 2
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. You can specify external table definitions, which operate as
  // ephemeral tables that can be queried. These definitions are configured
  // using a JSON map, where the string key represents the table identifier,
  // and the value is the corresponding external data configuration object.
  map<string, ExternalDataConfiguration> external_table_definitions = 23
      [(google.api.field_behavior) = OPTIONAL];

  // Describes user-defined function resources used in the query.
  repeated UserDefinedFunctionResource user_defined_function_resources = 4;

  // Optional. Specifies whether the job is allowed to create new tables.
  // The following values are supported:
  //
  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
  // table.
  // * CREATE_NEVER: The table must already exist. If it does not,
  // a 'notFound' error is returned in the job result.
  //
  // The default value is CREATE_IF_NEEDED.
  // Creation, truncation and append actions occur as one atomic update
  // upon job completion.
  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies the action that occurs if the destination table
  // already exists. The following values are supported:
  //
  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
  // data, removes the constraints, and uses the schema from the query
  // result.
  // * WRITE_APPEND: If the table already exists, BigQuery appends the data
  // to the table.
  // * WRITE_EMPTY: If the table already exists and contains data, a
  // 'duplicate' error is returned in the job result.
  //
  // The default value is WRITE_EMPTY. Each action is atomic and only occurs
  // if BigQuery is able to complete the job successfully. Creation,
  // truncation and append actions occur as one atomic update upon job
  // completion.
  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies the default dataset to use for unqualified
  // table names in the query. This setting does not alter behavior of
  // unqualified dataset names.
  // Setting the system variable
  // `@@dataset_id` achieves the same behavior. See
  // https://cloud.google.com/bigquery/docs/reference/system-variables for
  // more information on system variables.
  DatasetReference default_dataset = 7
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies a priority for the query. Possible values include
  // INTERACTIVE and BATCH. The default value is INTERACTIVE.
  string priority = 8 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If true and the query uses the legacy SQL dialect, allows the
  // query to produce arbitrarily large result tables at a slight cost in
  // performance. Requires destinationTable to be set.
  // For GoogleSQL queries, this flag is ignored and large results are
  // always allowed. However, you must still set destinationTable when result
  // size exceeds the allowed maximum response size.
  google.protobuf.BoolValue allow_large_results = 10
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Whether to look for the result in the query cache. The query
  // cache is a best-effort cache that will be flushed whenever tables in the
  // query are modified. Moreover, the query cache is only available when a
  // query does not have a destination table specified. The default value is
  // true.
  google.protobuf.BoolValue use_query_cache = 11
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. If true and the query uses the legacy SQL dialect, flattens
  // all nested and repeated fields in the query results.
  // allowLargeResults must be true if this is set to false.
  // For GoogleSQL queries, this flag is ignored and results are never
  // flattened.
  google.protobuf.BoolValue flatten_results = 12
      [(google.api.field_behavior) = OPTIONAL];

  // Limits the bytes billed for this job. Queries that will have
  // bytes billed beyond this limit will fail (without incurring a charge).
  // If unspecified, this will be set to your project default.
  google.protobuf.Int64Value maximum_bytes_billed = 14;

  // Optional. Specifies whether to use BigQuery's legacy SQL dialect for
  // this query. The default value is true. If set to false, the query will
  // use BigQuery's GoogleSQL:
  // https://cloud.google.com/bigquery/sql-reference/
  //
  // When useLegacySql is set to false, the value of flattenResults is
  // ignored; the query will be run as if flattenResults is false.
  google.protobuf.BoolValue use_legacy_sql = 15
      [(google.api.field_behavior) = OPTIONAL];

  // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters
  // or to NAMED to use named (@myparam) query parameters in this query.
  string parameter_mode = 16;

  // Query parameters for GoogleSQL queries.
  repeated QueryParameter query_parameters = 17;

  // Output only. System variables for GoogleSQL queries. A system variable
  // is output if the variable is settable and its value differs from the
  // system default. The "@@" prefix is not included in the names of the
  // system variables.
  optional SystemVariables system_variables = 35
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Allows the schema of the destination table to be updated as a side
  // effect of the query job. Schema update options are supported in two
  // cases:
  // when writeDisposition is WRITE_APPEND;
  // when writeDisposition is WRITE_TRUNCATE and the destination table is a
  // partition of a table, specified by partition decorators. For normal
  // tables, WRITE_TRUNCATE will always overwrite the schema.
  // One or more of the following values are specified:
  //
  // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
  // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
  // original schema to nullable.
  repeated string schema_update_options = 18;

  // Time-based partitioning specification for the destination table. Only
  // one of timePartitioning and rangePartitioning should be specified.
  TimePartitioning time_partitioning = 19;

  // Range partitioning specification for the destination table.
  // Only one of timePartitioning and rangePartitioning should be specified.
  RangePartitioning range_partitioning = 22;

  // Clustering specification for the destination table.
  Clustering clustering = 20;

  // Custom encryption configuration (e.g., Cloud KMS keys)
  EncryptionConfiguration destination_encryption_configuration = 21;

  // Options controlling the execution of scripts.
  ScriptOptions script_options = 24;

  // Connection properties which can modify the query behavior.
  repeated ConnectionProperty connection_properties = 33;

  // If this property is true, the job creates a new session using a
  // randomly generated session_id. To continue using a created session with
  // subsequent queries, pass the existing session identifier as a
  // `ConnectionProperty` value. The session identifier is returned as part
  // of the `SessionInfo` message within the query statistics.
  //
  // The new session's location will be set to `Job.JobReference.location`
  // if it is present, otherwise it's set to the default location based on
  // existing routing logic.
  google.protobuf.BoolValue create_session = 34;

  // Optional. Whether to run the query as a continuous query or a regular
  // query. Continuous queries are currently in the experimental stage and
  // not ready for general usage.
  google.protobuf.BoolValue continuous = 36
      [(google.api.field_behavior) = OPTIONAL];
}

// Options related to script execution.
message ScriptOptions {
  // KeyResultStatementKind controls how the key result is determined.
  enum KeyResultStatementKind {
    // Default value.
    KEY_RESULT_STATEMENT_KIND_UNSPECIFIED = 0;

    // The last result determines the key result.
    LAST = 1;

    // The first SELECT statement determines the key result.
    FIRST_SELECT = 2;
  }

  // Timeout period for each statement in a script.
  google.protobuf.Int64Value statement_timeout_ms = 1;

  // Limit on the number of bytes billed per statement. Exceeding this budget
  // results in an error.
  google.protobuf.Int64Value statement_byte_budget = 2;

  // Determines which statement in the script represents the "key result",
  // used to populate the schema and query results of the script job.
  // Default is LAST.
  KeyResultStatementKind key_result_statement = 4;
}
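// For illustration only (not part of the API surface): a minimal query job
// configuration in the REST API's JSON encoding might combine the fields
// above as in the following sketch; the statement byte budget is an
// arbitrary placeholder value.
//
//   "query": {
//     "query": "SELECT 1",
//     "useLegacySql": false,
//     "scriptOptions": {
//       "keyResultStatement": "LAST",
//       "statementByteBudget": "1000000"
//     }
//   }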
// JobConfigurationLoad contains the configuration properties for loading
// data into a destination table.
message JobConfigurationLoad {
  // Indicates the character map used for column names.
  enum ColumnNameCharacterMap {
    // Unspecified column name character map.
    COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED = 0;

    // Support flexible column name and reject invalid column names.
    STRICT = 1;

    // Support alphanumeric + underscore characters and names must start
    // with a letter or underscore. Invalid column names will be normalized.
    V1 = 2;

    // Support flexible column name. Invalid column names will be normalized.
    V2 = 3;
  }

  // [Required] The fully-qualified URIs that point to your data in Google
  // Cloud.
  // For Google Cloud Storage URIs:
  // Each URI can contain one '*' wildcard character and it must come after
  // the 'bucket' name. Size limits related to load jobs apply to external
  // data sources.
  // For Google Cloud Bigtable URIs:
  // Exactly one URI can be specified and it has to be a fully specified and
  // valid HTTPS URL for a Google Cloud Bigtable table.
  // For Google Cloud Datastore backups:
  // Exactly one URI can be specified. Also, the '*' wildcard character is
  // not allowed.
  repeated string source_uris = 1;

  // Optional. Specifies how source URIs are interpreted for constructing
  // the file set to load. By default, source URIs are expanded against the
  // underlying storage. You can also specify manifest files to control how
  // the file set is constructed. This option is only applicable to object
  // storage systems.
  FileSetSpecType file_set_spec_type = 49
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The schema for the destination table. The schema can be
  // omitted if the destination table already exists, or if you're loading
  // data from Google Cloud Datastore.
  TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL];

  // [Required] The destination table to load the data into.
  TableReference destination_table = 3;

  // Optional. [Experimental] Properties with which to create the
  // destination table if it is new.
  DestinationTableProperties destination_table_properties = 4
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies whether the job is allowed to create new tables.
  // The following values are supported:
  //
  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
  // table.
  // * CREATE_NEVER: The table must already exist. If it does not,
  // a 'notFound' error is returned in the job result.
  //
  // The default value is CREATE_IF_NEEDED.
  // Creation, truncation and append actions occur as one atomic update
  // upon job completion.
  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies the action that occurs if the destination table
  // already exists. The following values are supported:
  //
  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
  // data, removes the constraints and uses the schema from the load job.
  // * WRITE_APPEND: If the table already exists, BigQuery appends the data
  // to the table.
  // * WRITE_EMPTY: If the table already exists and contains data, a
  // 'duplicate' error is returned in the job result.
  //
  // The default value is WRITE_APPEND.
  // Each action is atomic and only occurs if BigQuery is able to complete
  // the job successfully.
  // Creation, truncation and append actions occur as one atomic update
  // upon job completion.
  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies a string that represents a null value in a CSV
  // file. For example, if you specify "\N", BigQuery interprets "\N" as a
  // null value when loading a CSV file.
  // The default value is the empty string. If you set this property to a
  // custom value, BigQuery throws an error if an empty string is present
  // for all data types except for STRING and BYTE. For STRING and BYTE
  // columns, BigQuery interprets the empty string as an empty value.
  google.protobuf.StringValue null_marker = 7
      [(google.api.field_behavior) = OPTIONAL];
  // Optional. The separator character for fields in a CSV file. The
  // separator is interpreted as a single byte. For files encoded in
  // ISO-8859-1, any single character can be used as a separator. For files
  // encoded in UTF-8, characters represented in decimal range 1-127
  // (U+0001-U+007F) can be used without any modification. UTF-8 characters
  // encoded with multiple bytes (i.e. U+0080 and above) will have only the
  // first byte used for separating fields. The remaining bytes will be
  // treated as a part of the field.
  // BigQuery also supports the escape sequence "\t" (U+0009) to specify a
  // tab separator. The default value is comma (",", U+002C).
  string field_delimiter = 8 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The number of rows at the top of a CSV file that BigQuery
  // will skip when loading the data. The default value is 0. This property
  // is useful if you have header rows in the file that should be skipped.
  // When autodetect is on, the behavior is the following:
  //
  // * skipLeadingRows unspecified - Autodetect tries to detect headers in
  // the first row. If they are not detected, the row is read as data.
  // Otherwise data is read starting from the second row.
  // * skipLeadingRows is 0 - Instructs autodetect that there are no headers
  // and data should be read starting from the first row.
  // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to
  // detect headers in row N. If headers are not detected, row N is just
  // skipped. Otherwise row N is used to extract column names for the
  // detected schema.
  google.protobuf.Int32Value skip_leading_rows = 9
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The character encoding of the data.
  // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE,
  // UTF-32BE, and UTF-32LE. The default value is UTF-8. BigQuery decodes
  // the data after the raw, binary data has been split using the values of
  // the `quote` and `fieldDelimiter` properties.
  //
  // If you don't specify an encoding, or if you specify a UTF-8 encoding
  // when the CSV file is not UTF-8 encoded, BigQuery attempts to convert
  // the data to UTF-8. Generally, your data loads successfully, but it may
  // not match byte-for-byte what you expect. To avoid this, specify the
  // correct encoding by using the `--encoding` flag.
  //
  // If BigQuery can't convert a character other than the ASCII `0`
  // character, BigQuery converts the character to the standard Unicode
  // replacement character: �.
  string encoding = 10 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The value that is used to quote data sections in a CSV file.
  // BigQuery converts the string to ISO-8859-1 encoding, and then uses the
  // first byte of the encoded string to split the data in its raw, binary
  // state.
  // The default value is a double-quote ('"').
  // If your data does not contain quoted sections, set the property value
  // to an empty string.
  // If your data contains quoted newline characters, you must also set the
  // allowQuotedNewlines property to true.
  // To include the specific quote character within a quoted value, precede
  // it with an additional matching quote character. For example, if you
  // want to escape the default character ' " ', use ' "" '.
  // @default "
  google.protobuf.StringValue quote = 11
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The maximum number of bad records that BigQuery can ignore
  // when running the job. If the number of bad records exceeds this value,
  // an invalid error is returned in the job result.
  // The default value is 0, which requires that all records are valid.
  // This is only supported for CSV and NEWLINE_DELIMITED_JSON file formats.
  google.protobuf.Int32Value max_bad_records = 12
      [(google.api.field_behavior) = OPTIONAL];

  // Indicates if BigQuery should allow quoted data sections that contain
  // newline characters in a CSV file. The default value is false.
  google.protobuf.BoolValue allow_quoted_newlines = 15;

  // Optional. The format of the data files.
  // For CSV files, specify "CSV". For datastore backups,
  // specify "DATASTORE_BACKUP". For newline-delimited JSON,
  // specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO".
  // For parquet, specify "PARQUET". For orc, specify "ORC".
  // The default value is CSV.
  string source_format = 16 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Accept rows that are missing trailing optional columns.
  // The missing values are treated as nulls.
  // If false, records with missing trailing columns are treated as bad
  // records, and if there are too many bad records, an invalid error is
  // returned in the job result.
  // The default value is false.
  // Only applicable to CSV, ignored for other formats.
  google.protobuf.BoolValue allow_jagged_rows = 17
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Indicates if BigQuery should allow extra values that are not
  // represented in the table schema.
  // If true, the extra values are ignored.
  // If false, records with extra columns are treated as bad records, and if
  // there are too many bad records, an invalid error is returned in the job
  // result. The default value is false.
  // The sourceFormat property determines what BigQuery treats as an extra
  // value:
  // CSV: Trailing columns
  // JSON: Named values that don't match any column names in the table schema
  // Avro, Parquet, ORC: Fields in the file schema that don't exist in the
  // table schema.
  google.protobuf.BoolValue ignore_unknown_values = 18
      [(google.api.field_behavior) = OPTIONAL];

  // If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity
  // properties to load into BigQuery from a Cloud Datastore backup.
  // Property names are case sensitive and must be top-level properties. If
  // no properties are specified, BigQuery loads all properties. If any
  // named property isn't found in the Cloud Datastore backup, an invalid
  // error is returned in the job result.
  repeated string projection_fields = 19;

  // Optional. Indicates if we should automatically infer the options and
  // schema for CSV and JSON sources.
  google.protobuf.BoolValue autodetect = 20
      [(google.api.field_behavior) = OPTIONAL];

  // Allows the schema of the destination table to be updated as a side
  // effect of the load job if a schema is autodetected or supplied in the
  // job configuration.
  // Schema update options are supported in two cases:
  // when writeDisposition is WRITE_APPEND;
  // when writeDisposition is WRITE_TRUNCATE and the destination table is a
  // partition of a table, specified by partition decorators. For normal
  // tables, WRITE_TRUNCATE will always overwrite the schema.
  // One or more of the following values are specified:
  //
  // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
  // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
  // original schema to nullable.
  repeated string schema_update_options = 21;

  // Time-based partitioning specification for the destination table. Only
  // one of timePartitioning and rangePartitioning should be specified.
  TimePartitioning time_partitioning = 22;

  // Range partitioning specification for the destination table.
  // Only one of timePartitioning and rangePartitioning should be specified.
  RangePartitioning range_partitioning = 26;

  // Clustering specification for the destination table.
  Clustering clustering = 23;

  // Custom encryption configuration (e.g., Cloud KMS keys)
  EncryptionConfiguration destination_encryption_configuration = 24;
  // Optional. If sourceFormat is set to "AVRO", indicates whether to
  // interpret logical types as the corresponding BigQuery data type (for
  // example, TIMESTAMP), instead of using the raw type (for example,
  // INTEGER).
  google.protobuf.BoolValue use_avro_logical_types = 25
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. The user can provide a reference file with the reader schema.
  // This file is only loaded if it is part of source URIs, but is not
  // loaded otherwise. It is enabled for the following formats: AVRO,
  // PARQUET, ORC.
  google.protobuf.StringValue reference_file_schema_uri = 45
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. When set, configures hive partitioning support.
  // Not all storage formats support hive partitioning -- requesting hive
  // partitioning on an unsupported format will lead to an error, as will
  // providing an invalid specification.
  HivePartitioningOptions hive_partitioning_options = 37
      [(google.api.field_behavior) = OPTIONAL];

  // Defines the list of possible SQL data types to which the source decimal
  // values are converted. This list and the precision and the scale
  // parameters of the decimal field determine the target type. In the order
  // of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the
  // specified list and if it supports the precision and the scale. STRING
  // supports all precision and scale values. If none of the listed types
  // supports the precision and the scale, the type supporting the widest
  // range in the specified list is picked, and if a value exceeds the
  // supported range when reading the data, an error will be thrown.
  //
  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
  // If (precision,scale) is:
  //
  // * (38,9) -> NUMERIC;
  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
  // * (76,38) -> BIGNUMERIC;
  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
  //
  // This field cannot contain duplicate types. The order of the types in
  // this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the
  // same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence
  // over BIGNUMERIC.
  //
  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
  // file formats.
  repeated DecimalTargetType decimal_target_types = 39;

  // Optional. Load option to be used together with source_format
  // newline-delimited JSON to indicate that a variant of JSON is being
  // loaded. To load newline-delimited GeoJSON, specify GEOJSON (and
  // source_format must be set to NEWLINE_DELIMITED_JSON).
  JsonExtension json_extension = 41 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Additional properties to set if sourceFormat is set to
  // PARQUET.
  ParquetOptions parquet_options = 42
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. When sourceFormat is set to "CSV", this indicates whether the
  // embedded ASCII control characters (the first 32 characters in the
  // ASCII-table, from '\x00' to '\x1F') are preserved.
  google.protobuf.BoolValue preserve_ascii_control_characters = 44
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Connection properties which can modify the load job behavior.
  // Currently, only the 'session_id' connection property is supported, and
  // is used to resolve _SESSION appearing as the dataset id.
  repeated ConnectionProperty connection_properties = 46
      [(google.api.field_behavior) = OPTIONAL];
  // Optional. If this property is true, the job creates a new session using
  // a randomly generated session_id. To continue using a created session
  // with subsequent queries, pass the existing session identifier as a
  // `ConnectionProperty` value. The session identifier is returned as part
  // of the `SessionInfo` message within the query statistics.
  //
  // The new session's location will be set to `Job.JobReference.location`
  // if it is present, otherwise it's set to the default location based on
  // existing routing logic.
  google.protobuf.BoolValue create_session = 47
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Character map supported for column names in CSV/Parquet
  // loads. Defaults to STRICT and can be overridden by Project Config
  // Service. Using this option with unsupported load formats will result
  // in an error.
  ColumnNameCharacterMap column_name_character_map = 50
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. [Experimental] Configures the load job to copy files directly
  // to the destination BigLake managed table, bypassing file content
  // reading and rewriting.
  //
  // Copying files only is supported when all the following are true:
  //
  // * `source_uris` are located in the same Cloud Storage location as the
  // destination table's `storage_uri` location.
  // * `source_format` is `PARQUET`.
  // * `destination_table` is an existing BigLake managed table. The table's
  // schema does not have flexible column names. The table's columns do not
  // have type parameters other than precision and scale.
  // * No options other than the above are specified.
  google.protobuf.BoolValue copy_files_only = 51
      [(google.api.field_behavior) = OPTIONAL];
}
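// For illustration only (not part of the API surface): a minimal CSV load
// configuration in the REST API's JSON encoding might look like the
// following sketch; the bucket, project, dataset, and table names are
// placeholders.
//
//   "load": {
//     "sourceUris": ["gs://example-bucket/data/*.csv"],
//     "sourceFormat": "CSV",
//     "skipLeadingRows": 1,
//     "autodetect": true,
//     "destinationTable": {
//       "projectId": "example-project",
//       "datasetId": "example_dataset",
//       "tableId": "example_table"
//     },
//     "writeDisposition": "WRITE_APPEND"
//   }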
// JobConfigurationTableCopy configures a job that copies data from one
// table to another.
// For more information on copying tables, see [Copy a
// table](https://cloud.google.com/bigquery/docs/managing-tables#copy-table).
message JobConfigurationTableCopy {
  // Indicates different operation types supported in table copy job.
  enum OperationType {
    // Unspecified operation type.
    OPERATION_TYPE_UNSPECIFIED = 0;

    // The source and destination table have the same table type.
    COPY = 1;

    // The source table type is TABLE and
    // the destination table type is SNAPSHOT.
    SNAPSHOT = 2;

    // The source table type is SNAPSHOT and
    // the destination table type is TABLE.
    RESTORE = 3;

    // The source and destination table have the same table type,
    // but only bill for unique data.
    CLONE = 4;
  }

  // [Pick one] Source table to copy.
  TableReference source_table = 1;

  // [Pick one] Source tables to copy.
  repeated TableReference source_tables = 2;

  // [Required] The destination table.
  TableReference destination_table = 3;

  // Optional. Specifies whether the job is allowed to create new tables.
  // The following values are supported:
  //
  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
  // table.
  // * CREATE_NEVER: The table must already exist. If it does not,
  // a 'notFound' error is returned in the job result.
  //
  // The default value is CREATE_IF_NEEDED.
  // Creation, truncation and append actions occur as one atomic update
  // upon job completion.
  string create_disposition = 4 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Specifies the action that occurs if the destination table
  // already exists. The following values are supported:
  //
  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
  // table data and uses the schema and table constraints from the source
  // table.
  // * WRITE_APPEND: If the table already exists, BigQuery appends the data
  // to the table.
  // * WRITE_EMPTY: If the table already exists and contains data, a
  // 'duplicate' error is returned in the job result.
  //
  // The default value is WRITE_EMPTY. Each action is atomic and only occurs
  // if BigQuery is able to complete the job successfully. Creation,
  // truncation and append actions occur as one atomic update upon job
  // completion.
  string write_disposition = 5 [(google.api.field_behavior) = OPTIONAL];

  // Custom encryption configuration (e.g., Cloud KMS keys).
  EncryptionConfiguration destination_encryption_configuration = 6;

  // Optional. Supported operation types in table copy job.
  OperationType operation_type = 8 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The time when the destination table expires. Expired tables
  // will be deleted and their storage reclaimed.
  google.protobuf.Timestamp destination_expiration_time = 9
      [(google.api.field_behavior) = OPTIONAL];
}

// JobConfigurationExtract configures a job that exports data from a
// BigQuery table into Google Cloud Storage.
message JobConfigurationExtract {
  // Options related to model extraction.
  message ModelExtractOptions {
    // The 1-based ID of the trial to be exported from a hyperparameter
    // tuning model. If not specified, the trial with id =
    // [Model](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId
    // is exported. This field is ignored for models not trained with
    // hyperparameter tuning.
    google.protobuf.Int64Value trial_id = 1;
  }

  // Required. Source reference for the export.
  oneof source {
    // A reference to the table being exported.
    TableReference source_table = 1;

    // A reference to the model being exported.
    ModelReference source_model = 9;
  }

  // [Pick one] A list of fully-qualified Google Cloud Storage URIs where
  // the extracted table should be written.
  repeated string destination_uris = 3;

  // Optional. Whether to print out a header row in the results.
  // Default is true. Not applicable when extracting models.
  google.protobuf.BoolValue print_header = 4
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. When extracting data in CSV format, this defines the
  // delimiter to use between fields in the exported data.
  // Default is ','. Not applicable when extracting models.
  string field_delimiter = 5 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The exported file format. Possible values include CSV,
  // NEWLINE_DELIMITED_JSON, PARQUET, or AVRO for tables and
  // ML_TF_SAVED_MODEL or ML_XGBOOST_BOOSTER for models. The default value
  // for tables is CSV. Tables with nested or repeated fields cannot be
  // exported as CSV. The default value for models is ML_TF_SAVED_MODEL.
  string destination_format = 6 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The compression type to use for exported files. Possible
  // values include DEFLATE, GZIP, NONE, SNAPPY, and ZSTD. The default value
  // is NONE. Not all compression formats are supported for all file
  // formats. DEFLATE is only supported for Avro. ZSTD is only supported for
  // Parquet. Not applicable when extracting models.
  string compression = 7 [(google.api.field_behavior) = OPTIONAL];

  // Whether to use logical types when extracting to AVRO format. Not
  // applicable when extracting models.
  google.protobuf.BoolValue use_avro_logical_types = 13;

  // Optional. Model extract options only applicable when extracting models.
  ModelExtractOptions model_extract_options = 14
      [(google.api.field_behavior) = OPTIONAL];
}
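// For illustration only (not part of the API surface): a table export in
// the REST API's JSON encoding might look like the following sketch; the
// URIs and table reference are placeholders.
//
//   "extract": {
//     "sourceTable": {
//       "projectId": "example-project",
//       "datasetId": "example_dataset",
//       "tableId": "example_table"
//     },
//     "destinationUris": ["gs://example-bucket/export/part-*.json"],
//     "destinationFormat": "NEWLINE_DELIMITED_JSON"
//   }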
// JobConfiguration describes the configuration of a BigQuery job. Exactly
// one of the job type configurations (query, load, copy, or extract)
// should be set.
message JobConfiguration {
  // Output only. The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or
  // UNKNOWN.
  string job_type = 8;

  // [Pick one] Configures a query job.
  JobConfigurationQuery query = 1;

  // [Pick one] Configures a load job.
  JobConfigurationLoad load = 2;

  // [Pick one] Copies a table.
  JobConfigurationTableCopy copy = 3;

  // [Pick one] Configures an extract job.
  JobConfigurationExtract extract = 4;

  // Optional. If set, don't actually run this job. A valid query will
  // return a mostly empty response with some processing statistics, while
  // an invalid query will return the same error it would if it wasn't a dry
  // run. Behavior of non-query jobs is undefined.
  google.protobuf.BoolValue dry_run = 5
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Job timeout in milliseconds. If this time limit is exceeded,
  // BigQuery will attempt to stop a longer job, but may not always succeed
  // in canceling it before the job completes. For example, a job that takes
  // more than 60 seconds to complete has a better chance of being stopped
  // than a job that takes 10 seconds to complete.
  google.protobuf.Int64Value job_timeout_ms = 6
      [(google.api.field_behavior) = OPTIONAL];

  // The labels associated with this job. You can use these to organize and
  // group your jobs.
  // Label keys and values can be no longer than 63 characters, can only
  // contain lowercase letters, numeric characters, underscores and dashes.
  // International characters are allowed. Label values are optional. Label
  // keys must start with a letter and each label in the list must have a
  // different key.
  map<string, string> labels = 7;
}
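// For illustration only (not part of the API surface): a complete
// JobConfiguration in the REST API's JSON encoding sets exactly one of the
// job type fields plus any top-level options; the label and timeout values
// below are placeholders.
//
//   "configuration": {
//     "query": { "query": "SELECT 1", "useLegacySql": false },
//     "dryRun": true,
//     "labels": { "team": "example-team" },
//     "jobTimeoutMs": "600000"
//   }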