/// Represents a hardware accelerator type.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum AcceleratorType {
    /// Unspecified accelerator type, which means no accelerator.
    Unspecified = 0,
    /// Nvidia Tesla K80 GPU.
    NvidiaTeslaK80 = 1,
    /// Nvidia Tesla P100 GPU.
    NvidiaTeslaP100 = 2,
    /// Nvidia Tesla V100 GPU.
    NvidiaTeslaV100 = 3,
    /// Nvidia Tesla P4 GPU.
    NvidiaTeslaP4 = 4,
    /// Nvidia Tesla T4 GPU.
    NvidiaTeslaT4 = 5,
    /// Nvidia Tesla A100 GPU.
    NvidiaTeslaA100 = 8,
}
/// References an API call. It contains more information about long-running
/// operations and Jobs that are triggered by the API call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserActionReference {
    /// The method name of the API RPC call. For example,
    /// "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset"
    #[prost(string, tag = "3")]
    pub method: ::prost::alloc::string::String,
    #[prost(oneof = "user_action_reference::Reference", tags = "1, 2")]
    pub reference: ::core::option::Option<user_action_reference::Reference>,
}
/// Nested message and enum types in `UserActionReference`.
pub mod user_action_reference {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Reference {
        /// For API calls that return a long running operation.
        /// Resource name of the long running operation.
        /// Format:
        /// 'projects/{project}/locations/{location}/operations/{operation}'
        #[prost(string, tag = "1")]
        Operation(::prost::alloc::string::String),
        /// For API calls that start a LabelingJob.
        /// Resource name of the LabelingJob.
        /// Format:
        /// 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}'
        #[prost(string, tag = "2")]
        DataLabelingJob(::prost::alloc::string::String),
    }
}
/// Used to assign a specific AnnotationSpec to a particular area of a DataItem or
/// the whole part of the DataItem.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Annotation {
    /// Output only. Resource name of the Annotation.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. Google Cloud Storage URI that points to a YAML file describing
    /// \[payload][google.cloud.aiplatform.v1beta1.Annotation.payload\]. The
    /// schema is defined as an [OpenAPI 3.0.2 Schema Object]().
    /// The schema files that can be used here are found in
    /// gs://google-cloud-aiplatform/schema/dataset/annotation/; note that the
    /// chosen schema must be consistent with the parent Dataset's
    /// \[metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri\].
    #[prost(string, tag = "2")]
    pub payload_schema_uri: ::prost::alloc::string::String,
    /// Required. The schema of the payload can be found in
    /// \[payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri\].
    #[prost(message, optional, tag = "3")]
    pub payload: ::core::option::Option<::prost_types::Value>,
    /// Output only. Timestamp when this Annotation was created.
    #[prost(message, optional, tag = "4")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Timestamp when this Annotation was last updated.
    #[prost(message, optional, tag = "7")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
    /// "overwrite" update happens.
    #[prost(string, tag = "8")]
    pub etag: ::prost::alloc::string::String,
    /// Output only. The source of the Annotation.
    #[prost(message, optional, tag = "5")]
    pub annotation_source: ::core::option::Option<UserActionReference>,
    /// Optional. The labels with user-defined metadata to organize your Annotations.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    /// No more than 64 user labels can be associated with one Annotation (system
    /// labels are excluded).
    ///
    /// See the labels documentation for more information and examples of labels.
    /// System reserved label keys are prefixed with "aiplatform.googleapis.com/"
    /// and are immutable. The following system labels exist for each Annotation:
    ///
    /// * "aiplatform.googleapis.com/annotation_set_name":
    ///   optional, name of the UI's annotation set this Annotation belongs to.
    ///   If not set, the Annotation is not visible in the UI.
    ///
    /// * "aiplatform.googleapis.com/payload_schema":
    ///   output only, its value is the \[payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri\]
    ///   title.
    #[prost(map = "string, string", tag = "6")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
}
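// Illustrative sketch (not part of the generated bindings): constructing an
// Annotation by hand. The schema file name, payload value, and label below are
// hypothetical; in practice these messages are usually produced by or sent to
// the Vertex AI API through a generated client.
#[allow(dead_code)]
fn example_annotation() -> Annotation {
    let mut labels = ::std::collections::HashMap::new();
    labels.insert("env".to_string(), "test".to_string()); // user-defined label
    Annotation {
        // Hypothetical schema file under the documented gs:// prefix.
        payload_schema_uri:
            "gs://google-cloud-aiplatform/schema/dataset/annotation/classification_1.0.0.yaml"
                .to_string(),
        // The payload is a schema-conforming protobuf Value.
        payload: Some(::prost_types::Value {
            kind: Some(::prost_types::value::Kind::StringValue("cat".to_string())),
        }),
        labels,
        ..Default::default()
    }
}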
/// Identifies a concept with which DataItems may be annotated.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotationSpec {
    /// Output only. Resource name of the AnnotationSpec.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. The user-defined name of the AnnotationSpec.
    /// The name can be up to 128 characters long and can consist of any UTF-8
    /// characters.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// Output only. Timestamp when this AnnotationSpec was created.
    #[prost(message, optional, tag = "3")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Timestamp when this AnnotationSpec was last updated.
    #[prost(message, optional, tag = "4")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Optional. Used to perform consistent read-modify-write updates. If not set, a blind
    /// "overwrite" update happens.
    #[prost(string, tag = "5")]
    pub etag: ::prost::alloc::string::String,
}
/// Value is the value of the field.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Value {
    #[prost(oneof = "value::Value", tags = "1, 2, 3")]
    pub value: ::core::option::Option<value::Value>,
}
/// Nested message and enum types in `Value`.
pub mod value {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Value {
        /// An integer value.
        #[prost(int64, tag = "1")]
        IntValue(i64),
        /// A double value.
        #[prost(double, tag = "2")]
        DoubleValue(f64),
        /// A string value.
        #[prost(string, tag = "3")]
        StringValue(::prost::alloc::string::String),
    }
}
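// Illustrative sketch (not part of the generated bindings): prost represents
// the `value` oneof as an Option of the nested `value::Value` enum, so reading
// it is a match over the variants.
#[allow(dead_code)]
fn example_read_value(v: &Value) -> String {
    match &v.value {
        Some(value::Value::IntValue(i)) => format!("int: {}", i),
        Some(value::Value::DoubleValue(d)) => format!("double: {}", d),
        Some(value::Value::StringValue(s)) => format!("string: {}", s),
        None => "unset".to_string(),
    }
}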
/// Instance of a general artifact.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Artifact {
    /// Output only. The resource name of the Artifact.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// User provided display name of the Artifact.
    /// May be up to 128 Unicode characters.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// The uniform resource identifier of the artifact file.
    /// May be empty if there is no actual artifact file.
    #[prost(string, tag = "6")]
    pub uri: ::prost::alloc::string::String,
    /// An eTag used to perform consistent read-modify-write updates. If not set, a
    /// blind "overwrite" update happens.
    #[prost(string, tag = "9")]
    pub etag: ::prost::alloc::string::String,
    /// The labels with user-defined metadata to organize your Artifacts.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    /// No more than 64 user labels can be associated with one Artifact (system
    /// labels are excluded).
    #[prost(map = "string, string", tag = "10")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Output only. Timestamp when this Artifact was created.
    #[prost(message, optional, tag = "11")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Timestamp when this Artifact was last updated.
    #[prost(message, optional, tag = "12")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// The state of this Artifact. This is a property of the Artifact, and does
    /// not imply or capture any ongoing process. This property is managed by
    /// clients (such as Vertex Pipelines), and the system does not prescribe
    /// or check the validity of state transitions.
    #[prost(enumeration = "artifact::State", tag = "13")]
    pub state: i32,
    /// The title of the schema describing the metadata.
    ///
    /// The schema title and version are expected to be registered in earlier
    /// Create Schema calls, and both are used together as unique identifiers
    /// to identify schemas within the local metadata store.
    #[prost(string, tag = "14")]
    pub schema_title: ::prost::alloc::string::String,
    /// The version of the schema in schema_name to use.
    ///
    /// The schema title and version are expected to be registered in earlier
    /// Create Schema calls, and both are used together as unique identifiers
    /// to identify schemas within the local metadata store.
    #[prost(string, tag = "15")]
    pub schema_version: ::prost::alloc::string::String,
    /// Properties of the Artifact.
    /// The size of this field should not exceed 200KB.
    #[prost(message, optional, tag = "16")]
    pub metadata: ::core::option::Option<::prost_types::Struct>,
    /// Description of the Artifact.
    #[prost(string, tag = "17")]
    pub description: ::prost::alloc::string::String,
}
/// Nested message and enum types in `Artifact`.
pub mod artifact {
    /// Describes the state of the Artifact.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum State {
        /// Unspecified state for the Artifact.
        Unspecified = 0,
        /// A state used by systems like Vertex Pipelines to indicate that the
        /// underlying data item represented by this Artifact is being created.
        Pending = 1,
        /// A state indicating that the Artifact should exist, unless something
        /// external to the system deletes it.
        Live = 2,
    }
}
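// Illustrative sketch (not part of the generated bindings): enum-typed fields
// are stored as i32 on the wire, so `Artifact.state` is set by casting and read
// back through the `from_i32` conversion that prost's `Enumeration` derive
// generates in this crate's prost version.
#[allow(dead_code)]
fn example_artifact_state() {
    let a = Artifact {
        display_name: "my-model-file".to_string(), // hypothetical name
        uri: "gs://my-bucket/model.bin".to_string(), // hypothetical URI
        state: artifact::State::Live as i32,
        ..Default::default()
    };
    // Unknown wire values decode to None rather than panicking.
    assert_eq!(artifact::State::from_i32(a.state), Some(artifact::State::Live));
}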
/// Success and error statistics of processing multiple entities
/// (for example, DataItems or structured data rows) in batch.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CompletionStats {
    /// Output only. The number of entities that had been processed successfully.
    #[prost(int64, tag = "1")]
    pub successful_count: i64,
    /// Output only. The number of entities for which any error was encountered.
    #[prost(int64, tag = "2")]
    pub failed_count: i64,
    /// Output only. In cases when enough errors are encountered, a job, pipeline, or operation
    /// may fail as a whole. This is the number of entities for which the
    /// processing had not been finished (in either a successful or failed state).
    /// Set to -1 if the number is unknown (for example, the operation failed
    /// before the total entity number could be collected).
    #[prost(int64, tag = "3")]
    pub incomplete_count: i64,
}
/// Represents a customer-managed encryption key spec that can be applied to
/// a top-level resource.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EncryptionSpec {
    /// Required. The Cloud KMS resource identifier of the customer-managed encryption key
    /// used to protect a resource. Has the form:
    /// `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`.
    /// The key needs to be in the same region as where the compute resource is
    /// created.
    #[prost(string, tag = "1")]
    pub kms_key_name: ::prost::alloc::string::String,
}
/// Metadata describing the Model's input and output for explanation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplanationMetadata {
    /// Required. Map from feature names to feature input metadata. Keys are the names of the
    /// features. Values are the specifications of the features.
    ///
    /// An empty InputMetadata is valid. It describes a text feature which has the
    /// name specified as the key in \[ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs\]. The baseline
    /// of the empty feature is chosen by Vertex AI.
    ///
    /// For Vertex AI-provided Tensorflow images, the key can be any friendly
    /// name of the feature. Once specified,
    /// \[featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\] are keyed by
    /// this key (if not grouped with another feature).
    ///
    /// For custom images, the key must match with the key in
    /// \[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances\].
    #[prost(map = "string, message", tag = "1")]
    pub inputs: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        explanation_metadata::InputMetadata,
    >,
    /// Required. Map from output names to output metadata.
    ///
    /// For Vertex AI-provided Tensorflow images, keys can be any user defined
    /// string that consists of any UTF-8 characters.
    ///
    /// For custom images, keys are the names of the output fields in the prediction
    /// to be explained.
    ///
    /// Currently only one key is allowed.
    #[prost(map = "string, message", tag = "2")]
    pub outputs: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        explanation_metadata::OutputMetadata,
    >,
    /// Points to a YAML file stored on Google Cloud Storage describing the format
    /// of the [feature attributions]\[google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\].
    /// The schema is defined as an OpenAPI 3.0.2 [Schema Object]().
    /// AutoML tabular Models always have this field populated by Vertex AI.
    /// Note: The URI given on output may be different, including the URI scheme,
    /// than the one given on input. The output URI will point to a location where
    /// the user only has read access.
    #[prost(string, tag = "3")]
    pub feature_attributions_schema_uri: ::prost::alloc::string::String,
}
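// Illustrative sketch (not part of the generated bindings): the docs above note
// that an empty InputMetadata is valid for a text feature, with the feature name
// given as the map key. The feature and output names here are made up.
#[allow(dead_code)]
fn example_explanation_metadata() -> ExplanationMetadata {
    let mut inputs = ::std::collections::HashMap::new();
    // Empty input metadata: Vertex AI chooses the baseline for this feature.
    inputs.insert(
        "review_text".to_string(), // hypothetical feature name
        explanation_metadata::InputMetadata::default(),
    );
    let mut outputs = ::std::collections::HashMap::new();
    outputs.insert(
        "sentiment".to_string(), // hypothetical output name
        explanation_metadata::OutputMetadata::default(),
    );
    ExplanationMetadata {
        inputs,
        outputs,
        ..Default::default()
    }
}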
/// Nested message and enum types in `ExplanationMetadata`.
pub mod explanation_metadata {
    /// Metadata of the input of a feature.
    ///
    /// Fields other than \[InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines\] are applicable only
    /// for Models that are using Vertex AI-provided images for Tensorflow.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct InputMetadata {
        /// Baseline inputs for this feature.
        ///
        /// If no baseline is specified, Vertex AI chooses the baseline for this
        /// feature. If multiple baselines are specified, Vertex AI returns the
        /// average attributions across them in \[Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\].
        ///
        /// For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
        /// of each baseline must match the shape of the input tensor. If a scalar is
        /// provided, we broadcast to the same shape as the input tensor.
        ///
        /// For custom images, the element of the baselines must be in the same
        /// format as the feature's input in the
        /// \[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances\][]. The schema of any single instance
        /// may be specified via Endpoint's DeployedModels'
        /// \[Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model\]
        /// \[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata\]
        /// \[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\].
        #[prost(message, repeated, tag = "1")]
        pub input_baselines: ::prost::alloc::vec::Vec<::prost_types::Value>,
        /// Name of the input tensor for this feature. Required and is only
        /// applicable to Vertex AI-provided images for Tensorflow.
        #[prost(string, tag = "2")]
        pub input_tensor_name: ::prost::alloc::string::String,
        /// Defines how the feature is encoded into the input tensor. Defaults to
        /// IDENTITY.
        #[prost(enumeration = "input_metadata::Encoding", tag = "3")]
        pub encoding: i32,
        /// Modality of the feature. Valid values are: numeric, image. Defaults to
        /// numeric.
        #[prost(string, tag = "4")]
        pub modality: ::prost::alloc::string::String,
        /// The domain details of the input feature value. Like min/max, original
        /// mean or standard deviation if normalized.
        #[prost(message, optional, tag = "5")]
        pub feature_value_domain: ::core::option::Option<input_metadata::FeatureValueDomain>,
        /// Specifies the index of the values of the input tensor.
        /// Required when the input tensor is a sparse representation. Refer to
        /// the Tensorflow documentation on sparse tensors for more details.
        #[prost(string, tag = "6")]
        pub indices_tensor_name: ::prost::alloc::string::String,
        /// Specifies the shape of the values of the input if the input is a sparse
        /// representation. Refer to the Tensorflow documentation on sparse tensors
        /// for more details.
        #[prost(string, tag = "7")]
        pub dense_shape_tensor_name: ::prost::alloc::string::String,
        /// A list of feature names for each index in the input tensor.
        /// Required when the input \[InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding\] is BAG_OF_FEATURES,
        /// BAG_OF_FEATURES_SPARSE, or INDICATOR.
        #[prost(string, repeated, tag = "8")]
        pub index_feature_mapping: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
        /// Encoded tensor is a transformation of the input tensor. Must be provided
        /// if choosing
        /// [Integrated Gradients attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution\]
        /// or [XRAI attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution\] and the
        /// input tensor is not differentiable.
        ///
        /// An encoded tensor is generated if the input tensor is encoded by a lookup
        /// table.
        #[prost(string, tag = "9")]
        pub encoded_tensor_name: ::prost::alloc::string::String,
        /// A list of baselines for the encoded tensor.
        ///
        /// The shape of each baseline should match the shape of the encoded tensor.
        /// If a scalar is provided, Vertex AI broadcasts to the same shape as the
        /// encoded tensor.
        #[prost(message, repeated, tag = "10")]
        pub encoded_baselines: ::prost::alloc::vec::Vec<::prost_types::Value>,
        /// Visualization configurations for image explanation.
        #[prost(message, optional, tag = "11")]
        pub visualization: ::core::option::Option<input_metadata::Visualization>,
        /// Name of the group that the input belongs to. Features with the same group
        /// name will be treated as one feature when computing attributions. Features
        /// grouped together can have different shapes in value. If provided, there
        /// will be one single attribution generated in
        /// \[Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\], keyed by the group name.
        #[prost(string, tag = "12")]
        pub group_name: ::prost::alloc::string::String,
    }
    /// Nested message and enum types in `InputMetadata`.
    pub mod input_metadata {
        /// Domain details of the input feature value. Provides numeric information
        /// about the feature, such as its range (min, max). If the feature has been
        /// pre-processed, for example with z-scoring, then it provides information
        /// about how to recover the original feature. For example, if the input
        /// feature is an image and it has been pre-processed to obtain 0-mean and
        /// stddev = 1 values, then original_mean and original_stddev refer to the
        /// mean and stddev of the original feature (e.g. image tensor) from which
        /// the input feature (with mean = 0 and stddev = 1) was obtained.
        #[derive(Clone, PartialEq, ::prost::Message)]
        pub struct FeatureValueDomain {
            /// The minimum permissible value for this feature.
            #[prost(float, tag = "1")]
            pub min_value: f32,
            /// The maximum permissible value for this feature.
            #[prost(float, tag = "2")]
            pub max_value: f32,
            /// If this input feature has been normalized to a mean value of 0,
            /// the original_mean specifies the mean value of the domain prior to
            /// normalization.
            #[prost(float, tag = "3")]
            pub original_mean: f32,
            /// If this input feature has been normalized to a standard deviation of
            /// 1.0, the original_stddev specifies the standard deviation of the domain
            /// prior to normalization.
            #[prost(float, tag = "4")]
            pub original_stddev: f32,
        }
        /// Visualization configurations for image explanation.
        #[derive(Clone, PartialEq, ::prost::Message)]
        pub struct Visualization {
            /// Type of the image visualization. Only applicable to
            /// [Integrated Gradients attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution\].
            /// OUTLINES shows regions of attribution, while PIXELS shows per-pixel
            /// attribution. Defaults to OUTLINES.
            #[prost(enumeration = "visualization::Type", tag = "1")]
            pub r#type: i32,
            /// Whether to only highlight pixels with positive contributions, negative
            /// ones, or both. Defaults to POSITIVE.
            #[prost(enumeration = "visualization::Polarity", tag = "2")]
            pub polarity: i32,
            /// The color scheme used for the highlighted areas.
            ///
            /// Defaults to PINK_GREEN for
            /// [Integrated Gradients attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution\],
            /// which shows positive attributions in green and negative in pink.
            ///
            /// Defaults to VIRIDIS for
            /// [XRAI attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution\], which
            /// highlights the most influential regions in yellow and the least
            /// influential in blue.
            #[prost(enumeration = "visualization::ColorMap", tag = "3")]
            pub color_map: i32,
            /// Excludes attributions above the specified percentile from the
            /// highlighted areas. Using clip_percent_upperbound and
            /// clip_percent_lowerbound together can be useful for filtering out noise
            /// and making it easier to see areas of strong attribution. Defaults to
            /// 99.9.
            #[prost(float, tag = "4")]
            pub clip_percent_upperbound: f32,
            /// Excludes attributions below the specified percentile from the
            /// highlighted areas. Defaults to 62.
            #[prost(float, tag = "5")]
            pub clip_percent_lowerbound: f32,
            /// How the original image is displayed in the visualization.
            /// Adjusting the overlay can help increase visual clarity if the original
            /// image makes it difficult to view the visualization. Defaults to NONE.
            #[prost(enumeration = "visualization::OverlayType", tag = "6")]
            pub overlay_type: i32,
        }
        /// Nested message and enum types in `Visualization`.
        pub mod visualization {
            /// Type of the image visualization. Only applicable to
            /// [Integrated Gradients attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution\].
            #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
            #[repr(i32)]
            pub enum Type {
                /// Should not be used.
                Unspecified = 0,
                /// Shows which pixels contributed to the image prediction.
                Pixels = 1,
                /// Shows which regions contributed to the image prediction by outlining
                /// them.
                Outlines = 2,
            }
            /// Whether to only highlight pixels with positive contributions, negative
            /// ones, or both. Defaults to POSITIVE.
            #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
            #[repr(i32)]
            pub enum Polarity {
                /// Default value. This is the same as POSITIVE.
                Unspecified = 0,
                /// Highlights the pixels/outlines that were most influential to the
                /// model's prediction.
                Positive = 1,
                /// Setting polarity to negative highlights areas that do not lead to
                /// the model's current prediction.
                Negative = 2,
                /// Shows both positive and negative attributions.
                Both = 3,
            }
            /// The color scheme used for highlighting areas.
            #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
            #[repr(i32)]
            pub enum ColorMap {
                /// Should not be used.
                Unspecified = 0,
                /// Positive: green. Negative: pink.
                PinkGreen = 1,
                /// Viridis color map: a perceptually uniform color mapping that is
                /// easier to see for those with color blindness and progresses from yellow
                /// to green to blue. Positive: yellow. Negative: blue.
                Viridis = 2,
                /// Positive: red. Negative: red.
                Red = 3,
                /// Positive: green. Negative: green.
                Green = 4,
                /// Positive: green. Negative: red.
                RedGreen = 6,
                /// PiYG palette.
                PinkWhiteGreen = 5,
            }
            /// How the original image is displayed in the visualization.
            #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
            #[repr(i32)]
            pub enum OverlayType {
                /// Default value. This is the same as NONE.
                Unspecified = 0,
                /// No overlay.
                None = 1,
                /// The attributions are shown on top of the original image.
                Original = 2,
                /// The attributions are shown on top of a grayscale version of the
                /// original image.
                Grayscale = 3,
                /// The attributions are used as a mask to reveal predictive parts of
                /// the image and hide the un-predictive parts.
                MaskBlack = 4,
            }
        }
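        // Illustrative sketch (not part of the generated bindings): enum-typed
        // visualization options are stored as i32, so configuration casts the
        // generated enums. The clip percentiles below mirror the documented
        // defaults; the other choices are arbitrary.
        #[allow(dead_code)]
        fn example_visualization() -> Visualization {
            Visualization {
                r#type: visualization::Type::Pixels as i32,
                polarity: visualization::Polarity::Positive as i32,
                color_map: visualization::ColorMap::PinkGreen as i32,
                clip_percent_upperbound: 99.9,
                clip_percent_lowerbound: 62.0,
                overlay_type: visualization::OverlayType::Grayscale as i32,
            }
        }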
        /// Defines how a feature is encoded. Defaults to IDENTITY.
        #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
        #[repr(i32)]
        pub enum Encoding {
            /// Default value. This is the same as IDENTITY.
            Unspecified = 0,
            /// The tensor represents one feature.
            Identity = 1,
            /// The tensor represents a bag of features where each index maps to
            /// a feature. \[InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping\] must be provided for
            /// this encoding. For example:
            /// ```
            /// input = [27, 6.0, 150]
            /// index_feature_mapping = ["age", "height", "weight"]
            /// ```
            BagOfFeatures = 2,
            /// The tensor represents a bag of features where each index maps to a
            /// feature. A zero value in the tensor indicates that the feature is
            /// non-existent. \[InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping\] must be provided
            /// for this encoding. For example:
            /// ```
            /// input = [2, 0, 5, 0, 1]
            /// index_feature_mapping = ["a", "b", "c", "d", "e"]
            /// ```
            BagOfFeaturesSparse = 3,
            /// The tensor is a list of binary values representing whether a feature exists
            /// or not (1 indicates existence). \[InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping\]
            /// must be provided for this encoding. For example:
            /// ```
            /// input = [1, 0, 1, 0, 1]
            /// index_feature_mapping = ["a", "b", "c", "d", "e"]
            /// ```
            Indicator = 4,
            /// The tensor is encoded into a 1-dimensional array represented by an
            /// encoded tensor. \[InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name\] must be provided
            /// for this encoding. For example:
            /// ```
            /// input = ["This", "is", "a", "test", "."]
            /// encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
            /// ```
            CombinedEmbedding = 5,
            /// Select this encoding when the input tensor is encoded into a
            /// 2-dimensional array represented by an encoded tensor.
            /// \[InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name\] must be provided for this
            /// encoding. The first dimension of the encoded tensor's shape is the same
            /// as the input tensor's shape. For example:
            /// ```
            /// input = ["This", "is", "a", "test", "."]
            /// encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
            ///            [0.2, 0.1, 0.4, 0.3, 0.5],
            ///            [0.5, 0.1, 0.3, 0.5, 0.4],
            ///            [0.5, 0.3, 0.1, 0.2, 0.4],
            ///            [0.4, 0.3, 0.2, 0.5, 0.1]]
            /// ```
            ConcatEmbedding = 6,
        }
    }
    /// Metadata of the prediction output to be explained.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct OutputMetadata {
        /// Name of the output tensor. Required and is only applicable to Vertex
        /// AI provided images for Tensorflow.
        #[prost(string, tag = "3")]
        pub output_tensor_name: ::prost::alloc::string::String,
        /// Defines how to map \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] to
        /// \[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name\].
        ///
        /// If neither of the fields is specified,
        /// \[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name\] will not be populated.
        #[prost(oneof = "output_metadata::DisplayNameMapping", tags = "1, 2")]
        pub display_name_mapping: ::core::option::Option<output_metadata::DisplayNameMapping>,
    }
    /// Nested message and enum types in `OutputMetadata`.
    pub mod output_metadata {
        /// Defines how to map \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] to
        /// \[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name\].
        ///
        /// If neither of the fields is specified,
        /// \[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name\] will not be populated.
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum DisplayNameMapping {
            /// Static mapping between the index and display name.
            ///
            /// Use this if the outputs are a deterministic n-dimensional array, e.g. a
            /// list of scores of all the classes in a pre-defined order for a
            /// multi-classification Model. It's not feasible if the outputs are
            /// non-deterministic, e.g. the Model produces top-k classes or sorts the
            /// outputs by their values.
            ///
            /// The shape of the value must be an n-dimensional array of strings. The
            /// number of dimensions must match that of the outputs to be explained.
            /// The \[Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name\] is populated by indexing into the
            /// mapping with \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\].
            #[prost(message, tag = "1")]
            IndexDisplayNameMapping(::prost_types::Value),
            /// Specify a field name in the prediction to look for the display name.
            ///
            /// Use this if the prediction contains the display names for the outputs.
            ///
            /// The display names in the prediction must have the same shape as the
            /// outputs, so that they can be located by \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] for
            /// a specific output.
            #[prost(string, tag = "2")]
            DisplayNameMappingKey(::prost::alloc::string::String),
        }
    }
}
/// The storage details for Avro input content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AvroSource {
    /// Required. Google Cloud Storage location.
    #[prost(message, optional, tag = "1")]
    pub gcs_source: ::core::option::Option<GcsSource>,
}
/// The storage details for CSV input content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CsvSource {
    /// Required. Google Cloud Storage location.
    #[prost(message, optional, tag = "1")]
    pub gcs_source: ::core::option::Option<GcsSource>,
}
/// The Google Cloud Storage location for the input content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GcsSource {
    /// Required. Google Cloud Storage URI(-s) to the input file(s). May contain
    /// wildcards. See the Cloud Storage documentation on wildcard names for
    /// more information.
    #[prost(string, repeated, tag = "1")]
    pub uris: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// The Google Cloud Storage location where the output is to be written to.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GcsDestination {
    /// Required. Google Cloud Storage URI to the output directory. If the URI doesn't end with
    /// '/', a '/' will be automatically appended. The directory is created if it
    /// doesn't exist.
    #[prost(string, tag = "1")]
    pub output_uri_prefix: ::prost::alloc::string::String,
}
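// Illustrative sketch (not part of the generated bindings): Cloud Storage
// sources take a list of URIs (wildcards allowed), while destinations take a
// single output prefix. The bucket and paths are made up.
#[allow(dead_code)]
fn example_gcs_io() -> (GcsSource, GcsDestination) {
    let source = GcsSource {
        uris: vec!["gs://my-bucket/inputs/part-*.jsonl".to_string()], // hypothetical
    };
    let destination = GcsDestination {
        // A trailing '/' is appended automatically if missing.
        output_uri_prefix: "gs://my-bucket/outputs/".to_string(),
    };
    (source, destination)
}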
/// The BigQuery location for the input content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BigQuerySource {
    /// Required. BigQuery URI to a table, up to 2000 characters long.
    /// Accepted forms:
    ///
    /// * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.
    #[prost(string, tag = "1")]
    pub input_uri: ::prost::alloc::string::String,
}
/// The BigQuery location for the output content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BigQueryDestination {
    /// Required. BigQuery URI to a project or table, up to 2000 characters long.
    ///
    /// When only the project is specified, the Dataset and Table are created.
    /// When the full table reference is specified, the Dataset must exist and
    /// the table must not exist.
    ///
    /// Accepted forms:
    ///
    /// * BigQuery path. For example:
    ///   `bq://projectId` or `bq://projectId.bqDatasetId` or
    ///   `bq://projectId.bqDatasetId.bqTableId`.
    #[prost(string, tag = "1")]
    pub output_uri: ::prost::alloc::string::String,
}
/// The storage details for CSV output content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CsvDestination {
    /// Required. Google Cloud Storage location.
    #[prost(message, optional, tag = "1")]
    pub gcs_destination: ::core::option::Option<GcsDestination>,
}
/// The storage details for TFRecord output content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TfRecordDestination {
    /// Required. Google Cloud Storage location.
    #[prost(message, optional, tag = "1")]
    pub gcs_destination: ::core::option::Option<GcsDestination>,
}
/// The Container Registry location for the container image.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ContainerRegistryDestination {
    /// Required. Container Registry URI of a container image.
    /// Only Google Container Registry and Artifact Registry are supported now.
    /// Accepted forms:
    ///
    /// * Google Container Registry path. For example:
    ///   `gcr.io/projectId/imageName:tag`.
    ///
    /// * Artifact Registry path. For example:
    ///   `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`.
    ///
    /// If a tag is not specified, "latest" will be used as the default tag.
    #[prost(string, tag = "1")]
    pub output_uri: ::prost::alloc::string::String,
}
/// Explanation of a prediction (provided in \[PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions\])
/// produced by the Model on a given \[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances\].
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Explanation {
    /// Output only. Feature attributions grouped by predicted outputs.
    ///
    /// For Models that predict only one output, such as regression Models that
    /// predict only one score, there is only one attribution that explains the
    /// predicted output. For Models that predict multiple outputs, such as
    /// multiclass Models that predict multiple classes, each element explains one
    /// specific item. \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] can be used to identify which
    /// output this attribution is explaining.
    ///
    /// If users set \[ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k\], the attributions are sorted
    /// by \[instance_output_value][Attributions.instance_output_value\] in
    /// descending order. If \[ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices\] is specified,
    /// the attributions are stored by \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] in the same
    /// order as they appear in the output_indices.
    #[prost(message, repeated, tag = "1")]
    pub attributions: ::prost::alloc::vec::Vec<Attribution>,
}
/// Aggregated explanation metrics for a Model over a set of instances.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelExplanation {
    /// Output only. Aggregated attributions explaining the Model's prediction outputs over the
    /// set of instances. The attributions are grouped by outputs.
    ///
    /// For Models that predict only one output, such as regression Models that
    /// predict only one score, there is only one attribution that explains the
    /// predicted output.
    /// For Models that predict multiple outputs, such as
    /// multiclass Models that predict multiple classes, each element explains one
    /// specific item. \[Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] can be used to identify which
    /// output this attribution is explaining.
    ///
    /// The \[baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value\],
    /// \[instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value\] and
    /// \[featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\] fields are
    /// averaged over the test data.
    ///
    /// NOTE: Currently AutoML tabular classification Models produce only one
    /// attribution, which averages attributions over all the classes it predicts.
    /// \[Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error\] is not populated.
    #[prost(message, repeated, tag = "1")]
    pub mean_attributions: ::prost::alloc::vec::Vec<Attribution>,
}
/// Attribution that explains a particular prediction output.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Attribution {
    /// Output only. Model predicted output if the input instance is constructed from the
    /// baselines of all the features defined in \[ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs\].
    /// The field name of the output is determined by the key in
    /// \[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs\].
    ///
    /// If the Model's predicted output has multiple dimensions (rank > 1), this is
    /// the value in the output located by \[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\].
    ///
    /// If there are multiple baselines, their output values are averaged.
    #[prost(double, tag = "1")]
    pub baseline_output_value: f64,
    /// Output only. Model predicted output on the corresponding [explanation
    /// instance]\[ExplainRequest.instances\]. The field name of the output is
    /// determined by the key in \[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs\].
    ///
    /// If the Model predicted output has multiple dimensions, this is the value in
    /// the output located by \[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\].
    #[prost(double, tag = "2")]
    pub instance_output_value: f64,
    /// Output only. Attributions of each explained feature. Features are extracted from
    /// the [prediction instances]\[google.cloud.aiplatform.v1beta1.ExplainRequest.instances\] according to
    /// [explanation metadata for inputs]\[google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs\].
    ///
    /// The value is a struct, whose keys are the names of the features. The values
    /// are how much the feature in the \[instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances\]
    /// contributed to the predicted result.
    ///
    /// The format of the value is determined by the feature's input format:
    ///
    /// * If the feature is a scalar value, the attribution value is a
    ///   [floating number]\[google.protobuf.Value.number_value\].
    ///
    /// * If the feature is an array of scalar values, the attribution value is
    ///   an \[array][google.protobuf.Value.list_value\].
    ///
    /// * If the feature is a struct, the attribution value is a
    ///   \[struct][google.protobuf.Value.struct_value\]. The keys in the
    ///   attribution value struct are the same as the keys in the feature
    ///   struct.
    ///   The formats of the values in the attribution struct are
    ///   determined by the formats of the values in the feature struct.
    ///
    /// The \[ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri\] field,
    /// pointed to by the \[ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec\] field of the
    /// \[Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models\] object, points to the schema file that
    /// describes the features and their attribution values (if it is populated).
    #[prost(message, optional, tag = "3")]
    pub feature_attributions: ::core::option::Option<::prost_types::Value>,
    /// Output only. The index that locates the explained prediction output.
    ///
    /// If the prediction output is a scalar value, output_index is not populated.
    /// If the prediction output has multiple dimensions, the length of the
    /// output_index list is the same as the number of dimensions of the output.
    /// The i-th element in output_index is the element index of the i-th dimension
    /// of the output vector. Indices start from 0.
    #[prost(int32, repeated, packed = "false", tag = "4")]
    pub output_index: ::prost::alloc::vec::Vec<i32>,
    /// Output only. The display name of the output identified by \[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\]. For example,
    /// the predicted class name by a multi-classification Model.
    ///
    /// This field is only populated if the Model predicts display names as a
    /// separate field along with the explained output. The predicted display name
    /// must have the same shape as the explained output, and can be located using
    /// output_index.
    #[prost(string, tag = "5")]
    pub output_display_name: ::prost::alloc::string::String,
    /// Output only. Error of \[feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions\] caused by the approximation used in the
    /// explanation method. A lower value means more precise attributions.
    ///
    /// * For Sampled Shapley
    ///   \[attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution\],
    ///   increasing \[path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count\] might reduce
    ///   the error.
    /// * For Integrated Gradients
    ///   \[attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution\],
    ///   increasing \[step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count\] might
    ///   reduce the error.
    /// * For [XRAI attribution]\[google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution\],
    ///   increasing
    ///   \[step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count\] might reduce the error.
    ///
    /// See [this introduction](/vertex-ai/docs/explainable-ai/overview)
    /// for more information.
    #[prost(double, tag = "6")]
    pub approximation_error: f64,
    /// Output only. Name of the explained output. Specified as the key in
    /// \[ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs\].
    #[prost(string, tag = "7")]
    pub output_name: ::prost::alloc::string::String,
}
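// Illustrative sketch (not part of the generated bindings): walking the
// per-feature attributions of a single Attribution. `feature_attributions` is
// a protobuf Value whose struct keys are the feature names.
#[allow(dead_code)]
fn example_print_attribution(attr: &Attribution) {
    println!(
        "output {:?}: baseline={} instance={} (approx. error {})",
        attr.output_index,
        attr.baseline_output_value,
        attr.instance_output_value,
        attr.approximation_error,
    );
    if let Some(::prost_types::Value {
        kind: Some(::prost_types::value::Kind::StructValue(s)),
    }) = &attr.feature_attributions
    {
        for (feature, value) in &s.fields {
            println!("  {}: {:?}", feature, value.kind);
        }
    }
}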
/// Specification of Model explanation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplanationSpec {
    /// Required. Parameters that configure explaining of the Model's predictions.
    #[prost(message, optional, tag = "1")]
    pub parameters: ::core::option::Option<ExplanationParameters>,
    /// Required. Metadata describing the Model's input and output for explanation.
    #[prost(message, optional, tag = "2")]
    pub metadata: ::core::option::Option<ExplanationMetadata>,
}
/// Parameters to configure explaining of the Model's predictions.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplanationParameters {
    /// If populated, returns attributions for the top K indices of outputs
    /// (defaults to 1). Only applies to Models that predict more than one output
    /// (e.g., multi-class Models). When set to -1, returns explanations for all
    /// outputs.
    #[prost(int32, tag = "4")]
    pub top_k: i32,
    /// If populated, only returns attributions that have
    /// \[output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index\] contained in output_indices. It
    /// must be an ndarray of integers, with the same shape as the output it's
    /// explaining.
    ///
    /// If not populated, returns attributions for \[top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k\] indices of outputs.
    /// If neither top_k nor output_indices is populated, returns the argmax
    /// index of the outputs.
    ///
    /// Only applicable to Models that predict multiple outputs (e.g., multi-class
    /// Models that predict multiple classes).
    #[prost(message, optional, tag = "5")]
    pub output_indices: ::core::option::Option<::prost_types::ListValue>,
    #[prost(oneof = "explanation_parameters::Method", tags = "1, 2, 3, 7")]
    pub method: ::core::option::Option<explanation_parameters::Method>,
}
/// Nested message and enum types in `ExplanationParameters`.
pub mod explanation_parameters {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Method {
        /// An attribution method that approximates Shapley values for features that
        /// contribute to the label being predicted. A sampling strategy is used to
        /// approximate the value rather than considering all subsets of features.
        /// Refer to this paper for model details: <https://arxiv.org/abs/1306.4265>.
        #[prost(message, tag = "1")]
        SampledShapleyAttribution(super::SampledShapleyAttribution),
        /// An attribution method that computes Aumann-Shapley values taking
        /// advantage of the model's fully differentiable structure. Refer to this
        /// paper for more details: <https://arxiv.org/abs/1703.01365>.
        #[prost(message, tag = "2")]
        IntegratedGradientsAttribution(super::IntegratedGradientsAttribution),
        /// An attribution method that redistributes Integrated Gradients
        /// attribution to segmented regions, taking advantage of the model's fully
        /// differentiable structure. Refer to this paper for
        /// more details: <https://arxiv.org/abs/1906.02825>.
        ///
        /// XRAI currently performs better on natural images, like a picture of a
        /// house or an animal. If the images are taken in artificial environments,
        /// like a lab or manufacturing line, or from diagnostic equipment, like
        /// x-rays or quality-control cameras, use Integrated Gradients instead.
        #[prost(message, tag = "3")]
        XraiAttribution(super::XraiAttribution),
        /// Similarity explainability that returns the nearest neighbors from the
        /// provided dataset.
        #[prost(message, tag = "7")]
        Similarity(super::Similarity),
    }
}
/// An attribution method that approximates Shapley values for features that
/// contribute to the label being predicted. A sampling strategy is used to
/// approximate the value rather than considering all subsets of features.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SampledShapleyAttribution {
    /// Required. The number of feature permutations to consider when approximating the
    /// Shapley values.
    ///
    /// Valid range of its value is [1, 50], inclusive.
    #[prost(int32, tag = "1")]
    pub path_count: i32,
}
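// Illustrative sketch (not part of the generated bindings): the explanation
// method is a oneof, so exactly one variant is set. Here, Sampled Shapley with
// a mid-range path count.
#[allow(dead_code)]
fn example_sampled_shapley_parameters() -> ExplanationParameters {
    ExplanationParameters {
        // Return attributions for the top 3 predicted outputs.
        top_k: 3,
        method: Some(explanation_parameters::Method::SampledShapleyAttribution(
            SampledShapleyAttribution { path_count: 25 },
        )),
        ..Default::default()
    }
}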
/// An attribution method that computes the Aumann-Shapley value taking advantage
/// of the model's fully differentiable structure. Refer to this paper for
/// more details: <https://arxiv.org/abs/1703.01365>.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct IntegratedGradientsAttribution {
    /// Required. The number of steps for approximating the path integral.
    /// A good value to start with is 50; gradually increase it until the
    /// sum-to-diff property is within the desired error range.
    ///
    /// Valid range of its value is [1, 100], inclusive.
    #[prost(int32, tag = "1")]
    pub step_count: i32,
    /// Config for SmoothGrad approximation of gradients.
    ///
    /// When enabled, the gradients are approximated by averaging the gradients
    /// from noisy samples in the vicinity of the inputs. Adding
    /// noise can help improve the computed gradients. Refer to this paper for more
    /// details: <https://arxiv.org/abs/1706.03825>.
    #[prost(message, optional, tag = "2")]
    pub smooth_grad_config: ::core::option::Option<SmoothGradConfig>,
}
/// An explanation method that redistributes Integrated Gradients
/// attributions to segmented regions, taking advantage of the model's fully
/// differentiable structure. Refer to this paper for more details:
/// <https://arxiv.org/abs/1906.02825>.
///
/// Supported only by image Models.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct XraiAttribution {
    /// Required. The number of steps for approximating the path integral.
    /// A good value to start with is 50; gradually increase it until the
    /// sum-to-diff property is met within the desired error range.
    ///
    /// Valid range of its value is [1, 100], inclusive.
    #[prost(int32, tag = "1")]
    pub step_count: i32,
    /// Config for SmoothGrad approximation of gradients.
    ///
    /// When enabled, the gradients are approximated by averaging the gradients
    /// from noisy samples in the vicinity of the inputs. Adding
    /// noise can help improve the computed gradients. Refer to this paper for more
    /// details: <https://arxiv.org/abs/1706.03825>.
    #[prost(message, optional, tag = "2")]
    pub smooth_grad_config: ::core::option::Option<SmoothGradConfig>,
}
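// Illustrative sketch (not part of the generated bindings): Integrated
// Gradients with SmoothGrad enabled, using a single noise sigma for all
// features (which assumes the features share a distribution).
#[allow(dead_code)]
fn example_integrated_gradients() -> IntegratedGradientsAttribution {
    IntegratedGradientsAttribution {
        step_count: 50,
        smooth_grad_config: Some(SmoothGradConfig {
            noisy_sample_count: 3,
            gradient_noise_sigma: Some(
                smooth_grad_config::GradientNoiseSigma::NoiseSigma(0.1),
            ),
        }),
    }
}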
/// Config for SmoothGrad approximation of gradients.
///
/// When enabled, the gradients are approximated by averaging the gradients from
/// noisy samples in the vicinity of the inputs. Adding noise can help improve
/// the computed gradients. Refer to this paper for more details:
/// <https://arxiv.org/abs/1706.03825>.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SmoothGradConfig {
    /// The number of gradient samples to use for
    /// approximation. The higher this number, the more accurate the gradient
    /// is, but the runtime complexity increases by this factor as well.
    /// Valid range of its value is [1, 50]. Defaults to 3.
    #[prost(int32, tag = "3")]
    pub noisy_sample_count: i32,
    /// Represents the standard deviation of the Gaussian kernel
    /// that will be used to add noise to the interpolated inputs
    /// prior to computing gradients.
    #[prost(oneof = "smooth_grad_config::GradientNoiseSigma", tags = "1, 2")]
    pub gradient_noise_sigma: ::core::option::Option<smooth_grad_config::GradientNoiseSigma>,
}
/// Nested message and enum types in `SmoothGradConfig`.
pub mod smooth_grad_config {
    /// Represents the standard deviation of the Gaussian kernel
    /// that will be used to add noise to the interpolated inputs
    /// prior to computing gradients.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum GradientNoiseSigma {
        /// This is a single float value and will be used to add noise to all the
        /// features. Use this field when all features are normalized to have the
        /// same distribution: scaled to the range [0, 1] or [-1, 1], or z-scored, where
        /// features are normalized to have 0-mean and 1-variance. Learn more about
        /// \[normalization\]().
        ///
        /// For best results the recommended value is about 10% - 20% of the standard
        /// deviation of the input feature. Refer to section 3.2 of the SmoothGrad
        /// paper: <https://arxiv.org/pdf/1706.03825.pdf>. Defaults to 0.1.
        ///
        /// If the distribution is different per feature, set
        /// \[feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma\] instead
        /// for each feature.
        #[prost(float, tag = "1")]
        NoiseSigma(f32),
        /// This is similar to \[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma\], but
        /// provides additional flexibility. A separate noise sigma can be provided
        /// for each feature, which is useful if their distributions are different.
        /// No noise is added to features that are not set. If this field is unset,
        /// \[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma\] will be used for all
        /// features.
        #[prost(message, tag = "2")]
        FeatureNoiseSigma(super::FeatureNoiseSigma),
    }
}
/// Noise sigma by features. Noise sigma represents the standard deviation of the
/// Gaussian kernel that will be used to add noise to interpolated inputs prior
/// to computing gradients.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FeatureNoiseSigma {
    /// Noise sigma per feature. No noise is added to features that are not set.
    #[prost(message, repeated, tag = "1")]
    pub noise_sigma: ::prost::alloc::vec::Vec<feature_noise_sigma::NoiseSigmaForFeature>,
}
/// Nested message and enum types in `FeatureNoiseSigma`.
pub mod feature_noise_sigma {
    /// Noise sigma for a single feature.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct NoiseSigmaForFeature {
        /// The name of the input feature for which noise sigma is provided. The
        /// features are defined in
        /// [explanation metadata inputs]\[google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs\].
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        /// This represents the standard deviation of the Gaussian kernel that will
        /// be used to add noise to the feature prior to computing gradients. Similar
        /// to \[noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma\] but represents the
        /// noise added to the current feature. Defaults to 0.1.
        #[prost(float, tag = "2")]
        pub sigma: f32,
    }
}
/// Similarity explainability that returns the nearest neighbors from the
/// provided dataset.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Similarity {
    /// The Cloud Storage location for the input instances.
    #[prost(message, optional, tag = "1")]
    pub gcs_source: ::core::option::Option<GcsSource>,
    /// The configuration for the generated index; the semantics are the same as
    /// \[metadata][google.cloud.aiplatform.v1beta1.Index.metadata\] and should match NearestNeighborSearchConfig.
    #[prost(message, optional, tag = "2")]
    pub nearest_neighbor_search_config: ::core::option::Option<::prost_types::Value>,
}
/// The \[ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec\] entries that can be overridden at
/// [online explanation]\[google.cloud.aiplatform.v1beta1.PredictionService.Explain\] time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplanationSpecOverride {
    /// The parameters to be overridden. Note that the
    /// \[method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method\] cannot be changed. If not specified,
    /// no parameter is overridden.
    #[prost(message, optional, tag = "1")]
    pub parameters: ::core::option::Option<ExplanationParameters>,
    /// The metadata to be overridden. If not specified, no metadata is overridden.
    #[prost(message, optional, tag = "2")]
    pub metadata: ::core::option::Option<ExplanationMetadataOverride>,
}
/// The \[ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata\] entries that can be overridden at
/// [online explanation]\[google.cloud.aiplatform.v1beta1.PredictionService.Explain\] time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExplanationMetadataOverride {
    /// Required. Overrides the [input metadata]\[google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs\] of the features.
    /// The key is the name of the feature to be overridden. The keys specified
    /// here must exist in the input metadata to be overridden. If a feature is
    /// not specified here, the corresponding feature's input metadata is not
    /// overridden.
    #[prost(map = "string, message", tag = "1")]
    pub inputs: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        explanation_metadata_override::InputMetadataOverride,
    >,
}
/// Nested message and enum types in `ExplanationMetadataOverride`.
pub mod explanation_metadata_override {
    /// The [input metadata]\[google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata\] entries to be
    /// overridden.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct InputMetadataOverride {
        /// Baseline inputs for this feature.
        ///
        /// This overrides the `input_baseline` field of the
        /// \[ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata\]
        /// object of the corresponding feature's input metadata. If it's not
        /// specified, the original baselines are not overridden.
        #[prost(message, repeated, tag = "1")]
        pub input_baselines: ::prost::alloc::vec::Vec<::prost_types::Value>,
    }
}
/// Describes the state of a job.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum JobState {
    /// The job state is unspecified.
    Unspecified = 0,
    /// The job has just been created or resumed, and processing has not yet begun.
    Queued = 1,
    /// The service is preparing to run the job.
    Pending = 2,
    /// The job is in progress.
    Running = 3,
    /// The job completed successfully.
    Succeeded = 4,
    /// The job failed.
    Failed = 5,
    /// The job is being cancelled. From this state the job may only go to
    /// either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, or `JOB_STATE_CANCELLED`.
    Cancelling = 6,
    /// The job has been cancelled.
    Cancelled = 7,
    /// The job has been stopped, and can be resumed.
    Paused = 8,
    /// The job has expired.
    Expired = 9,
}
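// Illustrative sketch (not part of the generated bindings): a small helper for
// polling loops that treats success, failure, cancellation, and expiry as
// terminal job states.
#[allow(dead_code)]
fn job_state_is_terminal(state: JobState) -> bool {
    matches!(
        state,
        JobState::Succeeded | JobState::Failed | JobState::Cancelled | JobState::Expired
    )
}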
/// Specification of a single machine.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MachineSpec {
    /// Immutable. The type of the machine.
    ///
    /// See the [list of machine types supported for
    /// prediction]().
    ///
    /// See the [list of machine types supported for custom
    /// training]().
    ///
    /// For \[DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel\] this field is optional, and the default
    /// value is `n1-standard-2`. For \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\] or as part of
    /// \[WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec\] this field is required.
    #[prost(string, tag = "1")]
    pub machine_type: ::prost::alloc::string::String,
    /// Immutable. The type of accelerator(s) that may be attached to the machine as per
    /// \[accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count\].
    #[prost(enumeration = "AcceleratorType", tag = "2")]
    pub accelerator_type: i32,
    /// The number of accelerators to attach to the machine.
    #[prost(int32, tag = "3")]
    pub accelerator_count: i32,
}
/// A description of resources that are dedicated to a DeployedModel, and
/// that need a higher degree of manual configuration.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DedicatedResources {
    /// Required. Immutable. The specification of a single machine used by the prediction.
    #[prost(message, optional, tag = "1")]
    pub machine_spec: ::core::option::Option<MachineSpec>,
    /// Required. Immutable. The minimum number of machine replicas this DeployedModel will always be
    /// deployed on. This value must be greater than or equal to 1.
    ///
    /// If traffic against the DeployedModel increases, it may dynamically be
    /// deployed onto more replicas, and as traffic decreases, some of these extra
    /// replicas may be freed.
    #[prost(int32, tag = "2")]
    pub min_replica_count: i32,
    /// Immutable. The maximum number of replicas this DeployedModel may be deployed on when
    /// the traffic against it increases. If the requested value is too large,
    /// the deployment will error, but if deployment succeeds then the ability
    /// to scale the model to that many replicas is guaranteed (barring service
    /// outages). If traffic against the DeployedModel increases beyond what its
    /// replicas at maximum may handle, a portion of the traffic will be dropped.
    /// If this value is not provided, \[min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count\] will be used as the
    /// default value.
    #[prost(int32, tag = "3")]
    pub max_replica_count: i32,
    /// Immutable. The metric specifications that override a resource
    /// utilization metric (CPU utilization, accelerator's duty cycle, and so on)
    /// target value (defaults to 60 if not set). At most one entry is allowed per
    /// metric.
    ///
    /// If \[machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count\] is
    /// above 0, the autoscaling will be based on both the CPU utilization and the
    /// accelerator's duty cycle metrics: it scales up when either metric exceeds
    /// its target value, and scales down when both metrics are under their target
    /// values. The default target value is 60 for both metrics.
    ///
    /// If \[machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count\] is
    /// 0, the autoscaling will be based on the CPU utilization metric only, with a
    /// default target value of 60 if not explicitly set.
    ///
    /// For example, in the case of Online Prediction, if you want to override the
    /// target CPU utilization to 80, you should set
    /// \[autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name\]
    /// to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
    /// \[autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target\] to `80`.
    #[prost(message, repeated, tag = "4")]
    pub autoscaling_metric_specs: ::prost::alloc::vec::Vec<AutoscalingMetricSpec>,
}
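// Illustrative sketch (not part of the generated bindings): dedicated
// prediction resources on GPU machines with a custom CPU-utilization
// autoscaling target, following the machine type and metric name formats
// documented above.
#[allow(dead_code)]
fn example_dedicated_resources() -> DedicatedResources {
    DedicatedResources {
        machine_spec: Some(MachineSpec {
            machine_type: "n1-standard-4".to_string(),
            accelerator_type: AcceleratorType::NvidiaTeslaT4 as i32,
            accelerator_count: 1,
        }),
        min_replica_count: 1,
        max_replica_count: 4,
        autoscaling_metric_specs: vec![AutoscalingMetricSpec {
            metric_name: "aiplatform.googleapis.com/prediction/online/cpu/utilization"
                .to_string(),
            target: 80,
        }],
    }
}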
    /// If the requested value is too large, the deployment will error.
    #[prost(int32, tag = "1")]
    pub min_replica_count: i32,
    /// Immutable. The maximum number of replicas this DeployedModel may be deployed on when
    /// the traffic against it increases. If the requested value is too large,
    /// the deployment will error, but if deployment succeeds then the ability
    /// to scale the model to that many replicas is guaranteed (barring service
    /// outages). If traffic against the DeployedModel increases beyond what its
    /// replicas at maximum may handle, a portion of the traffic will be dropped.
    /// If this value is not provided, no upper bound for scaling under heavy
    /// traffic is assumed, though Vertex AI may be unable to scale beyond a
    /// certain replica number.
    #[prost(int32, tag = "2")]
    pub max_replica_count: i32,
}
/// A description of resources that are used for performing batch operations, are
/// dedicated to a Model, and need manual configuration.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchDedicatedResources {
    /// Required. Immutable. The specification of a single machine.
    #[prost(message, optional, tag = "1")]
    pub machine_spec: ::core::option::Option<MachineSpec>,
    /// Immutable. The number of machine replicas used at the start of the batch operation.
    /// If not set, Vertex AI decides the starting number, not greater than
    /// \[max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count\]
    #[prost(int32, tag = "2")]
    pub starting_replica_count: i32,
    /// Immutable. The maximum number of machine replicas the batch operation may be scaled
    /// to. The default value is 10.
    #[prost(int32, tag = "3")]
    pub max_replica_count: i32,
}
/// Statistics information about resource consumption.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourcesConsumed {
    /// Output only. The number of replica hours used. Note that many replicas may run in
    /// parallel, and additionally any given work may be queued for some time.
    /// Therefore this value is not strictly related to wall time.
    #[prost(double, tag = "1")]
    pub replica_hours: f64,
}
/// Represents the spec of disk options.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DiskSpec {
    /// Type of the boot disk (default is "pd-ssd").
    /// Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
    /// "pd-standard" (Persistent Disk Hard Disk Drive).
    #[prost(string, tag = "1")]
    pub boot_disk_type: ::prost::alloc::string::String,
    /// Size in GB of the boot disk (default is 100 GB).
    #[prost(int32, tag = "2")]
    pub boot_disk_size_gb: i32,
}
/// The metric specification that defines the target resource utilization
/// (CPU utilization, accelerator's duty cycle, and so on) for calculating the
/// desired replica count.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AutoscalingMetricSpec {
    /// Required. The resource metric name.
    /// Supported metrics:
    ///
    /// * For Online Prediction:
    ///   * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
    ///   * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
    #[prost(string, tag = "1")]
    pub metric_name: ::prost::alloc::string::String,
    /// The target resource utilization in percentage (1% - 100%) for the given
    /// metric; once the real usage deviates from the target by a certain
    /// percentage, the machine replicas change. The default value is 60
    /// (representing 60%) if not provided.
    #[prost(int32, tag = "2")]
    pub target: i32,
}
/// Manual batch tuning parameters.
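///
/// A construction sketch (the value shown is illustrative; the service
/// default is 4):
///
/// ```ignore
/// let params = ManualBatchTuningParameters { batch_size: 16 };
/// ```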
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ManualBatchTuningParameters {
    /// Immutable. The number of records (e.g. instances) of the operation given in
    /// each batch to a machine replica. The machine type and the size of a single
    /// record should be considered when setting this parameter: a higher value
    /// speeds up the batch operation's execution, but a value that is too high
    /// results in a whole batch not fitting in a machine's memory, and the whole
    /// operation failing.
    /// The default value is 4.
    #[prost(int32, tag = "1")]
    pub batch_size: i32,
}
/// Next ID: 6
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMonitoringObjectiveConfig {
    /// Training dataset for models. This field has to be set only if
    /// TrainingPredictionSkewDetectionConfig is specified.
    #[prost(message, optional, tag = "1")]
    pub training_dataset: ::core::option::Option<model_monitoring_objective_config::TrainingDataset>,
    /// The config for skew between training data and prediction data.
    #[prost(message, optional, tag = "2")]
    pub training_prediction_skew_detection_config: ::core::option::Option<
        model_monitoring_objective_config::TrainingPredictionSkewDetectionConfig,
    >,
    /// The config for drift of prediction data.
    #[prost(message, optional, tag = "3")]
    pub prediction_drift_detection_config: ::core::option::Option<
        model_monitoring_objective_config::PredictionDriftDetectionConfig,
    >,
    /// The config for integration with Explainable AI.
    #[prost(message, optional, tag = "5")]
    pub explanation_config: ::core::option::Option<model_monitoring_objective_config::ExplanationConfig>,
}
/// Nested message and enum types in `ModelMonitoringObjectiveConfig`.
pub mod model_monitoring_objective_config {
    /// Training Dataset information.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TrainingDataset {
        /// Data format of the dataset, only applicable if the input is from
        /// Google Cloud Storage.
        /// The possible formats are:
        ///
        /// "tf-record"
        /// The source file is a TFRecord file.
        ///
        /// "csv"
        /// The source file is a CSV file.
        #[prost(string, tag = "2")]
        pub data_format: ::prost::alloc::string::String,
        /// The target field name the model is to predict.
        /// This field will be excluded when doing Predict and (or) Explain for the
        /// training data.
        #[prost(string, tag = "6")]
        pub target_field: ::prost::alloc::string::String,
        /// Strategy to sample data from the Training Dataset.
        /// If not set, we process the whole dataset.
        #[prost(message, optional, tag = "7")]
        pub logging_sampling_strategy: ::core::option::Option<super::SamplingStrategy>,
        #[prost(oneof = "training_dataset::DataSource", tags = "3, 4, 5")]
        pub data_source: ::core::option::Option<training_dataset::DataSource>,
    }
    /// Nested message and enum types in `TrainingDataset`.
    pub mod training_dataset {
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum DataSource {
            /// The resource name of the Dataset used to train this Model.
            #[prost(string, tag = "3")]
            Dataset(::prost::alloc::string::String),
            /// The Google Cloud Storage uri of the unmanaged Dataset used to train
            /// this Model.
            #[prost(message, tag = "4")]
            GcsSource(super::super::GcsSource),
            /// The BigQuery table of the unmanaged Dataset used to train this
            /// Model.
            #[prost(message, tag = "5")]
            BigquerySource(super::super::BigQuerySource),
        }
    }
    /// The config for Training & Prediction data skew detection. It specifies the
    /// training dataset sources and the skew detection parameters.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TrainingPredictionSkewDetectionConfig {
        /// Key is the feature name and value is the threshold. If a feature needs to
        /// be monitored for skew, a value threshold must be configured for that
        /// feature.
        /// The threshold here is against the feature distribution distance
        /// between the training and prediction feature.
        #[prost(map = "string, message", tag = "1")]
        pub skew_thresholds: ::std::collections::HashMap<::prost::alloc::string::String, super::ThresholdConfig>,
        /// Key is the feature name and value is the threshold. The threshold here is
        /// against the attribution score distance between the training and prediction
        /// feature.
        #[prost(map = "string, message", tag = "2")]
        pub attribution_score_skew_thresholds: ::std::collections::HashMap<::prost::alloc::string::String, super::ThresholdConfig>,
    }
    /// The config for Prediction data drift detection.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct PredictionDriftDetectionConfig {
        /// Key is the feature name and value is the threshold. If a feature needs to
        /// be monitored for drift, a value threshold must be configured for that
        /// feature. The threshold here is against the feature distribution distance
        /// between different time windows.
        #[prost(map = "string, message", tag = "1")]
        pub drift_thresholds: ::std::collections::HashMap<::prost::alloc::string::String, super::ThresholdConfig>,
        /// Key is the feature name and value is the threshold. The threshold here is
        /// against the attribution score distance between different time windows.
        #[prost(map = "string, message", tag = "2")]
        pub attribution_score_drift_thresholds: ::std::collections::HashMap<::prost::alloc::string::String, super::ThresholdConfig>,
    }
    /// The config for integration with Explainable AI. Only applicable if the Model
    /// has explanation_spec populated.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct ExplanationConfig {
        /// Whether to analyze the Explainable AI feature attribution scores.
        /// If set to true, Vertex AI will log the feature attributions from the
        /// explain response and do the skew/drift detection for them.
        #[prost(bool, tag = "1")]
        pub enable_feature_attributes: bool,
        /// Predictions generated by the BatchPredictionJob using the baseline dataset.
        #[prost(message, optional, tag = "2")]
        pub explanation_baseline: ::core::option::Option<explanation_config::ExplanationBaseline>,
    }
    /// Nested message and enum types in `ExplanationConfig`.
    pub mod explanation_config {
        /// Output from \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\] for Model Monitoring baseline dataset,
        /// which can be used to generate baseline attribution scores.
        #[derive(Clone, PartialEq, ::prost::Message)]
        pub struct ExplanationBaseline {
            /// The storage format of the predictions generated by the BatchPrediction job.
            #[prost(enumeration = "explanation_baseline::PredictionFormat", tag = "1")]
            pub prediction_format: i32,
            /// The configuration specifying the BatchExplain job output. This can be
            /// used to generate the baseline of feature attribution scores.
            #[prost(oneof = "explanation_baseline::Destination", tags = "2, 3")]
            pub destination: ::core::option::Option<explanation_baseline::Destination>,
        }
        /// Nested message and enum types in `ExplanationBaseline`.
        pub mod explanation_baseline {
            /// The storage format of the predictions generated by the BatchPrediction job.
            #[derive(
                Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration,
            )]
            #[repr(i32)]
            pub enum PredictionFormat {
                /// Should not be set.
                Unspecified = 0,
                /// Predictions are in JSONL files.
                Jsonl = 2,
                /// Predictions are in BigQuery.
                Bigquery = 3,
            }
            /// The configuration specifying the BatchExplain job output. This can be
            /// used to generate the baseline of feature attribution scores.
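            ///
            /// For example, a sketch pointing the baseline output at Cloud Storage
            /// (the bucket path is illustrative; module paths elided):
            ///
            /// ```ignore
            /// let dest = Destination::Gcs(GcsDestination {
            ///     output_uri_prefix: "gs://my-bucket/baseline/".to_string(),
            /// });
            /// ```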
            #[derive(Clone, PartialEq, ::prost::Oneof)]
            pub enum Destination {
                /// Cloud Storage location for BatchExplain output.
                #[prost(message, tag = "2")]
                Gcs(super::super::super::GcsDestination),
                /// BigQuery location for BatchExplain output.
                #[prost(message, tag = "3")]
                Bigquery(super::super::super::BigQueryDestination),
            }
        }
    }
}
/// Next ID: 2
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMonitoringAlertConfig {
    #[prost(oneof = "model_monitoring_alert_config::Alert", tags = "1")]
    pub alert: ::core::option::Option<model_monitoring_alert_config::Alert>,
}
/// Nested message and enum types in `ModelMonitoringAlertConfig`.
pub mod model_monitoring_alert_config {
    /// The config for email alerts.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct EmailAlertConfig {
        /// The email addresses to send the alert to.
        #[prost(string, repeated, tag = "1")]
        pub user_emails: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    }
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Alert {
        /// Email alert config.
        #[prost(message, tag = "1")]
        EmailAlertConfig(EmailAlertConfig),
    }
}
/// The config for feature monitoring threshold.
/// Next ID: 3
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ThresholdConfig {
    #[prost(oneof = "threshold_config::Threshold", tags = "1")]
    pub threshold: ::core::option::Option<threshold_config::Threshold>,
}
/// Nested message and enum types in `ThresholdConfig`.
pub mod threshold_config {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Threshold {
        /// Specify a threshold value that can trigger the alert.
        /// If this threshold config is for feature distribution distance:
        /// 1. For categorical features, the distribution distance is calculated by
        /// the L-infinity norm.
        /// 2. For numerical features, the distribution distance is calculated by
        /// the Jensen–Shannon divergence.
        /// Each feature must have a non-zero threshold if it needs to be monitored.
        /// Otherwise no alert will be triggered for that feature.
        #[prost(double, tag = "1")]
        Value(f64),
    }
}
/// Sampling strategy for logging; it can apply to both the training and
/// prediction datasets.
/// Next ID: 2
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SamplingStrategy {
    /// Random sample config. Will support more sampling strategies later.
    #[prost(message, optional, tag = "1")]
    pub random_sample_config: ::core::option::Option<sampling_strategy::RandomSampleConfig>,
}
/// Nested message and enum types in `SamplingStrategy`.
pub mod sampling_strategy {
    /// Requests are randomly selected.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct RandomSampleConfig {
        /// Sample rate (0, 1]
        #[prost(double, tag = "1")]
        pub sample_rate: f64,
    }
}
/// A job that uses a \[Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\] to produce predictions
/// on multiple [input instances]\[google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config\]. If
/// predictions for a significant portion of the instances fail, the job may finish
/// without attempting predictions for all remaining instances.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchPredictionJob {
    /// Output only. Resource name of the BatchPredictionJob.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. The user-defined name of this BatchPredictionJob.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// Required. The name of the Model that produces the predictions via this job,
    /// must share the same ancestor Location.
    /// Starting this job has no impact on any existing deployments of the Model
    /// and their resources.
#[prost(string, tag = "3")] pub model: ::prost::alloc::string::String, /// Required. Input configuration of the instances on which predictions are performed. /// The schema of any single instance may be specified via /// the \[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\] /// \[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata\] /// \[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\]. #[prost(message, optional, tag = "4")] pub input_config: ::core::option::Option, /// The parameters that govern the predictions. The schema of the parameters /// may be specified via the \[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\] /// \[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata\] /// \[parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri\]. #[prost(message, optional, tag = "5")] pub model_parameters: ::core::option::Option<::prost_types::Value>, /// Required. The Configuration specifying where output predictions should /// be written. /// The schema of any single prediction may be specified as a concatenation /// of \[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\] /// \[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata\] /// \[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\] /// and /// \[prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri\]. #[prost(message, optional, tag = "6")] pub output_config: ::core::option::Option, /// The config of resources used by the Model during the batch prediction. If /// the Model \[supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types\] /// DEDICATED_RESOURCES this config may be provided (and the job will use these /// resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config /// must be provided. #[prost(message, optional, tag = "7")] pub dedicated_resources: ::core::option::Option, /// Immutable. Parameters configuring the batch behavior. Currently only applicable when /// \[dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources\] are used (in other cases Vertex AI does /// the tuning itself). #[prost(message, optional, tag = "8")] pub manual_batch_tuning_parameters: ::core::option::Option, /// Generate explanation with the batch prediction results. /// /// When set to `true`, the batch prediction output changes based on the /// `predictions_format` field of the /// \[BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config\] object: /// /// * `bigquery`: output includes a column named `explanation`. The value /// is a struct that conforms to the \[Explanation][google.cloud.aiplatform.v1beta1.Explanation\] object. /// * `jsonl`: The JSON objects on each line include an additional entry /// keyed `explanation`. The value of the entry is a JSON object that /// conforms to the \[Explanation][google.cloud.aiplatform.v1beta1.Explanation\] object. /// * `csv`: Generating explanations for CSV format is not supported. /// /// If this field is set to true, either the \[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec\] or /// \[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec\] must be populated. 
#[prost(bool, tag = "23")] pub generate_explanation: bool, /// Explanation configuration for this BatchPredictionJob. Can be /// specified only if \[generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation\] is set to `true`. /// /// This value overrides the value of \[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec\]. All fields of /// \[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec\] are optional in the request. If a field of the /// \[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec\] object is not populated, the corresponding field of /// the \[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec\] object is inherited. #[prost(message, optional, tag = "25")] pub explanation_spec: ::core::option::Option, /// Output only. Information further describing the output of this job. #[prost(message, optional, tag = "9")] pub output_info: ::core::option::Option, /// Output only. The detailed state of the job. #[prost(enumeration = "JobState", tag = "10")] pub state: i32, /// Output only. Only populated when the job's state is JOB_STATE_FAILED or /// JOB_STATE_CANCELLED. #[prost(message, optional, tag = "11")] pub error: ::core::option::Option, /// Output only. Partial failures encountered. /// For example, single files that can't be read. /// This field never exceeds 20 entries. /// Status details fields contain standard GCP error details. #[prost(message, repeated, tag = "12")] pub partial_failures: ::prost::alloc::vec::Vec, /// Output only. Information about resources that had been consumed by this job. /// Provided in real time at best effort basis, as well as a final value /// once the job completes. /// /// Note: This field currently may be not populated for batch predictions that /// use AutoML Models. #[prost(message, optional, tag = "13")] pub resources_consumed: ::core::option::Option, /// Output only. Statistics on completed and failed prediction instances. #[prost(message, optional, tag = "14")] pub completion_stats: ::core::option::Option, /// Output only. Time when the BatchPredictionJob was created. #[prost(message, optional, tag = "15")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Time when the BatchPredictionJob for the first time entered the /// `JOB_STATE_RUNNING` state. #[prost(message, optional, tag = "16")] pub start_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Time when the BatchPredictionJob entered any of the following states: /// `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. #[prost(message, optional, tag = "17")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Time when the BatchPredictionJob was most recently updated. #[prost(message, optional, tag = "18")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// The labels with user-defined metadata to organize BatchPredictionJobs. /// /// Label keys and values can be no longer than 64 characters /// (Unicode codepoints), can only contain lowercase letters, numeric /// characters, underscores and dashes. International characters are allowed. /// /// See for more information and examples of labels. #[prost(map = "string, string", tag = "19")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// Customer-managed encryption key options for a BatchPredictionJob. 
    /// If this is set, then all resources created by the BatchPredictionJob
    /// will be encrypted with the provided encryption key.
    #[prost(message, optional, tag = "24")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
}
/// Nested message and enum types in `BatchPredictionJob`.
pub mod batch_prediction_job {
    /// Configures the input to \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\].
    /// See \[Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats\] for Model's supported input
    /// formats, and how instances should be expressed via any of them.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct InputConfig {
        /// Required. The format in which instances are given, must be one of the
        /// \[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\]
        /// \[supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats\].
        #[prost(string, tag = "1")]
        pub instances_format: ::prost::alloc::string::String,
        /// Required. The source of the input.
        #[prost(oneof = "input_config::Source", tags = "2, 3")]
        pub source: ::core::option::Option<input_config::Source>,
    }
    /// Nested message and enum types in `InputConfig`.
    pub mod input_config {
        /// Required. The source of the input.
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum Source {
            /// The Cloud Storage location for the input instances.
            #[prost(message, tag = "2")]
            GcsSource(super::super::GcsSource),
            /// The BigQuery location of the input table.
            /// The schema of the table should be in the format described by the given
            /// context OpenAPI Schema, if one is provided. The table may contain
            /// additional columns that are not described by the schema, and they will
            /// be ignored.
            #[prost(message, tag = "3")]
            BigquerySource(super::super::BigQuerySource),
        }
    }
    /// Configures the output of \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\].
    /// See \[Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats\] for supported output
    /// formats, and how predictions are expressed via any of them.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct OutputConfig {
        /// Required. The format in which Vertex AI gives the predictions, must be one of the
        /// \[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model\]
        /// \[supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats\].
        #[prost(string, tag = "1")]
        pub predictions_format: ::prost::alloc::string::String,
        /// Required. The destination of the output.
        #[prost(oneof = "output_config::Destination", tags = "2, 3")]
        pub destination: ::core::option::Option<output_config::Destination>,
    }
    /// Nested message and enum types in `OutputConfig`.
    pub mod output_config {
        /// Required. The destination of the output.
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum Destination {
            /// The Cloud Storage location of the directory where the output is
            /// to be written to. In the given directory a new directory is created.
            /// Its name is `prediction-<model-display-name>-<job-create-time>`,
            /// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
            /// Inside of it files `predictions_0001.<extension>`,
            /// `predictions_0002.<extension>`, ..., `predictions_N.<extension>`
            /// are created where `<extension>` depends on chosen
            /// \[predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format\], and N may equal 0001 and depends on the total
            /// number of successfully predicted instances.
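            ///
            /// A sketch of the enclosing `OutputConfig` targeting Cloud Storage
            /// (format and bucket path are illustrative; module paths elided):
            ///
            /// ```ignore
            /// let out = OutputConfig {
            ///     predictions_format: "jsonl".to_string(),
            ///     destination: Some(Destination::GcsDestination(GcsDestination {
            ///         output_uri_prefix: "gs://my-bucket/batch-output/".to_string(),
            ///     })),
            /// };
            /// ```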
            /// If the Model has both \[instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\]
            /// and \[prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri\] schemata
            /// defined then each such file contains predictions as per the
            /// \[predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format\].
            /// If prediction for any instance failed (partially or completely), then
            /// additional `errors_0001.<extension>`, `errors_0002.<extension>`,...,
            /// `errors_N.<extension>` files are created (N depends on the total number
            /// of failed predictions). These files contain the failed instances,
            /// as per their schema, followed by an additional `error` field whose
            /// value is a \[google.rpc.Status][google.rpc.Status\]
            /// containing only `code` and `message` fields.
            #[prost(message, tag = "2")]
            GcsDestination(super::super::GcsDestination),
            /// The BigQuery project or dataset location where the output is to be
            /// written to. If project is provided, a new dataset is created with name
            /// `prediction_<model-display-name>_<job-create-time>`
            /// where `<model-display-name>` is made
            /// BigQuery-dataset-name compatible (for example, most special characters
            /// become underscores), and timestamp is in
            /// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
            /// two tables will be created, `predictions`, and `errors`.
            /// If the Model has both \[instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\]
            /// and \[prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri\] schemata
            /// defined then the tables have columns as follows: The `predictions`
            /// table contains instances for which the prediction succeeded; it
            /// has columns as per a concatenation of the Model's instance and
            /// prediction schemata. The `errors` table contains rows for which the
            /// prediction has failed; it has instance columns, as per the
            /// instance schema, followed by a single "errors" column, whose values
            /// are \[google.rpc.Status][google.rpc.Status\]
            /// represented as a STRUCT, and containing only `code` and `message`.
            #[prost(message, tag = "3")]
            BigqueryDestination(super::super::BigQueryDestination),
        }
    }
    /// Further describes this job's output.
    /// Supplements \[output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config\].
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct OutputInfo {
        /// Output only. The name of the BigQuery table created, in
        /// `predictions_<timestamp>`
        /// format, into which the prediction output is written.
        /// Can be used by UI to generate the BigQuery output path, for example.
        #[prost(string, tag = "4")]
        pub bigquery_output_table: ::prost::alloc::string::String,
        /// The output location into which prediction output is written.
        #[prost(oneof = "output_info::OutputLocation", tags = "1, 2")]
        pub output_location: ::core::option::Option<output_info::OutputLocation>,
    }
    /// Nested message and enum types in `OutputInfo`.
    pub mod output_info {
        /// The output location into which prediction output is written.
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum OutputLocation {
            /// Output only. The full path of the Cloud Storage directory created, into which
            /// the prediction output is written.
            #[prost(string, tag = "1")]
            GcsOutputDirectory(::prost::alloc::string::String),
            /// Output only. The path of the BigQuery dataset created, in
            /// `bq://projectId.bqDatasetId`
            /// format, into which the prediction output is written.
            #[prost(string, tag = "2")]
            BigqueryOutputDataset(::prost::alloc::string::String),
        }
    }
}
/// Instance of a general context.
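///
/// A construction sketch (identifiers are illustrative; relies on the
/// `Default` impl that prost's `Message` derive provides):
///
/// ```ignore
/// let ctx = Context {
///     display_name: "my-context".to_string(),
///     schema_title: "system.Experiment".to_string(), // hypothetical schema title
///     schema_version: "0.0.1".to_string(),
///     ..Default::default()
/// };
/// ```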
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Context {
    /// Output only. The resource name of the Context.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// User provided display name of the Context.
    /// May be up to 128 Unicode characters.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// An eTag used to perform consistent read-modify-write updates. If not set, a
    /// blind "overwrite" update happens.
    #[prost(string, tag = "8")]
    pub etag: ::prost::alloc::string::String,
    /// The labels with user-defined metadata to organize your Contexts.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    /// No more than 64 user labels can be associated with one Context (System
    /// labels are excluded).
    #[prost(map = "string, string", tag = "9")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Output only. Timestamp when this Context was created.
    #[prost(message, optional, tag = "10")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Timestamp when this Context was last updated.
    #[prost(message, optional, tag = "11")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. A list of resource names of Contexts that are parents of this Context.
    /// A Context may have at most 10 parent_contexts.
    #[prost(string, repeated, tag = "12")]
    pub parent_contexts: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// The title of the schema describing the metadata.
    ///
    /// Schema title and version are expected to be registered in earlier Create
    /// Schema calls, and both are used together as unique identifiers to identify
    /// schemas within the local metadata store.
    #[prost(string, tag = "13")]
    pub schema_title: ::prost::alloc::string::String,
    /// The version of the schema in schema_title to use.
    ///
    /// Schema title and version are expected to be registered in earlier Create
    /// Schema calls, and both are used together as unique identifiers to identify
    /// schemas within the local metadata store.
    #[prost(string, tag = "14")]
    pub schema_version: ::prost::alloc::string::String,
    /// Properties of the Context.
    /// The size of this field should not exceed 200KB.
    #[prost(message, optional, tag = "15")]
    pub metadata: ::core::option::Option<::prost_types::Struct>,
    /// Description of the Context.
    #[prost(string, tag = "16")]
    pub description: ::prost::alloc::string::String,
}
/// Represents an environment variable present in a Container or Python Module.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EnvVar {
    /// Required. Name of the environment variable. Must be a valid C identifier.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. Variables that reference a $(VAR_NAME) are expanded
    /// using the previously defined environment variables in the container and
    /// any service environment variables. If a variable cannot be resolved,
    /// the reference in the input string will be unchanged. The $(VAR_NAME)
    /// syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
    /// references will never be expanded, regardless of whether the variable
    /// exists or not.
    #[prost(string, tag = "2")]
    pub value: ::prost::alloc::string::String,
}
/// Represents a job that runs custom workloads such as a Docker container or a
/// Python package.
/// A CustomJob can have multiple worker
/// pools and each worker pool can have its own machine and input spec. A CustomJob
/// will be cleaned up once the job enters a terminal state (failed or succeeded).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CustomJob {
    /// Output only. Resource name of a CustomJob.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. The display name of the CustomJob.
    /// The name can be up to 128 characters long and can consist of any UTF-8
    /// characters.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// Required. Job spec.
    #[prost(message, optional, tag = "4")]
    pub job_spec: ::core::option::Option<CustomJobSpec>,
    /// Output only. The detailed state of the job.
    #[prost(enumeration = "JobState", tag = "5")]
    pub state: i32,
    /// Output only. Time when the CustomJob was created.
    #[prost(message, optional, tag = "6")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the CustomJob first entered the
    /// `JOB_STATE_RUNNING` state.
    #[prost(message, optional, tag = "7")]
    pub start_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the CustomJob entered any of the following states:
    /// `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
    #[prost(message, optional, tag = "8")]
    pub end_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the CustomJob was most recently updated.
    #[prost(message, optional, tag = "9")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Only populated when the job's state is `JOB_STATE_FAILED` or
    /// `JOB_STATE_CANCELLED`.
    #[prost(message, optional, tag = "10")]
    pub error: ::core::option::Option<super::super::super::rpc::Status>,
    /// The labels with user-defined metadata to organize CustomJobs.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    ///
    /// See for more information and examples of labels.
    #[prost(map = "string, string", tag = "11")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Customer-managed encryption key options for a CustomJob. If this is set,
    /// then all resources created by the CustomJob will be encrypted with the
    /// provided encryption key.
    #[prost(message, optional, tag = "12")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
    /// Output only. URIs for accessing [interactive
    /// shells]()
    /// (one URI for each training node). Only available if
    /// \[job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access\] is `true`.
    ///
    /// The keys are names of each node in the training job; for example,
    /// `workerpool0-0` for the primary node, `workerpool1-0` for the first node in
    /// the second worker pool, and `workerpool1-1` for the second node in the
    /// second worker pool.
    ///
    /// The values are the URIs for each node's interactive shell.
    #[prost(map = "string, string", tag = "16")]
    pub web_access_uris: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
}
/// Represents the spec of a CustomJob.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CustomJobSpec {
    /// Required. The spec of the worker pools including machine type and Docker image.
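    ///
    /// A single-pool sketch (machine type, image URI, and counts are
    /// illustrative; relies on the `Default` impl that prost derives):
    ///
    /// ```ignore
    /// let pools = vec![WorkerPoolSpec {
    ///     machine_spec: Some(MachineSpec {
    ///         machine_type: "n1-standard-4".to_string(),
    ///         ..Default::default()
    ///     }),
    ///     replica_count: 1,
    ///     task: Some(worker_pool_spec::Task::ContainerSpec(ContainerSpec {
    ///         image_uri: "gcr.io/my-project/my-trainer:latest".to_string(),
    ///         ..Default::default()
    ///     })),
    ///     ..Default::default()
    /// }];
    /// ```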
    /// All worker pools except the first one are optional and can be skipped by
    /// providing an empty value.
    #[prost(message, repeated, tag = "1")]
    pub worker_pool_specs: ::prost::alloc::vec::Vec<WorkerPoolSpec>,
    /// Scheduling options for a CustomJob.
    #[prost(message, optional, tag = "3")]
    pub scheduling: ::core::option::Option<Scheduling>,
    /// Specifies the service account to be used as the run-as account for the
    /// workload. Users submitting jobs must have act-as permission on this
    /// run-as account.
    /// If unspecified, the [Vertex AI Custom Code Service
    /// Agent]()
    /// for the CustomJob's project is used.
    #[prost(string, tag = "4")]
    pub service_account: ::prost::alloc::string::String,
    /// The full name of the Compute Engine
    /// \[network\](/compute/docs/networks-and-firewalls#networks) to which the Job
    /// should be peered. For example, `projects/12345/global/networks/myVPC`.
    /// \[Format\](/compute/docs/reference/rest/v1/networks/insert)
    /// is of the form `projects/{project}/global/networks/{network}`,
    /// where {project} is a project number, as in `12345`, and {network} is a
    /// network name.
    ///
    /// Private services access must already be configured for the network. If left
    /// unspecified, the job is not peered with any network.
    #[prost(string, tag = "5")]
    pub network: ::prost::alloc::string::String,
    /// The Cloud Storage location to store the output of this CustomJob or
    /// HyperparameterTuningJob. For HyperparameterTuningJob,
    /// the baseOutputDirectory of
    /// each child CustomJob backing a Trial is set to a subdirectory of name
    /// \[id][google.cloud.aiplatform.v1beta1.Trial.id\] under its parent HyperparameterTuningJob's
    /// baseOutputDirectory.
    ///
    /// The following Vertex AI environment variables will be passed to
    /// containers or Python modules when this field is set:
    ///
    /// For CustomJob:
    ///
    /// * AIP_MODEL_DIR = `<base_output_directory>/model/`
    /// * AIP_CHECKPOINT_DIR = `<base_output_directory>/checkpoints/`
    /// * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/logs/`
    ///
    /// For CustomJob backing a Trial of HyperparameterTuningJob:
    ///
    /// * AIP_MODEL_DIR = `<base_output_directory>/<trial_id>/model/`
    /// * AIP_CHECKPOINT_DIR = `<base_output_directory>/<trial_id>/checkpoints/`
    /// * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/<trial_id>/logs/`
    #[prost(message, optional, tag = "6")]
    pub base_output_directory: ::core::option::Option<GcsDestination>,
    /// Optional. The name of a Vertex AI \[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard\] resource to which this CustomJob
    /// will upload Tensorboard logs.
    /// Format:
    /// `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
    #[prost(string, tag = "7")]
    pub tensorboard: ::prost::alloc::string::String,
    /// Optional. Whether you want Vertex AI to enable [interactive shell
    /// access]()
    /// to training containers.
    ///
    /// If set to `true`, you can access interactive shells at the URIs given
    /// by \[CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris\] or \[Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris\] (within
    /// \[HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials\]).
    #[prost(bool, tag = "10")]
    pub enable_web_access: bool,
}
/// Represents the spec of a worker pool in a job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkerPoolSpec {
    /// Optional. Immutable. The specification of a single machine.
    #[prost(message, optional, tag = "1")]
    pub machine_spec: ::core::option::Option<MachineSpec>,
    /// Optional. The number of worker replicas to use for this worker pool.
    #[prost(int64, tag = "2")]
    pub replica_count: i64,
    /// Disk spec.
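    ///
    /// For example (these values match the documented defaults of `DiskSpec`):
    ///
    /// ```ignore
    /// let disk = DiskSpec {
    ///     boot_disk_type: "pd-ssd".to_string(),
    ///     boot_disk_size_gb: 100,
    /// };
    /// ```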
#[prost(message, optional, tag = "5")] pub disk_spec: ::core::option::Option, /// The custom task to be executed in this worker pool. #[prost(oneof = "worker_pool_spec::Task", tags = "6, 7")] pub task: ::core::option::Option, } /// Nested message and enum types in `WorkerPoolSpec`. pub mod worker_pool_spec { /// The custom task to be executed in this worker pool. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Task { /// The custom container task. #[prost(message, tag = "6")] ContainerSpec(super::ContainerSpec), /// The Python packaged task. #[prost(message, tag = "7")] PythonPackageSpec(super::PythonPackageSpec), } } /// The spec of a Container. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ContainerSpec { /// Required. The URI of a container image in the Container Registry that is to be run on /// each worker replica. #[prost(string, tag = "1")] pub image_uri: ::prost::alloc::string::String, /// The command to be invoked when the container is started. /// It overrides the entrypoint instruction in Dockerfile when provided. #[prost(string, repeated, tag = "2")] pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// The arguments to be passed when starting the container. #[prost(string, repeated, tag = "3")] pub args: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// The spec of a Python packaged code. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PythonPackageSpec { /// Required. The URI of a container image in Artifact Registry that will run the /// provided Python package. Vertex AI provides a wide range of executor /// images with pre-installed packages to meet users' various use cases. See /// the list of [pre-built containers for /// training](). /// You must use an image from this list. #[prost(string, tag = "1")] pub executor_image_uri: ::prost::alloc::string::String, /// Required. The Google Cloud Storage location of the Python package files which are /// the training program and its dependent packages. /// The maximum number of package URIs is 100. #[prost(string, repeated, tag = "2")] pub package_uris: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Required. The Python module name to run after installing the packages. #[prost(string, tag = "3")] pub python_module: ::prost::alloc::string::String, /// Command line arguments to be passed to the Python task. #[prost(string, repeated, tag = "4")] pub args: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// All parameters related to queuing and scheduling of custom jobs. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Scheduling { /// The maximum job running time. The default is 7 days. #[prost(message, optional, tag = "1")] pub timeout: ::core::option::Option<::prost_types::Duration>, /// Restarts the entire CustomJob if a worker gets restarted. /// This feature can be used by distributed training jobs that are not /// resilient to workers leaving and joining a job. #[prost(bool, tag = "3")] pub restart_job_on_worker_restart: bool, } /// A piece of data in a Dataset. Could be an image, a video, a document or plain /// text. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DataItem { /// Output only. The resource name of the DataItem. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Output only. Timestamp when this DataItem was created. #[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Timestamp when this DataItem was last updated. 
#[prost(message, optional, tag = "6")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Optional. The labels with user-defined metadata to organize your DataItems. /// /// Label keys and values can be no longer than 64 characters /// (Unicode codepoints), can only contain lowercase letters, numeric /// characters, underscores and dashes. International characters are allowed. /// No more than 64 user labels can be associated with one DataItem(System /// labels are excluded). /// /// See for more information and examples of labels. /// System reserved label keys are prefixed with "aiplatform.googleapis.com/" /// and are immutable. #[prost(map = "string, string", tag = "3")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// Required. The data that the DataItem represents (for example, an image or a text /// snippet). The schema of the payload is stored in the parent Dataset's /// [metadata schema's]\[google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri\] dataItemSchemaUri field. #[prost(message, optional, tag = "4")] pub payload: ::core::option::Option<::prost_types::Value>, /// Optional. Used to perform consistent read-modify-write updates. If not set, a blind /// "overwrite" update happens. #[prost(string, tag = "7")] pub etag: ::prost::alloc::string::String, } /// SpecialistPool represents customers' own workforce to work on their data /// labeling jobs. It includes a group of specialist managers and workers. /// Managers are responsible for managing the workers in this pool as well as /// customers' data labeling jobs associated with this pool. Customers create /// specialist pool as well as start data labeling jobs on Cloud, managers and /// workers handle the jobs using CrowdCompute console. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SpecialistPool { /// Required. The resource name of the SpecialistPool. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The user-defined name of the SpecialistPool. /// The name can be up to 128 characters long and can be consist of any UTF-8 /// characters. /// This field should be unique on project-level. #[prost(string, tag = "2")] pub display_name: ::prost::alloc::string::String, /// Output only. The number of managers in this SpecialistPool. #[prost(int32, tag = "3")] pub specialist_managers_count: i32, /// The email addresses of the managers in the SpecialistPool. #[prost(string, repeated, tag = "4")] pub specialist_manager_emails: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Output only. The resource name of the pending data labeling jobs. #[prost(string, repeated, tag = "5")] pub pending_data_labeling_jobs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// The email addresses of workers in the SpecialistPool. #[prost(string, repeated, tag = "7")] pub specialist_worker_emails: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// DataLabelingJob is used to trigger a human labeling job on unlabeled data /// from the following Dataset: #[derive(Clone, PartialEq, ::prost::Message)] pub struct DataLabelingJob { /// Output only. Resource name of the DataLabelingJob. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The user-defined name of the DataLabelingJob. /// The name can be up to 128 characters long and can be consist of any UTF-8 /// characters. /// Display name of a DataLabelingJob. 
#[prost(string, tag = "2")] pub display_name: ::prost::alloc::string::String, /// Required. Dataset resource names. Right now we only support labeling from a single /// Dataset. /// Format: /// `projects/{project}/locations/{location}/datasets/{dataset}` #[prost(string, repeated, tag = "3")] pub datasets: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Labels to assign to annotations generated by this DataLabelingJob. /// /// Label keys and values can be no longer than 64 characters /// (Unicode codepoints), can only contain lowercase letters, numeric /// characters, underscores and dashes. International characters are allowed. /// See for more information and examples of labels. /// System reserved label keys are prefixed with "aiplatform.googleapis.com/" /// and are immutable. #[prost(map = "string, string", tag = "12")] pub annotation_labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, /// Required. Number of labelers to work on each DataItem. #[prost(int32, tag = "4")] pub labeler_count: i32, /// Required. The Google Cloud Storage location of the instruction pdf. This pdf is /// shared with labelers, and provides detailed description on how to label /// DataItems in Datasets. #[prost(string, tag = "5")] pub instruction_uri: ::prost::alloc::string::String, /// Required. Points to a YAML file stored on Google Cloud Storage describing the /// config for a specific type of DataLabelingJob. /// The schema files that can be used here are found in the /// bucket in the /// /schema/datalabelingjob/inputs/ folder. #[prost(string, tag = "6")] pub inputs_schema_uri: ::prost::alloc::string::String, /// Required. Input config parameters for the DataLabelingJob. #[prost(message, optional, tag = "7")] pub inputs: ::core::option::Option<::prost_types::Value>, /// Output only. The detailed state of the job. #[prost(enumeration = "JobState", tag = "8")] pub state: i32, /// Output only. Current labeling job progress percentage scaled in interval [0, 100], /// indicating the percentage of DataItems that has been finished. #[prost(int32, tag = "13")] pub labeling_progress: i32, /// Output only. Estimated cost(in US dollars) that the DataLabelingJob has incurred to /// date. #[prost(message, optional, tag = "14")] pub current_spend: ::core::option::Option, /// Output only. Timestamp when this DataLabelingJob was created. #[prost(message, optional, tag = "9")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Timestamp when this DataLabelingJob was updated most recently. #[prost(message, optional, tag = "10")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. DataLabelingJob errors. It is only populated when job's state is /// `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. #[prost(message, optional, tag = "22")] pub error: ::core::option::Option, /// The labels with user-defined metadata to organize your DataLabelingJobs. /// /// Label keys and values can be no longer than 64 characters /// (Unicode codepoints), can only contain lowercase letters, numeric /// characters, underscores and dashes. International characters are allowed. /// /// See for more information and examples of labels. /// System reserved label keys are prefixed with "aiplatform.googleapis.com/" /// and are immutable. 
    /// Following system labels exist for each DataLabelingJob:
    ///
    /// * "aiplatform.googleapis.com/schema": output only, its value is the
    /// \[inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri\]'s title.
    #[prost(map = "string, string", tag = "11")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// The SpecialistPools' resource names associated with this job.
    #[prost(string, repeated, tag = "16")]
    pub specialist_pools: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Customer-managed encryption key spec for a DataLabelingJob. If set, this
    /// DataLabelingJob will be secured by this key.
    ///
    /// Note: Annotations created in the DataLabelingJob are associated with
    /// the EncryptionSpec of the Dataset they are exported to.
    #[prost(message, optional, tag = "20")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
    /// Parameters that configure the active learning pipeline. Active learning
    /// will label the data incrementally via several iterations. For every
    /// iteration, it will select a batch of data based on the sampling strategy.
    #[prost(message, optional, tag = "21")]
    pub active_learning_config: ::core::option::Option<ActiveLearningConfig>,
}
/// Parameters that configure the active learning pipeline. Active learning will
/// label the data incrementally by several iterations. For every iteration, it
/// will select a batch of data based on the sampling strategy.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ActiveLearningConfig {
    /// Active learning data sampling config. For every active learning labeling
    /// iteration, it will select a batch of data based on the sampling strategy.
    #[prost(message, optional, tag = "3")]
    pub sample_config: ::core::option::Option<SampleConfig>,
    /// CMLE training config. For every active learning labeling iteration, the
    /// system will train a machine learning model on CMLE. The trained model will
    /// be used by the data sampling algorithm to select DataItems.
    #[prost(message, optional, tag = "4")]
    pub training_config: ::core::option::Option<TrainingConfig>,
    /// Required. Max human labeling DataItems. The remaining part will be labeled by
    /// machine.
    #[prost(oneof = "active_learning_config::HumanLabelingBudget", tags = "1, 2")]
    pub human_labeling_budget: ::core::option::Option<active_learning_config::HumanLabelingBudget>,
}
/// Nested message and enum types in `ActiveLearningConfig`.
pub mod active_learning_config {
    /// Required. Max human labeling DataItems. The remaining part will be labeled by
    /// machine.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum HumanLabelingBudget {
        /// Max number of human-labeled DataItems.
        #[prost(int64, tag = "1")]
        MaxDataItemCount(i64),
        /// Max percent of total DataItems for human labeling.
        #[prost(int32, tag = "2")]
        MaxDataItemPercentage(i32),
    }
}
/// Active learning data sampling config. For every active learning labeling
/// iteration, it will select a batch of data based on the sampling strategy.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SampleConfig {
    /// Field to choose the sampling strategy. The sampling strategy decides which
    /// data should be selected for human labeling in every batch.
    #[prost(enumeration = "sample_config::SampleStrategy", tag = "5")]
    pub sample_strategy: i32,
    /// Decides the sample size for the initial batch. initial_batch_sample_percentage
    /// is used by default.
    #[prost(oneof = "sample_config::InitialBatchSampleSize", tags = "1")]
    pub initial_batch_sample_size: ::core::option::Option<sample_config::InitialBatchSampleSize>,
    /// Decides the sample size for the following batches.
    /// following_batch_sample_percentage is used by default.
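    ///
    /// For example, a sketch selecting 10% of data for each following batch
    /// (the percentage is illustrative):
    ///
    /// ```ignore
    /// let following =
    ///     sample_config::FollowingBatchSampleSize::FollowingBatchSamplePercentage(10);
    /// ```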
#[prost(oneof = "sample_config::FollowingBatchSampleSize", tags = "3")] pub following_batch_sample_size: ::core::option::Option, } /// Nested message and enum types in `SampleConfig`. pub mod sample_config { /// Sample strategy decides which subset of DataItems should be selected for /// human labeling in every batch. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SampleStrategy { /// Default will be treated as UNCERTAINTY. Unspecified = 0, /// Sample the most uncertain data to label. Uncertainty = 1, } /// Decides sample size for the initial batch. initial_batch_sample_percentage /// is used by default. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum InitialBatchSampleSize { /// The percentage of data needed to be labeled in the first batch. #[prost(int32, tag = "1")] InitialBatchSamplePercentage(i32), } /// Decides sample size for the following batches. /// following_batch_sample_percentage is used by default. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum FollowingBatchSampleSize { /// The percentage of data needed to be labeled in each following batch /// (except the first batch). #[prost(int32, tag = "3")] FollowingBatchSamplePercentage(i32), } } /// CMLE training config. For every active learning labeling iteration, system /// will train a machine learning model on CMLE. The trained model will be used /// by data sampling algorithm to select DataItems. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TrainingConfig { /// The timeout hours for the CMLE training job, expressed in milli hours /// i.e. 1,000 value in this field means 1 hour. #[prost(int64, tag = "1")] pub timeout_training_milli_hours: i64, } /// A collection of DataItems and Annotations on them. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Dataset { /// Output only. The resource name of the Dataset. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The user-defined name of the Dataset. /// The name can be up to 128 characters long and can be consist of any UTF-8 /// characters. #[prost(string, tag = "2")] pub display_name: ::prost::alloc::string::String, /// Optional. The description of the Dataset. #[prost(string, tag = "16")] pub description: ::prost::alloc::string::String, /// Required. Points to a YAML file stored on Google Cloud Storage describing additional /// information about the Dataset. /// The schema is defined as an OpenAPI 3.0.2 Schema Object. /// The schema files that can be used here are found in /// gs://google-cloud-aiplatform/schema/dataset/metadata/. #[prost(string, tag = "3")] pub metadata_schema_uri: ::prost::alloc::string::String, /// Required. Additional information about the Dataset. #[prost(message, optional, tag = "8")] pub metadata: ::core::option::Option<::prost_types::Value>, /// Output only. Timestamp when this Dataset was created. #[prost(message, optional, tag = "4")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Output only. Timestamp when this Dataset was last updated. #[prost(message, optional, tag = "5")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Used to perform consistent read-modify-write updates. If not set, a blind /// "overwrite" update happens. #[prost(string, tag = "6")] pub etag: ::prost::alloc::string::String, /// The labels with user-defined metadata to organize your Datasets. 
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    /// No more than 64 user labels can be associated with one Dataset (System
    /// labels are excluded).
    ///
    /// See for more information and examples of labels.
    /// System reserved label keys are prefixed with "aiplatform.googleapis.com/"
    /// and are immutable. Following system labels exist for each Dataset:
    ///
    /// * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its
    /// value is the \[metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri\] title.
    #[prost(map = "string, string", tag = "7")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Customer-managed encryption key spec for a Dataset. If set, this Dataset
    /// and all sub-resources of this Dataset will be secured by this key.
    #[prost(message, optional, tag = "11")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
}
/// Describes the location from where we import data into a Dataset, together
/// with the labels that will be applied to the DataItems and the Annotations.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImportDataConfig {
    /// Labels that will be applied to newly imported DataItems. If an identical
    /// DataItem as one being imported already exists in the Dataset, then these
    /// labels will be appended to those of the already existing one, and if a label
    /// with an identical key was imported before, the old label value will be
    /// overwritten. If two DataItems are identical in the same import data
    /// operation, the labels will be combined and if a key collision happens in this
    /// case, one of the values will be picked randomly. Two DataItems are
    /// considered identical if their content bytes are identical (e.g. image bytes
    /// or pdf bytes).
    /// These labels will be overridden by Annotation labels specified inside the
    /// index file referenced by \[import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri\], e.g. a JSONL file.
    #[prost(map = "string, string", tag = "2")]
    pub data_item_labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Required. Points to a YAML file stored on Google Cloud Storage describing the import
    /// format. Validation will be done against the schema. The schema is defined
    /// as an [OpenAPI 3.0.2 Schema
    /// Object]().
    #[prost(string, tag = "4")]
    pub import_schema_uri: ::prost::alloc::string::String,
    /// The source of the input.
    #[prost(oneof = "import_data_config::Source", tags = "1")]
    pub source: ::core::option::Option<import_data_config::Source>,
}
/// Nested message and enum types in `ImportDataConfig`.
pub mod import_data_config {
    /// The source of the input.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Source {
        /// The Google Cloud Storage location for the input content.
        #[prost(message, tag = "1")]
        GcsSource(super::GcsSource),
    }
}
/// Describes what part of the Dataset is to be exported, the destination of
/// the export and how to export.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExportDataConfig {
    /// A filter on Annotations of the Dataset. Only Annotations on to-be-exported
    /// DataItems (specified by \[data_items_filter][\]) that match this filter will
    /// be exported. The filter syntax is the same as in
    /// \[ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations\].
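    ///
    /// A construction sketch of an `ExportDataConfig` (the filter string and
    /// bucket path are illustrative):
    ///
    /// ```ignore
    /// let cfg = ExportDataConfig {
    ///     annotations_filter: "labels.my_key=my_value".to_string(), // hypothetical filter
    ///     destination: Some(export_data_config::Destination::GcsDestination(GcsDestination {
    ///         output_uri_prefix: "gs://my-bucket/exports/".to_string(),
    ///     })),
    /// };
    /// ```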
/// Describes what part of the Dataset is to be exported, the destination of
/// the export and how to export.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExportDataConfig {
    /// A filter on Annotations of the Dataset. Only Annotations on to-be-exported
    /// DataItems (specified by \[data_items_filter][\]) that match this filter will
    /// be exported. The filter syntax is the same as in
    /// \[ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations\].
    #[prost(string, tag = "2")]
    pub annotations_filter: ::prost::alloc::string::String,
    /// The destination of the output.
    #[prost(oneof = "export_data_config::Destination", tags = "1")]
    pub destination: ::core::option::Option<export_data_config::Destination>,
}
/// Nested message and enum types in `ExportDataConfig`.
pub mod export_data_config {
    /// The destination of the output.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Destination {
        /// The Google Cloud Storage location where the output is to be written to.
        /// In the given directory a new directory will be created with name:
        /// `export-data-<dataset-display-name>-<timestamp-of-export-call>` where
        /// timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
        /// output will be written into that directory. Inside that directory,
        /// annotations with the same schema will be grouped into subdirectories
        /// which are named with the corresponding annotations' schema title. Inside
        /// these subdirectories, a schema.yaml will be created to describe the
        /// output format.
        #[prost(message, tag = "1")]
        GcsDestination(super::GcsDestination),
    }
}
/// Generic Metadata shared by all operations.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenericOperationMetadata {
    /// Output only. Partial failures encountered.
    /// E.g. single files that couldn't be read.
    /// This field should never exceed 20 entries.
    /// The status details field will contain standard GCP error details.
    #[prost(message, repeated, tag = "1")]
    pub partial_failures: ::prost::alloc::vec::Vec<super::super::super::rpc::Status>,
    /// Output only. Time when the operation was created.
    #[prost(message, optional, tag = "2")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the operation was updated for the last time.
    /// If the operation has finished (successfully or not), this is the finish
    /// time.
    #[prost(message, optional, tag = "3")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
}
/// Details of operations that perform deletes of any entities.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteOperationMetadata {
    /// The common part of the operation metadata.
    #[prost(message, optional, tag = "1")]
    pub generic_metadata: ::core::option::Option<GenericOperationMetadata>,
}
/// Points to a DeployedModel.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeployedModelRef {
    /// Immutable. A resource name of an Endpoint.
    #[prost(string, tag = "1")]
    pub endpoint: ::prost::alloc::string::String,
    /// Immutable. An ID of a DeployedModel in the above Endpoint.
    #[prost(string, tag = "2")]
    pub deployed_model_id: ::prost::alloc::string::String,
}
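// Illustrative only: a sketch of an `ExportDataConfig` that filters the
// exported Annotations and writes the output under a Cloud Storage prefix.
// Assumes `GcsDestination` (defined elsewhere in this file) exposes an
// `output_uri_prefix` field; the filter expression and path are hypothetical.
#[cfg(test)]
mod export_data_config_example {
    #[test]
    fn build_gcs_export_config() {
        let config = super::ExportDataConfig {
            annotations_filter: "labels.my-key=\"my-value\"".to_string(),
            destination: Some(super::export_data_config::Destination::GcsDestination(
                super::GcsDestination {
                    output_uri_prefix: "gs://my-bucket/exports/".to_string(),
                    ..Default::default()
                },
            )),
        };
        assert!(config.destination.is_some());
    }
}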
/// A trained machine learning Model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Model {
    /// The resource name of the Model.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. The display name of the Model.
    /// The name can be up to 128 characters long and can consist of any UTF-8
    /// characters.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// The description of the Model.
    #[prost(string, tag = "3")]
    pub description: ::prost::alloc::string::String,
    /// The schemata that describe formats of the Model's predictions and
    /// explanations as given and returned via
    /// \[PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\] and \[PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain\].
    #[prost(message, optional, tag = "4")]
    pub predict_schemata: ::core::option::Option<PredictSchemata>,
    /// Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
    /// information about the Model that is specific to it. Unset if the Model
    /// does not have any additional information.
    /// The schema is defined as an OpenAPI 3.0.2 [Schema
    /// Object]().
    /// AutoML Models always have this field populated by Vertex AI; if no
    /// additional metadata is needed, this field is set to an empty string.
    /// Note: The URI given on output will be immutable and probably different,
    /// including the URI scheme, from the one given on input. The output URI will
    /// point to a location where the user has read access only.
    #[prost(string, tag = "5")]
    pub metadata_schema_uri: ::prost::alloc::string::String,
    /// Immutable. Additional information about the Model; the schema of the metadata can
    /// be found in \[metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri\].
    /// Unset if the Model does not have any additional information.
    #[prost(message, optional, tag = "6")]
    pub metadata: ::core::option::Option<::prost_types::Value>,
    /// Output only. The formats in which this Model may be exported. If empty, this Model is
    /// not available for export.
    #[prost(message, repeated, tag = "20")]
    pub supported_export_formats: ::prost::alloc::vec::Vec<model::ExportFormat>,
    /// Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
    #[prost(string, tag = "7")]
    pub training_pipeline: ::prost::alloc::string::String,
    /// Input only. The specification of the container that is to be used when deploying
    /// this Model. The specification is ingested upon
    /// \[ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel\], and all binaries it contains are copied
    /// and stored internally by Vertex AI.
    /// Not present for AutoML Models.
    #[prost(message, optional, tag = "9")]
    pub container_spec: ::core::option::Option<ModelContainerSpec>,
    /// Immutable. The path to the directory containing the Model artifact and any of its
    /// supporting files.
    /// Not present for AutoML Models.
    #[prost(string, tag = "26")]
    pub artifact_uri: ::prost::alloc::string::String,
    /// Output only. When this Model is deployed, its prediction resources are described by the
    /// `prediction_resources` field of the \[Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models\] object.
    /// Because not all Models support all resource configuration types, the
    /// configuration types this Model supports are listed here. If no
    /// configuration types are listed, the Model cannot be deployed to an
    /// \[Endpoint][google.cloud.aiplatform.v1beta1.Endpoint\] and does not support
    /// online predictions (\[PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\] or
    /// \[PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain\]). Such a Model can serve predictions by
    /// using a \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\], if it has at least one entry each in
    /// \[supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats\] and
    /// \[supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats\].
    #[prost(
        enumeration = "model::DeploymentResourcesType",
        repeated,
        packed = "false",
        tag = "10"
    )]
    pub supported_deployment_resources_types: ::prost::alloc::vec::Vec<i32>,
    /// Output only. The formats this Model supports in
    /// \[BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config\]. If
    /// \[PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\] exists, the instances
    /// should be given as per that schema.
    ///
    /// The possible formats are:
    ///
    /// * `jsonl`
    /// The JSON Lines format, where each instance is a single line. Uses
    /// \[GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source\].
    ///
    /// * `csv`
    /// The CSV format, where each instance is a single comma-separated line.
    /// The first line in the file is the header, containing comma-separated field
    /// names. Uses \[GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source\].
    ///
    /// * `tf-record`
    /// The TFRecord format, where each instance is a single record in tfrecord
    /// syntax. Uses \[GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source\].
    ///
    /// * `tf-record-gzip`
    /// Similar to `tf-record`, but the file is gzipped. Uses
    /// \[GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source\].
    ///
    /// * `bigquery`
    /// Each instance is a single row in BigQuery. Uses
    /// \[BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source\].
    ///
    /// * `file-list`
    /// Each line of the file is the location of an instance to process, uses
    /// the `gcs_source` field of the
    /// \[InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig\] object.
    ///
    /// If this Model doesn't support any of these formats, it means it cannot be
    /// used with a \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\]. However, if it has
    /// \[supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types\], it could serve online
    /// predictions by using \[PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\] or
    /// \[PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain\].
    #[prost(string, repeated, tag = "11")]
    pub supported_input_storage_formats: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Output only. The formats this Model supports in
    /// \[BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config\]. If both
    /// \[PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri\] and
    /// \[PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri\] exist, the predictions
    /// are returned together with their instances. In other words, the
    /// prediction has the original instance data first, followed
    /// by the actual prediction content (as per the schema).
    ///
    /// The possible formats are:
    ///
    /// * `jsonl`
    /// The JSON Lines format, where each prediction is a single line. Uses
    /// \[GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination\].
    ///
    /// * `csv`
    /// The CSV format, where each prediction is a single comma-separated line.
    /// The first line in the file is the header, containing comma-separated field
    /// names. Uses
    /// \[GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination\].
    ///
    /// * `bigquery`
    /// Each prediction is a single row in a BigQuery table. Uses
    /// \[BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination\].
    ///
    /// If this Model doesn't support any of these formats, it means it cannot be
    /// used with a \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\]. However, if it has
    /// \[supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types\], it could serve online
    /// predictions by using \[PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\] or
    /// \[PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain\].
    #[prost(string, repeated, tag = "12")]
    pub supported_output_storage_formats: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Output only. Timestamp when this Model was uploaded into Vertex AI.
    #[prost(message, optional, tag = "13")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Timestamp when this Model was most recently updated.
    #[prost(message, optional, tag = "14")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. The pointers to DeployedModels created from this Model. Note that a
    /// Model could have been deployed to Endpoints in different Locations.
    #[prost(message, repeated, tag = "15")]
    pub deployed_models: ::prost::alloc::vec::Vec<DeployedModelRef>,
    /// The default explanation specification for this Model.
    ///
    /// The Model can be used for [requesting
    /// explanation]\[PredictionService.Explain\] after being
    /// \[deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel\] if it is populated.
    /// The Model can be used for [batch
    /// explanation]\[BatchPredictionJob.generate_explanation\] if it is populated.
    ///
    /// All fields of the explanation_spec can be overridden by
    /// \[explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec\] of
    /// \[DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model\], or
    /// \[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec\] of
    /// \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\].
    ///
    /// If the default explanation specification is not set for this Model, this
    /// Model can still be used for [requesting
    /// explanation]\[PredictionService.Explain\] by setting
    /// \[explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec\] of
    /// \[DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model\] and for [batch
    /// explanation]\[BatchPredictionJob.generate_explanation\] by setting
    /// \[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec\] of
    /// \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\].
    #[prost(message, optional, tag = "23")]
    pub explanation_spec: ::core::option::Option<ExplanationSpec>,
    /// Used to perform consistent read-modify-write updates. If not set, a blind
    /// "overwrite" update happens.
    #[prost(string, tag = "16")]
    pub etag: ::prost::alloc::string::String,
    /// The labels with user-defined metadata to organize your Models.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    ///
    /// See the labels documentation for more information and examples of labels.
    #[prost(map = "string, string", tag = "17")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Customer-managed encryption key spec for a Model. If set, this
    /// Model and all sub-resources of this Model will be secured by this key.
    #[prost(message, optional, tag = "24")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
}
/// Nested message and enum types in `Model`.
pub mod model {
    /// Represents an export format supported by the Model.
    /// All formats export to Google Cloud Storage.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct ExportFormat {
        /// Output only. The ID of the export format.
        /// The possible format IDs are:
        ///
        /// * `tflite`
        /// Used for Android mobile devices.
        ///
        /// * `edgetpu-tflite`
        /// Used for [Edge TPU]() devices.
        ///
        /// * `tf-saved-model`
        /// A tensorflow model in SavedModel format.
        ///
        /// * `tf-js`
        /// A \[TensorFlow.js\]() model that can be used
        /// in the browser and in Node.js using JavaScript.
        ///
        /// * `core-ml`
        /// Used for iOS mobile devices.
        ///
        /// * `custom-trained`
        /// A Model that was uploaded or trained by custom code.
        #[prost(string, tag = "1")]
        pub id: ::prost::alloc::string::String,
        /// Output only. The content of this Model that may be exported.
        #[prost(
            enumeration = "export_format::ExportableContent",
            repeated,
            packed = "false",
            tag = "2"
        )]
        pub exportable_contents: ::prost::alloc::vec::Vec<i32>,
    }
    /// Nested message and enum types in `ExportFormat`.
    pub mod export_format {
        /// The Model content that can be exported.
        #[derive(
            Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration,
        )]
        #[repr(i32)]
        pub enum ExportableContent {
            /// Should not be used.
            Unspecified = 0,
            /// Model artifact and any of its supporting files. Will be exported to the
            /// location specified by the `artifactDestination` field of the
            /// \[ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config\] object.
            Artifact = 1,
            /// The container image that is to be used when deploying this Model. Will
            /// be exported to the location specified by the `imageDestination` field
            /// of the \[ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config\] object.
            Image = 2,
        }
    }
    /// Identifies a type of Model's prediction resources.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum DeploymentResourcesType {
        /// Should not be used.
        Unspecified = 0,
        /// Resources that are dedicated to the \[DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel\], and that need a
        /// higher degree of manual configuration.
        DedicatedResources = 1,
        /// Resources that are, to a large degree, decided by Vertex AI, and require
        /// only modest additional configuration.
        AutomaticResources = 2,
    }
}
/// Contains the schemata used in Model's predictions and explanations via
/// \[PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\], \[PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain\] and
/// \[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob\].
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PredictSchemata {
    /// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
    /// of a single instance, which is used in \[PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances\],
    /// \[ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances\] and
    /// \[BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config\].
    /// The schema is defined as an OpenAPI 3.0.2 [Schema
    /// Object]().
    /// AutoML Models always have this field populated by Vertex AI.
    /// Note: The URI given on output will be immutable and probably different,
    /// including the URI scheme, from the one given on input. The output URI will
    /// point to a location where the user has read access only.
    #[prost(string, tag = "1")]
    pub instance_schema_uri: ::prost::alloc::string::String,
    /// Immutable. Points to a YAML file stored on Google Cloud Storage describing the
    /// parameters of prediction and explanation via
    /// \[PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters\], \[ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters\] and
    /// \[BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters\].
    /// The schema is defined as an OpenAPI 3.0.2 [Schema
    /// Object]().
    /// AutoML Models always have this field populated by Vertex AI; if no
    /// parameters are supported, then it is set to an empty string.
    /// Note: The URI given on output will be immutable and probably different,
    /// including the URI scheme, from the one given on input. The output URI will
    /// point to a location where the user has read access only.
    #[prost(string, tag = "2")]
    pub parameters_schema_uri: ::prost::alloc::string::String,
    /// Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
    /// of a single prediction produced by this Model, which is returned via
    /// \[PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions\], \[ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations\], and
    /// \[BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config\].
    /// The schema is defined as an OpenAPI 3.0.2 [Schema
    /// Object]().
    /// AutoML Models always have this field populated by Vertex AI.
    /// Note: The URI given on output will be immutable and probably different,
    /// including the URI scheme, from the one given on input. The output URI will
    /// point to a location where the user has read access only.
    #[prost(string, tag = "3")]
    pub prediction_schema_uri: ::prost::alloc::string::String,
}
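// Illustrative only: a minimal `PredictSchemata` pointing at hypothetical
// schema files. All three URIs are plain strings; an empty string means the
// corresponding schema is not provided.
#[cfg(test)]
mod predict_schemata_example {
    #[test]
    fn build_predict_schemata() {
        let schemata = super::PredictSchemata {
            instance_schema_uri: "gs://my-bucket/schemata/instance.yaml".to_string(),
            parameters_schema_uri: String::new(), // no parameters supported
            prediction_schema_uri: "gs://my-bucket/schemata/prediction.yaml".to_string(),
        };
        assert!(schemata.parameters_schema_uri.is_empty());
    }
}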
/// Specification of a container for serving predictions. Some fields in this
/// message correspond to fields in the [Kubernetes Container v1 core
/// specification]().
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelContainerSpec {
    /// Required. Immutable. URI of the Docker image to be used as the custom container for serving
    /// predictions. This URI must identify an image in Artifact Registry or
    /// Container Registry. Learn more about the [container publishing
    /// requirements](),
    /// including permissions requirements for the Vertex AI Service Agent.
    ///
    /// The container image is ingested upon \[ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel\], stored
    /// internally, and this original path is afterwards not used.
    ///
    /// To learn about the requirements for the Docker image itself, see
    /// [Custom container
    /// requirements]().
    ///
    /// You can use the URI of one of Vertex AI's [pre-built container images for
    /// prediction]()
    /// in this field.
    #[prost(string, tag = "1")]
    pub image_uri: ::prost::alloc::string::String,
    /// Immutable. Specifies the command that runs when the container starts. This overrides
    /// the container's
    /// \[ENTRYPOINT\]().
    /// Specify this field as an array of executable and arguments, similar to a
    /// Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
    ///
    /// If you do not specify this field, then the container's `ENTRYPOINT` runs,
    /// in conjunction with the \[args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args\] field or the
    /// container's \[`CMD`\](),
    /// if either exists. If this field is not specified and the container does not
    /// have an `ENTRYPOINT`, then refer to the Docker documentation about [how
    /// `CMD` and `ENTRYPOINT`
    /// interact]().
    ///
    /// If you specify this field, then you can also specify the `args` field to
    /// provide additional arguments for this command. However, if you specify this
    /// field, then the container's `CMD` is ignored. See the
    /// [Kubernetes documentation about how the
    /// `command` and `args` fields interact with a container's `ENTRYPOINT` and
    /// `CMD`]().
    ///
    /// In this field, you can reference [environment variables set by Vertex
    /// AI]()
    /// and environment variables set in the \[env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env\] field.
    /// You cannot reference environment variables set in the Docker image. In
    /// order for environment variables to be expanded, reference them by using the
    /// following syntax:
    /// $(VARIABLE_NAME)
    /// Note that this differs from Bash variable expansion, which does not use
    /// parentheses. If a variable cannot be resolved, the reference in the input
    /// string is used unchanged. To avoid variable expansion, you can escape this
    /// syntax with `$$`; for example:
    /// $$(VARIABLE_NAME)
    /// This field corresponds to the `command` field of the Kubernetes Containers
    /// [v1 core
    /// API]().
    #[prost(string, repeated, tag = "2")]
    pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Immutable. Specifies arguments for the command that runs when the container starts.
    /// This overrides the container's
    /// \[`CMD`\](). Specify
    /// this field as an array of executable and arguments, similar to a Docker
    /// `CMD`'s "default parameters" form.
    ///
    /// If you don't specify this field but do specify the
    /// \[command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command\] field, then the command from the
    /// `command` field runs without any additional arguments. See the
    /// [Kubernetes documentation about how the
    /// `command` and `args` fields interact with a container's `ENTRYPOINT` and
    /// `CMD`]().
    ///
    /// If you don't specify this field and don't specify the `command` field,
    /// then the container's
    /// \[`ENTRYPOINT`\]() and
    /// `CMD` determine what runs based on their default behavior. See the Docker
    /// documentation about [how `CMD` and `ENTRYPOINT`
    /// interact]().
    ///
    /// In this field, you can reference [environment variables
    /// set by Vertex
    /// AI]()
    /// and environment variables set in the \[env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env\] field.
    /// You cannot reference environment variables set in the Docker image. In
    /// order for environment variables to be expanded, reference them by using the
    /// following syntax:
    /// $(VARIABLE_NAME)
    /// Note that this differs from Bash variable expansion, which does not use
    /// parentheses. If a variable cannot be resolved, the reference in the input
    /// string is used unchanged. To avoid variable expansion, you can escape this
    /// syntax with `$$`; for example:
    /// $$(VARIABLE_NAME)
    /// This field corresponds to the `args` field of the Kubernetes Containers
    /// [v1 core
    /// API]().
    #[prost(string, repeated, tag = "3")]
    pub args: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Immutable. List of environment variables to set in the container. After the container
    /// starts running, code running in the container can read these environment
    /// variables.
    ///
    /// Additionally, the \[command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command\] and
    /// \[args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args\] fields can reference these variables. Later
    /// entries in this list can also reference earlier entries. For example, the
    /// following example sets the variable `VAR_2` to have the value `foo bar`:
    ///
    /// ```json
    /// [
    ///   {
    ///     "name": "VAR_1",
    ///     "value": "foo"
    ///   },
    ///   {
    ///     "name": "VAR_2",
    ///     "value": "$(VAR_1) bar"
    ///   }
    /// ]
    /// ```
    ///
    /// If you switch the order of the variables in the example, then the expansion
    /// does not occur.
    ///
    /// This field corresponds to the `env` field of the Kubernetes Containers
    /// [v1 core
    /// API]().
    #[prost(message, repeated, tag = "4")]
    pub env: ::prost::alloc::vec::Vec<EnvVar>,
    /// Immutable. List of ports to expose from the container. Vertex AI sends any
    /// prediction requests that it receives to the first port on this list. Vertex
    /// AI also sends
    /// [liveness and health
    /// checks]()
    /// to this port.
    ///
    /// If you do not specify this field, it defaults to the following value:
    ///
    /// ```json
    /// [
    ///   {
    ///     "containerPort": 8080
    ///   }
    /// ]
    /// ```
    ///
    /// Vertex AI does not use ports other than the first one listed. This field
    /// corresponds to the `ports` field of the Kubernetes Containers
    /// [v1 core
    /// API]().
    #[prost(message, repeated, tag = "5")]
    pub ports: ::prost::alloc::vec::Vec<Port>,
    /// Immutable. HTTP path on the container to send prediction requests to. Vertex AI
    /// forwards requests sent using
    /// \[projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict\] to this
    /// path on the container's IP address and port. Vertex AI then returns the
    /// container's response in the API response.
    ///
    /// For example, if you set this field to `/foo`, then when Vertex AI
    /// receives a prediction request, it forwards the request body in a POST
    /// request to the `/foo` path on the port of your container specified by the
    /// first value of this `ModelContainerSpec`'s
    /// \[ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports\] field.
    ///
    /// If you don't specify this field, it defaults to the following value when
    /// you [deploy this Model to an Endpoint]\[google.cloud.aiplatform.v1beta1.EndpointService.DeployModel\]:
    /// /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
    /// The placeholders in this value are replaced as follows:
    ///
    /// * ENDPOINT: The last segment (following `endpoints/`) of the
    ///   \[Endpoint.name][] field of the Endpoint where this Model has been
    ///   deployed. (Vertex AI makes this value available to your container code
    ///   as the [`AIP_ENDPOINT_ID` environment
    ///   variable]().)
    ///
    /// * DEPLOYED_MODEL: \[DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id\] of the `DeployedModel`.
    ///   (Vertex AI makes this value available to your container code
    ///   as the [`AIP_DEPLOYED_MODEL_ID` environment
    ///   variable]().)
    #[prost(string, tag = "6")]
    pub predict_route: ::prost::alloc::string::String,
    /// Immutable. HTTP path on the container to send health checks to. Vertex AI
    /// intermittently sends GET requests to this path on the container's IP
    /// address and port to check that the container is healthy. Read more about
    /// [health
    /// checks]().
    ///
    /// For example, if you set this field to `/bar`, then Vertex AI
    /// intermittently sends a GET request to the `/bar` path on the port of your
    /// container specified by the first value of this `ModelContainerSpec`'s
    /// \[ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports\] field.
    ///
    /// If you don't specify this field, it defaults to the following value when
    /// you [deploy this Model to an Endpoint]\[google.cloud.aiplatform.v1beta1.EndpointService.DeployModel\]:
    /// /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
    /// The placeholders in this value are replaced as follows:
    ///
    /// * ENDPOINT: The last segment (following `endpoints/`) of the
    ///   \[Endpoint.name][] field of the Endpoint where this Model has been
    ///   deployed. (Vertex AI makes this value available to your container code
    ///   as the [`AIP_ENDPOINT_ID` environment
    ///   variable]().)
    ///
    /// * DEPLOYED_MODEL: \[DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id\] of the `DeployedModel`.
    ///   (Vertex AI makes this value available to your container code as the
    ///   [`AIP_DEPLOYED_MODEL_ID` environment
    ///   variable]().)
    #[prost(string, tag = "7")]
    pub health_route: ::prost::alloc::string::String,
}
/// Represents a network port in a container.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Port {
    /// The number of the port to expose on the pod's IP address.
    /// Must be a valid port number, between 1 and 65535 inclusive.
    #[prost(int32, tag = "3")]
    pub container_port: i32,
}
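// Illustrative only: a sketch of a `ModelContainerSpec` that overrides the
// image's ENTRYPOINT, sets an expandable environment variable, and exposes the
// default prediction port. Assumes `EnvVar` (defined elsewhere in this file)
// exposes `name` and `value` fields; the image URI and routes are hypothetical
// placeholders.
#[cfg(test)]
mod model_container_spec_example {
    #[test]
    fn build_container_spec() {
        let spec = super::ModelContainerSpec {
            image_uri: "us-docker.pkg.dev/my-project/my-repo/my-server:latest".to_string(),
            command: vec!["/usr/bin/server".to_string()],
            // "$(PORT)" is expanded by Vertex AI from the `env` entry below.
            args: vec!["--port=$(PORT)".to_string()],
            env: vec![super::EnvVar {
                name: "PORT".to_string(),
                value: "8080".to_string(),
                ..Default::default()
            }],
            ports: vec![super::Port { container_port: 8080 }],
            predict_route: "/predict".to_string(),
            health_route: "/health".to_string(),
        };
        assert_eq!(spec.ports[0].container_port, 8080);
    }
}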
/// Describes the state of a pipeline.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum PipelineState {
    /// The pipeline state is unspecified.
    Unspecified = 0,
    /// The pipeline has been created or resumed, and processing has not yet
    /// begun.
    Queued = 1,
    /// The service is preparing to run the pipeline.
    Pending = 2,
    /// The pipeline is in progress.
    Running = 3,
    /// The pipeline completed successfully.
    Succeeded = 4,
    /// The pipeline failed.
    Failed = 5,
    /// The pipeline is being cancelled. From this state, the pipeline may only go
    /// to either PIPELINE_STATE_SUCCEEDED, PIPELINE_STATE_FAILED or
    /// PIPELINE_STATE_CANCELLED.
    Cancelling = 6,
    /// The pipeline has been cancelled.
    Cancelled = 7,
    /// The pipeline has been stopped, and can be resumed.
    Paused = 8,
}
/// The TrainingPipeline orchestrates tasks associated with training a Model. It
/// always executes the training task, and optionally may also
/// export data from Vertex AI's Dataset which becomes the training input,
/// \[upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel\] the Model to Vertex AI, and evaluate the
/// Model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TrainingPipeline {
    /// Output only. Resource name of the TrainingPipeline.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// Required. The user-defined name of this TrainingPipeline.
    #[prost(string, tag = "2")]
    pub display_name: ::prost::alloc::string::String,
    /// Specifies Vertex AI owned input data that may be used for training the
    /// Model. The TrainingPipeline's \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\] should make
    /// clear whether this config is used and if there are any special requirements
    /// on how it should be filled. If nothing about this config is mentioned in
    /// the \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\], then it should be assumed that the
    /// TrainingPipeline does not depend on this configuration.
    #[prost(message, optional, tag = "3")]
    pub input_data_config: ::core::option::Option<InputDataConfig>,
    /// Required. A Google Cloud Storage path to the YAML file that defines the training task
    /// which is responsible for producing the model artifact, and may also include
    /// additional auxiliary work.
    /// The definition files that can be used here are found in
    /// gs://google-cloud-aiplatform/schema/trainingjob/definition/.
    /// Note: The URI given on output will be immutable and probably different,
    /// including the URI scheme, from the one given on input. The output URI will
    /// point to a location where the user has read access only.
    #[prost(string, tag = "4")]
    pub training_task_definition: ::prost::alloc::string::String,
    /// Required. The training task's parameter(s), as specified in the
    /// \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\]'s `inputs`.
    #[prost(message, optional, tag = "5")]
    pub training_task_inputs: ::core::option::Option<::prost_types::Value>,
    /// Output only. The metadata information as specified in the \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\]'s
    /// `metadata`. This metadata is auxiliary runtime and final information
    /// about the training task. While the pipeline is running, this information is
    /// populated only on a best-effort basis. Only present if the
    /// pipeline's \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\] contains a `metadata` object.
    #[prost(message, optional, tag = "6")]
    pub training_task_metadata: ::core::option::Option<::prost_types::Value>,
    /// Describes the Model that may be uploaded (via \[ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel\])
    /// by this TrainingPipeline. The TrainingPipeline's
    /// \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\] should make clear whether this Model
    /// description should be populated, and if there are any special requirements
    /// regarding how it should be filled. If nothing is mentioned in the
    /// \[training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\], then it should be assumed that this field
    /// should not be filled and the training task either uploads the Model without
    /// needing this information, or the training task does not support
    /// uploading a Model as part of the pipeline.
    /// When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and
    /// the trained Model has been uploaded into Vertex AI, then the
    /// model_to_upload's resource \[name][google.cloud.aiplatform.v1beta1.Model.name\] is populated. The Model
    /// is always uploaded into the Project and Location in which this pipeline
    /// is.
    #[prost(message, optional, tag = "7")]
    pub model_to_upload: ::core::option::Option<Model>,
    /// Output only. The detailed state of the pipeline.
    #[prost(enumeration = "PipelineState", tag = "9")]
    pub state: i32,
    /// Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or
    /// `PIPELINE_STATE_CANCELLED`.
    #[prost(message, optional, tag = "10")]
    pub error: ::core::option::Option<super::super::super::rpc::Status>,
    /// Output only. Time when the TrainingPipeline was created.
    #[prost(message, optional, tag = "11")]
    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the TrainingPipeline for the first time entered the
    /// `PIPELINE_STATE_RUNNING` state.
    #[prost(message, optional, tag = "12")]
    pub start_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the TrainingPipeline entered any of the following states:
    /// `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`,
    /// `PIPELINE_STATE_CANCELLED`.
    #[prost(message, optional, tag = "13")]
    pub end_time: ::core::option::Option<::prost_types::Timestamp>,
    /// Output only. Time when the TrainingPipeline was most recently updated.
    #[prost(message, optional, tag = "14")]
    pub update_time: ::core::option::Option<::prost_types::Timestamp>,
    /// The labels with user-defined metadata to organize TrainingPipelines.
    ///
    /// Label keys and values can be no longer than 64 characters
    /// (Unicode codepoints), can only contain lowercase letters, numeric
    /// characters, underscores and dashes. International characters are allowed.
    ///
    /// See the labels documentation for more information and examples of labels.
    #[prost(map = "string, string", tag = "15")]
    pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
    /// Customer-managed encryption key spec for a TrainingPipeline. If set, this
    /// TrainingPipeline will be secured by this key.
    ///
    /// Note: The Model trained by this TrainingPipeline is also secured by this
    /// key if \[model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec\] is not set separately.
    #[prost(message, optional, tag = "18")]
    pub encryption_spec: ::core::option::Option<EncryptionSpec>,
}
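// Illustrative only: `TrainingPipeline.state` is generated as an `i32`, so a
// caller decodes it with `PipelineState::from_i32` (derived by
// `::prost::Enumeration` in this prost version) before matching. A sketch of
// testing for the terminal states named in the comments above:
#[cfg(test)]
mod pipeline_state_example {
    use super::PipelineState;

    fn is_terminal(state: i32) -> bool {
        matches!(
            PipelineState::from_i32(state),
            Some(PipelineState::Succeeded)
                | Some(PipelineState::Failed)
                | Some(PipelineState::Cancelled)
        )
    }

    #[test]
    fn succeeded_is_terminal() {
        assert!(is_terminal(PipelineState::Succeeded as i32));
        assert!(!is_terminal(PipelineState::Running as i32));
    }
}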
/// Specifies Vertex AI owned input data to be used for training, and
/// possibly evaluating, the Model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InputDataConfig {
    /// Required. The ID of the Dataset in the same Project and Location whose data will be
    /// used to train the Model. The Dataset must use a schema compatible with the
    /// Model being trained, and what is compatible should be described in the
    /// used TrainingPipeline's \[training_task_definition\]
    /// \[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition\].
    /// For tabular Datasets, all of their data is exported for training, to pick
    /// and choose from.
    #[prost(string, tag = "1")]
    pub dataset_id: ::prost::alloc::string::String,
    /// Applicable only to Datasets that have DataItems and Annotations.
    ///
    /// A filter on Annotations of the Dataset. Only Annotations that both
    /// match this filter and belong to DataItems not ignored by the split method
    /// are used in the training, validation, or test role, respectively, depending
    /// on the role of the DataItem they are on (for the auto-assigned, that role
    /// is decided by Vertex AI). A filter with the same syntax as the one used in
    /// \[ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations\] may be used, but note
    /// here it filters across all Annotations of the Dataset, and not just within
    /// a single DataItem.
    #[prost(string, tag = "6")]
    pub annotations_filter: ::prost::alloc::string::String,
    /// Applicable only to custom training with Datasets that have DataItems and
    /// Annotations.
    ///
    /// Cloud Storage URI that points to a YAML file describing the annotation
    /// schema. The schema is defined as an OpenAPI 3.0.2 [Schema
    /// Object]().
    /// The schema files that can be used here are found in
    /// gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the
    /// chosen schema must be consistent with
    /// \[metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri\] of the Dataset specified by
    /// \[dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id\].
    ///
    /// Only Annotations that both match this schema and belong to DataItems not
    /// ignored by the split method are used in the training, validation, or test
    /// role, respectively, depending on the role of the DataItem they are on.
    ///
    /// When used in conjunction with \[annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter\], the Annotations used
    /// for training are filtered by both \[annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter\] and
    /// \[annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri\].
    #[prost(string, tag = "9")]
    pub annotation_schema_uri: ::prost::alloc::string::String,
    /// Instructions for how the input data should be split between the
    /// training, validation and test sets.
    /// If no split type is provided, the \[fraction_split][google.cloud.aiplatform.v1beta1.InputDataConfig.fraction_split\] is used by default.
    #[prost(oneof = "input_data_config::Split", tags = "2, 3, 4, 5")]
    pub split: ::core::option::Option<input_data_config::Split>,
    /// Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
    ///
    /// The destination where the training data is to be written.
    ///
    /// Supported destination file formats:
    /// * For non-tabular data: "jsonl".
    /// * For tabular data: "csv" and "bigquery".
    ///
    /// The following Vertex AI environment variables are passed to containers
    /// or python modules of the training task when this field is set:
    ///
    /// * AIP_DATA_FORMAT : Exported data format.
    /// * AIP_TRAINING_DATA_URI : Sharded exported training data URIs.
    /// * AIP_VALIDATION_DATA_URI : Sharded exported validation data URIs.
    /// * AIP_TEST_DATA_URI : Sharded exported test data URIs.
    #[prost(oneof = "input_data_config::Destination", tags = "8, 10")]
    pub destination: ::core::option::Option<input_data_config::Destination>,
}
/// Nested message and enum types in `InputDataConfig`.
pub mod input_data_config {
    /// Instructions for how the input data should be split between the
    /// training, validation and test sets.
    /// If no split type is provided, the \[fraction_split][google.cloud.aiplatform.v1beta1.InputDataConfig.fraction_split\] is used by default.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Split {
        /// Split based on fractions defining the size of each set.
        #[prost(message, tag = "2")]
        FractionSplit(super::FractionSplit),
        /// Split based on the provided filters for each set.
        #[prost(message, tag = "3")]
        FilterSplit(super::FilterSplit),
        /// Supported only for tabular Datasets.
        ///
        /// Split based on a predefined key.
        #[prost(message, tag = "4")]
        PredefinedSplit(super::PredefinedSplit),
        /// Supported only for tabular Datasets.
        ///
        /// Split based on the timestamp of the input data pieces.
        #[prost(message, tag = "5")]
        TimestampSplit(super::TimestampSplit),
    }
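    // Illustrative only: a sketch of choosing the fraction-based split, assuming
    // `FractionSplit` (defined elsewhere in this file) exposes
    // `training_fraction`, `validation_fraction`, and `test_fraction` fields
    // that should sum to 1.0.
    #[cfg(test)]
    mod split_example {
        #[test]
        fn build_fraction_split() {
            let split = super::Split::FractionSplit(super::super::FractionSplit {
                training_fraction: 0.8,
                validation_fraction: 0.1,
                test_fraction: 0.1,
                ..Default::default()
            });
            assert!(matches!(split, super::Split::FractionSplit(_)));
        }
    }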
    /// Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
    ///
    /// The destination where the training data is to be written.
    ///
    /// Supported destination file formats:
    /// * For non-tabular data: "jsonl".
    /// * For tabular data: "csv" and "bigquery".
    ///
    /// The following Vertex AI environment variables are passed to containers
    /// or python modules of the training task when this field is set:
    ///
    /// * AIP_DATA_FORMAT : Exported data format.
    /// * AIP_TRAINING_DATA_URI : Sharded exported training data URIs.
    /// * AIP_VALIDATION_DATA_URI : Sharded exported validation data URIs.
    /// * AIP_TEST_DATA_URI : Sharded exported test data URIs.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Destination {
        /// The Cloud Storage location where the training data is to be
        /// written. In the given directory a new directory is created with
        /// name:
        /// `dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`
        /// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
        /// All training input data is written into that directory.
        ///
        /// The Vertex AI environment variables representing Cloud Storage
        /// data URIs are represented in the Cloud Storage wildcard
        /// format to support sharded data, e.g.: "gs://.../training-*.jsonl"
        ///
        /// * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
        /// * AIP_TRAINING_DATA_URI =
        /// "gcs_destination/dataset---