/// LINT.IfChange
/// Containers to hold repeated fundamental values.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BytesList {
    #[prost(bytes = "vec", repeated, tag = "1")]
    pub value: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FloatList {
    #[prost(float, repeated, tag = "1")]
    pub value: ::prost::alloc::vec::Vec<f32>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Int64List {
    #[prost(int64, repeated, tag = "1")]
    pub value: ::prost::alloc::vec::Vec<i64>,
}
/// Containers for non-sequential data.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Feature {
    /// Each feature can be exactly one kind.
    #[prost(oneof = "feature::Kind", tags = "1, 2, 3")]
    pub kind: ::core::option::Option<feature::Kind>,
}
/// Nested message and enum types in `Feature`.
pub mod feature {
    /// Each feature can be exactly one kind.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        BytesList(super::BytesList),
        #[prost(message, tag = "2")]
        FloatList(super::FloatList),
        #[prost(message, tag = "3")]
        Int64List(super::Int64List),
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Features {
    /// Map from feature name to feature.
    #[prost(map = "string, message", tag = "1")]
    pub feature: ::std::collections::HashMap<::prost::alloc::string::String, Feature>,
}
/// Containers for sequential data.
///
/// A FeatureList contains lists of Features. These may hold zero or more
/// Feature values.
///
/// FeatureLists are organized into categories by name. The FeatureLists message
/// contains the mapping from name to FeatureList.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FeatureList {
    #[prost(message, repeated, tag = "1")]
    pub feature: ::prost::alloc::vec::Vec<Feature>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FeatureLists {
    /// Map from feature name to feature list.
    #[prost(map = "string, message", tag = "1")]
    pub feature_list: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        FeatureList,
    >,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Example {
    #[prost(message, optional, tag = "1")]
    pub features: ::core::option::Option<Features>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SequenceExample {
    #[prost(message, optional, tag = "1")]
    pub context: ::core::option::Option<Features>,
    #[prost(message, optional, tag = "2")]
    pub feature_lists: ::core::option::Option<FeatureLists>,
}
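// Illustrative usage sketch, not part of the prost-generated output: it shows how
// the `Feature`/`Features`/`Example` messages above can be assembled and
// round-tripped through the protobuf wire format via `prost::Message`. The feature
// names and values below are made up for the example.
#[cfg(test)]
mod tf_example_usage {
    use super::*;
    use prost::Message;

    #[test]
    fn build_and_roundtrip_example() {
        let mut features = Features::default();
        features.feature.insert(
            "label".to_string(),
            Feature {
                kind: Some(feature::Kind::Int64List(Int64List { value: vec![1] })),
            },
        );
        features.feature.insert(
            "weights".to_string(),
            Feature {
                kind: Some(feature::Kind::FloatList(FloatList { value: vec![0.5, 0.25] })),
            },
        );
        let example = Example { features: Some(features) };

        // Encode to bytes and decode back; the decoded message must compare equal.
        let bytes = example.encode_to_vec();
        let decoded = Example::decode(bytes.as_slice()).expect("valid wire format");
        assert_eq!(example, decoded);
    }
}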
/// Dimensions of a tensor.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TensorShapeProto {
    /// Dimensions of the tensor, such as {"input", 30}, {"output", 40}
    /// for a 30 x 40 2D tensor. If an entry has size -1, this
    /// corresponds to a dimension of unknown size. The names are
    /// optional.
    ///
    /// The order of entries in "dim" matters: It indicates the layout of the
    /// values in the tensor in-memory representation.
    ///
    /// The first entry in "dim" is the outermost dimension used to layout the
    /// values, the last entry is the innermost dimension. This matches the
    /// in-memory layout of RowMajor Eigen tensors.
    ///
    /// If "dim.size()" > 0, "unknown_rank" must be false.
    #[prost(message, repeated, tag = "2")]
    pub dim: ::prost::alloc::vec::Vec<tensor_shape_proto::Dim>,
    /// If true, the number of dimensions in the shape is unknown.
    ///
    /// If true, "dim.size()" must be 0.
    #[prost(bool, tag = "3")]
    pub unknown_rank: bool,
}
/// Nested message and enum types in `TensorShapeProto`.
pub mod tensor_shape_proto {
    /// One dimension of the tensor.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Dim {
        /// Size of the tensor in that dimension.
        /// This value must be >= -1, but values of -1 are reserved for "unknown"
        /// shapes (values of -1 mean "unknown" dimension). Certain wrappers
        /// that work with TensorShapeProto may fail at runtime when deserializing
        /// a TensorShapeProto containing a dim value of -1.
        #[prost(int64, tag = "1")]
        pub size: i64,
        /// Optional name of the tensor dimension.
        #[prost(string, tag = "2")]
        pub name: ::prost::alloc::string::String,
    }
}
/// Represents a serialized tf.dtypes.Dtype
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SerializedDType {
    #[prost(enumeration = "DataType", tag = "1")]
    pub datatype: i32,
}
/// (== suppress_warning documentation-presence ==)
/// LINT.IfChange
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataType {
    /// Not a legal value for DataType. Used to indicate a DataType field
    /// has not been set.
    DtInvalid = 0,
    /// Data types that all computation devices are expected to be
    /// capable to support.
    DtFloat = 1,
    DtDouble = 2,
    DtInt32 = 3,
    DtUint8 = 4,
    DtInt16 = 5,
    DtInt8 = 6,
    DtString = 7,
    /// Single-precision complex
    DtComplex64 = 8,
    DtInt64 = 9,
    DtBool = 10,
    /// Quantized int8
    DtQint8 = 11,
    /// Quantized uint8
    DtQuint8 = 12,
    /// Quantized int32
    DtQint32 = 13,
    /// Float32 truncated to 16 bits.
    DtBfloat16 = 14,
    /// Quantized int16
    DtQint16 = 15,
    /// Quantized uint16
    DtQuint16 = 16,
    DtUint16 = 17,
    /// Double-precision complex
    DtComplex128 = 18,
    DtHalf = 19,
    DtResource = 20,
    /// Arbitrary C++ data types
    DtVariant = 21,
    DtUint32 = 22,
    DtUint64 = 23,
    /// 5 exponent bits, 2 mantissa bits.
    DtFloat8E5m2 = 24,
    /// 4 exponent bits, 3 mantissa bits, finite-only, with
    /// 2 NaNs (0bS1111111).
    DtFloat8E4m3fn = 25,
    /// Do not use! These are only for parameters. Every enum above
    /// should have a corresponding value below (verified by types_test).
    DtFloatRef = 101,
    DtDoubleRef = 102,
    DtInt32Ref = 103,
    DtUint8Ref = 104,
    DtInt16Ref = 105,
    DtInt8Ref = 106,
    DtStringRef = 107,
    DtComplex64Ref = 108,
    DtInt64Ref = 109,
    DtBoolRef = 110,
    DtQint8Ref = 111,
    DtQuint8Ref = 112,
    DtQint32Ref = 113,
    DtBfloat16Ref = 114,
    DtQint16Ref = 115,
    DtQuint16Ref = 116,
    DtUint16Ref = 117,
    DtComplex128Ref = 118,
    DtHalfRef = 119,
    DtResourceRef = 120,
    DtVariantRef = 121,
    DtUint32Ref = 122,
    DtUint64Ref = 123,
    DtFloat8E5m2Ref = 124,
    DtFloat8E4m3fnRef = 125,
}
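// Illustrative sketch, not part of the generated code: constructing a
// `TensorShapeProto` for the 30 x 40 example given in the comments above, plus a
// shape of unknown rank. The dimension names are optional and chosen arbitrarily.
#[cfg(test)]
mod tensor_shape_usage {
    use super::*;

    #[test]
    fn build_shapes() {
        // A fully known 30 x 40 shape with named dimensions.
        let known = TensorShapeProto {
            dim: vec![
                tensor_shape_proto::Dim { size: 30, name: "input".to_string() },
                tensor_shape_proto::Dim { size: 40, name: "output".to_string() },
            ],
            unknown_rank: false,
        };
        assert_eq!(known.dim.len(), 2);

        // A shape whose rank is unknown: per the field docs, `dim` must stay empty.
        let unknown = TensorShapeProto { dim: vec![], unknown_rank: true };
        assert!(unknown.dim.is_empty());
    }
}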
impl DataType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            DataType::DtInvalid => "DT_INVALID",
            DataType::DtFloat => "DT_FLOAT",
            DataType::DtDouble => "DT_DOUBLE",
            DataType::DtInt32 => "DT_INT32",
            DataType::DtUint8 => "DT_UINT8",
            DataType::DtInt16 => "DT_INT16",
            DataType::DtInt8 => "DT_INT8",
            DataType::DtString => "DT_STRING",
            DataType::DtComplex64 => "DT_COMPLEX64",
            DataType::DtInt64 => "DT_INT64",
            DataType::DtBool => "DT_BOOL",
            DataType::DtQint8 => "DT_QINT8",
            DataType::DtQuint8 => "DT_QUINT8",
            DataType::DtQint32 => "DT_QINT32",
            DataType::DtBfloat16 => "DT_BFLOAT16",
            DataType::DtQint16 => "DT_QINT16",
            DataType::DtQuint16 => "DT_QUINT16",
            DataType::DtUint16 => "DT_UINT16",
            DataType::DtComplex128 => "DT_COMPLEX128",
            DataType::DtHalf => "DT_HALF",
            DataType::DtResource => "DT_RESOURCE",
            DataType::DtVariant => "DT_VARIANT",
            DataType::DtUint32 => "DT_UINT32",
            DataType::DtUint64 => "DT_UINT64",
            DataType::DtFloat8E5m2 => "DT_FLOAT8_E5M2",
            DataType::DtFloat8E4m3fn => "DT_FLOAT8_E4M3FN",
            DataType::DtFloatRef => "DT_FLOAT_REF",
            DataType::DtDoubleRef => "DT_DOUBLE_REF",
            DataType::DtInt32Ref => "DT_INT32_REF",
            DataType::DtUint8Ref => "DT_UINT8_REF",
            DataType::DtInt16Ref => "DT_INT16_REF",
            DataType::DtInt8Ref => "DT_INT8_REF",
            DataType::DtStringRef => "DT_STRING_REF",
            DataType::DtComplex64Ref => "DT_COMPLEX64_REF",
            DataType::DtInt64Ref => "DT_INT64_REF",
            DataType::DtBoolRef => "DT_BOOL_REF",
            DataType::DtQint8Ref => "DT_QINT8_REF",
            DataType::DtQuint8Ref => "DT_QUINT8_REF",
            DataType::DtQint32Ref => "DT_QINT32_REF",
            DataType::DtBfloat16Ref => "DT_BFLOAT16_REF",
            DataType::DtQint16Ref => "DT_QINT16_REF",
            DataType::DtQuint16Ref => "DT_QUINT16_REF",
            DataType::DtUint16Ref => "DT_UINT16_REF",
            DataType::DtComplex128Ref => "DT_COMPLEX128_REF",
            DataType::DtHalfRef => "DT_HALF_REF",
            DataType::DtResourceRef => "DT_RESOURCE_REF",
            DataType::DtVariantRef => "DT_VARIANT_REF",
            DataType::DtUint32Ref => "DT_UINT32_REF",
            DataType::DtUint64Ref => "DT_UINT64_REF",
            DataType::DtFloat8E5m2Ref => "DT_FLOAT8_E5M2_REF",
            DataType::DtFloat8E4m3fnRef => "DT_FLOAT8_E4M3FN_REF",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DT_INVALID" => Some(Self::DtInvalid),
            "DT_FLOAT" => Some(Self::DtFloat),
            "DT_DOUBLE" => Some(Self::DtDouble),
            "DT_INT32" => Some(Self::DtInt32),
            "DT_UINT8" => Some(Self::DtUint8),
            "DT_INT16" => Some(Self::DtInt16),
            "DT_INT8" => Some(Self::DtInt8),
            "DT_STRING" => Some(Self::DtString),
            "DT_COMPLEX64" => Some(Self::DtComplex64),
            "DT_INT64" => Some(Self::DtInt64),
            "DT_BOOL" => Some(Self::DtBool),
            "DT_QINT8" => Some(Self::DtQint8),
            "DT_QUINT8" => Some(Self::DtQuint8),
            "DT_QINT32" => Some(Self::DtQint32),
            "DT_BFLOAT16" => Some(Self::DtBfloat16),
            "DT_QINT16" => Some(Self::DtQint16),
            "DT_QUINT16" => Some(Self::DtQuint16),
            "DT_UINT16" => Some(Self::DtUint16),
            "DT_COMPLEX128" => Some(Self::DtComplex128),
            "DT_HALF" => Some(Self::DtHalf),
            "DT_RESOURCE" => Some(Self::DtResource),
            "DT_VARIANT" => Some(Self::DtVariant),
            "DT_UINT32" => Some(Self::DtUint32),
            "DT_UINT64" => Some(Self::DtUint64),
            "DT_FLOAT8_E5M2" => Some(Self::DtFloat8E5m2),
            "DT_FLOAT8_E4M3FN" => Some(Self::DtFloat8E4m3fn),
            "DT_FLOAT_REF" => Some(Self::DtFloatRef),
            "DT_DOUBLE_REF" => Some(Self::DtDoubleRef),
            "DT_INT32_REF" => Some(Self::DtInt32Ref),
            "DT_UINT8_REF" => Some(Self::DtUint8Ref),
            "DT_INT16_REF" => Some(Self::DtInt16Ref),
            "DT_INT8_REF" => Some(Self::DtInt8Ref),
            "DT_STRING_REF" => Some(Self::DtStringRef),
            "DT_COMPLEX64_REF" => Some(Self::DtComplex64Ref),
            "DT_INT64_REF" => Some(Self::DtInt64Ref),
            "DT_BOOL_REF" => Some(Self::DtBoolRef),
            "DT_QINT8_REF" => Some(Self::DtQint8Ref),
            "DT_QUINT8_REF" => Some(Self::DtQuint8Ref),
            "DT_QINT32_REF" => Some(Self::DtQint32Ref),
            "DT_BFLOAT16_REF" => Some(Self::DtBfloat16Ref),
            "DT_QINT16_REF" => Some(Self::DtQint16Ref),
            "DT_QUINT16_REF" => Some(Self::DtQuint16Ref),
            "DT_UINT16_REF" => Some(Self::DtUint16Ref),
            "DT_COMPLEX128_REF" => Some(Self::DtComplex128Ref),
            "DT_HALF_REF" => Some(Self::DtHalfRef),
            "DT_RESOURCE_REF" => Some(Self::DtResourceRef),
            "DT_VARIANT_REF" => Some(Self::DtVariantRef),
            "DT_UINT32_REF" => Some(Self::DtUint32Ref),
            "DT_UINT64_REF" => Some(Self::DtUint64Ref),
            "DT_FLOAT8_E5M2_REF" => Some(Self::DtFloat8E5m2Ref),
            "DT_FLOAT8_E4M3FN_REF" => Some(Self::DtFloat8E4m3fnRef),
            _ => None,
        }
    }
}
/// Protocol buffer representing a handle to a tensorflow resource. Handles are
/// not valid across executions, but can be serialized back and forth from within
/// a single run.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceHandleProto {
    /// Unique name for the device containing the resource.
    #[prost(string, tag = "1")]
    pub device: ::prost::alloc::string::String,
    /// Container in which this resource is placed.
    #[prost(string, tag = "2")]
    pub container: ::prost::alloc::string::String,
    /// Unique name of this resource.
    #[prost(string, tag = "3")]
    pub name: ::prost::alloc::string::String,
    /// Hash code for the type of the resource. Is only valid in the same device
    /// and in the same execution.
    #[prost(uint64, tag = "4")]
    pub hash_code: u64,
    /// For debug-only, the name of the type pointed to by this handle, if
    /// available.
    #[prost(string, tag = "5")]
    pub maybe_type_name: ::prost::alloc::string::String,
    /// Data types and shapes for the underlying resource.
    #[prost(message, repeated, tag = "6")]
    pub dtypes_and_shapes: ::prost::alloc::vec::Vec<
        resource_handle_proto::DtypeAndShape,
    >,
}
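// Illustrative sketch, not part of the generated code: the stable string names from
// `as_str_name`/`from_str_name` round-trip, and the enum is stored as a plain `i32`
// in enum-typed proto fields (such as `SerializedDType::datatype` above).
#[cfg(test)]
mod data_type_name_usage {
    use super::*;

    #[test]
    fn str_names_roundtrip() {
        assert_eq!(DataType::DtFloat.as_str_name(), "DT_FLOAT");
        assert_eq!(DataType::from_str_name("DT_INT64"), Some(DataType::DtInt64));
        assert_eq!(DataType::from_str_name("NOT_A_DTYPE"), None);

        // Enum-typed proto fields are `i32` on the Rust side.
        let serialized = SerializedDType { datatype: DataType::DtBool as i32 };
        assert_eq!(serialized.datatype, 10);
    }
}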
/// Nested message and enum types in `ResourceHandleProto`.
pub mod resource_handle_proto {
    /// Protocol buffer representing a pair of (data type, tensor shape).
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct DtypeAndShape {
        #[prost(enumeration = "super::DataType", tag = "1")]
        pub dtype: i32,
        #[prost(message, optional, tag = "2")]
        pub shape: ::core::option::Option<super::TensorShapeProto>,
    }
}
/// Protocol buffer representing a tensor.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TensorProto {
    #[prost(enumeration = "DataType", tag = "1")]
    pub dtype: i32,
    /// Shape of the tensor. TODO(touts): sort out the 0-rank issues.
    #[prost(message, optional, tag = "2")]
    pub tensor_shape: ::core::option::Option<TensorShapeProto>,
    /// Version number.
    ///
    /// In version 0, if the "repeated xxx" representations contain only one
    /// element, that element is repeated to fill the shape. This makes it easy
    /// to represent a constant Tensor with a single value.
    #[prost(int32, tag = "3")]
    pub version_number: i32,
    /// Serialized raw tensor content from either Tensor::AsProtoTensorContent or
    /// memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation
    /// can be used for all tensor types. The purpose of this representation is to
    /// reduce serialization overhead during RPC call by avoiding serialization of
    /// many repeated small items.
    #[prost(bytes = "vec", tag = "4")]
    pub tensor_content: ::prost::alloc::vec::Vec<u8>,
    /// DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll
    /// have some pointless zero padding for each value here.
    #[prost(int32, repeated, tag = "13")]
    pub half_val: ::prost::alloc::vec::Vec<i32>,
    /// DT_FLOAT.
    #[prost(float, repeated, tag = "5")]
    pub float_val: ::prost::alloc::vec::Vec<f32>,
    /// DT_DOUBLE.
    #[prost(double, repeated, tag = "6")]
    pub double_val: ::prost::alloc::vec::Vec<f64>,
    /// DT_INT32, DT_INT16, DT_UINT16, DT_INT8, DT_UINT8.
    #[prost(int32, repeated, tag = "7")]
    pub int_val: ::prost::alloc::vec::Vec<i32>,
    /// DT_STRING
    #[prost(bytes = "vec", repeated, tag = "8")]
    pub string_val: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real
    /// and imaginary parts of i-th single precision complex.
    #[prost(float, repeated, tag = "9")]
    pub scomplex_val: ::prost::alloc::vec::Vec<f32>,
    /// DT_INT64
    #[prost(int64, repeated, tag = "10")]
    pub int64_val: ::prost::alloc::vec::Vec<i64>,
    /// DT_BOOL
    #[prost(bool, repeated, tag = "11")]
    pub bool_val: ::prost::alloc::vec::Vec<bool>,
    /// DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
    /// and imaginary parts of i-th double precision complex.
    #[prost(double, repeated, tag = "12")]
    pub dcomplex_val: ::prost::alloc::vec::Vec<f64>,
    /// DT_RESOURCE
    #[prost(message, repeated, tag = "14")]
    pub resource_handle_val: ::prost::alloc::vec::Vec<ResourceHandleProto>,
    /// DT_VARIANT
    #[prost(message, repeated, tag = "15")]
    pub variant_val: ::prost::alloc::vec::Vec<VariantTensorDataProto>,
    /// DT_UINT32
    #[prost(uint32, repeated, tag = "16")]
    pub uint32_val: ::prost::alloc::vec::Vec<u32>,
    /// DT_UINT64
    #[prost(uint64, repeated, tag = "17")]
    pub uint64_val: ::prost::alloc::vec::Vec<u64>,
    /// DT_FLOAT8_*, use variable-sized set of bytes
    /// (i.e. the equivalent of repeated uint8, if such a thing existed).
    #[prost(bytes = "vec", tag = "18")]
    pub float8_val: ::prost::alloc::vec::Vec<u8>,
}
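// Illustrative sketch, not part of the generated code: a small DT_FLOAT tensor
// expressed through the typed `float_val` field, with its shape and dtype set.
// The shape and values are arbitrary.
#[cfg(test)]
mod tensor_proto_usage {
    use super::*;

    #[test]
    fn build_float_tensor() {
        let tensor = TensorProto {
            dtype: DataType::DtFloat as i32,
            tensor_shape: Some(TensorShapeProto {
                dim: vec![
                    tensor_shape_proto::Dim { size: 2, name: String::new() },
                    tensor_shape_proto::Dim { size: 2, name: String::new() },
                ],
                unknown_rank: false,
            }),
            // For DT_FLOAT the values go in `float_val`; `tensor_content` could
            // instead carry the same data as raw bytes.
            float_val: vec![1.0, 2.0, 3.0, 4.0],
            ..Default::default()
        };
        assert_eq!(tensor.float_val.len(), 4);
    }
}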
#[prost(string, tag = "1")] pub type_name: ::prost::alloc::string::String, /// Portions of the object that are not Tensors. #[prost(bytes = "vec", tag = "2")] pub metadata: ::prost::alloc::vec::Vec, /// Tensors contained within objects being serialized. #[prost(message, repeated, tag = "3")] pub tensors: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VarLenFeatureProto { #[prost(enumeration = "DataType", tag = "1")] pub dtype: i32, #[prost(string, tag = "2")] pub values_output_tensor_name: ::prost::alloc::string::String, #[prost(string, tag = "3")] pub indices_output_tensor_name: ::prost::alloc::string::String, #[prost(string, tag = "4")] pub shapes_output_tensor_name: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FixedLenFeatureProto { #[prost(enumeration = "DataType", tag = "1")] pub dtype: i32, #[prost(message, optional, tag = "2")] pub shape: ::core::option::Option, #[prost(message, optional, tag = "3")] pub default_value: ::core::option::Option, #[prost(string, tag = "4")] pub values_output_tensor_name: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FeatureConfiguration { #[prost(oneof = "feature_configuration::Config", tags = "1, 2")] pub config: ::core::option::Option, } /// Nested message and enum types in `FeatureConfiguration`. pub mod feature_configuration { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Config { #[prost(message, tag = "1")] FixedLenFeature(super::FixedLenFeatureProto), #[prost(message, tag = "2")] VarLenFeature(super::VarLenFeatureProto), } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExampleParserConfiguration { #[prost(map = "string, message", tag = "1")] pub feature_map: ::std::collections::HashMap< ::prost::alloc::string::String, FeatureConfiguration, >, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AllocationDescription { /// Total number of bytes requested #[prost(int64, tag = "1")] pub requested_bytes: i64, /// Total number of bytes allocated if known #[prost(int64, tag = "2")] pub allocated_bytes: i64, /// Name of the allocator used #[prost(string, tag = "3")] pub allocator_name: ::prost::alloc::string::String, /// Identifier of the allocated buffer if known #[prost(int64, tag = "4")] pub allocation_id: i64, /// Set if this tensor only has one remaining reference #[prost(bool, tag = "5")] pub has_single_reference: bool, /// Address of the allocation. #[prost(uint64, tag = "6")] pub ptr: u64, } /// Protocol buffer representing the value for an attr used to configure an Op. /// Comment indicates the corresponding attr type. Only the field matching the /// attr type may be filled. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AttrValue { #[prost(oneof = "attr_value::Value", tags = "2, 3, 4, 5, 6, 7, 8, 1, 10, 9")] pub value: ::core::option::Option, } /// Nested message and enum types in `AttrValue`. 
/// Nested message and enum types in `AttrValue`.
pub mod attr_value {
    /// LINT.IfChange
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct ListValue {
        /// "list(string)"
        #[prost(bytes = "vec", repeated, tag = "2")]
        pub s: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
        /// "list(int)"
        #[prost(int64, repeated, tag = "3")]
        pub i: ::prost::alloc::vec::Vec<i64>,
        /// "list(float)"
        #[prost(float, repeated, tag = "4")]
        pub f: ::prost::alloc::vec::Vec<f32>,
        /// "list(bool)"
        #[prost(bool, repeated, tag = "5")]
        pub b: ::prost::alloc::vec::Vec<bool>,
        /// "list(type)"
        #[prost(enumeration = "super::DataType", repeated, tag = "6")]
        pub r#type: ::prost::alloc::vec::Vec<i32>,
        /// "list(shape)"
        #[prost(message, repeated, tag = "7")]
        pub shape: ::prost::alloc::vec::Vec<super::TensorShapeProto>,
        /// "list(tensor)"
        #[prost(message, repeated, tag = "8")]
        pub tensor: ::prost::alloc::vec::Vec<super::TensorProto>,
        /// "list(attr)"
        #[prost(message, repeated, tag = "9")]
        pub func: ::prost::alloc::vec::Vec<super::NameAttrList>,
    }
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Value {
        /// "string"
        #[prost(bytes, tag = "2")]
        S(::prost::alloc::vec::Vec<u8>),
        /// "int"
        #[prost(int64, tag = "3")]
        I(i64),
        /// "float"
        #[prost(float, tag = "4")]
        F(f32),
        /// "bool"
        #[prost(bool, tag = "5")]
        B(bool),
        /// "type"
        #[prost(enumeration = "super::DataType", tag = "6")]
        Type(i32),
        /// "shape"
        #[prost(message, tag = "7")]
        Shape(super::TensorShapeProto),
        /// "tensor"
        #[prost(message, tag = "8")]
        Tensor(super::TensorProto),
        /// any "list(...)"
        #[prost(message, tag = "1")]
        List(ListValue),
        /// "func" represents a function. func.name is a function's name or
        /// a primitive op's name. func.attr.first is the name of an attr
        /// defined for that function. func.attr.second is the value for
        /// that attr in the instantiation.
        #[prost(message, tag = "10")]
        Func(super::NameAttrList),
        /// This is a placeholder only used in nodes defined inside a
        /// function. It indicates the attr value will be supplied when
        /// the function is instantiated. For example, let us suppose a
        /// node "N" in function "FN". "N" has an attr "A" with value
        /// placeholder = "foo". When FN is instantiated with attr "foo"
        /// set to "bar", the instantiated node N's attr A will have been
        /// given the value "bar".
        #[prost(string, tag = "9")]
        Placeholder(::prost::alloc::string::String),
    }
}
/// A list of attr names and their values. The whole list is attached
/// with a string name. E.g., MatMul\[T=float\].
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NameAttrList {
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    #[prost(map = "string, message", tag = "2")]
    pub attr: ::std::collections::HashMap<::prost::alloc::string::String, AttrValue>,
}
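// Illustrative sketch, not part of the generated code: the `MatMul[T=float]`
// example from the `NameAttrList` comment, expressed with the `AttrValue` oneof.
// Exactly one `attr_value::Value` variant is set per attr; the attr names follow
// the TensorFlow MatMul op but are only illustrative here.
#[cfg(test)]
mod attr_value_usage {
    use super::*;

    #[test]
    fn build_name_attr_list() {
        let mut attr = ::std::collections::HashMap::new();
        attr.insert(
            "T".to_string(),
            AttrValue { value: Some(attr_value::Value::Type(DataType::DtFloat as i32)) },
        );
        attr.insert(
            "transpose_a".to_string(),
            AttrValue { value: Some(attr_value::Value::B(false)) },
        );
        let func = NameAttrList { name: "MatMul".to_string(), attr };
        assert_eq!(func.attr.len(), 2);
    }
}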
/// Used to specify and override the default API & behavior in the
/// generated code for client languages, from what you would get from
/// the OpDef alone. There will be a set of ApiDefs that are common
/// to all client languages, and another set per client language.
/// The per-client-language ApiDefs will inherit values from the
/// common ApiDefs which it can either replace or modify.
///
/// We separate the API definition from the OpDef so we can evolve the
/// API while remaining backwards compatible when interpreting old
/// graphs. Overrides go in an "api_def.pbtxt" file with a text-format
/// ApiDefs message.
///
/// WARNING: Be *very* careful changing the API for any existing op --
/// you can change the semantics of existing code. These changes may
/// need to wait until a major release of TensorFlow to avoid breaking
/// our compatibility promises.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ApiDef {
    /// Name of the op (in the OpDef) to specify the API for.
    #[prost(string, tag = "1")]
    pub graph_op_name: ::prost::alloc::string::String,
    /// If this op is deprecated, set deprecation message to the message
    /// that should be logged when this op is used.
    /// The message should indicate alternative op to use, if any.
    #[prost(string, tag = "12")]
    pub deprecation_message: ::prost::alloc::string::String,
    /// Major version when the op will be deleted. For e.g. set this
    /// value to 2 if op API should be removed in TensorFlow 2.0 and
    /// deprecated in versions before that.
    #[prost(int32, tag = "13")]
    pub deprecation_version: i32,
    #[prost(enumeration = "api_def::Visibility", tag = "2")]
    pub visibility: i32,
    #[prost(message, repeated, tag = "3")]
    pub endpoint: ::prost::alloc::vec::Vec<api_def::Endpoint>,
    #[prost(message, repeated, tag = "4")]
    pub in_arg: ::prost::alloc::vec::Vec<api_def::Arg>,
    #[prost(message, repeated, tag = "5")]
    pub out_arg: ::prost::alloc::vec::Vec<api_def::Arg>,
    /// List of original in_arg names to specify new argument order.
    /// Length of arg_order should be either empty to keep current order
    /// or match size of in_arg.
    #[prost(string, repeated, tag = "11")]
    pub arg_order: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    #[prost(message, repeated, tag = "6")]
    pub attr: ::prost::alloc::vec::Vec<api_def::Attr>,
    /// One-line human-readable description of what the Op does.
    #[prost(string, tag = "7")]
    pub summary: ::prost::alloc::string::String,
    /// Additional, longer human-readable description of what the Op does.
    #[prost(string, tag = "8")]
    pub description: ::prost::alloc::string::String,
    /// Modify an existing/inherited description by adding text to the beginning
    /// or end.
    #[prost(string, tag = "9")]
    pub description_prefix: ::prost::alloc::string::String,
    #[prost(string, tag = "10")]
    pub description_suffix: ::prost::alloc::string::String,
}
/// Nested message and enum types in `ApiDef`.
pub mod api_def {
    /// If you specify any endpoint, this will replace all of the
    /// inherited endpoints. The first endpoint should be the
    /// "canonical" endpoint, and should not be deprecated (unless all
    /// endpoints are deprecated).
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Endpoint {
        /// Name should be either like "CamelCaseName" or
        /// "Package.CamelCaseName". Client-language-specific ApiDefs may
        /// use a snake_case convention instead of CamelCase.
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        /// Set if this endpoint is deprecated. If set to true, a message suggesting
        /// to use a non-deprecated endpoint instead will be printed. If all
        /// endpoints are deprecated, set deprecation_message in ApiDef instead.
        #[prost(bool, tag = "3")]
        pub deprecated: bool,
        /// Major version when an endpoint will be deleted. For e.g. set this
        /// value to 2 if endpoint should be removed in TensorFlow 2.0 and
        /// deprecated in versions before that.
        #[prost(int32, tag = "4")]
        pub deprecation_version: i32,
    }
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Arg {
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        /// Change the name used to access this arg in the API from what
        /// is used in the GraphDef.
        /// Note that these names in `backticks`
        /// will also be replaced in the summary & description fields.
        #[prost(string, tag = "2")]
        pub rename_to: ::prost::alloc::string::String,
        /// Note: this will replace any inherited arg doc. There is no
        /// current way of modifying arg descriptions (other than replacing
        /// them entirely) as can be done with op descriptions.
        #[prost(string, tag = "3")]
        pub description: ::prost::alloc::string::String,
    }
    /// Description of the graph-construction-time configuration of this
    /// Op. That is to say, this describes the attr fields that will
    /// be specified in the NodeDef.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Attr {
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        /// Change the name used to access this attr in the API from what
        /// is used in the GraphDef. Note that these names in `backticks`
        /// will also be replaced in the summary & description fields.
        #[prost(string, tag = "2")]
        pub rename_to: ::prost::alloc::string::String,
        /// Specify a new default value to use for this attr. This default
        /// will be used when creating new graphs, as opposed to the
        /// default in the OpDef, which will be used when interpreting old
        /// GraphDefs.
        #[prost(message, optional, tag = "3")]
        pub default_value: ::core::option::Option<super::AttrValue>,
        /// Note: this will replace any inherited attr doc, there is no current
        /// way of modifying attr descriptions as can be done with op descriptions.
        #[prost(string, tag = "4")]
        pub description: ::prost::alloc::string::String,
    }
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum Visibility {
        /// Normally this is "VISIBLE" unless you are inheriting a
        /// different value from another ApiDef.
        DefaultVisibility = 0,
        /// Publicly visible in the API.
        Visible = 1,
        /// Do not include this op in the generated API. If visibility is
        /// set to 'SKIP', other fields are ignored for this op.
        Skip = 2,
        /// Hide this op by putting it into an internal namespace (or whatever
        /// is appropriate in the target language).
        Hidden = 3,
    }
    impl Visibility {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Visibility::DefaultVisibility => "DEFAULT_VISIBILITY",
                Visibility::Visible => "VISIBLE",
                Visibility::Skip => "SKIP",
                Visibility::Hidden => "HIDDEN",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "DEFAULT_VISIBILITY" => Some(Self::DefaultVisibility),
                "VISIBLE" => Some(Self::Visible),
                "SKIP" => Some(Self::Skip),
                "HIDDEN" => Some(Self::Hidden),
                _ => None,
            }
        }
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ApiDefs {
    #[prost(message, repeated, tag = "1")]
    pub op: ::prost::alloc::vec::Vec<ApiDef>,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CostGraphDef {
    #[prost(message, repeated, tag = "1")]
    pub node: ::prost::alloc::vec::Vec<cost_graph_def::Node>,
    #[prost(message, repeated, tag = "2")]
    pub cost: ::prost::alloc::vec::Vec<cost_graph_def::AggregatedCost>,
}
/// Nested message and enum types in `CostGraphDef`.
pub mod cost_graph_def {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Node {
        /// The name of the node. Names are globally unique.
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        /// The device of the node. Can be empty if the node is mapped to the
        /// default partition or partitioning hasn't been run yet.
        #[prost(string, tag = "2")]
        pub device: ::prost::alloc::string::String,
        /// The id of the node. Node ids are only unique inside a partition.
        #[prost(int32, tag = "3")]
        pub id: i32,
        #[prost(message, repeated, tag = "4")]
        pub input_info: ::prost::alloc::vec::Vec<node::InputInfo>,
        #[prost(message, repeated, tag = "5")]
        pub output_info: ::prost::alloc::vec::Vec<node::OutputInfo>,
        /// Temporary memory used by this node.
        #[prost(int64, tag = "6")]
        pub temporary_memory_size: i64,
        /// Persistent memory used by this node.
        #[prost(int64, tag = "12")]
        pub persistent_memory_size: i64,
        #[deprecated]
        #[prost(int64, tag = "10")]
        pub host_temp_memory_size: i64,
        #[deprecated]
        #[prost(int64, tag = "11")]
        pub device_temp_memory_size: i64,
        #[deprecated]
        #[prost(int64, tag = "16")]
        pub device_persistent_memory_size: i64,
        /// Estimate of the computational cost of this node, in microseconds.
        #[prost(int64, tag = "9")]
        pub compute_cost: i64,
        /// Analytical estimate of the computational cost of this node, in
        /// microseconds.
        #[prost(int64, tag = "14")]
        pub compute_time: i64,
        /// Analytical estimate of the memory access cost of this node, in
        /// microseconds.
        #[prost(int64, tag = "15")]
        pub memory_time: i64,
        /// If true, the output is permanent: it can't be discarded, because this
        /// node is part of the "final output". Nodes may depend on final nodes.
        #[prost(bool, tag = "7")]
        pub is_final: bool,
        /// Ids of the control inputs for this node.
        #[prost(int32, repeated, tag = "8")]
        pub control_input: ::prost::alloc::vec::Vec<i32>,
        /// Are the costs inaccurate?
        #[prost(bool, tag = "17")]
        pub inaccurate: bool,
    }
    /// Nested message and enum types in `Node`.
    pub mod node {
        /// Inputs of this node. They must be executed before this node can be
        /// executed. An input is a particular output of another node, specified
        /// by the node id and the output index.
        #[allow(clippy::derive_partial_eq_without_eq)]
        #[derive(Clone, PartialEq, ::prost::Message)]
        pub struct InputInfo {
            #[prost(int32, tag = "1")]
            pub preceding_node: i32,
            #[prost(int32, tag = "2")]
            pub preceding_port: i32,
        }
        /// Outputs of this node.
        #[allow(clippy::derive_partial_eq_without_eq)]
        #[derive(Clone, PartialEq, ::prost::Message)]
        pub struct OutputInfo {
            #[prost(int64, tag = "1")]
            pub size: i64,
            /// If >= 0, the output is an alias of an input. Note that an alias input
            /// may itself be an alias. The algorithm will therefore need to follow
            /// those pointers.
            #[prost(int64, tag = "2")]
            pub alias_input_port: i64,
            #[prost(message, optional, tag = "3")]
            pub shape: ::core::option::Option<super::super::TensorShapeProto>,
            #[prost(enumeration = "super::super::DataType", tag = "4")]
            pub dtype: i32,
        }
    }
    /// Total cost of this graph, typically used for balancing decisions.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct AggregatedCost {
        /// Aggregated cost value.
        #[prost(float, tag = "1")]
        pub cost: f32,
        /// Aggregated cost dimension (e.g. 'memory', 'compute', 'network').
#[prost(string, tag = "2")] pub dimension: ::prost::alloc::string::String, } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InterconnectLink { #[prost(int32, tag = "1")] pub device_id: i32, #[prost(string, tag = "2")] pub r#type: ::prost::alloc::string::String, #[prost(int32, tag = "3")] pub strength: i32, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LocalLinks { #[prost(message, repeated, tag = "1")] pub link: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeviceLocality { /// Optional bus locality of device. Default value of 0 means /// no specific locality. Specific localities are indexed from 1. #[prost(int32, tag = "1")] pub bus_id: i32, /// Optional NUMA locality of device. #[prost(int32, tag = "2")] pub numa_node: i32, /// Optional local interconnect links to other devices. #[prost(message, optional, tag = "3")] pub links: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeviceAttributes { /// Fully specified name of the device within a cluster. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// String representation of device_type. #[prost(string, tag = "2")] pub device_type: ::prost::alloc::string::String, /// Memory capacity of device in bytes. #[prost(int64, tag = "4")] pub memory_limit: i64, /// Platform-specific data about device that may be useful /// for supporting efficient data transfers. #[prost(message, optional, tag = "5")] pub locality: ::core::option::Option, /// A device is assigned a global unique number each time it is /// initialized. "incarnation" should never be 0. #[prost(fixed64, tag = "6")] pub incarnation: u64, /// String representation of the physical device that this device maps to. #[prost(string, tag = "7")] pub physical_device_desc: ::prost::alloc::string::String, /// A physical device ID for use in XLA DeviceAssignments, unique across /// clients in a multi-client setup. Set to -1 if unavailable, non-negative /// otherwise. #[prost(int64, tag = "8")] pub xla_global_id: i64, } /// Highly experimental and very likely to change. /// This encoding uses tags instead of dedicated messages for regularity. In /// particular the encoding imposes no restrictions on what the parameters of any /// type should be, which in particular needs to be true for type symbols. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FullTypeDef { /// The principal type represented by this object. This may be a concrete type /// (Tensor, Dataset) a type variable (used for dependent types) a type /// symbol (Any, Union). See FullTypeId for details. #[prost(enumeration = "FullTypeId", tag = "1")] pub type_id: i32, #[prost(message, repeated, tag = "2")] pub args: ::prost::alloc::vec::Vec, /// Literal values of this type object, if the type admits one. /// For example, a type variable admits a string attribute - its name. /// Shape-related types may admit int attributes - their static shape values. /// Fields for more data types to be added as needed. #[prost(oneof = "full_type_def::Attr", tags = "3, 4")] pub attr: ::core::option::Option, } /// Nested message and enum types in `FullTypeDef`. pub mod full_type_def { /// Literal values of this type object, if the type admits one. 
/// Highly experimental and very likely to change.
/// This encoding uses tags instead of dedicated messages for regularity. In
/// particular the encoding imposes no restrictions on what the parameters of any
/// type should be, which in particular needs to be true for type symbols.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FullTypeDef {
    /// The principal type represented by this object. This may be a concrete type
    /// (Tensor, Dataset) a type variable (used for dependent types) a type
    /// symbol (Any, Union). See FullTypeId for details.
    #[prost(enumeration = "FullTypeId", tag = "1")]
    pub type_id: i32,
    #[prost(message, repeated, tag = "2")]
    pub args: ::prost::alloc::vec::Vec<FullTypeDef>,
    /// Literal values of this type object, if the type admits one.
    /// For example, a type variable admits a string attribute - its name.
    /// Shape-related types may admit int attributes - their static shape values.
    /// Fields for more data types to be added as needed.
    #[prost(oneof = "full_type_def::Attr", tags = "3, 4")]
    pub attr: ::core::option::Option<full_type_def::Attr>,
}
/// Nested message and enum types in `FullTypeDef`.
pub mod full_type_def {
    /// Literal values of this type object, if the type admits one.
    /// For example, a type variable admits a string attribute - its name.
    /// Shape-related types may admit int attributes - their static shape values.
    /// Fields for more data types to be added as needed.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Attr {
        #[prost(string, tag = "3")]
        S(::prost::alloc::string::String),
        /// TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
        #[prost(int64, tag = "4")]
        I(i64),
    }
}
/// LINT.IfChange
/// Experimental. Represents the complete type information of a TensorFlow value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum FullTypeId {
    /// The default represents an uninitialized value.
    TftUnset = 0,
    /// Type variables may serve as placeholder for any other type ID in type
    /// templates.
    ///
    /// Examples:
    ///   TFT_DATASET\[TFT_VAR["T"]\] is a Dataset returning a type indicated by "T".
    ///   TFT_TENSOR\[TFT_VAR["T"]\] is a Tensor of an element type indicated by "T".
    ///   TFT_TENSOR\[TFT_VAR["T"]\], TFT_TENSOR\[TFT_VAR["T"]\] are two tensors of
    ///     identical element types.
    ///   TFT_TENSOR\[TFT_VAR["P"]\], TFT_TENSOR\[TFT_VAR["Q"]\] are two tensors of
    ///     independent element types.
    TftVar = 1,
    /// Wildcard type. Describes a parameter of unknown type. In TensorFlow, that
    /// can mean either a "Top" type (accepts any type), or a dynamically typed
    /// object whose type is unknown in context.
    /// Important: "unknown" does not necessarily mean undeterminable!
    TftAny = 2,
    /// The algebraic product type. This is an algebraic type that may be used just
    /// for logical grouping. Not to be confused with TFT_TUPLE which describes a
    /// concrete object of several elements.
    ///
    /// Example:
    ///   TFT_DATASET\[TFT_PRODUCT[TFT_TENSOR[TFT_INT32\], TFT_TENSOR\[TFT_FLOAT64]\]]
    ///   is a Dataset producing two tensors, an integer one and a float one.
    TftProduct = 3,
    /// Represents a named field, with the name stored in the attribute.
    ///
    /// Parametrization:
    ///   TFT_NAMED\[<type>\]{<name>}
    ///   * <type> is the type of the field
    ///   * <name> is the field name, as string (though can theoretically be an int
    ///     as well)
    ///
    /// Example:
    ///   TFT_RECORD[
    ///     TFT_NAMED\[TFT_TENSOR[TFT_INT32]\]{'foo'},
    ///     TFT_NAMED\[TFT_TENSOR[TFT_FLOAT32]\]{'bar'},
    ///   ]
    ///   is a structure with two fields, an int tensor "foo" and a float tensor
    ///   "bar".
    TftNamed = 4,
    /// Template definition. Expands the variables by repeating a template as
    /// arguments of container.
    ///
    /// Parametrization:
    ///   TFT_FOR_EACH\[,