# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
name = "llama_cpp"
version = "0.3.2"
# NOTE(review): both author strings end in a trailing space with no
# <email> part — the addresses were likely stripped by whatever mangled
# this file; verify against Cargo.toml.orig before republishing.
authors = [
    "Dakota Thompson ",
    "Pedro Valente ",
]
publish = true
description = "High-level bindings to llama.cpp with a focus on just being really, really easy to use"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/edgenai/llama_cpp-rs"

[lib]
# Examples in the docs rely on model files that aren't available in CI,
# so doctests are disabled crate-wide.
doctest = false

[dependencies.derive_more]
version = "0.99.17"

[dependencies.futures]
version = "0.3.30"

# Kept in lockstep with this crate's own version; default features off so
# the cargo features below fully control the llama.cpp build flags.
[dependencies.llama_cpp_sys]
version = "^0.3.2"
default-features = false

[dependencies.num_cpus]
version = "1.16.0"

[dependencies.thiserror]
version = "1.0.57"

[dependencies.tokio]
version = "1.36.0"
features = [
    "sync",
    "rt",
    "rt-multi-thread",
]

[dependencies.tracing]
version = "0.1.40"

# Feature names mirror the flags exposed by llama_cpp_sys (and ultimately
# the llama.cpp build system); do not rename them independently.
[features]
accel = ["llama_cpp_sys/accel"]
avx = ["llama_cpp_sys/avx"]
avx2 = ["llama_cpp_sys/avx2"]
avx512 = ["llama_cpp_sys/avx512"]
avx512_vmbi = ["llama_cpp_sys/avx512_vmbi"]
avx512_vnni = ["llama_cpp_sys/avx512_vnni"]
blas = ["llama_cpp_sys/blas"]
clblast = ["llama_cpp_sys/clblast"]
compat = ["llama_cpp_sys/compat"]
cuda = ["llama_cpp_sys/cuda"]
# The cuda_* variants require the base "cuda" feature, hence the extra entry.
cuda_dmmv = [
    "llama_cpp_sys/cuda_dmmv",
    "cuda",
]
cuda_f16 = [
    "llama_cpp_sys/cuda_f16",
    "cuda",
]
cuda_mmq = [
    "llama_cpp_sys/cuda_mmq",
    "cuda",
]
default = [
    "compat",
    "native",
]
f16c = ["llama_cpp_sys/f16c"]
fma = ["llama_cpp_sys/fma"]
hipblas = ["llama_cpp_sys/hipblas"]
metal = ["llama_cpp_sys/metal"]
mpi = ["llama_cpp_sys/mpi"]
# "native" bundles the host-CPU SIMD/accel features for a default build.
native = [
    "llama_cpp_sys/native",
    "avx",
    "avx2",
    "fma",
    "f16c",
    "accel",
]
sys_verbosity = []
vulkan = ["llama_cpp_sys/vulkan"]