[package]
name = "llama_cpp_low"
version.workspace = true
authors.workspace = true
edition = "2021"
license = "MIT"
keywords = ["llm"]
description = "Small server binary built from llama.cpp"
readme = "README.md"
exclude = ["llama.cpp/models/**", "llama.cpp/ggml/src/kompute/**"]
repository = "https://github.com/blmarket/llm-daemon"

[dependencies]

[build-dependencies]
cmake = "0.1.50"

[features]
default = ["cuda"]
cuda = []
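
# Note (explanatory comment, not part of the upstream manifest): `cuda` is a
# marker feature with no Cargo dependencies of its own. A build script
# (build.rs) using the `cmake` build-dependency above would typically detect
# it via the CARGO_FEATURE_CUDA environment variable that Cargo sets for
# enabled features, and forward a CUDA switch to the CMake build of the
# bundled llama.cpp sources. The exact CMake option name depends on the
# llama.cpp revision vendored in this repository.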