#![allow(clippy::uninlined_format_args)]

// *A lot* of inspiration from whisper-rs :)
// https://github.com/tazz4843/whisper-rs/blob/master/sys/build.rs

extern crate bindgen;

use std::env;
use std::path::PathBuf;

fn main() {
    let target = env::var("TARGET").unwrap();

    // Link C++ standard library
    if let Some(cpp_stdlib) = get_cpp_link_stdlib(&target) {
        println!("cargo:rustc-link-lib=dylib={}", cpp_stdlib);
        println!("cargo:rustc-link-arg=-l{}", cpp_stdlib);
    }
    // Link macOS Accelerate framework for matrix calculations
    if target.contains("apple") {
        println!("cargo:rustc-link-lib=framework=Accelerate");
    }

    println!("cargo:rustc-link-search={}", env::var("OUT_DIR").unwrap());
    println!("cargo:rustc-link-lib=static=llama");
    println!("cargo:rerun-if-changed=wrapper.h");

    // Check if the CUDA feature is enabled for cuBLAS
    let cuda_enabled = env::var("CARGO_FEATURE_CUDA").is_ok();

    if env::var("LLAMA_DONT_GENERATE_BINDINGS").is_ok() {
        let _: u64 = std::fs::copy(
            "src/bindings.rs",
            env::var("OUT_DIR").unwrap() + "/bindings.rs",
        )
        .expect("Failed to copy bindings.rs");
    } else {
        let bindings_builder = bindgen::Builder::default()
            .header("wrapper.h")
            .clang_args(&["-x", "c++"])
            .clang_arg("-I./llama.cpp");

        let bindings = if cuda_enabled {
            bindings_builder.clang_arg("-DGGML_USE_CUBLAS")
        } else {
            bindings_builder
        }
        .parse_callbacks(Box::new(bindgen::CargoCallbacks))
        .generate();

        match bindings {
            Ok(b) => {
                let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
                b.write_to_file(out_path.join("bindings.rs"))
                    .expect("Couldn't write bindings!");
            }
            Err(e) => {
                println!("cargo:warning=Unable to generate bindings: {}", e);
                println!("cargo:warning=Using bundled bindings.rs, which may be out of date");
                // Fall back to the bundled src/bindings.rs
                std::fs::copy(
                    "src/bindings.rs",
                    env::var("OUT_DIR").unwrap() + "/bindings.rs",
                )
                .expect("Unable to copy bindings.rs");
            }
        }
    };

    // Stop here if we're building on docs.rs
    if env::var("DOCS_RS").is_ok() {
        return;
    }

    // Build the llama.cpp static library with CMake
    env::set_current_dir("llama.cpp").expect("Unable to change directory to llama.cpp");
    _ = std::fs::remove_dir_all("build");
    _ = std::fs::create_dir("build");
    env::set_current_dir("build").expect("Unable to change directory to llama.cpp build");
    env::set_var("CXXFLAGS", "-fPIC");
    env::set_var("CFLAGS", "-fPIC");

    let mut code = std::process::Command::new("cmake");
    let code = code
        .arg("..")
        .arg("-DCMAKE_BUILD_TYPE=Release")
        .arg("-DBUILD_SHARED_LIBS=OFF")
        .arg("-DLLAMA_ALL_WARNINGS=OFF")
        .arg("-DLLAMA_ALL_WARNINGS_3RD_PARTY=OFF")
        .arg("-DLLAMA_BUILD_TESTS=OFF")
        .arg("-DLLAMA_BUILD_EXAMPLES=OFF");
    // .arg("-DLLAMA_STATIC=ON")
    if cuda_enabled {
        // If the CUDA feature is enabled, build with cuBLAS to enable GPU acceleration
        code.arg("-DLLAMA_CUBLAS=ON");
    }
    let code = code.status().expect("Failed to generate build script");
    if code.code() != Some(0) {
        panic!("Failed to generate build script");
    }

    #[allow(clippy::suspicious_command_arg_space)]
    let code = std::process::Command::new("cmake")
        .arg("--build")
        .arg(".")
        .arg("--config Release")
        .status()
        .expect("Failed to build lib");
    if code.code() != Some(0) {
        panic!("Failed to build lib");
    }

    // Copy the built static library (libllama.a / llama.lib) to where Cargo expects it (OUT_DIR)
    #[cfg(target_os = "windows")]
    {
        std::fs::copy(
            "Release/llama.lib",
            format!("{}/llama.lib", env::var("OUT_DIR").unwrap()),
        )
        .expect("Failed to copy lib");
    }
    #[cfg(not(target_os = "windows"))]
    {
        std::fs::copy(
            "libllama.a",
            format!("{}/libllama.a", env::var("OUT_DIR").unwrap()),
        )
        .expect("Failed to copy lib");
    }

    // Clean the llama build directory to prevent Cargo from complaining during crate publish
    _ = std::fs::remove_dir_all("build");
}

// From https://github.com/alexcrichton/cc-rs/blob/fba7feded71ee4f63cfe885673ead6d7b4f2f454/src/lib.rs#L2462
fn get_cpp_link_stdlib(target: &str) -> Option<&'static str> {
    if target.contains("msvc") {
        None
    } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") {
        Some("c++")
    } else if target.contains("android") {
        Some("c++_shared")
    } else {
        Some("stdc++")
    }
}
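
// Usage notes (a sketch, not part of the build logic; assumes the crate's Cargo.toml
// declares a `cuda` feature, which is what sets CARGO_FEATURE_CUDA read in main()):
//
//   # Build with cuBLAS GPU acceleration:
//   cargo build --features cuda
//
//   # Skip bindgen and reuse the bundled src/bindings.rs:
//   LLAMA_DONT_GENERATE_BINDINGS=1 cargo build
//
// The bindings written to OUT_DIR are consumed with the usual bindgen pattern,
// e.g. in the crate's lib.rs (exact module layout is an assumption):
//
//   include!(concat!(env!("OUT_DIR"), "/bindings.rs"));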