build.rs (from a fork of TabbyML/tabby)
use std::{env, path::Path};

use cmake::Config;

fn main() {
    const LLAMA_CMAKE_PATH: &str = "llama.cpp/CMakeLists.txt";

    assert!(
        Path::new(LLAMA_CMAKE_PATH).exists(),
        "Please init submodules with `git submodule update --init --recursive` and try again"
    );

    println!("cargo:rerun-if-changed=include/engine.h");
    println!("cargo:rerun-if-changed=src/engine.cc");
    println!("cargo:rustc-link-lib=llama");
    println!("cargo:rustc-link-lib=ggml_static");

    build_llama_cpp();
    build_cxx_binding();
}
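
/// Configures and builds llama.cpp through CMake, enabling the Metal backend
/// on macOS and the CUDA or ROCm backend when the matching Cargo feature is set,
/// and emits the link-search paths and link-lib directives each backend needs.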
fn build_llama_cpp() {
    let mut config = Config::new("llama.cpp");

    // Apple platforms: enable the Metal backend and link the frameworks it requires.
    if cfg!(target_os = "macos") {
        config.define("LLAMA_METAL", "ON");
        println!("cargo:rustc-link-lib=framework=Foundation");
        println!("cargo:rustc-link-lib=framework=Accelerate");
        println!("cargo:rustc-link-lib=framework=Metal");
        println!("cargo:rustc-link-lib=framework=MetalKit");
    }

    // CUDA: build with cuBLAS and link the CUDA runtime and BLAS libraries.
    if cfg!(feature = "cuda") {
        config.define("LLAMA_CUBLAS", "ON");
        config.define("CMAKE_POSITION_INDEPENDENT_CODE", "ON");
        if cfg!(target_os = "windows") {
            let Ok(cuda_path) = env::var("CUDA_PATH") else {
                panic!("CUDA_PATH is not set");
            };
            println!(r"cargo:rustc-link-search=native={}\lib\x64", cuda_path);
        } else {
            println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
            println!("cargo:rustc-link-lib=culibos");
        }
        println!("cargo:rustc-link-lib=cudart");
        println!("cargo:rustc-link-lib=cublas");
        println!("cargo:rustc-link-lib=cublasLt");
    }

    // ROCm: build with hipBLAS using ROCm's bundled clang, compiling kernels
    // for the listed AMD GPU architectures.
    if cfg!(feature = "rocm") {
        let amd_gpu_targets: Vec<&str> = vec![
            "gfx803",
            "gfx900",
            "gfx906:xnack-",
            "gfx908:xnack-",
            "gfx90a:xnack+",
            "gfx90a:xnack-",
            "gfx940",
            "gfx941",
            "gfx942",
            "gfx1010",
            "gfx1012",
            "gfx1030",
            "gfx1031",
            "gfx1100",
            "gfx1101",
            "gfx1102",
            "gfx1103",
        ];

        let rocm_root = env::var("ROCM_ROOT").unwrap_or("/opt/rocm".to_string());
        config.define("LLAMA_HIPBLAS", "ON");
        config.define("CMAKE_C_COMPILER", format!("{}/llvm/bin/clang", rocm_root));
        config.define(
            "CMAKE_CXX_COMPILER",
            format!("{}/llvm/bin/clang++", rocm_root),
        );
        config.define("AMDGPU_TARGETS", amd_gpu_targets.join(";"));

        println!("cargo:rustc-link-arg=-Wl,--copy-dt-needed-entries");
        println!("cargo:rustc-link-search=native={}/hip/lib", rocm_root);
        println!("cargo:rustc-link-search=native={}/rocblas/lib", rocm_root);
        println!("cargo:rustc-link-search=native={}/hipblas/lib", rocm_root);
        println!("cargo:rustc-link-lib=amdhip64");
        println!("cargo:rustc-link-lib=rocblas");
        println!("cargo:rustc-link-lib=hipblas");
    }

    // By default, the CMake build profile is inferred from Rust's compilation profile.
    // On Windows, we always build llama.cpp in release mode.
    // See https://github.com/TabbyML/tabby/pull/948 for more details.
    if cfg!(target_os = "windows") {
        config.profile("Release");
    }

    let dst = config.build();
    if cfg!(target_os = "windows") {
        println!(
            r"cargo:rustc-link-search=native={}\build\{}",
            dst.display(),
            config.get_profile()
        );
    } else {
        println!("cargo:rustc-link-search=native={}/build", dst.display());
    }
}
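
/// Compiles the cxx bridge declared in src/lib.rs together with the C++ glue
/// code in src/engine.cc, adding the local include/ directory and the
/// llama.cpp sources to the C++ compiler's include path.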
fn build_cxx_binding() {
    cxx_build::bridge("src/lib.rs")
        .file("src/engine.cc")
        .flag_if_supported("-Iinclude")
        .flag_if_supported("-Illama.cpp")
        .flag_if_supported("-std=c++14")
        .compile("cxxbridge");
}
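
For context, cxx_build::bridge("src/lib.rs") expects src/lib.rs to declare a #[cxx::bridge] module matching the declarations in include/engine.h. That file is not part of this build script, so the following is only a minimal sketch of what such a module could look like; the Engine type, the create_engine function, and the include path are hypothetical placeholders, not the crate's actual API:

// Hypothetical sketch only; the real bridge lives in src/lib.rs and is not shown here.
#[cxx::bridge]
mod ffi {
    unsafe extern "C++" {
        // Placeholder path; the exact form depends on the include setup above.
        include!("include/engine.h");

        // Opaque C++ type, implemented in src/engine.cc (placeholder name).
        type Engine;

        // Placeholder signature; the real declarations live in engine.h.
        fn create_engine(model_path: &str) -> UniquePtr<Engine>;
    }
}

With a bridge like this in place, the backend-specific paths above are selected at build time through ordinary Cargo feature flags, e.g. cargo build --features cuda or cargo build --features rocm.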