  cudaSupport ? config.cudaSupport,
  rocmSupport ? config.rocmSupport,
  metalSupport ? stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64,
  # one of [ null "cpu" "rocm" "cuda" "metal" ]
  acceleration ? null,
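  # Illustrative usage (hypothetical, not part of this file): a consumer can
  # pin one backend explicitly instead of relying on nixpkgs config, e.g.
  #
  #   tabby.override { acceleration = "cuda"; }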
  inherit (lib) optional optionals flatten;

  # Reference packaging for the same stack:
  # https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/ll/llama-cpp/package.nix
  # https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/ollama/default.nix
  availableAccelerations = flatten [
    (optional cudaSupport "cuda")
    (optional rocmSupport "rocm")
    (optional metalSupport "metal")
  ];
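  # For illustration, assuming a nixpkgs config with cudaSupport = true and
  # the other flags false: availableAccelerations == [ "cuda" ].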
  warnIfMultipleAccelerationMethods = configured: (
    let
      len = builtins.length configured;
      result = if len == 0 then "cpu" else (builtins.head configured);
    in
    lib.warnIf (len > 1) ''
      building tabby with multiple acceleration methods enabled is not
      supported; falling back to `${result}`
    '' result
  );
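  # e.g. warnIfMultipleAccelerationMethods [ "cuda" "rocm" ] emits the warning
  # above and evaluates to "cuda"; an empty list evaluates to "cpu".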
  # If the user did not override the acceleration attribute, then try to use one of
  # - nixpkgs.config.cudaSupport
  # - nixpkgs.config.rocmSupport
  # - metal if (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
  # !! warn if multiple acceleration methods are enabled and default to the first one in the list
  featureDevice =
    if (builtins.isNull acceleration) then
      (warnIfMultipleAccelerationMethods availableAccelerations)
    else
      acceleration;
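  # e.g. acceleration = null with only config.cudaSupport = true resolves
  # featureDevice to "cuda"; an explicit acceleration = "cpu" wins outright.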
  warnIfNotLinux = api: (
    lib.warnIfNot stdenv.hostPlatform.isLinux
      "building tabby with `${api}` is only supported on linux; falling back to cpu"
      stdenv.hostPlatform.isLinux
  );
  warnIfNotDarwinAarch64 = api: (
    lib.warnIfNot (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
      "building tabby with `${api}` is only supported on Darwin-aarch64; falling back to cpu"
      (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
  );
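  # Both helpers evaluate to the platform predicate itself, so they can gate a
  # boolean chain: on x86_64-linux, (warnIfNotDarwinAarch64 "metal") warns and
  # returns false.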
  validAccel = lib.assertOneOf "tabby.featureDevice" featureDevice [ "cpu" "rocm" "cuda" "metal" ];
  # TODO(ghthor): there is a bug here where featureDevice could be "cuda" while
  # enableCuda is false. This would result in a startup failure of the service module.
  enableRocm = validAccel && (featureDevice == "rocm") && (warnIfNotLinux "rocm");
  enableCuda = validAccel && (featureDevice == "cuda") && (warnIfNotLinux "cuda");
  enableMetal = validAccel && (featureDevice == "metal") && (warnIfNotDarwinAarch64 "metal");
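  # A minimal sketch of one possible fix for the TODO above (hypothetical, not
  # applied here): make an explicit-but-unbuildable request a hard eval error
  # by prefixing the final buildRustPackage call with implications such as
  #
  #   assert (acceleration == "cuda") -> enableCuda;
  #   assert (acceleration == "rocm") -> enableRocm;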
  # We have to use override here because tabby doesn't actually tell llama-cpp
  # to use a specific device type; it relies on llama-cpp having been built
  # for exactly one device type.
  #
  # See: https://github.com/TabbyML/tabby/blob/v0.11.1/crates/llama-cpp-bindings/include/engine.h#L20
  llamacppPackage = llama-cpp.override {
    rocmSupport = enableRocm;
    cudaSupport = enableCuda;
    metalSupport = enableMetal;
  };
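  # e.g. featureDevice == "rocm" yields a llama-cpp built with ROCm only,
  # while the cpu path leaves all three flags false and uses the plain build.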
  # TODO(ghthor): some of this can be removed
  darwinBuildInputs = [ llamacppPackage ]
    ++ optionals stdenv.hostPlatform.isDarwin ([
  cudaBuildInputs = [ llamacppPackage ];
  rocmBuildInputs = [ llamacppPackage ];
rustPlatform.buildRustPackage {
  inherit pname version;
  inherit featureDevice;
  src = fetchFromGitHub {
    owner = "TabbyML";
    repo = "tabby";
    rev = "refs/tags/v${version}";
    hash = "sha256-Vhl5oNVYY3pizoA0PuV4c9UXH3F2L+WiXQMOM0Pqxks=";
    fetchSubmodules = true;
  };
  cargoLock = {
    lockFile = ./Cargo.lock;
    outputHashes = {
      "ollama-rs-0.1.9" = "sha256-d6sKUxc8VQbRkVqMOeNFqDdKesq5k32AQShK67y2ssg=";
      "oneshot-0.1.6" = "sha256-PmYuHuNTqToMyMHPRFDUaHUvFkVftx9ZCOBwXj+4Hc4=";
      "ownedbytes-0.7.0" = "sha256-p0+ohtW0VLmfDTZw/LfwX2gYfuYuoOBcE+JsguK7Wn8=";
      "sqlx-0.7.4" = "sha256-tcISzoSfOZ0jjNgGpuPPxjMxmBUPw/5FVDoALZEAHKY=";
      "tree-sitter-c-0.21.3" = "sha256-ucbHLS2xyGo1uyKZv/K1HNXuMo4GpTY327cgdVS9F3c=";
      "tree-sitter-cpp-0.22.1" = "sha256-3akSuQltFMF6I32HwRU08+Hcl9ojxPGk2ZuOX3gAObw=";
      "tree-sitter-solidity-1.2.6" = "sha256-S00hdzMoIccPYBEvE092/RIMnG8YEnDGk6GJhXlr4ng=";
    };
  };
  # https://github.com/TabbyML/tabby/blob/v0.7.0/.github/workflows/release.yml#L39
  cargoBuildFlags = [
    # No need to build llama-cpp-server (it is included in the default build)
    "--no-default-features"
  ++ optionals enableRocm [
  ++ optionals enableCuda [
  nativeInstallCheckInputs = [
    versionCheckHook
  ];
  versionCheckProgramArg = "--version";
  doInstallCheck = true;
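  # versionCheckHook runs the installed binary with the argument above during
  # the install check and fails the build if its output lacks ${version}.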
  ++ optionals enableCuda [
  buildInputs = [ openssl ]
    ++ optionals stdenv.hostPlatform.isDarwin darwinBuildInputs
    ++ optionals enableCuda cudaBuildInputs
    ++ optionals enableRocm rocmBuildInputs;
  postInstall = ''
    # NOTE: The project contains a subproject for building llama-server,
    # but we already have a derivation for it.
    ln -s ${lib.getExe' llamacppPackage "llama-server"} $out/bin/llama-server
  '';
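  # lib.getExe' pkg "llama-server" resolves to "${pkg}/bin/llama-server", so
  # the symlink points at the llama-server from the accelerated llama-cpp
  # override defined above.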
  OPENSSL_NO_VENDOR = 1;
  # file cannot create directory: /var/empty/local/lib64/cmake/Llama
  passthru.updateScript = nix-update-script {
  meta = with lib; {
    homepage = "https://github.com/TabbyML/tabby";
    changelog = "https://github.com/TabbyML/tabby/releases/tag/v${version}";
    description = "Self-hosted AI coding assistant";
    mainProgram = "tabby";
    license = licenses.asl20;
    maintainers = [ maintainers.ghthor ];
    broken = stdenv.hostPlatform.isDarwin && !stdenv.hostPlatform.isAarch64;
  };
}