15 , autoAddDriverRunpath
16 , cudaSupport ? config.cudaSupport
19 , rocmSupport ? config.rocmSupport
22 , metalSupport ? stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64
24 # one of [ null "cpu" "rocm" "cuda" "metal" ];
29 inherit (lib) optional optionals flatten;
31 # https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/ll/llama-cpp/package.nix
32 # https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/ollama/default.nix
38 availableAccelerations = flatten [
39 (optional cudaSupport "cuda")
40 (optional rocmSupport "rocm")
41 (optional metalSupport "metal")
44 warnIfMultipleAccelerationMethods = configured: (let
45 len = builtins.length configured;
46 result = if len == 0 then "cpu" else (builtins.head configured);
48 lib.warnIf (len > 1) ''
49 building tabby with multiple acceleration methods enabled is not
50 supported; falling back to `${result}`
# If the user did not override the acceleration attribute, then try to use one of
56 # - nixpkgs.config.cudaSupport
57 # - nixpkgs.config.rocmSupport
58 # - metal if (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
59 # !! warn if multiple acceleration methods are enabled and default to the first one in the list
# Resolve the effective device: an explicit `acceleration` argument wins;
# otherwise fall back to whichever platform support flag is enabled
# (warning and taking the first one when several are enabled).
# `acceleration == null` replaces the deprecated `builtins.isNull`.
featureDevice = if acceleration == null then warnIfMultipleAccelerationMethods availableAccelerations else acceleration;
# Evaluates to `true` only on Linux; otherwise emits an eval-time warning
# (`api` is used solely in the message) and evaluates to `false`, which
# makes the caller fall back to the cpu backend.
warnIfNotLinux = api:
  let onLinux = stdenv.hostPlatform.isLinux;
  in lib.warnIfNot onLinux
    "building tabby with `${api}` is only supported on linux; falling back to cpu"
    onLinux;
# Evaluates to `true` only on aarch64 Darwin; otherwise emits an eval-time
# warning naming `api` and evaluates to `false` so the caller degrades to
# the cpu backend.
warnIfNotDarwinAarch64 = api:
  let onAppleSilicon = stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64;
  in lib.warnIfNot onAppleSilicon
    "building tabby with `${api}` is only supported on Darwin-aarch64; falling back to cpu"
    onAppleSilicon;
# Aborts evaluation with a descriptive error if `featureDevice` is not one
# of the recognized backend names; evaluates to `true` otherwise.
validAccel = lib.assertOneOf "tabby.featureDevice" featureDevice [ "cpu" "rocm" "cuda" "metal" ];
71 # TODO(ghthor): there is a bug here where featureDevice could be cuda, but enableCuda is false
# That would result in a startup failure of the service module.
# At most one backend flag ends up true: `featureDevice` selects it, and the
# platform check degrades the build to cpu (with a warning) when the host
# cannot build that backend. (`==` binds tighter than `&&`, so no parens.)
enableRocm = validAccel && featureDevice == "rocm" && warnIfNotLinux "rocm";
enableCuda = validAccel && featureDevice == "cuda" && warnIfNotLinux "cuda";
enableMetal = validAccel && featureDevice == "metal" && warnIfNotDarwinAarch64 "metal";
77 # We have to use override here because tabby doesn't actually tell llama-cpp
78 # to use a specific device type as it is relying on llama-cpp only being
79 # built to use one type of device.
81 # See: https://github.com/TabbyML/tabby/blob/v0.11.1/crates/llama-cpp-bindings/include/engine.h#L20
83 llamaccpPackage = llama-cpp.override {
84 rocmSupport = enableRocm;
85 cudaSupport = enableCuda;
86 metalSupport = enableMetal;
89 # TODO(ghthor): some of this can be removed
90 darwinBuildInputs = [ llamaccpPackage ]
91 ++ optionals stdenv.hostPlatform.isDarwin (with darwin.apple_sdk.frameworks; [
97 ++ optionals enableMetal [ Metal MetalKit ]);
# For CUDA and ROCm the only extra build input is llama-cpp itself, built
# with the matching backend via the `llama-cpp.override` above.
cudaBuildInputs = [ llamaccpPackage ];
rocmBuildInputs = [ llamaccpPackage ];
103 rustPlatform.buildRustPackage {
104 inherit pname version;
105 inherit featureDevice;
107 src = fetchFromGitHub {
111 hash = "sha256-OgAE526aW3mVqf6fVmBmL5/B4gH9B54QLEITQk9Kgsg=";
112 fetchSubmodules = true;
116 lockFile = ./Cargo.lock;
118 "apalis-0.5.1" = "sha256-hGvVuSy32lSTR5DJdiyf8q1sXbIeuLSGrtyq6m2QlUQ=";
119 "tree-sitter-c-0.20.6" = "sha256-Etl4s29YSOxiqPo4Z49N6zIYqNpIsdk/Qd0jR8jdvW4=";
120 "tree-sitter-cpp-0.20.3" = "sha256-UrQ48CoUMSHmlHzOMu22c9N4hxJtHL2ZYRabYjf5byA=";
121 "tree-sitter-solidity-0.0.3" = "sha256-b+LthCf+g19sjKeNgXZmUV0RNi94O3u0WmXfgKRpaE0=";
125 # https://github.com/TabbyML/tabby/blob/v0.7.0/.github/workflows/release.yml#L39
129 ] ++ optionals enableRocm [
131 ] ++ optionals enableCuda [
135 OPENSSL_NO_VENDOR = 1;
137 nativeBuildInputs = [
141 ] ++ optionals enableCuda [
145 buildInputs = [ openssl ]
146 ++ optionals stdenv.hostPlatform.isDarwin darwinBuildInputs
147 ++ optionals enableCuda cudaBuildInputs
148 ++ optionals enableRocm rocmBuildInputs
151 env.LLAMA_CPP_LIB = "${lib.getLib llamaccpPackage}/lib";
152 patches = [ ./0001-nix-build-use-nix-native-llama-cpp-package.patch ];
155 # file cannot create directory: /var/empty/local/lib64/cmake/Llama
158 passthru.updateScript = nix-update-script { };
161 homepage = "https://github.com/TabbyML/tabby";
162 changelog = "https://github.com/TabbyML/tabby/releases/tag/v${version}";
163 description = "Self-hosted AI coding assistant";
164 mainProgram = "tabby";
165 license = licenses.asl20;
166 maintainers = [ maintainers.ghthor ];
167 broken = stdenv.hostPlatform.isDarwin && !stdenv.hostPlatform.isAarch64;