{ config, lib, pkgs, ... }:
cfg = config.services.tabby;
format = pkgs.formats.toml { };
tabbyPackage = cfg.package.override {
inherit (cfg) acceleration;
enable = lib.mkEnableOption "Self-hosted AI coding assistant using large language models";
package = lib.mkPackageOption pkgs "tabby" { };
Specifies the bind port on which the tabby server HTTP interface listens.
model = lib.mkOption {
default = "TabbyML/StarCoder-1B";
Specify the model that tabby will use to generate completions.
This model will be downloaded automatically if it is not already present.
If you want to utilize an existing model that you've already
downloaded, you'll need to move it into tabby's state directory,
which lives in `/var/lib/tabby`. Because tabby.service is configured
to use a DynamicUser, the service needs to have been started at least
once before you can move the locally downloaded model into
`/var/lib/tabby`. You can set the model to 'none'; tabby will then
start up and fail to download a model, but will have created the
`/var/lib/tabby` directory. You can then copy the model manually
into `/var/lib/tabby`, update the model option to the name of the model
you just downloaded and copied over, and run `nixos-rebuild switch` to
start using it. For example:
$ tabby download --model TabbyML/DeepseekCoder-6.7B
$ find ~/.tabby/ | tail -n1
/home/ghthor/.tabby/models/TabbyML/DeepseekCoder-6.7B/ggml/q8_0.v2.gguf
$ sudo rsync -r ~/.tabby/models/ /var/lib/tabby/models/
$ sudo chown -R tabby:tabby /var/lib/tabby/models/
See the Tabby model registry for available models:
> https://github.com/TabbyML/registry-tabby
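# A minimal sketch (not part of the module itself) of pointing the service at
# the model downloaded and copied over in the example above; the model name is
# only illustrative:
#
#   services.tabby.model = "TabbyML/DeepseekCoder-6.7B";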
acceleration = lib.mkOption {
type = types.nullOr (types.enum [ "cpu" "rocm" "cuda" "metal" ]);
Specifies the device to use for hardware acceleration.
- `cpu`: no acceleration, just use the CPU
- `rocm`: supported by modern AMD GPUs
- `cuda`: supported by modern NVIDIA GPUs
- `metal`: supported on darwin aarch64 machines
When `acceleration = null`, Tabby will try to determine which type of
acceleration is already enabled in your configuration by checking:

- `nixpkgs.config.cudaSupport`
- `nixpkgs.config.rocmSupport`
- whether `stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64`

If multiple acceleration methods are found to be enabled, or if you
haven't set either `cudaSupport` or `rocmSupport`, you will have to
specify the device type manually here; otherwise it will default to
the first match from the list above, or to `cpu`.
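# A minimal sketch (illustrative only) of pinning the device explicitly instead
# of relying on auto-detection:
#
#   services.tabby.acceleration = "cuda";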
settings = lib.mkOption {
inherit (format) type;
Tabby scheduler configuration
> https://tabby.tabbyml.com/docs/configuration/#repository-context-for-code-completion
example = lib.literalExpression ''
{ name = "tabby"; git_url = "https://github.com/TabbyML/tabby.git"; }
{ name = "CTranslate2"; git_url = "git@github.com:OpenNMT/CTranslate2.git"; }
# local directories are also supported, but limited by systemd DynamicUser=1;
# adding local repositories will need to be done manually
{ name = "repository_a"; git_url = "file:///var/lib/tabby/repository_a"; }
usageCollection = lib.mkOption {
Enable sending anonymous usage data.

See the upstream documentation for more details:
> https://tabby.tabbyml.com/docs/configuration#usage-collection
indexInterval = lib.mkOption {
Run tabby scheduler to generate the index database at this interval.
Updates by default every 5 hours. This value applies to
`OnUnitInactiveSec` of the tabby-scheduler timer.

The format is described in
{manpage}`systemd.time(7)`.

To disable running `tabby scheduler --now` updates, set this to `"never"`.
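# A minimal sketch (illustrative values) of tuning or disabling the index
# updates driven by the tabby-scheduler timer below:
#
#   services.tabby.indexInterval = "12h";
#   # or, to disable the timer entirely:
#   services.tabby.indexInterval = "never";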
# TODO(ghthor): firewall config
config = lib.mkIf cfg.enable {
etc."tabby/config.toml".source = format.generate "config.toml" cfg.settings;
systemPackages = [ tabbyPackage ];
WorkingDirectory = "/var/lib/tabby";
StateDirectory = [ "tabby" ];
ConfigurationDirectory = [ "tabby" ];
serviceEnv = lib.mkMerge [
TABBY_ROOT = "%S/tabby";
(lib.mkIf (!cfg.usageCollection) {
TABBY_DISABLE_USAGE_COLLECTION = "1";
wantedBy = [ "multi-user.target" ];
description = "Self-hosted AI coding assistant using large language models";
after = [ "network.target" ];
environment = serviceEnv;
serviceConfig = lib.mkMerge [
"${lib.getExe tabbyPackage} serve --model ${cfg.model} --port ${toString cfg.port} --device ${tabbyPackage.featureDevice}";
services.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
wantedBy = [ "multi-user.target" ];
description = "Tabby repository indexing service";
after = [ "network.target" ];
environment = serviceEnv;
preStart = "cp -f /etc/tabby/config.toml \${TABBY_ROOT}/config.toml";
serviceConfig = lib.mkMerge [
ExecStart = "${lib.getExe tabbyPackage} scheduler --now";
timers.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
description = "Update timer for tabby-scheduler";
partOf = [ "tabby-scheduler.service" ];
wantedBy = [ "timers.target" ];
timerConfig.OnUnitInactiveSec = cfg.indexInterval;
meta.maintainers = with lib.maintainers; [ ghthor ];
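# A minimal sketch (illustrative only, values are assumptions) of an end-user
# configuration tying the options above together:
#
#   services.tabby = {
#     enable = true;
#     port = 8080;
#     usageCollection = false;
#   };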