nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
if nvidiaEnabled || cfg.datacenter.enable
cfg = config.hardware.nvidia;
offloadCfg = pCfg.offload;
reverseSyncCfg = pCfg.reverseSync;
primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
settingsFormat = pkgs.formats.keyValue {};
datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
  Data Center drivers for NVIDIA cards on a NVLink topology
datacenter.settings = lib.mkOption {
  type = settingsFormat.type;
    LOG_FILE_NAME="/var/log/fabricmanager.log";
    LOG_FILE_MAX_SIZE=1024;
    BIND_INTERFACE_IP="127.0.0.1";
    STARTING_TCP_PORT=16000;
    FABRIC_MODE_RESTART=0;
    STATE_FILE_NAME="/var/tmp/fabricmanager.state";
    FM_CMD_BIND_INTERFACE="127.0.0.1";
    FM_CMD_PORT_NUMBER=6666;
    FM_STAY_RESIDENT_ON_FAILURES=0;
    ACCESS_LINK_FAILURE_MODE=0;
    TRUNK_LINK_FAILURE_MODE=0;
    NVSWITCH_FAILURE_MODE=0;
    ABORT_CUDA_JOBS_ON_FM_EXIT=1;
    TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
  defaultText = lib.literalExpression ''
      LOG_FILE_NAME="/var/log/fabricmanager.log";
      LOG_FILE_MAX_SIZE=1024;
      BIND_INTERFACE_IP="127.0.0.1";
      STARTING_TCP_PORT=16000;
      FABRIC_MODE_RESTART=0;
      STATE_FILE_NAME="/var/tmp/fabricmanager.state";
      FM_CMD_BIND_INTERFACE="127.0.0.1";
      FM_CMD_PORT_NUMBER=6666;
      FM_STAY_RESIDENT_ON_FAILURES=0;
      ACCESS_LINK_FAILURE_MODE=0;
      TRUNK_LINK_FAILURE_MODE=0;
      NVSWITCH_FAILURE_MODE=0;
      ABORT_CUDA_JOBS_ON_FM_EXIT=1;
      TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
  description = lib.mdDoc ''
    Additional configuration options for fabricmanager.
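    For example, a configuration sketch that sets one of the keys listed in the
    defaults above (the values here are illustrative, not recommendations):

    ```nix
    hardware.nvidia.datacenter = {
      enable = true;
      settings.LOG_FILE_MAX_SIZE = 2048;
    };
    ```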
powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
  experimental power management through systemd. For more information, see
  the NVIDIA docs, chapter 21, "Configuring Power Management Support".
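  A configuration sketch (hypothetical host; whether this experimental path
  works depends on your driver version and hardware):

  ```nix
  hardware.nvidia.powerManagement.enable = true;
  ```

  As wired up further down in this module, this adds the
  `nvidia.NVreg_PreserveVideoMemoryAllocations=1` kernel parameter and the
  suspend/hibernate/resume systemd services.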
powerManagement.finegrained = lib.mkEnableOption (lib.mdDoc ''
  experimental power management of PRIME offload. For more information, see
  the NVIDIA docs, chapter 22, "PCI-Express Runtime D3 (RTD3) Power Management".
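  A configuration sketch (hypothetical Intel + NVIDIA laptop; fine-grained
  power management requires PRIME offload, and the bus IDs below are the
  example values from the prime options and must match your hardware):

  ```nix
  hardware.nvidia = {
    powerManagement.finegrained = true;
    prime = {
      offload.enable = true;
      nvidiaBusId = "PCI:1:0:0";
      intelBusId = "PCI:0:2:0";
    };
  };
  ```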
dynamicBoost.enable = lib.mkEnableOption (lib.mdDoc ''
  Dynamic Boost, which balances power between the CPU and the GPU for improved
  performance on supported laptops using the nvidia-powerd daemon. For more
  information, see the NVIDIA docs, chapter 23, "Dynamic Boost on Linux".
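  A configuration sketch (only meaningful on supported laptops, and only with
  driver versions of at least 510.39.01, as asserted elsewhere in this module):

  ```nix
  hardware.nvidia.dynamicBoost.enable = true;
  ```

  This starts the nvidia-powerd daemon configured later in this module.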
modesetting.enable = lib.mkEnableOption (lib.mdDoc ''
  kernel modesetting when using the NVIDIA proprietary driver.

  Enabling this fixes screen tearing when using Optimus via PRIME (see
  {option}`hardware.nvidia.prime.sync.enable`). This is not enabled
  by default because it is not officially supported by NVIDIA and would not
prime.nvidiaBusId = lib.mkOption {
  example = "PCI:1:0:0";
  description = lib.mdDoc ''
    Bus ID of the NVIDIA GPU. You can find it using lspci; for example, if lspci
    shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
prime.intelBusId = lib.mkOption {
  example = "PCI:0:2:0";
  description = lib.mdDoc ''
    Bus ID of the Intel GPU. You can find it using lspci; for example, if lspci
    shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
prime.amdgpuBusId = lib.mkOption {
  example = "PCI:4:0:0";
  description = lib.mdDoc ''
    Bus ID of the AMD APU. You can find it using lspci; for example, if lspci
    shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
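    Note that lspci prints the bus ID in hexadecimal, while this option expects
    decimal values. For instance, a GPU that lspci reports at "0a:00.0" would
    (assuming the usual X server BusID convention) be specified as:

    ```nix
    hardware.nvidia.prime.amdgpuBusId = "PCI:10:0:0";
    ```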
prime.sync.enable = lib.mkEnableOption (lib.mdDoc ''
  NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
  If enabled, the NVIDIA GPU will always be on and used for all rendering,
  while enabling output to displays attached only to the integrated Intel/AMD
  GPU without a multiplexer.

  Note that this option only has any effect if the "nvidia" driver is specified
  in {option}`services.xserver.videoDrivers`, and it should preferably
  be the only driver there.

  If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
  be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
  {option}`hardware.nvidia.prime.intelBusId` or
  {option}`hardware.nvidia.prime.amdgpuBusId`).

  If you enable this, you may want to also enable kernel modesetting for the
  NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order

  Note that this configuration will only be successful when a display manager
  that supports the {option}`services.xserver.displayManager.setupCommands`
  option is used.
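  A configuration sketch for a sync setup (hypothetical Intel + NVIDIA laptop;
  the bus IDs are the example values from above and must match your hardware):

  ```nix
  hardware.nvidia = {
    modesetting.enable = true;
    prime = {
      sync.enable = true;
      nvidiaBusId = "PCI:1:0:0";
      intelBusId = "PCI:0:2:0";
    };
  };
  ```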
prime.allowExternalGpu = lib.mkEnableOption (lib.mdDoc ''
  configuring X to allow external NVIDIA GPUs when using PRIME [Reverse] Sync Optimus
prime.offload.enable = lib.mkEnableOption (lib.mdDoc ''
  render offload support using the NVIDIA proprietary driver via PRIME.

  If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
  be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
  {option}`hardware.nvidia.prime.intelBusId` or
  {option}`hardware.nvidia.prime.amdgpuBusId`)

prime.offload.enableOffloadCmd = lib.mkEnableOption (lib.mdDoc ''
  adding a `nvidia-offload` convenience script to {option}`environment.systemPackages`
  for offloading programs to an NVIDIA device. For this to work, either
  {option}`hardware.nvidia.prime.offload.enable` or {option}`hardware.nvidia.prime.reverseSync.enable`
  must also be enabled.

  Example usage: `nvidia-offload sauerbraten_client`
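  A configuration sketch (hypothetical offload setup; the bus IDs are the
  example values from above and must match your hardware):

  ```nix
  hardware.nvidia.prime = {
    offload.enable = true;
    offload.enableOffloadCmd = true;
    nvidiaBusId = "PCI:1:0:0";
    intelBusId = "PCI:0:2:0";
  };
  ```

  With this in place, `nvidia-offload <program>` runs the given program on the
  NVIDIA GPU by exporting the `__NV_PRIME_RENDER_OFFLOAD`-related environment
  variables set in the script defined later in this module.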
prime.reverseSync.enable = lib.mkEnableOption (lib.mdDoc ''
  NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
  PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
  enabling output to displays attached only to the NVIDIA GPU without a

  Warning: this feature is relatively new and, depending on your system, might
  work poorly; this is especially true for AMD support.
  See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828

  Note that this option only has any effect if the "nvidia" driver is specified
  in {option}`services.xserver.videoDrivers`, and it should preferably
  be the only driver there.

  If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
  be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
  {option}`hardware.nvidia.prime.intelBusId` or
  {option}`hardware.nvidia.prime.amdgpuBusId`).

  If you enable this, you may want to also enable kernel modesetting for the
  NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order

  Note that this configuration will only be successful when a display manager
  that supports the {option}`services.xserver.displayManager.setupCommands`
  option is used.
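  A configuration sketch for reverse sync (hypothetical AMD APU + NVIDIA dGPU;
  the bus IDs are the example values from above and must match your hardware):

  ```nix
  hardware.nvidia.prime = {
    reverseSync.enable = true;
    nvidiaBusId = "PCI:1:0:0";
    amdgpuBusId = "PCI:4:0:0";
  };
  ```

  As configured later in this module, reverse sync implies offloading:
  {option}`hardware.nvidia.prime.offload.enable` defaults to the value of this
  option.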
(lib.mkEnableOption (lib.mdDoc ''
  nvidia-settings, NVIDIA's GUI configuration tool
// {default = true;};
nvidiaPersistenced = lib.mkEnableOption (lib.mdDoc ''
  nvidia-persistenced, a daemon for NVIDIA GPU headless mode; it
  ensures that all GPUs stay awake even in headless mode.
forceFullCompositionPipeline = lib.mkEnableOption (lib.mdDoc ''
  forcefully enabling the full composition pipeline.
  This sometimes fixes screen tearing issues.
  This has been reported to reduce the performance of some OpenGL applications and may produce issues in WebGL.
  It also drastically increases the time the driver needs to clock down after load.
package = lib.mkOption {
  default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
  defaultText = lib.literalExpression ''
    config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
  example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
  description = lib.mdDoc ''
    The NVIDIA driver package to use.
open = lib.mkEnableOption (lib.mdDoc ''
  the open source NVIDIA kernel module
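  A selection sketch (assuming a GPU that the open kernel module supports; the
  chosen package must provide both the open module and its firmware, as
  asserted elsewhere in this module):

  ```nix
  hardware.nvidia = {
    open = true;
    package = config.boot.kernelPackages.nvidiaPackages.stable;
  };
  ```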
if pCfg.intelBusId != ""

if pCfg.intelBusId != ""
else pCfg.amdgpuBusId;
lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
  assertion = !(nvidiaEnabled && cfg.datacenter.enable);
  message = "You cannot configure both X11 and Data Center drivers at the same time.";
blacklistedKernelModules = ["nouveau" "nvidiafb"];

# Don't add `nvidia-uvm` to `kernelModules`, because we want
# `nvidia-uvm` to be loaded only after the `udev` rules for the `nvidia` kernel
# module are applied.
#
# Instead, we use `softdep` to lazily load the `nvidia-uvm` kernel module
# after the `nvidia` kernel module is loaded and `udev` rules are applied.
extraModprobeConfig = ''
  softdep nvidia post: nvidia-uvm

systemd.tmpfiles.rules =
  lib.optional config.virtualisation.docker.enableNvidia
  "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
services.udev.extraRules =
  # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
  KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
  KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
  KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
  KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
  KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"

environment.systemPackages = [
(lib.mkIf nvidiaEnabled {
  assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
  message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";

  assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
  message = "Offload command requires offloading or reverse prime sync to be enabled.";

  assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
  message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";

  assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
  message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";

  assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
  message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";

  assertion = !(syncCfg.enable && offloadCfg.enable);
  message = "PRIME Sync and Offload cannot both be enabled.";

  assertion = !(syncCfg.enable && reverseSyncCfg.enable);
  message = "PRIME Sync and PRIME Reverse Sync cannot both be enabled.";

  assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
  message = "Sync precludes powering down the NVIDIA GPU.";

  assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
  message = "Fine-grained power management requires offload to be enabled.";

  assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
  message = "Required files for driver-based power management only exist on versions >= 430.09.";

  assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
  message = "This version of the NVIDIA driver does not provide a corresponding open source kernel module.";

  assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
  message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01.";
# If Optimus/PRIME is enabled, we:
# - Specify the configured NVIDIA GPU bus ID in the Device section for the
# - Add the AllowEmptyInitialConfiguration option to the Screen section for the
#   "nvidia" driver, in order to allow the X server to start without any outputs.
# - Add a separate Device section for the Intel GPU, using the "modesetting"
#   driver and with the configured BusID.
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
#   driver and with the configured BusID.
# - Reference that Device section from the ServerLayout section as an inactive
# - Configure the display manager to run specific `xrandr` commands which will
#   configure/enable displays connected to the Intel iGPU / AMD APU.

# reverse sync implies offloading
hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
services.xserver.drivers =
  lib.optional primeEnabled {
    display = offloadCfg.enable;
    modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;

    + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
      Option "AccelMethod" "none"

    modules = [nvidia_x11.bin];
    display = !offloadCfg.enable;
    lib.optionalString primeEnabled
      BusID "${pCfg.nvidiaBusId}"
    + lib.optionalString pCfg.allowExternalGpu ''
      Option "AllowExternalGpus"

    Option "RandRRotation" "on"
    + lib.optionalString syncCfg.enable ''
      Option "AllowEmptyInitialConfiguration"
    + lib.optionalString cfg.forceFullCompositionPipeline ''
      Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
      Option "AllowIndirectGLXProtocol" "off"
      Option "TripleBuffer" "on"

services.xserver.serverLayoutSection =
  lib.optionalString syncCfg.enable ''
    Inactive "Device-${igpuDriver}[0]"
  + lib.optionalString reverseSyncCfg.enable ''
    Inactive "Device-nvidia[0]"
  + lib.optionalString offloadCfg.enable ''
    Option "AllowNVIDIAGPUScreens"
services.xserver.displayManager.setupCommands = let
  if igpuDriver == "amdgpu"
    # find the name of the provider if amdgpu
    "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
  then "\"${gpuProviderName}\" NVIDIA-0"
  else "NVIDIA-G0 \"${gpuProviderName}\"";

lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
  # Added by nvidia configuration module for Optimus/PRIME.
  ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
  ${lib.getExe pkgs.xorg.xrandr} --auto
"nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};

# 'nvidia_x11' installs its files to /run/opengl-driver/...
"egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";

pkgs.nvidia-vaapi-driver

pkgs.pkgsi686Linux.nvidia-vaapi-driver
environment.systemPackages =
  lib.optional cfg.nvidiaSettings nvidia_x11.settings
  ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
  ++ lib.optional offloadCfg.enableOffloadCmd
  (pkgs.writeShellScriptBin "nvidia-offload" ''
    export __NV_PRIME_RENDER_OFFLOAD=1
    export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
    export __GLX_VENDOR_LIBRARY_NAME=nvidia
    export __VK_LAYER_NV_optimus=NVIDIA_only

systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
systemd.services = let
  nvidiaService = state: {
    description = "NVIDIA system ${state} actions";
      ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
    before = ["systemd-${state}.service"];
    requiredBy = ["systemd-${state}.service"];

  (lib.mkIf cfg.powerManagement.enable {
    nvidia-suspend = nvidiaService "suspend";
    nvidia-hibernate = nvidiaService "hibernate";
    (nvidiaService "resume")
      after = ["systemd-suspend.service" "systemd-hibernate.service"];
      requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
(lib.mkIf cfg.nvidiaPersistenced {
  "nvidia-persistenced" = {
    description = "NVIDIA Persistence Daemon";
    wantedBy = ["multi-user.target"];
      PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
      ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
      ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
(lib.mkIf cfg.dynamicBoost.enable {
    description = "nvidia-powerd service";
      pkgs.util-linux # nvidia-powerd wants lscpu
    wantedBy = ["multi-user.target"];
      BusName = "nvidia.powerd.server";
      ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";

services.acpid.enable = true;

services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;

systemd.tmpfiles.rules =
  lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
  "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

extraModulePackages =
  then [nvidia_x11.open]
  else [nvidia_x11.bin];

# nvidia-uvm is required by CUDA applications.
  lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];

# If requested, enable modesetting via kernel parameter.
  lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
  ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
  ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
  ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
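# The "ibt=off" parameter above is a workaround: on kernels >= 6.2, Indirect
# Branch Tracking is reported to break NVIDIA drivers that lack IBT support;
# drivers (or the open module) advertising ibtSupport skip it.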
# Enable fine-grained power management.
extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
  options nvidia "NVreg_DynamicPowerManagement=0x02"
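  # Per NVIDIA's RTD3 documentation (see the powerManagement.finegrained
  # option description above), 0x02 selects fine-grained runtime D3 power
  # management for the GPU.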
services.udev.extraRules =
  lib.optionalString cfg.powerManagement.finegrained (
    lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
      # Remove NVIDIA USB xHCI Host Controller devices, if present
      ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"

      # Remove NVIDIA USB Type-C UCSI devices, if present
      ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"

      # Remove NVIDIA Audio devices, if present
      ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"

      # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
      ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
      ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"

      # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
      ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
      ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
(lib.mkIf (cfg.datacenter.enable) {
  boot.extraModulePackages = [

  systemd.services.nvidia-fabricmanager = {
    description = "Start NVIDIA NVLink Management";
    wantedBy = [ "multi-user.target" ];
    unitConfig.After = [ "network-online.target" ];
    unitConfig.Requires = [ "network-online.target" ];
      TimeoutStartSec = 240;
        nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
        nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
      LimitCORE = "infinity";

  environment.systemPackages =
    lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;