  nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
  nvidia_x11 = if nvidiaEnabled || cfg.datacenter.enable then cfg.package else null;

  cfg = config.hardware.nvidia;

  useOpenModules = cfg.open == true;

  pCfg = cfg.prime;
  syncCfg = pCfg.sync;
  offloadCfg = pCfg.offload;
  reverseSyncCfg = pCfg.reverseSync;
  primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
  busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
  ibtSupport = useOpenModules || (nvidia_x11.ibtSupport or false);
  settingsFormat = pkgs.formats.keyValue { };
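  # Note: busIDType matches strings like "PCI:0:2:0" (or the empty string, the
  # unset default). settingsFormat renders an attribute set as KEY=value lines;
  # for example (illustrative only), settingsFormat.generate "fabricmanager.conf"
  # { LOG_FILE_MAX_SIZE = 1024; } yields a file containing `LOG_FILE_MAX_SIZE=1024`.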
in
{
  options.hardware.nvidia = {
    datacenter.enable = lib.mkEnableOption ''
      Data Center drivers for NVIDIA cards on an NVLink topology
    '';
    datacenter.settings = lib.mkOption {
      type = settingsFormat.type;
      default = {
        LOG_LEVEL = 4;
        LOG_FILE_NAME = "/var/log/fabricmanager.log";
        LOG_APPEND_TO_LOG = 1;
        LOG_FILE_MAX_SIZE = 1024;
        LOG_USE_SYSLOG = 0;
        DAEMONIZE = 1;
        BIND_INTERFACE_IP = "127.0.0.1";
        STARTING_TCP_PORT = 16000;
        FABRIC_MODE = 0;
        FABRIC_MODE_RESTART = 0;
        STATE_FILE_NAME = "/var/tmp/fabricmanager.state";
        FM_CMD_BIND_INTERFACE = "127.0.0.1";
        FM_CMD_PORT_NUMBER = 6666;
        FM_STAY_RESIDENT_ON_FAILURES = 0;
        ACCESS_LINK_FAILURE_MODE = 0;
        TRUNK_LINK_FAILURE_MODE = 0;
        NVSWITCH_FAILURE_MODE = 0;
        ABORT_CUDA_JOBS_ON_FM_EXIT = 1;
      };
      defaultText = lib.literalExpression ''
        LOG_LEVEL=4;
        LOG_FILE_NAME="/var/log/fabricmanager.log";
        LOG_APPEND_TO_LOG=1;
        LOG_FILE_MAX_SIZE=1024;
        LOG_USE_SYSLOG=0;
        DAEMONIZE=1;
        BIND_INTERFACE_IP="127.0.0.1";
        STARTING_TCP_PORT=16000;
        FABRIC_MODE=0;
        FABRIC_MODE_RESTART=0;
        STATE_FILE_NAME="/var/tmp/fabricmanager.state";
        FM_CMD_BIND_INTERFACE="127.0.0.1";
        FM_CMD_PORT_NUMBER=6666;
        FM_STAY_RESIDENT_ON_FAILURES=0;
        ACCESS_LINK_FAILURE_MODE=0;
        TRUNK_LINK_FAILURE_MODE=0;
        NVSWITCH_FAILURE_MODE=0;
        ABORT_CUDA_JOBS_ON_FM_EXIT=1;
      '';
      description = ''
        Additional configuration options for fabricmanager.
      '';
    };
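    # Example (hypothetical deployment, not a module default): individual
    # fabricmanager settings can be overridden while keeping the rest, e.g.
    #   hardware.nvidia.datacenter.settings.LOG_FILE_MAX_SIZE = 4096;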
    powerManagement.enable = lib.mkEnableOption ''
      experimental power management through systemd. For more information, see
      the NVIDIA docs, chapter 21 (Configuring Power Management Support)
    '';
    powerManagement.finegrained = lib.mkEnableOption ''
      experimental power management of PRIME offload. For more information, see
      the NVIDIA docs, chapter 22 (PCI-Express Runtime D3 (RTD3) Power Management)
    '';
    dynamicBoost.enable = lib.mkEnableOption ''
      Dynamic Boost, which balances power between the CPU and the GPU for
      improved performance on supported laptops via the nvidia-powerd daemon.
      For more information, see the NVIDIA docs, chapter 23 (Dynamic Boost on Linux)
    '';
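    # Example (hypothetical laptop configuration): opting in to the experimental
    # suspend/resume handling together with Dynamic Boost:
    #   hardware.nvidia.powerManagement.enable = true;
    #   hardware.nvidia.dynamicBoost.enable = true;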
    modesetting.enable = lib.mkEnableOption ''
      kernel modesetting when using the NVIDIA proprietary driver.

      Enabling this fixes screen tearing when using Optimus via PRIME (see
      {option}`hardware.nvidia.prime.sync.enable`). This is not enabled
      by default because it is not officially supported by NVIDIA and would not
      work with SLI.

      Enabling this and using version 545 or newer of the proprietary NVIDIA
      driver causes it to provide its own framebuffer device, which allows
      Wayland compositors to work when they otherwise wouldn't.
    '' // {
      default = lib.versionAtLeast cfg.package.version "535";
      defaultText = lib.literalExpression "lib.versionAtLeast cfg.package.version \"535\"";
    };
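    # Note: when enabled, this option adds nvidia-drm.modeset=1 (and, on driver
    # 545 or newer, nvidia-drm.fbdev=1) to boot.kernelParams; see the
    # kernelParams logic further down in this module.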
    prime.nvidiaBusId = lib.mkOption {
      type = busIDType;
      default = "";
      example = "PCI:1:0:0";
      description = ''
        Bus ID of the NVIDIA GPU. You can find it using lspci; for example, if
        lspci shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
      '';
    };
    prime.intelBusId = lib.mkOption {
      type = busIDType;
      default = "";
      example = "PCI:0:2:0";
      description = ''
        Bus ID of the Intel GPU. You can find it using lspci; for example, if
        lspci shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
      '';
    };
    prime.amdgpuBusId = lib.mkOption {
      type = busIDType;
      default = "";
      example = "PCI:4:0:0";
      description = ''
        Bus ID of the AMD APU. You can find it using lspci; for example, if
        lspci shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
      '';
    };
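    # Note (general PCI addressing, not specific to this module): lspci prints
    # bus IDs in hexadecimal, while the X server expects decimal values, so a
    # GPU that lspci reports at "0a:00.0" becomes "PCI:10:0:0".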
    prime.sync.enable = lib.mkEnableOption ''
      NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
      If enabled, the NVIDIA GPU will always be on and used for all rendering,
      while enabling output to displays attached only to the integrated Intel/AMD
      GPU without a multiplexer.

      Note that this option only has any effect if the "nvidia" driver is specified
      in {option}`services.xserver.videoDrivers`, and it should preferably
      be the only driver there.

      If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
      be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
      {option}`hardware.nvidia.prime.intelBusId` or
      {option}`hardware.nvidia.prime.amdgpuBusId`).

      If you enable this, you may want to also enable kernel modesetting for the
      NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
      to prevent tearing.

      Note that this configuration only works with display managers that support
      the {option}`services.xserver.displayManager.setupCommands` option.
    '';
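    # Example (hypothetical Intel laptop; see the note after the bus ID options
    # for how to derive the IDs):
    #   hardware.nvidia.prime = {
    #     sync.enable = true;
    #     intelBusId = "PCI:0:2:0";
    #     nvidiaBusId = "PCI:1:0:0";
    #   };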
    prime.allowExternalGpu = lib.mkEnableOption ''
      configuring X to allow external NVIDIA GPUs when using PRIME sync or reverse sync (Optimus)
    '';
    prime.offload.enable = lib.mkEnableOption ''
      render offload support using the NVIDIA proprietary driver via PRIME.

      If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
      be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
      {option}`hardware.nvidia.prime.intelBusId` or
      {option}`hardware.nvidia.prime.amdgpuBusId`).
    '';

    prime.offload.enableOffloadCmd = lib.mkEnableOption ''
      adding an `nvidia-offload` convenience script to {option}`environment.systemPackages`
      for offloading programs to the NVIDIA GPU. This requires
      {option}`hardware.nvidia.prime.offload.enable` or {option}`hardware.nvidia.prime.reverseSync.enable`
      to also be enabled.

      Example usage: `nvidia-offload sauerbraten_client`
    '';
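    # Example (hypothetical): PRIME render offload plus the convenience wrapper;
    # the generated `nvidia-offload` script is defined later in this module.
    #   hardware.nvidia.prime = {
    #     offload.enable = true;
    #     offload.enableOffloadCmd = true;
    #     intelBusId = "PCI:0:2:0";
    #     nvidiaBusId = "PCI:1:0:0";
    #   };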
    prime.reverseSync.enable = lib.mkEnableOption ''
      NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
      PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
      enabling output to displays attached only to the NVIDIA GPU without a
      multiplexer.

      Warning: this feature is relatively new and, depending on your system, may
      work poorly; this goes especially for AMD support.
      See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828

      Note that this option only has any effect if the "nvidia" driver is specified
      in {option}`services.xserver.videoDrivers`, and it should preferably
      be the only driver there.

      If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
      be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
      {option}`hardware.nvidia.prime.intelBusId` or
      {option}`hardware.nvidia.prime.amdgpuBusId`).

      If you enable this, you may want to also enable kernel modesetting for the
      NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
      to prevent tearing.

      Note that this configuration only works with display managers that support
      the {option}`services.xserver.displayManager.setupCommands` option.
    '';
    prime.reverseSync.setupCommands.enable =
      (lib.mkEnableOption ''
        configure the display manager to be able to use the outputs
        attached to the NVIDIA GPU.
        Disable in order to configure the NVIDIA GPU outputs manually using xrandr.
        Note that this configuration only works with display managers that support
        the {option}`services.xserver.displayManager.setupCommands` option.
      '')
      // {
        default = true;
      };
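    # Example (hypothetical desktop whose monitors are wired to the NVIDIA GPU
    # while the AMD iGPU does the rendering):
    #   hardware.nvidia.prime = {
    #     reverseSync.enable = true;
    #     amdgpuBusId = "PCI:4:0:0";
    #     nvidiaBusId = "PCI:1:0:0";
    #   };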
    nvidiaSettings =
      (lib.mkEnableOption ''
        nvidia-settings, NVIDIA's GUI configuration tool
      '')
      // {
        default = true;
      };
    nvidiaPersistenced = lib.mkEnableOption ''
      nvidia-persistenced, a daemon for headless NVIDIA GPU operation;
      it ensures all GPUs stay awake even in headless mode
    '';
    forceFullCompositionPipeline = lib.mkEnableOption ''
      forcing the full composition pipeline.
      This sometimes fixes screen tearing issues.
      It has been reported to reduce the performance of some OpenGL applications and may produce issues in WebGL.
      It also drastically increases the time the driver needs to clock down after load
    '';
    package = lib.mkOption {
      default =
        config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
      defaultText = lib.literalExpression ''
        config.boot.kernelPackages.nvidiaPackages."''${if cfg.datacenter.enable then "dc" else "stable"}"
      '';
      example = "config.boot.kernelPackages.nvidiaPackages.legacy_470";
      description = ''
        The NVIDIA driver package to use.
      '';
    };
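    # Example (hypothetical): pinning a specific driver series instead of the
    # default "stable"/"dc" attribute:
    #   hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_470;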
    open = lib.mkOption {
      description = "Whether to enable the open source NVIDIA kernel module.";
      type = lib.types.nullOr lib.types.bool;
      default = if lib.versionOlder nvidia_x11.version "560" then false else null;
      defaultText = lib.literalExpression ''
        if lib.versionOlder config.hardware.nvidia.package.version "560" then false else null
      '';
    };
    gsp.enable = lib.mkEnableOption ''
      the GPU System Processor (GSP) on the video card
    '' // {
      default = useOpenModules || lib.versionAtLeast nvidia_x11.version "555";
      defaultText = lib.literalExpression ''
        config.hardware.nvidia.open == true || lib.versionAtLeast config.hardware.nvidia.package.version "555"
      '';
    };
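    # Note: on driver 560 and newer `hardware.nvidia.open` defaults to null and
    # must be set explicitly (an assertion below enforces this), e.g.
    #   hardware.nvidia.open = true;  # open modules; suggested for Turing (RTX, GTX 16xx) and newer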
  };

  config =
    let
      igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
      igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
    in
    lib.mkIf (nvidia_x11 != null) (
      lib.mkMerge [
        {
          assertions = [
            {
              assertion = !(nvidiaEnabled && cfg.datacenter.enable);
              message = "You cannot configure both X11 and Data Center drivers at the same time.";
            }
            {
              assertion = cfg.open != null;
              message = ''
                You must configure `hardware.nvidia.open` on NVIDIA driver versions >= 560.
                It is suggested to use the open source kernel modules on Turing or later GPUs (RTX series, GTX 16xx), and the closed source modules otherwise.
              '';
            }
          ];

          blacklistedKernelModules = [
            "nouveau"
            "nvidiafb"
          ];

          # Don't add `nvidia-uvm` to `kernelModules`, because we want
          # `nvidia-uvm` to be loaded only after the `udev` rules for the `nvidia`
          # kernel module are applied.
          #
          # Instead, we use `softdep` to lazily load the `nvidia-uvm` kernel module
          # after the `nvidia` kernel module is loaded and `udev` rules are applied.
          extraModprobeConfig = ''
            softdep nvidia post: nvidia-uvm
          '';
          systemd.tmpfiles.rules = lib.mkIf config.virtualisation.docker.enableNvidia [
            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
          ];
          services.udev.extraRules = ''
            # Create the /dev/nvidia* device nodes as the corresponding NVIDIA kernel modules are loaded.
            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
            KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
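            # Major number 195 is the one registered by the nvidia module, while
            # nvidia-uvm allocates its major dynamically, hence the /proc/devices
            # lookup in the rules above.
          '';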
          hardware.graphics = {
            extraPackages = [ nvidia_x11.out ];
            extraPackages32 = [ nvidia_x11.lib32 ];
          };

          environment.systemPackages = [ nvidia_x11.bin ];
        }
        (lib.mkIf nvidiaEnabled {
          assertions = [
            {
              assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
              message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
            }
            {
              assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
              message = "Offload command requires offloading or reverse PRIME sync to be enabled.";
            }
            {
              assertion =
                primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
              message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
            }
            {
              assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
              message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
            }
            {
              assertion =
                (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
              message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
            }
            {
              assertion = !(syncCfg.enable && offloadCfg.enable);
              message = "PRIME Sync and Offload cannot both be enabled.";
            }
            {
              assertion = !(syncCfg.enable && reverseSyncCfg.enable);
              message = "PRIME Sync and PRIME Reverse Sync cannot both be enabled.";
            }
            {
              assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
              message = "Sync precludes powering down the NVIDIA GPU.";
            }
            {
              assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
              message = "Fine-grained power management requires offload to be enabled.";
            }
            {
              assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
              message = "Required files for driver-based power management only exist on versions >= 430.09.";
            }
            {
              assertion = cfg.gsp.enable -> (cfg.package ? firmware);
              message = "This version of the NVIDIA driver does not provide GSP firmware.";
            }
            {
              assertion = useOpenModules -> (cfg.package ? open);
              message = "This version of the NVIDIA driver does not provide a corresponding open source kernel module.";
            }
            {
              assertion = useOpenModules -> cfg.gsp.enable;
              message = "The GSP cannot be disabled when using the open source kernel module.";
            }
            {
              assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
              message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01.";
            }
          ];
          # If Optimus/PRIME is enabled, we:
          # - Specify the configured NVIDIA GPU bus ID in the Device section for the
          #   "nvidia" driver.
          # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
          #   "nvidia" driver, in order to allow the X server to start without any outputs.
          # - Add a separate Device section for the Intel GPU, using the "modesetting"
          #   driver and with the configured BusID.
          # - OR add a separate Device section for the AMD APU, using the "amdgpu"
          #   driver and with the configured BusID.
          # - Reference that Device section from the ServerLayout section as an inactive
          #   device.
          # - Configure the display manager to run specific `xrandr` commands which will
          #   configure/enable displays connected to the Intel iGPU / AMD APU.
          # reverse sync implies offloading
          hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
          services.xserver.drivers =
            lib.optional primeEnabled {
              name = igpuDriver;
              display = offloadCfg.enable;
              modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
              deviceSection =
                ''
                  BusID "${igpuBusId}"
                ''
                + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
                  Option "AccelMethod" "none"
                '';
            }
            ++ lib.singleton {
              name = "nvidia";
              modules = [ nvidia_x11.bin ];
              display = !offloadCfg.enable;
              deviceSection =
                ''
                  Option "SidebandSocketPath" "/run/nvidia-xdriver/"
                ''
                + lib.optionalString primeEnabled ''
                  BusID "${pCfg.nvidiaBusId}"
                ''
                + lib.optionalString pCfg.allowExternalGpu ''
                  Option "AllowExternalGpus"
                '';
              screenSection =
                ''
                  Option "RandRRotation" "on"
                ''
                + lib.optionalString syncCfg.enable ''
                  Option "AllowEmptyInitialConfiguration"
                ''
                + lib.optionalString cfg.forceFullCompositionPipeline ''
                  Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
                  Option "AllowIndirectGLXProtocol" "off"
                  Option "TripleBuffer" "on"
                '';
            };
          services.xserver.serverLayoutSection =
            lib.optionalString syncCfg.enable ''
              Inactive "Device-${igpuDriver}[0]"
            ''
            + lib.optionalString reverseSyncCfg.enable ''
              Inactive "Device-nvidia[0]"
            ''
            + lib.optionalString offloadCfg.enable ''
              Option "AllowNVIDIAGPUScreens"
            '';
          services.xserver.displayManager.setupCommands =
            let
              gpuProviderName =
                if igpuDriver == "amdgpu" then
                  # find the name of the provider if amdgpu
                  "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
                else
                  igpuDriver;
              providerCmdParams =
                if syncCfg.enable then "\"${gpuProviderName}\" NVIDIA-0" else "NVIDIA-G0 \"${gpuProviderName}\"";
            in
            lib.optionalString
              (syncCfg.enable || (reverseSyncCfg.enable && reverseSyncCfg.setupCommands.enable))
              ''
                # Added by nvidia configuration module for Optimus/PRIME.
                ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
                ${lib.getExe pkgs.xorg.xrandr} --auto
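                # For a typical Intel + NVIDIA sync setup, the two commands above
                # expand to roughly (illustrative rendering):
                #   xrandr --setprovideroutputsource "modesetting" NVIDIA-0
                #   xrandr --auto
              '';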
499 "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {
500 source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
503 # 'nvidia_x11' installs it's files to /run/opengl-driver/...
504 "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
          hardware.graphics = {
            extraPackages = [ pkgs.nvidia-vaapi-driver ];
          };
          environment.systemPackages =
            lib.optional cfg.nvidiaSettings nvidia_x11.settings
            ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
            ++ lib.optional offloadCfg.enableOffloadCmd (
              pkgs.writeShellScriptBin "nvidia-offload" ''
                export __NV_PRIME_RENDER_OFFLOAD=1
                export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
                export __GLX_VENDOR_LIBRARY_NAME=nvidia
                export __VK_LAYER_NV_optimus=NVIDIA_only
                exec "$@"
              ''
            );
          systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
          systemd.services =
            let
              nvidiaService = state: {
                description = "NVIDIA system ${state} actions";
                path = [ pkgs.kbd ];
                serviceConfig = {
                  Type = "oneshot";
                  ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
                };
                before = [ "systemd-${state}.service" ];
                requiredBy = [ "systemd-${state}.service" ];
              };
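              # nvidiaService builds a oneshot unit that runs nvidia-sleep.sh for
              # the given state and orders it before systemd-<state>.service; it
              # is instantiated below for suspend, hibernate, and resume.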
            in
            lib.mkMerge [
              (lib.mkIf cfg.powerManagement.enable {
                nvidia-suspend = nvidiaService "suspend";
                nvidia-hibernate = nvidiaService "hibernate";
                nvidia-resume = (nvidiaService "resume") // {
                  before = [ ];
                  after = [
                    "systemd-suspend.service"
                    "systemd-hibernate.service"
                  ];
                  wantedBy = [
                    "systemd-suspend.service"
                    "systemd-hibernate.service"
                  ];
                };
              })
              (lib.mkIf cfg.nvidiaPersistenced {
                "nvidia-persistenced" = {
                  description = "NVIDIA Persistence Daemon";
                  wantedBy = [ "multi-user.target" ];
                  serviceConfig = {
                    Type = "forking";
                    Restart = "always";
                    PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
                    ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
                    ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
                  };
                };
              })
              (lib.mkIf cfg.dynamicBoost.enable {
                "nvidia-powerd" = {
                  description = "nvidia-powerd service";
                  path = [
                    pkgs.util-linux # nvidia-powerd wants lscpu
                  ];
                  wantedBy = [ "multi-user.target" ];
                  serviceConfig = {
                    Type = "dbus";
                    BusName = "nvidia.powerd.server";
                    ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
                  };
                };
              })
            ];
          services.acpid.enable = true;

          services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;

          hardware.firmware = lib.optional cfg.gsp.enable nvidia_x11.firmware;
          systemd.tmpfiles.rules =
            [
              # Remove the following log message:
              #    (WW) NVIDIA: Failed to bind sideband socket to
              #    (WW) NVIDIA: '/var/run/nvidia-xdriver-b4f69129' Permission denied
              #
              # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
              "d /run/nvidia-xdriver 0770 root users"
            ]
            ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
              "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
          boot = {
            extraModulePackages = if useOpenModules then [ nvidia_x11.open ] else [ nvidia_x11.bin ];
            # nvidia-uvm is required by CUDA applications.
            kernelModules =
              lib.optionals config.services.xserver.enable [
                "nvidia"
                "nvidia_modeset"
                "nvidia_drm"
              ]
              # With the open driver, nvidia-uvm does not automatically load as
              # a softdep of the nvidia module, so we explicitly load it for now.
              # See https://github.com/NixOS/nixpkgs/issues/334180
              ++ lib.optionals (config.services.xserver.enable && useOpenModules) [ "nvidia_uvm" ];
            # If requested, enable modesetting via kernel parameters.
            kernelParams =
              lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
              ++ lib.optional ((offloadCfg.enable || cfg.modesetting.enable) && lib.versionAtLeast nvidia_x11.version "545") "nvidia-drm.fbdev=1"
              ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
              ++ lib.optional useOpenModules "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
              ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
            # Enable fine-grained power management.
            extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
              options nvidia "NVreg_DynamicPowerManagement=0x02"
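              # 0x02 selects fine-grained (runtime D3 / RTD3) power management,
              # per the NVIDIA driver's documented NVreg_DynamicPowerManagement values.
            '';
          };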
          services.udev.extraRules = lib.optionalString cfg.powerManagement.finegrained (
            lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
              # Remove NVIDIA USB xHCI Host Controller devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"

              # Remove NVIDIA USB Type-C UCSI devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"

              # Remove NVIDIA Audio devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
            ''
            + ''
              # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"

              # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
            ''
          );
        })
        (lib.mkIf cfg.datacenter.enable {
          boot.extraModulePackages = [ nvidia_x11.bin ];

          systemd = {
            tmpfiles.rules =
              lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
                "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
            services = lib.mkMerge [
              (lib.mkIf cfg.datacenter.enable {
                nvidia-fabricmanager = {
                  description = "Start NVIDIA NVLink Management";
                  wantedBy = [ "multi-user.target" ];
                  unitConfig.After = [ "network-online.target" ];
                  unitConfig.Requires = [ "network-online.target" ];
                  serviceConfig = {
                    Type = "forking";
                    TimeoutStartSec = 240;
                    ExecStart =
                      let
                        # Since these rely on the `nvidia_x11.fabricmanager` derivation, they're
                        # unsuitable to be mentioned in the configuration defaults, but they _can_
                        # be overridden in `cfg.datacenter.settings` if needed.
                        fabricManagerConfDefaults = {
                          TOPOLOGY_FILE_PATH = "${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
                          DATABASE_PATH = "${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
                        };
                        nv-fab-conf = settingsFormat.generate "fabricmanager.conf" (fabricManagerConfDefaults // cfg.datacenter.settings);
                      in
                      "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
                    LimitCORE = "infinity";
                  };
                };
              })
              (lib.mkIf cfg.nvidiaPersistenced {
                "nvidia-persistenced" = {
                  description = "NVIDIA Persistence Daemon";
                  wantedBy = [ "multi-user.target" ];
                  serviceConfig = {
                    Type = "forking";
                    Restart = "always";
                    PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
                    ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
                    ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
                  };
                };
              })
            ];
          };
          environment.systemPackages =
            lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
            ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
        })
      ]
    );
}