  testCDIScript = pkgs.writeShellScriptBin "test-cdi" ''
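    # Runs inside the test container: verify that every ELF file found in a
    # few driver-related directories resolves all of its shared library
    # dependencies (ldd reports no "not found" entries).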
    check_file_referential_integrity() {
      echo "checking $1 referential integrity"
      ( ${pkgs.glibc.bin}/bin/ldd "$1" | ${lib.getExe pkgs.gnugrep} "not found" &> /dev/null ) && return 1
    check_directory_referential_integrity() {
      ${lib.getExe pkgs.findutils} "$1" -type f -print0 | while read -d $'\0' file; do
        if [[ $(${lib.getExe pkgs.file} "$file" | ${lib.getExe pkgs.gnugrep} ELF) ]]; then
          check_file_referential_integrity "$file" || exit 1
          echo "skipping $file: not an ELF file"
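    # Directories expected to contain driver-provided ELF binaries; driverLink
    # resolves to /run/opengl-driver, which the test below bind-mounts into the
    # container.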
    check_directory_referential_integrity "/usr/bin" || exit 1
    check_directory_referential_integrity "${pkgs.addDriverRunpath.driverLink}" || exit 1
    check_directory_referential_integrity "/usr/local/nvidia" || exit 1
  testContainerImage = pkgs.dockerTools.buildImage {
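    # Minimal OCI image whose default command is the check script above.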
      Cmd = [ (lib.getExe testCDIScript) ];
    copyToRoot = with pkgs.dockerTools; [
      "cdiVersion": "0.5.0",
      "kind": "nvidia.com/gpu",
                "path": "/dev/urandom"
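  # Stand-in for the real toolkit: a dummy nvidia-ctk that just prints the
  # canned CDI spec above, so the CDI generator service can run in a VM
  # without NVIDIA hardware.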
  nvidia-container-toolkit = {
    package = pkgs.stdenv.mkDerivation {
      pname = "nvidia-ctk-dummy";
      passAsFile = [ "emptyCDISpec" ];
        mkdir -p $out/bin $out/share/nvidia-container-toolkit
        cp "$emptyCDISpecPath" "$out/share/nvidia-container-toolkit/spec.json"
        cat << EOF > "$out/bin/nvidia-ctk"
        #!${pkgs.runtimeShell}
        cat "$out/share/nvidia-container-toolkit/spec.json"
        chmod +x $out/bin/nvidia-ctk
      meta.mainProgram = "nvidia-ctk";
  name = "nvidia-container-toolkit";
  meta = with lib.maintainers; {
    maintainers = [ ereslibre ];
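      # jq is used by the test script below to validate the generated CDI JSON.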
      environment.systemPackages = with pkgs; [ jq ];
      virtualisation.diskSize = lib.mkDefault 10240;
      virtualisation.containers.enable = lib.mkDefault true;
        inherit nvidia-container-toolkit;
          package = config.boot.kernelPackages.nvidiaPackages.stable.open;
        graphics.enable = lib.mkDefault true;
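    # no-gpus: a machine with no Nvidia GPUs; only the generation of an empty
    # CDI spec is exercised here.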
      virtualisation.containers.enable = false;
      hardware.graphics.enable = false;
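    # one-gpu: ships podman so the image built above can actually be run
    # against the generated CDI spec.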
      environment.systemPackages = with pkgs; [ podman ];
      hardware.graphics.enable = true;
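    # Mounts whose hostPath does not exist must be skipped by the CDI
    # generator (https://github.com/NixOS/nixpkgs/issues/319201).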
    one-gpu-invalid-host-paths = {
      hardware.nvidia-container-toolkit.mounts = [
          hostPath = "/non-existent-path";
          containerPath = "/some/path";
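    # Node names are exposed to the test script with dashes mapped to
    # underscores (e.g. one-gpu-invalid-host-paths -> one_gpu_invalid_host_paths).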
    with subtest("Generate an empty CDI spec for a machine with no Nvidia GPUs"):
        no_gpus.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
        no_gpus.succeed("cat /var/run/cdi/nvidia-container-toolkit.json | jq")

    with subtest("Podman loads the generated CDI spec for a machine with an Nvidia GPU"):
        one_gpu.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
        one_gpu.succeed("cat /var/run/cdi/nvidia-container-toolkit.json | jq")
        one_gpu.succeed("podman load < ${testContainerImage}")
        print(one_gpu.succeed("podman run --pull=never --device=nvidia.com/gpu=all -v /run/opengl-driver:/run/opengl-driver:ro cdi-test:latest"))
    # Issue: https://github.com/NixOS/nixpkgs/issues/319201
    with subtest("The generated CDI spec skips specified non-existent paths on the host"):
        one_gpu_invalid_host_paths.wait_for_unit("nvidia-container-toolkit-cdi-generator.service")
        one_gpu_invalid_host_paths.fail("grep 'non-existent-path' /var/run/cdi/nvidia-container-toolkit.json")