20 pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
21 srcs = import ./binary-hashes.nix version;
22 unsupported = throw "Unsupported system";
24 in buildPythonPackage {
28 # Don't forget to update torch to the same version.
32 disabled = !(isPy37 || isPy38 || isPy39 || isPy310);
34 src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;
41 propagatedBuildInputs = [
56 rpath = lib.makeLibraryPath [ stdenv.cc.cc.lib ];
58 find $out/${python.sitePackages}/torch/lib -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
59 echo "setting rpath for $lib..."
60 patchelf --set-rpath "${rpath}:$out/${python.sitePackages}/torch/lib" "$lib"
61 addOpenGLRunpath "$lib"
65 # The wheel binary is not stripped, to avoid the error `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned.`.
68 pythonImportsCheck = [ "torch" ];
71 description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
72 homepage = "https://pytorch.org/";
73 changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
74 # Includes CUDA and Intel MKL, but redistribution of the binary is not restricted.
75 # https://docs.nvidia.com/cuda/eula/index.html
76 # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
77 license = licenses.bsd3;
78 sourceProvenance = with sourceTypes; [ binaryNativeCode ];
79 platforms = platforms.linux ++ platforms.darwin;
80 hydraPlatforms = []; # output size 3.2G on 1.11.0
81 maintainers = with maintainers; [ junjihashimoto ];