{ lib
, buildPythonPackage
, fetchPypi
, isPy3k
, cython
, fastrlock
, numpy
, six
, wheel
, pytestCheckHook
, mock
, setuptools
, cudatoolkit
, cudnn
, cutensor
, nccl
, addOpenGLRunpath
}:

# All CUDA libraries must have been built against the same CUDA toolkit,
# or the resulting binaries will be ABI-incompatible.
assert cudnn.cudatoolkit == cudatoolkit;
assert cutensor.cudatoolkit == cudatoolkit;
assert nccl.cudatoolkit == cudatoolkit;

buildPythonPackage rec {
  pname = "cupy";
  version = "10.2.0";
  disabled = !isPy3k;

  src = fetchPypi {
    inherit pname version;
    sha256 = "sha256-5ovvA76QGOsOnVztMfDgLerks5nJrKR08rLc+ArmWA8=";
  };

  # See https://docs.cupy.dev/en/v10.2.0/reference/environment.html. Setting
  # both CUPY_NUM_BUILD_JOBS and CUPY_NUM_NVCC_THREADS to NIX_BUILD_CORES
  # results in a small amount of thrashing, but it turns out there are a large
  # number of very short builds and a few extremely long ones, so setting both
  # ends up working nicely in practice.
  preConfigure = ''
    export CUDA_PATH=${cudatoolkit}
    export CUPY_NUM_BUILD_JOBS="$NIX_BUILD_CORES"
    export CUPY_NUM_NVCC_THREADS="$NIX_BUILD_CORES"
  '';

  nativeBuildInputs = [
    addOpenGLRunpath
    cython
  ];

  # Link against the driver API stubs shipped with the toolkit, since the real
  # libcuda.so is not available inside the build sandbox.
  LDFLAGS = "-L${cudatoolkit}/lib/stubs";

  propagatedBuildInputs = [
    cudatoolkit
    cudnn
    cutensor
    nccl
    fastrlock
    numpy
    six
    setuptools
    wheel
  ];

  checkInputs = [
    pytestCheckHook
    mock
  ];

  # The tests require a GPU, whose drivers are not accessible from the build
  # sandbox.
  doCheck = false;

  # Patch the RUNPATH of every shared object so it can locate the system's
  # GPU driver libraries at runtime.
  postFixup = ''
    find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
      addOpenGLRunpath "$lib"
    done
  '';

  enableParallelBuilding = true;

  meta = with lib; {
    description = "A NumPy-compatible matrix library accelerated by CUDA";
    homepage = "https://cupy.chainer.org/";
    license = licenses.mit;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ hyphon81 ];
  };
}