summaryrefslogtreecommitdiff
path: root/var
diff options
context:
space:
mode:
authorGlenn Johnson <glenn-johnson@uiowa.edu>2020-01-24 22:57:16 -0600
committerAdam J. Stewart <ajstewart426@gmail.com>2020-01-24 22:57:16 -0600
commit71243f3f7b85ffcd63bf8d54f801fa515458ff07 (patch)
tree33f11ed09a985534f4b401c9b36e2bc1341ee33a /var
parent35db2d05b787de449ad2327568b793c75e8eed7e (diff)
downloadspack-71243f3f7b85ffcd63bf8d54f801fa515458ff07.tar.gz
spack-71243f3f7b85ffcd63bf8d54f801fa515458ff07.tar.bz2
spack-71243f3f7b85ffcd63bf8d54f801fa515458ff07.tar.xz
spack-71243f3f7b85ffcd63bf8d54f801fa515458ff07.zip
Get py-torch to build caffe2 (#14619)
* Get py-torch to build caffe2 This PR gets the py-torch package to build with caffe2, and closes issue #14576. If building on a machine with CUDA but no GPU the build will try to build with all compute capabilities. Older compute capabilities are not supported so the build will fail. The list of capabilities can be passed to the build using values set in the cuda_arch variant. Likewise, conflicts are also set to catch if the unsupported capabilities are listed in cuda_arch. This PR also sets version constraints on using an external mkldnn for newer versions. Currently, only versions up to 0.4 use an external mkldnn library. Also, the cuda variant is set to True, which restores previous behavior. * Update var/spack/repos/builtin/packages/py-torch/package.py Fix typo. Co-Authored-By: Adam J. Stewart <ajstewart426@gmail.com> * Adjust conflicts This commit adjusts the conflicts. There is an issue with the cuda_arch=20 conflicts directive as there is a conflicting dependency with any version >=1.1 and a cuda_arch=20 dependency specified in CudaPackage that gets trapped first. * Use a common message for conflicts This commit adds a variable to contain the bulk of the message string for the cuda_arch conflicts. This is used along with a version string in the conflicts directives messages. * Fix the strings - Use a multiline string for the cuda_arch_conflict variable. - No need for format() in the msg value. Co-authored-by: Adam J. Stewart <ajstewart426@gmail.com>
Diffstat (limited to 'var')
-rw-r--r--var/spack/repos/builtin/packages/py-torch/package.py33
1 files changed, 31 insertions, 2 deletions
diff --git a/var/spack/repos/builtin/packages/py-torch/package.py b/var/spack/repos/builtin/packages/py-torch/package.py
index dcec15dc68..2a20235bef 100644
--- a/var/spack/repos/builtin/packages/py-torch/package.py
+++ b/var/spack/repos/builtin/packages/py-torch/package.py
@@ -62,6 +62,7 @@ class PyTorch(PythonPackage, CudaPackage):
version('0.4.0', tag='v0.4.0', submodules=True)
version('0.3.1', tag='v0.3.1', submodules=True)
+ variant('cuda', default=True, description='Build with CUDA')
variant('cudnn', default=True, description='Enables the cuDNN build')
variant('magma', default=False, description='Enables the MAGMA build')
variant('fbgemm', default=False, description='Enables the FBGEMM build')
@@ -100,6 +101,27 @@ class PyTorch(PythonPackage, CudaPackage):
conflicts('+zstd', when='@:1.0')
conflicts('+tbb', when='@:1.1')
+ cuda_arch_conflict = ('This version of Torch/Caffe2 only supports compute '
+ 'capabilities ')
+
+ conflicts('cuda_arch=none', when='+cuda+caffe2',
+ msg='Must specify CUDA compute capabilities of your GPU, see '
+ 'https://developer.nvidia.com/cuda-gpus')
+ conflicts('cuda_arch=52', when='@1.3.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=5.3')
+ conflicts('cuda_arch=50', when='@1.3.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=5.3')
+ conflicts('cuda_arch=35', when='@1.3.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=5.3')
+ conflicts('cuda_arch=32', when='@1.3.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=5.3')
+ conflicts('cuda_arch=30', when='@1.3.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=5.3')
+ conflicts('cuda_arch=30', when='@1.2.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=3.2')
+ conflicts('cuda_arch=20', when='@1.0.0:+cuda+caffe2',
+ msg=cuda_arch_conflict + '>=3.0')
+
# Required dependencies
depends_on('cmake@3.5:', type='build')
# Use Ninja generator to speed up build times
@@ -128,7 +150,10 @@ class PyTorch(PythonPackage, CudaPackage):
# depends_on('fbgemm', when='+fbgemm')
# TODO: add dependency: https://github.com/ROCmSoftwarePlatform/MIOpen
# depends_on('miopen', when='+miopen')
- depends_on('intel-mkl-dnn', when='+mkldnn')
+ # TODO: See if there is a way to use an external mkldnn installation.
+ # Currently, only older versions of py-torch use an external mkldnn
+ # library.
+ depends_on('intel-mkl-dnn', when='@0.4:0.4.1+mkldnn')
# TODO: add dependency: https://github.com/Maratyszcza/NNPACK
# depends_on('nnpack', when='+nnpack')
depends_on('qnnpack', when='+qnnpack')
@@ -197,6 +222,10 @@ class PyTorch(PythonPackage, CudaPackage):
enable_or_disable('cuda')
if '+cuda' in self.spec:
env.set('CUDA_HOME', self.spec['cuda'].prefix)
+ torch_cuda_arch = ';'.join('{0:.1f}'.format(float(i) / 10.0) for i
+ in
+ self.spec.variants['cuda_arch'].value)
+ env.set('TORCH_CUDA_ARCH_LIST', torch_cuda_arch)
enable_or_disable('cudnn')
if '+cudnn' in self.spec:
@@ -213,7 +242,7 @@ class PyTorch(PythonPackage, CudaPackage):
env.set('MIOPEN_LIBRARY', self.spec['miopen'].libs[0])
enable_or_disable('mkldnn')
- if '+mkldnn' in self.spec:
+ if '@0.4:0.4.1+mkldnn' in self.spec:
env.set('MKLDNN_HOME', self.spec['intel-mkl-dnn'].prefix)
enable_or_disable('nnpack')