author     Adam J. Stewart <ajstewart426@gmail.com>    2022-01-12 08:52:16 -0600
committer  GitHub <noreply@github.com>                 2022-01-12 15:52:16 +0100
commit     faaf38ca7d2df2b7468e387d74ddf6b37fbd63d5
tree       598de7f88c5b04ae4b89e25af2912e564731ed31
parent     54d741ba5448951ea689f3b8b91e3b1552d0a4d1
py-torch: use conditional variants (#28242)
-rw-r--r--  var/spack/repos/builtin/packages/py-torch/package.py  188
1 file changed, 77 insertions(+), 111 deletions(-)
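
For context, the diff below replaces pairs of `variant()` plus `conflicts()` directives with Spack's conditional variants, where a `when=` spec limits the versions and configurations under which the variant exists at all. A minimal sketch of the idiom, using a hypothetical package and variant names that are not part of this commit:

```python
# Illustrative only: a toy Spack package contrasting the two styles.
# Import style follows Spack packages of this era.
from spack import *


class Example(Package):
    """Toy package contrasting conflicts() with conditional variants."""

    version('2.0')
    version('1.0')

    # Old style: the variant always exists, and a separate directive
    # forbids the combinations that make no sense.
    variant('profiler', default=True, description='Build the profiler')
    conflicts('+profiler', when='@:1.0')  # profiler only exists in 2.0+

    # New style (what this commit adopts): the variant is only defined
    # when the 'when' spec matches, so no conflicts() is needed and the
    # variant is not advertised for versions that cannot have it.
    variant('tracing', default=True, description='Build tracing support',
            when='@2.0:')
```

In the real diff the same pattern also makes variants conditional on other variants and on the platform, e.g. `variant('cudnn', ..., when='+cuda')` and `variant('numa', ..., when='platform=linux')`.
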
diff --git a/var/spack/repos/builtin/packages/py-torch/package.py b/var/spack/repos/builtin/packages/py-torch/package.py
index 72c786934e..3b667deafb 100644
--- a/var/spack/repos/builtin/packages/py-torch/package.py
+++ b/var/spack/repos/builtin/packages/py-torch/package.py
@@ -36,73 +36,50 @@ class PyTorch(PythonPackage, CudaPackage):
version('1.5.1', tag='v1.5.1', submodules=True)
version('1.5.0', tag='v1.5.0', submodules=True)
version('1.4.1', tag='v1.4.1', submodules=True)
- version('1.4.0', tag='v1.4.0', submodules=True, deprecated=True,
- submodules_delete=['third_party/fbgemm'])
version('1.3.1', tag='v1.3.1', submodules=True)
version('1.3.0', tag='v1.3.0', submodules=True)
version('1.2.0', tag='v1.2.0', submodules=True)
version('1.1.0', tag='v1.1.0', submodules=True)
- version('1.0.1', tag='v1.0.1', submodules=True)
- version('1.0.0', tag='v1.0.0', submodules=True)
- version('0.4.1', tag='v0.4.1', submodules=True, deprecated=True,
- submodules_delete=['third_party/nervanagpu'])
- version('0.4.0', tag='v0.4.0', submodules=True, deprecated=True)
- version('0.3.1', tag='v0.3.1', submodules=True, deprecated=True)
+ version('1.0.1', tag='v1.0.1', submodules=True, deprecated=True)
+ version('1.0.0', tag='v1.0.0', submodules=True, deprecated=True)
is_darwin = sys.platform == 'darwin'
# All options are defined in CMakeLists.txt.
# Some are listed in setup.py, but not all.
- variant('caffe2', default=True, description='Build Caffe2')
+ variant('caffe2', default=True, description='Build Caffe2', when='@1.7:')
variant('test', default=False, description='Build C++ test binaries')
variant('cuda', default=not is_darwin, description='Use CUDA')
variant('rocm', default=False, description='Use ROCm')
- variant('cudnn', default=not is_darwin, description='Use cuDNN')
+ variant('cudnn', default=not is_darwin, description='Use cuDNN', when='+cuda')
variant('fbgemm', default=True, description='Use FBGEMM (quantized 8-bit server operators)')
- variant('kineto', default=True, description='Use Kineto profiling library')
- variant('magma', default=not is_darwin, description='Use MAGMA')
+ variant('kineto', default=True, description='Use Kineto profiling library', when='@1.8:')
+ variant('magma', default=not is_darwin, description='Use MAGMA', when='+cuda')
variant('metal', default=is_darwin, description='Use Metal for Caffe2 iOS build')
- variant('nccl', default=not is_darwin, description='Use NCCL')
+ variant('nccl', default=True, description='Use NCCL', when='+cuda platform=linux')
+ variant('nccl', default=True, description='Use NCCL', when='+cuda platform=cray')
+ variant('nccl', default=True, description='Use NCCL', when='+rocm platform=linux')
+ variant('nccl', default=True, description='Use NCCL', when='+rocm platform=cray')
variant('nnpack', default=True, description='Use NNPACK')
- variant('numa', default=not is_darwin, description='Use NUMA')
+ variant('numa', default=True, description='Use NUMA', when='platform=linux')
+ variant('numa', default=True, description='Use NUMA', when='platform=cray')
variant('numpy', default=True, description='Use NumPy')
variant('openmp', default=True, description='Use OpenMP for parallel code')
variant('qnnpack', default=True, description='Use QNNPACK (quantized 8-bit operators)')
- variant('valgrind', default=not is_darwin, description='Use Valgrind')
- variant('xnnpack', default=True, description='Use XNNPACK')
+ variant('valgrind', default=True, description='Use Valgrind', when='@1.8: platform=linux')
+ variant('valgrind', default=True, description='Use Valgrind', when='@1.8: platform=cray')
+ variant('xnnpack', default=True, description='Use XNNPACK', when='@1.5:')
variant('mkldnn', default=True, description='Use MKLDNN')
variant('distributed', default=not is_darwin, description='Use distributed')
- variant('mpi', default=not is_darwin, description='Use MPI for Caffe2')
- variant('gloo', default=not is_darwin, description='Use Gloo')
- variant('tensorpipe', default=not is_darwin, description='Use TensorPipe')
- variant('onnx_ml', default=True, description='Enable traditional ONNX ML API')
- variant('breakpad', default=True, description='Enable breakpad crash dump library')
-
- conflicts('+cuda', when='+rocm')
- conflicts('+cudnn', when='~cuda')
- conflicts('+magma', when='~cuda')
- conflicts('+nccl', when='~cuda~rocm')
- conflicts('+nccl', when='platform=darwin')
- conflicts('+numa', when='platform=darwin', msg='Only available on Linux')
- conflicts('+valgrind', when='platform=darwin', msg='Only available on Linux')
- conflicts('+mpi', when='~distributed')
- conflicts('+gloo', when='~distributed')
- conflicts('+tensorpipe', when='~distributed')
- conflicts('+kineto', when='@:1.7')
- conflicts('+valgrind', when='@:1.7')
- conflicts('~caffe2', when='@0.4.0:1.6') # no way to disable caffe2?
- conflicts('+caffe2', when='@:0.3.1') # caffe2 did not yet exist?
- conflicts('+tensorpipe', when='@:1.5')
- conflicts('+xnnpack', when='@:1.4')
- conflicts('~onnx_ml', when='@:1.4') # no way to disable ONNX?
- conflicts('+rocm', when='@:0.4')
- conflicts('+cudnn', when='@:0.4')
- conflicts('+fbgemm', when='@:0.4,1.4.0')
- conflicts('+qnnpack', when='@:0.4')
- conflicts('+mkldnn', when='@:0.4')
- conflicts('+breakpad', when='@:1.9') # Option appeared in 1.10.0
- conflicts('+breakpad', when='target=ppc64:', msg='Unsupported')
- conflicts('+breakpad', when='target=ppc64le:', msg='Unsupported')
+ variant('mpi', default=not is_darwin, description='Use MPI for Caffe2', when='+distributed')
+ variant('gloo', default=not is_darwin, description='Use Gloo', when='+distributed')
+ variant('tensorpipe', default=not is_darwin, description='Use TensorPipe', when='@1.6: +distributed')
+ variant('onnx_ml', default=True, description='Enable traditional ONNX ML API', when='@1.5:')
+ variant('breakpad', default=True, description='Enable breakpad crash dump library', when='@1.9:')
+
+ conflicts('+cuda+rocm')
+ conflicts('+breakpad', when='target=ppc64:')
+ conflicts('+breakpad', when='target=ppc64le:')
conflicts('cuda_arch=none', when='+cuda',
msg='Must specify CUDA compute capabilities of your GPU, see '
@@ -111,53 +88,52 @@ class PyTorch(PythonPackage, CudaPackage):
# Required dependencies
depends_on('cmake@3.5:', type='build')
# Use Ninja generator to speed up build times, automatically used if found
- depends_on('ninja@1.5:', when='@1.1.0:', type='build')
+ depends_on('ninja@1.5:', when='@1.1:', type='build')
# See python_min_version in setup.py
depends_on('python@3.6.2:', when='@1.7.1:', type=('build', 'link', 'run'))
- depends_on('python@3.6.1:', when='@1.6.0:1.7.0', type=('build', 'link', 'run'))
- depends_on('python@3.5:', when='@1.5.0:1.5', type=('build', 'link', 'run'))
- depends_on('python@2.7:2.8,3.5:', when='@1.4.0:1.4', type=('build', 'link', 'run'))
- depends_on('python@2.7:2.8,3.5:3.7', when='@:1.3', type=('build', 'link', 'run'))
+ depends_on('python@3.6.1:', when='@1.6:1.7.0', type=('build', 'link', 'run'))
+ depends_on('python@3.5:', when='@1.5', type=('build', 'link', 'run'))
+ depends_on('python@2.7:2,3.5:', when='@1.4', type=('build', 'link', 'run'))
+ depends_on('python@2.7:2,3.5:3.7', when='@:1.3', type=('build', 'link', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-future', when='@1.5:', type=('build', 'run'))
depends_on('py-future', when='@1.1: ^python@:2', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
- depends_on('py-typing', when='@0.4: ^python@:3.4', type=('build', 'run'))
+ depends_on('py-typing', when='^python@:3.4', type=('build', 'run'))
depends_on('py-typing-extensions', when='@1.7:', type=('build', 'run'))
- depends_on('py-pybind11@2.6.2', when='@1.8.0:', type=('build', 'link', 'run'))
- depends_on('py-pybind11@2.3.0', when='@1.1.0:1.7', type=('build', 'link', 'run'))
- depends_on('py-pybind11@2.2.4', when='@1.0.0:1.0', type=('build', 'link', 'run'))
- depends_on('py-pybind11@2.2.2', when='@0.4.0:0.4', type=('build', 'link', 'run'))
- depends_on('py-dataclasses', when='@1.7: ^python@3.6.0:3.6', type=('build', 'run'))
+ depends_on('py-pybind11@2.6.2', when='@1.8:', type=('build', 'link', 'run'))
+ depends_on('py-pybind11@2.3.0', when='@1.1:1.7', type=('build', 'link', 'run'))
+ depends_on('py-pybind11@2.2.4', when='@:1.0', type=('build', 'link', 'run'))
+ depends_on('py-dataclasses', when='@1.7: ^python@3.6', type=('build', 'run'))
depends_on('py-tqdm', type='run')
- depends_on('py-protobuf', when='@0.4:', type=('build', 'run'))
- depends_on('protobuf', when='@0.4:')
+ depends_on('py-protobuf', type=('build', 'run'))
+ depends_on('protobuf')
depends_on('blas')
depends_on('lapack')
- depends_on('eigen', when='@0.4:')
+ depends_on('eigen')
# https://github.com/pytorch/pytorch/issues/60329
- # depends_on('cpuinfo@2020-12-17', when='@1.8.0:')
- # depends_on('cpuinfo@2020-06-11', when='@1.6.0:1.7')
+ # depends_on('cpuinfo@2020-12-17', when='@1.8:')
+ # depends_on('cpuinfo@2020-06-11', when='@1.6:1.7')
# https://github.com/shibatch/sleef/issues/427
- # depends_on('sleef@3.5.1_2020-12-22', when='@1.8.0:')
+ # depends_on('sleef@3.5.1_2020-12-22', when='@1.8:')
# https://github.com/pytorch/pytorch/issues/60334
- # depends_on('sleef@3.4.0_2019-07-30', when='@1.6.0:1.7')
+ # depends_on('sleef@3.4.0_2019-07-30', when='@1.6:1.7')
# https://github.com/Maratyszcza/FP16/issues/18
- # depends_on('fp16@2020-05-14', when='@1.6.0:')
- depends_on('pthreadpool@2021-04-13', when='@1.9.0:')
- depends_on('pthreadpool@2020-10-05', when='@1.8.0:1.8')
- depends_on('pthreadpool@2020-06-15', when='@1.6.0:1.7')
- depends_on('psimd@2020-05-17', when='@1.6.0:')
- depends_on('fxdiv@2020-04-17', when='@1.6.0:')
+ # depends_on('fp16@2020-05-14', when='@1.6:')
+ depends_on('pthreadpool@2021-04-13', when='@1.9:')
+ depends_on('pthreadpool@2020-10-05', when='@1.8')
+ depends_on('pthreadpool@2020-06-15', when='@1.6:1.7')
+ depends_on('psimd@2020-05-17', when='@1.6:')
+ depends_on('fxdiv@2020-04-17', when='@1.6:')
depends_on('benchmark', when='@1.6:+test')
# Optional dependencies
depends_on('cuda@7.5:', when='+cuda', type=('build', 'link', 'run'))
depends_on('cuda@9:', when='@1.1:+cuda', type=('build', 'link', 'run'))
depends_on('cuda@9.2:', when='@1.6:+cuda', type=('build', 'link', 'run'))
- depends_on('cudnn@6.0:7', when='@:1.0+cudnn')
- depends_on('cudnn@7.0:7', when='@1.1.0:1.5+cudnn')
- depends_on('cudnn@7.0:', when='@1.6.0:+cudnn')
+ depends_on('cudnn@6:7', when='@:1.0+cudnn')
+ depends_on('cudnn@7.0:7', when='@1.1:1.5+cudnn')
+ depends_on('cudnn@7:', when='@1.6:+cudnn')
depends_on('magma', when='+magma')
depends_on('nccl', when='+nccl')
depends_on('numactl', when='+numa')
@@ -165,16 +141,16 @@ class PyTorch(PythonPackage, CudaPackage):
depends_on('llvm-openmp', when='%apple-clang +openmp')
depends_on('valgrind', when='+valgrind')
# https://github.com/pytorch/pytorch/issues/60332
- # depends_on('xnnpack@2021-02-22', when='@1.8.0:+xnnpack')
- # depends_on('xnnpack@2020-03-23', when='@1.6.0:1.7+xnnpack')
+ # depends_on('xnnpack@2021-02-22', when='@1.8:+xnnpack')
+ # depends_on('xnnpack@2020-03-23', when='@1.6:1.7+xnnpack')
depends_on('mpi', when='+mpi')
# https://github.com/pytorch/pytorch/issues/60270
- # depends_on('gloo@2021-05-04', when='@1.9.0:+gloo')
- # depends_on('gloo@2020-09-18', when='@1.7.0:1.8+gloo')
- # depends_on('gloo@2020-03-17', when='@1.6.0:1.6+gloo')
+ # depends_on('gloo@2021-05-04', when='@1.9:+gloo')
+ # depends_on('gloo@2020-09-18', when='@1.7:1.8+gloo')
+ # depends_on('gloo@2020-03-17', when='@1.6+gloo')
# https://github.com/pytorch/pytorch/issues/60331
- # depends_on('onnx@1.8.0_2020-11-03', when='@1.8.0:+onnx_ml')
- # depends_on('onnx@1.7.0_2020-05-31', when='@1.6.0:1.7+onnx_ml')
+ # depends_on('onnx@1.8.0_2020-11-03', when='@1.8:+onnx_ml')
+ # depends_on('onnx@1.7.0_2020-05-31', when='@1.6:1.7+onnx_ml')
depends_on('mkl', when='+mkldnn')
# Test dependencies
@@ -186,25 +162,25 @@ class PyTorch(PythonPackage, CudaPackage):
# https://github.com/pytorch/pytorch/issues/60328
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch',
sha256='e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9',
- when='@1.2.0:')
+ when='@1.2:')
# Fixes build on older systems with glibc <2.12
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch',
sha256='e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395',
- when='@1.1.0:1.8.1')
+ when='@1.1:1.8.1')
# Fixes CMake configuration error when XNNPACK is disabled
# https://github.com/pytorch/pytorch/pull/35607
# https://github.com/pytorch/pytorch/pull/37865
- patch('xnnpack.patch', when='@1.5.0:1.5')
+ patch('xnnpack.patch', when='@1.5')
# Fixes build error when ROCm is enabled for pytorch-1.5 release
- patch('rocm.patch', when='@1.5.0:1.5+rocm')
+ patch('rocm.patch', when='@1.5+rocm')
# Fixes fatal error: sleef.h: No such file or directory
# https://github.com/pytorch/pytorch/pull/35359
# https://github.com/pytorch/pytorch/issues/26555
- # patch('sleef.patch', when='@1.0.0:1.5')
+ # patch('sleef.patch', when='@:1.5')
# Fixes compilation with Clang 9.0.0 and Apple Clang 11.0.3
# https://github.com/pytorch/pytorch/pull/37086
@@ -214,7 +190,7 @@ class PyTorch(PythonPackage, CudaPackage):
# Removes duplicate definition of getCusparseErrorString
# https://github.com/pytorch/pytorch/issues/32083
- patch('cusparseGetErrorString.patch', when='@0.4.1:1.0^cuda@10.1.243:')
+ patch('cusparseGetErrorString.patch', when='@:1.0^cuda@10.1.243:')
# Fixes 'FindOpenMP.cmake'
# to detect openmp settings used by Fujitsu compiler.
@@ -279,12 +255,12 @@ class PyTorch(PythonPackage, CudaPackage):
if self.spec.satisfies('@1.1:') or newer:
if '+' + variant in self.spec:
env.set(keyword + '_' + var, 'ON')
- else:
+ elif '~' + variant in self.spec:
env.set(keyword + '_' + var, 'OFF')
else:
if '+' + variant in self.spec:
env.unset('NO_' + var)
- else:
+ elif '~' + variant in self.spec:
env.set('NO_' + var, 'ON')
# Build in parallel to speed up build times
@@ -293,11 +269,8 @@ class PyTorch(PythonPackage, CudaPackage):
# Spack logs have trouble handling colored output
env.set('COLORIZE_OUTPUT', 'OFF')
- if self.spec.satisfies('@0.4:'):
- enable_or_disable('test', keyword='BUILD')
-
- if self.spec.satisfies('@1.7:'):
- enable_or_disable('caffe2', keyword='BUILD')
+ enable_or_disable('test', keyword='BUILD')
+ enable_or_disable('caffe2', keyword='BUILD')
enable_or_disable('cuda')
if '+cuda' in self.spec:
@@ -318,12 +291,10 @@ class PyTorch(PythonPackage, CudaPackage):
env.set('CUDNN_LIBRARY', self.spec['cudnn'].libs[0])
enable_or_disable('fbgemm')
- if self.spec.satisfies('@1.8:'):
- enable_or_disable('kineto')
+ enable_or_disable('kineto')
enable_or_disable('magma')
enable_or_disable('metal')
- if self.spec.satisfies('@1.10:'):
- enable_or_disable('breakpad')
+ enable_or_disable('breakpad')
enable_or_disable('nccl')
if '+nccl' in self.spec:
@@ -343,23 +314,19 @@ class PyTorch(PythonPackage, CudaPackage):
# cmake/Modules/FindOpenMP.cmake
enable_or_disable('openmp', newer=True)
enable_or_disable('qnnpack')
- if self.spec.satisfies('@1.3:'):
- enable_or_disable('qnnpack', var='PYTORCH_QNNPACK')
- if self.spec.satisfies('@1.8:'):
- enable_or_disable('valgrind')
- if self.spec.satisfies('@1.5:'):
- enable_or_disable('xnnpack')
+ enable_or_disable('qnnpack', var='PYTORCH_QNNPACK')
+ enable_or_disable('valgrind')
+ enable_or_disable('xnnpack')
enable_or_disable('mkldnn')
enable_or_disable('distributed')
enable_or_disable('mpi')
# cmake/Modules/FindGloo.cmake
enable_or_disable('gloo', newer=True)
- if self.spec.satisfies('@1.6:'):
- enable_or_disable('tensorpipe')
+ enable_or_disable('tensorpipe')
if '+onnx_ml' in self.spec:
env.set('ONNX_ML', 'ON')
- else:
+ elif '~onnx_ml' in self.spec:
env.set('ONNX_ML', 'OFF')
if not self.spec.satisfies('@master'):
@@ -397,10 +364,9 @@ class PyTorch(PythonPackage, CudaPackage):
env.set('BUILD_CUSTOM_PROTOBUF', 'OFF')
env.set('USE_SYSTEM_NCCL', 'ON')
env.set('USE_SYSTEM_EIGEN_INSTALL', 'ON')
- if self.spec.satisfies('@0.4:'):
- env.set('pybind11_DIR', self.spec['py-pybind11'].prefix)
- env.set('pybind11_INCLUDE_DIR',
- self.spec['py-pybind11'].prefix.include)
+ env.set('pybind11_DIR', self.spec['py-pybind11'].prefix)
+ env.set('pybind11_INCLUDE_DIR',
+ self.spec['py-pybind11'].prefix.include)
if self.spec.satisfies('@1.10:'):
env.set('USE_SYSTEM_PYBIND11', 'ON')
# https://github.com/pytorch/pytorch/issues/60334
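
A note on the `else:` → `elif '~' + variant in self.spec:` changes in the build-environment hunks above: once a variant is conditional, it can be absent from a concretized spec entirely (neither `+name` nor `~name` when its `when=` condition is not met), so the helper should only force a value when the variant is actually present. A rough standalone sketch of that three-state check, with simplified names that are not the package's actual helper:

```python
# Simplified three-state check mirroring the pattern in the diff.
# `spec_variants` stands in for self.spec; in the real package the
# membership tests are `'+name' in self.spec` / `'~name' in self.spec`.
def env_flag_for(variant, spec_variants, keyword='USE'):
    """Return (env_var, value), or None if the variant is not in the spec."""
    var = variant.upper()
    if '+' + variant in spec_variants:
        return (keyword + '_' + var, 'ON')
    elif '~' + variant in spec_variants:
        return (keyword + '_' + var, 'OFF')
    # Conditional variant not defined for this version/platform:
    # leave the corresponding CMake option at its upstream default.
    return None


print(env_flag_for('cudnn', {'+cuda', '+cudnn'}))  # ('USE_CUDNN', 'ON')
print(env_flag_for('cudnn', {'~cuda'}))            # None; variant absent
```
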