path: root/var/spack/repos/builtin/packages/py-pytorch/package.py
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyPytorch(PythonPackage):
    """Tensors and Dynamic neural networks in Python
    with strong GPU acceleration."""

    homepage = "http://pytorch.org/"
    git      = "https://github.com/pytorch/pytorch.git"

    version('1.0.0', tag='v1.0.0', submodules=True)
    version('0.4.1', tag='v0.4.1', submodules=True)
    version('0.4.0', tag='v0.4.0', submodules=True)
    version('0.3.1', tag='v0.3.1', submodules=True)

    variant('cuda', default=False, description='Add GPU support')
    variant('cudnn', default=False, description='Add cuDNN support')
    variant('nccl', default=False, description='Add NCCL support')
    variant('mkldnn', default=False, description='Add Intel MKL DNN support')
    variant('magma', default=False, description='Add MAGMA support')

    conflicts('+cudnn', when='~cuda')
    conflicts('+nccl', when='~cuda')
    conflicts('+magma', when='~cuda')
    conflicts('+mkldnn', when='@:0.3.2')
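
    # Illustrative install specs for these variants (a sketch, not an
    # exhaustive list):
    #     spack install py-pytorch@1.0.0 ~cuda
    #     spack install py-pytorch@1.0.0 +cuda +cudnn +nccl +magma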

    depends_on('py-setuptools', type='build')
    depends_on('py-cffi', type='build')
    depends_on('py-numpy', type=('run', 'build'))
    depends_on('blas')
    depends_on('lapack')
    depends_on('py-pyyaml', type=('run', 'build'))
    depends_on('py-typing', when='@0.3.2:', type=('run', 'build'))
    depends_on('intel-mkl', when='+mkldnn')
    depends_on('cuda@7.5:', when='+cuda', type=('build', 'link', 'run'))
    depends_on('cudnn@6:', when='+cuda+cudnn')
    depends_on('nccl', when='+cuda+nccl')
    depends_on('magma+shared', when='+cuda+magma')

    def setup_environment(self, build_env, run_env):
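        # The variables set here (MAX_JOBS, CUDA_HOME, the NO_* switches,
        # cuDNN/NCCL paths, and the build version/number) are read by
        # PyTorch's own setup.py at build time.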
        build_env.set('MAX_JOBS', make_jobs)

        if '+cuda' in self.spec:
            build_env.set('CUDA_HOME', self.spec['cuda'].prefix)
        else:
            build_env.set('NO_CUDA', 'TRUE')

        if '+cudnn' in self.spec:
            build_env.set('CUDNN_LIB_DIR',
                          self.spec['cudnn'].prefix.lib)
            build_env.set('CUDNN_INCLUDE_DIR',
                          self.spec['cudnn'].prefix.include)
        else:
            build_env.set('NO_CUDNN', 'TRUE')

        if '+nccl' in self.spec:
            build_env.set('NCCL_ROOT_DIR', self.spec['nccl'].prefix)
        else:
            build_env.set('NO_SYSTEM_NCCL', 'TRUE')

        if '+mkldnn' in self.spec:
            build_env.set('MKLDNN_HOME', self.spec['intel-mkl'].prefix)
        else:
            build_env.set('NO_MKLDNN', 'TRUE')

        build_env.set('NO_NNPACK', 'TRUE')

        build_env.set('PYTORCH_BUILD_VERSION', str(self.version))
        build_env.set('PYTORCH_BUILD_NUMBER', 0)
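
# A minimal, hypothetical smoke test once the package is installed:
#     spack load py-pytorch
#     python -c "import torch; print(torch.__version__)"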