-rw-r--r--  lib/spack/spack/test/data/unparse/README.md    |  17
-rw-r--r--  lib/spack/spack/test/data/unparse/amdfftw.txt  | 266
-rw-r--r--  lib/spack/spack/test/data/unparse/grads.txt    |  72
-rw-r--r--  lib/spack/spack/test/data/unparse/llvm.txt     | 715
-rw-r--r--  lib/spack/spack/test/data/unparse/mfem.txt     | 931
-rw-r--r--  lib/spack/spack/test/data/unparse/py-torch.txt | 448
-rw-r--r--  lib/spack/spack/test/util/package_hash.py      |  37
7 files changed, 2486 insertions, 0 deletions
diff --git a/lib/spack/spack/test/data/unparse/README.md b/lib/spack/spack/test/data/unparse/README.md
new file mode 100644
index 0000000000..c21c14c8f8
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/README.md
@@ -0,0 +1,17 @@
+# Test data for unparser
+
+These are test packages for Spack's unparser. They are used to ensure that the
+canonical unparser used for Spack's package hash remains consistent across Python
+versions.
+
+All of these were copied from mainline Spack packages, and they have been renamed with
+`.txt` suffixes so that they're not considered proper source files by the various
+checkers used in Spack CI.
+
+These packages were chosen for various reasons, but mainly because:
+
+1. They're some of the more complex packages in Spack, and they exercise more unparser
+ features than other packages.
+
+2. Each of these packages has some interesting feature that was hard to unparse
+ consistently across Python versions. See docstrings in packages for details.
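For context on "canonical" here: the package hash is computed (roughly) by parsing a package file, unparsing the AST back to a single canonical source form, and hashing that text. Comments vanish at parse time and docstring-style strings are dropped, so only meaningful changes to a recipe perturb the hash. The sketch below is an illustration of the idea only, not Spack's implementation (Spack carries its own unparser so the output is identical on every supported Python); it assumes Python 3.9+ for `ast.unparse`, and `canonical_package_hash` is a made-up name:

    import ast
    import hashlib

    def canonical_package_hash(source):
        """Illustrative only: hash a package file after canonicalizing it."""
        tree = ast.parse(source)                  # '#' comments vanish here
        for node in ast.walk(tree):
            body = getattr(node, 'body', None)
            if not isinstance(body, list) or not body:
                continue
            first = body[0]
            # Drop a leading string constant (a docstring, or a docstring-like
            # "comment") so documentation-only edits do not change the hash.
            if (isinstance(first, ast.Expr)
                    and isinstance(first.value, ast.Constant)
                    and isinstance(first.value.value, str)):
                node.body = body[1:] or [ast.Pass()]
        return hashlib.sha256(ast.unparse(tree).encode('utf-8')).hexdigest()

The files that follow are the inputs those consistency tests run on.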
diff --git a/lib/spack/spack/test/data/unparse/amdfftw.txt b/lib/spack/spack/test/data/unparse/amdfftw.txt
new file mode 100644
index 0000000000..0a178e199b
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/amdfftw.txt
@@ -0,0 +1,266 @@
+# -*- python -*-
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""This is an unparser test package.
+
+``amdfftw`` was chosen for its complexity and because it uses a negative array
+index that was not being unparsed consistently from Python 2 to 3.
+
+"""
+
+import os
+
+from spack import *
+from spack.pkg.builtin.fftw import FftwBase
+
+
+class Amdfftw(FftwBase):
+ """FFTW (AMD Optimized version) is a comprehensive collection of
+ fast C routines for computing the Discrete Fourier Transform (DFT)
+ and various special cases thereof.
+
+ It is an open-source implementation of the Fast Fourier transform
+    algorithm. It can compute transforms of real and complex-valued
+ arrays of arbitrary size and dimension.
+ AMD Optimized FFTW is the optimized FFTW implementation targeted
+ for AMD CPUs.
+
+    For a single-precision build, use precision=float.
+    Example: spack install amdfftw precision=float
+ """
+
+ _name = 'amdfftw'
+ homepage = "https://developer.amd.com/amd-aocl/fftw/"
+ url = "https://github.com/amd/amd-fftw/archive/3.0.tar.gz"
+ git = "https://github.com/amd/amd-fftw.git"
+
+ maintainers = ['amd-toolchain-support']
+
+ version('3.1', sha256='3e777f3acef13fa1910db097e818b1d0d03a6a36ef41186247c6ab1ab0afc132')
+ version('3.0.1', sha256='87030c6bbb9c710f0a64f4f306ba6aa91dc4b182bb804c9022b35aef274d1a4c')
+ version('3.0', sha256='a69deaf45478a59a69f77c4f7e9872967f1cfe996592dd12beb6318f18ea0bcd')
+ version('2.2', sha256='de9d777236fb290c335860b458131678f75aa0799c641490c644c843f0e246f8')
+
+ variant('shared', default=True,
+ description='Builds a shared version of the library')
+ variant('openmp', default=True,
+ description='Enable OpenMP support')
+ variant('threads', default=False,
+ description='Enable SMP threads support')
+ variant('debug', default=False,
+ description='Builds a debug version of the library')
+ variant(
+ 'amd-fast-planner',
+ default=False,
+        description='Option to reduce the planning time without much '
+                    'tradeoff in performance. It is supported for '
+                    'float and double precisions only.')
+ variant(
+ 'amd-top-n-planner',
+ default=False,
+ description='Build with amd-top-n-planner support')
+ variant(
+ 'amd-mpi-vader-limit',
+ default=False,
+ description='Build with amd-mpi-vader-limit support')
+ variant(
+ 'static',
+ default=False,
+        description='Build with static support')
+ variant(
+ 'amd-trans',
+ default=False,
+        description='Build with amd-trans support')
+ variant(
+ 'amd-app-opt',
+ default=False,
+        description='Build with amd-app-opt support')
+
+ depends_on('texinfo')
+
+ provides('fftw-api@3', when='@2:')
+
+ conflicts(
+ 'precision=quad',
+ when='@2.2 %aocc',
+ msg='Quad precision is not supported by AOCC clang version 2.2')
+ conflicts(
+ '+debug',
+ when='@2.2 %aocc',
+ msg='debug mode is not supported by AOCC clang version 2.2')
+ conflicts(
+ '%gcc@:7.2',
+ when='@2.2:',
+ msg='GCC version above 7.2 is required for AMDFFTW')
+ conflicts(
+        '+amd-fast-planner',
+ when='+mpi',
+ msg='mpi thread is not supported with amd-fast-planner')
+ conflicts(
+ '+amd-fast-planner',
+ when='@2.2',
+ msg='amd-fast-planner is supported from 3.0 onwards')
+ conflicts(
+ '+amd-fast-planner',
+ when='precision=quad',
+ msg='Quad precision is not supported with amd-fast-planner')
+ conflicts(
+ '+amd-fast-planner',
+ when='precision=long_double',
+ msg='long_double precision is not supported with amd-fast-planner')
+ conflicts(
+ '+amd-top-n-planner',
+ when='@:3.0.0',
+ msg='amd-top-n-planner is supported from 3.0.1 onwards')
+ conflicts(
+ '+amd-top-n-planner',
+ when='precision=long_double',
+ msg='long_double precision is not supported with amd-top-n-planner')
+ conflicts(
+ '+amd-top-n-planner',
+ when='precision=quad',
+ msg='Quad precision is not supported with amd-top-n-planner')
+ conflicts(
+ '+amd-top-n-planner',
+ when='+amd-fast-planner',
+ msg='amd-top-n-planner cannot be used with amd-fast-planner')
+ conflicts(
+ '+amd-top-n-planner',
+ when='+threads',
+ msg='amd-top-n-planner works only for single thread')
+ conflicts(
+ '+amd-top-n-planner',
+ when='+mpi',
+ msg='mpi thread is not supported with amd-top-n-planner')
+ conflicts(
+ '+amd-top-n-planner',
+ when='+openmp',
+ msg='openmp thread is not supported with amd-top-n-planner')
+ conflicts(
+ '+amd-mpi-vader-limit',
+ when='@:3.0.0',
+ msg='amd-mpi-vader-limit is supported from 3.0.1 onwards')
+ conflicts(
+ '+amd-mpi-vader-limit',
+ when='precision=quad',
+ msg='Quad precision is not supported with amd-mpi-vader-limit')
+ conflicts(
+ '+amd-trans',
+ when='+threads',
+ msg='amd-trans works only for single thread')
+ conflicts(
+ '+amd-trans',
+ when='+mpi',
+ msg='mpi thread is not supported with amd-trans')
+ conflicts(
+ '+amd-trans',
+ when='+openmp',
+ msg='openmp thread is not supported with amd-trans')
+ conflicts(
+ '+amd-trans',
+ when='precision=long_double',
+ msg='long_double precision is not supported with amd-trans')
+ conflicts(
+ '+amd-trans',
+ when='precision=quad',
+ msg='Quad precision is not supported with amd-trans')
+ conflicts(
+ '+amd-app-opt',
+ when='@:3.0.1',
+ msg='amd-app-opt is supported from 3.1 onwards')
+ conflicts(
+ '+amd-app-opt',
+ when='+mpi',
+ msg='mpi thread is not supported with amd-app-opt')
+ conflicts(
+ '+amd-app-opt',
+ when='precision=long_double',
+ msg='long_double precision is not supported with amd-app-opt')
+ conflicts(
+ '+amd-app-opt',
+ when='precision=quad',
+ msg='Quad precision is not supported with amd-app-opt')
+
+ def configure(self, spec, prefix):
+ """Configure function"""
+ # Base options
+ options = [
+ '--prefix={0}'.format(prefix),
+ '--enable-amd-opt'
+ ]
+
+ # Check if compiler is AOCC
+ if '%aocc' in spec:
+ options.append('CC={0}'.format(os.path.basename(spack_cc)))
+ options.append('FC={0}'.format(os.path.basename(spack_fc)))
+ options.append('F77={0}'.format(os.path.basename(spack_fc)))
+
+ if '+debug' in spec:
+ options.append('--enable-debug')
+
+ if '+mpi' in spec:
+ options.append('--enable-mpi')
+ options.append('--enable-amd-mpifft')
+ else:
+ options.append('--disable-mpi')
+ options.append('--disable-amd-mpifft')
+
+ options.extend(self.enable_or_disable('shared'))
+ options.extend(self.enable_or_disable('openmp'))
+ options.extend(self.enable_or_disable('threads'))
+ options.extend(self.enable_or_disable('amd-fast-planner'))
+ options.extend(self.enable_or_disable('amd-top-n-planner'))
+ options.extend(self.enable_or_disable('amd-mpi-vader-limit'))
+ options.extend(self.enable_or_disable('static'))
+ options.extend(self.enable_or_disable('amd-trans'))
+ options.extend(self.enable_or_disable('amd-app-opt'))
+
+ if not self.compiler.f77 or not self.compiler.fc:
+ options.append('--disable-fortran')
+
+        # Cross compilation is supported in amd-fftw by using the target
+        # variable to set the AMD_ARCH configure option.
+        # Spack users cannot set AMD_ARCH directly; instead, the target
+        # variable is used to derive the appropriate -march option for AMD_ARCH.
+ arch = spec.architecture
+ options.append(
+ 'AMD_ARCH={0}'.format(
+ arch.target.optimization_flags(
+ spec.compiler).split('=')[-1]))
+
+ # Specific SIMD support.
+ # float and double precisions are supported
+ simd_features = ['sse2', 'avx', 'avx2']
+
+ simd_options = []
+ for feature in simd_features:
+ msg = '--enable-{0}' if feature in spec.target else '--disable-{0}'
+ simd_options.append(msg.format(feature))
+
+ # When enabling configure option "--enable-amd-opt", do not use the
+ # configure option "--enable-generic-simd128" or
+ # "--enable-generic-simd256"
+
+ # Double is the default precision, for all the others we need
+ # to enable the corresponding option.
+ enable_precision = {
+ 'float': ['--enable-float'],
+ 'double': None,
+ 'long_double': ['--enable-long-double'],
+ 'quad': ['--enable-quad-precision']
+ }
+
+ # Different precisions must be configured and compiled one at a time
+ configure = Executable('../configure')
+ for precision in self.selected_precisions:
+
+ opts = (enable_precision[precision] or []) + options[:]
+
+ # SIMD optimizations are available only for float and double
+ if precision in ('float', 'double'):
+ opts += simd_options
+
+ with working_dir(precision, create=True):
+ configure(*opts)
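The amdfftw docstring above mentions a negative array index; the subscript in question appears to be the `[-1]` applied to `.split('=')` near the end of `configure()`. The snippet below (an illustration only, assuming Python 3.9+ for `ast.unparse`) shows why such an index is a round-trip hazard: the AST stores it as a unary minus applied to a positive constant, so an unparser has to pick one of several equivalent spellings, and `x[-1]` versus `x[(-1)]` parse back to the identical tree:

    import ast

    src = "arch.target.optimization_flags(spec.compiler).split('=')[-1]"
    node = ast.parse(src, mode='eval').body   # the Subscript node
    print(ast.dump(node.slice))               # a UnaryOp(USub) wrapping Constant(1)
    print(ast.unparse(node))                  # one spelling the unparser may choose
    # 'x[-1]' and 'x[(-1)]' produce identical ASTs, so a canonical unparser
    # has to settle on a single spelling and emit it on every Python version.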
diff --git a/lib/spack/spack/test/data/unparse/grads.txt b/lib/spack/spack/test/data/unparse/grads.txt
new file mode 100644
index 0000000000..8a381bffdc
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/grads.txt
@@ -0,0 +1,72 @@
+# -*- python -*-
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""This is an unparser test package.
+
+``grads`` was chosen because it has an embedded comment that looks like a docstring,
+which should be removed when doing canonical unparsing.
+
+"""
+
+from spack import *
+
+
+class Grads(AutotoolsPackage):
+ """The Grid Analysis and Display System (GrADS) is an interactive
+ desktop tool that is used for easy access, manipulation, and visualization
+ of earth science data. GrADS has two data models for handling gridded and
+ station data. GrADS supports many data file formats, including
+ binary (stream or sequential), GRIB (version 1 and 2), NetCDF,
+ HDF (version 4 and 5), and BUFR (for station data)."""
+
+ homepage = "http://cola.gmu.edu/grads/grads.php"
+ url = "ftp://cola.gmu.edu/grads/2.2/grads-2.2.1-src.tar.gz"
+
+ version('2.2.1', sha256='695e2066d7d131720d598bac0beb61ac3ae5578240a5437401dc0ffbbe516206')
+
+ variant('geotiff', default=True, description="Enable GeoTIFF support")
+ variant('shapefile', default=True, description="Enable Shapefile support")
+
+ """
+ # FIXME: Fails with undeclared functions (tdefi, tdef, ...) in gauser.c
+ variant('hdf5', default=False, description="Enable HDF5 support")
+ variant('hdf4', default=False, description="Enable HDF4 support")
+ variant('netcdf', default=False, description="Enable NetCDF support")
+ depends_on('hdf5', when='+hdf5')
+ depends_on('hdf', when='+hdf4')
+ depends_on('netcdf-c', when='+netcdf')
+ """
+
+ depends_on('libgeotiff', when='+geotiff')
+ depends_on('shapelib', when='+shapefile')
+ depends_on('udunits')
+ depends_on('libgd')
+ depends_on('libxmu')
+ depends_on('cairo +X +pdf +fc +ft')
+ depends_on('readline')
+ depends_on('pkgconfig', type='build')
+
+ def setup_build_environment(self, env):
+ env.set('SUPPLIBS', '/')
+
+ def setup_run_environment(self, env):
+ env.set('GADDIR', self.prefix.data)
+
+ @run_after('install')
+ def copy_data(self):
+ with working_dir(self.build_directory):
+ install_tree('data', self.prefix.data)
+ with working_dir(self.package_dir):
+ install('udpt', self.prefix.data)
+ filter_file(
+ r'({lib})',
+ self.prefix.lib,
+ self.prefix.data.udpt
+ )
+
+ def configure_args(self):
+ args = []
+ args.extend(self.with_or_without('geotiff'))
+ return args
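The grads docstring above points at the block of disabled variants that the package keeps in a bare triple-quoted string inside the class body. Structurally such a string is the same thing as a docstring: an expression statement wrapping a string constant. That is why a canonical unparser that strips docstring-style constants drops this "comment" as well. A small check of that claim (illustrative names, assuming Python 3.8+ for `ast.Constant`):

    import ast
    import textwrap

    src = textwrap.dedent('''
        class Grads:
            """Real docstring."""

            geotiff_default = True

            """
            # disabled options kept in a bare string, like the block above
            """
    ''')
    tree = ast.parse(src)
    for stmt in tree.body[0].body:
        docstring_like = (isinstance(stmt, ast.Expr)
                          and isinstance(stmt.value, ast.Constant)
                          and isinstance(stmt.value.value, str))
        print(type(stmt).__name__, docstring_like)
    # Prints 'Expr True', 'Assign False', 'Expr True': only its position
    # distinguishes the real docstring from the stringified comment.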
diff --git a/lib/spack/spack/test/data/unparse/llvm.txt b/lib/spack/spack/test/data/unparse/llvm.txt
new file mode 100644
index 0000000000..67557328d9
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/llvm.txt
@@ -0,0 +1,715 @@
+# -*- python -*-
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""This is an unparser test package.
+
+``llvm`` was chosen because it is one of the most complex packages in Spack and it
+exercises nearly all directives.
+
+"""
+import os
+import os.path
+import re
+import sys
+
+import llnl.util.tty as tty
+
+import spack.build_environment
+import spack.util.executable
+
+
+class Llvm(CMakePackage, CudaPackage):
+ """The LLVM Project is a collection of modular and reusable compiler and
+ toolchain technologies. Despite its name, LLVM has little to do
+ with traditional virtual machines, though it does provide helpful
+ libraries that can be used to build them. The name "LLVM" itself
+ is not an acronym; it is the full name of the project.
+ """
+
+ homepage = "https://llvm.org/"
+ url = "https://github.com/llvm/llvm-project/archive/llvmorg-7.1.0.tar.gz"
+ list_url = "https://releases.llvm.org/download.html"
+ git = "https://github.com/llvm/llvm-project"
+ maintainers = ['trws', 'haampie']
+
+ tags = ['e4s']
+
+ generator = 'Ninja'
+
+ family = "compiler" # Used by lmod
+
+ # fmt: off
+ version('main', branch='main')
+ version('13.0.0', sha256='a1131358f1f9f819df73fa6bff505f2c49d176e9eef0a3aedd1fdbce3b4630e8')
+ version('12.0.1', sha256='66b64aa301244975a4aea489f402f205cde2f53dd722dad9e7b77a0459b4c8df')
+ version('12.0.0', sha256='8e6c99e482bb16a450165176c2d881804976a2d770e0445af4375e78a1fbf19c')
+ version('11.1.0', sha256='53a0719f3f4b0388013cfffd7b10c7d5682eece1929a9553c722348d1f866e79')
+ version('11.0.1', sha256='9c7ad8e8ec77c5bde8eb4afa105a318fd1ded7dff3747d14f012758719d7171b')
+ version('11.0.0', sha256='8ad4ddbafac4f2c8f2ea523c2c4196f940e8e16f9e635210537582a48622a5d5')
+ version('10.0.1', sha256='c7ccb735c37b4ec470f66a6c35fbae4f029c0f88038f6977180b1a8ddc255637')
+ version('10.0.0', sha256='b81c96d2f8f40dc61b14a167513d87c0d813aae0251e06e11ae8a4384ca15451')
+ version('9.0.1', sha256='be7b034641a5fda51ffca7f5d840b1a768737779f75f7c4fd18fe2d37820289a')
+ version('9.0.0', sha256='7807fac25330e24e9955ca46cd855dd34bbc9cc4fdba8322366206654d1036f2')
+ version('8.0.1', sha256='5b18f6111c7aee7c0933c355877d4abcfe6cb40c1a64178f28821849c725c841')
+ version('8.0.0', sha256='d81238b4a69e93e29f74ce56f8107cbfcf0c7d7b40510b7879e98cc031e25167')
+ version('7.1.0', sha256='71c93979f20e01f1a1cc839a247945f556fa5e63abf2084e8468b238080fd839')
+ version('7.0.1', sha256='f17a6cd401e8fd8f811fbfbb36dcb4f455f898c9d03af4044807ad005df9f3c0')
+ version('6.0.1', sha256='aefadceb231f4c195fe6d6cd3b1a010b269c8a22410f339b5a089c2e902aa177')
+ version('6.0.0', sha256='1946ec629c88d30122afa072d3c6a89cc5d5e4e2bb28dc63b2f9ebcc7917ee64')
+ version('5.0.2', sha256='fe87aa11558c08856739bfd9bd971263a28657663cb0c3a0af01b94f03b0b795')
+ version('5.0.1', sha256='84ca454abf262579814a2a2b846569f6e0cb3e16dc33ca3642b4f1dff6fbafd3')
+ version('5.0.0', sha256='1f1843315657a4371d8ca37f01265fa9aae17dbcf46d2d0a95c1fdb3c6a4bab6')
+ version('4.0.1', sha256='cd664fb3eec3208c08fb61189c00c9118c290b3be5adb3215a97b24255618be5')
+ version('4.0.0', sha256='28ca4b2fc434cb1f558e8865386c233c2a6134437249b8b3765ae745ffa56a34')
+ version('3.9.1', sha256='f5b6922a5c65f9232f83d89831191f2c3ccf4f41fdd8c63e6645bbf578c4ab92')
+ version('3.9.0', sha256='9c6563a72c8b5b79941c773937d997dd2b1b5b3f640136d02719ec19f35e0333')
+ version('3.8.1', sha256='69360f0648fde0dc3d3c4b339624613f3bc2a89c4858933bc3871a250ad02826')
+ version('3.8.0', sha256='b5cc5974cc2fd4e9e49e1bbd0700f872501a8678bd9694fa2b36c65c026df1d1')
+ version('3.7.1', sha256='d2cb0eb9b8eb21e07605bfe5e7a5c6c5f5f8c2efdac01ec1da6ffacaabe4195a')
+ version('3.7.0', sha256='dc00bc230be2006fb87b84f6fe4800ca28bc98e6692811a98195da53c9cb28c6')
+ version('3.6.2', sha256='f75d703a388ba01d607f9cf96180863a5e4a106827ade17b221d43e6db20778a')
+ version('3.5.1', sha256='5d739684170d5b2b304e4fb521532d5c8281492f71e1a8568187bfa38eb5909d')
+ # fmt: on
+
+ # NOTE: The debug version of LLVM is an order of magnitude larger than
+ # the release version, and may take up 20-30 GB of space. If you want
+ # to save space, build with `build_type=Release`.
+
+ variant(
+ "clang",
+ default=True,
+ description="Build the LLVM C/C++/Objective-C compiler frontend",
+ )
+ variant(
+ "flang",
+ default=False,
+ description="Build the LLVM Fortran compiler frontend "
+ "(experimental - parser only, needs GCC)",
+ )
+ variant(
+ "omp_debug",
+ default=False,
+ description="Include debugging code in OpenMP runtime libraries",
+ )
+ variant("lldb", default=True, description="Build the LLVM debugger")
+ variant("lld", default=True, description="Build the LLVM linker")
+ variant("mlir", default=False, description="Build with MLIR support")
+ variant(
+ "internal_unwind",
+ default=True,
+ description="Build the libcxxabi libunwind",
+ )
+ variant(
+ "polly",
+ default=True,
+ description="Build the LLVM polyhedral optimization plugin, "
+ "only builds for 3.7.0+",
+ )
+ variant(
+ "libcxx",
+ default=True,
+ description="Build the LLVM C++ standard library",
+ )
+ variant(
+ "compiler-rt",
+ default=True,
+ description="Build LLVM compiler runtime, including sanitizers",
+ )
+ variant(
+ "gold",
+ default=(sys.platform != "darwin"),
+ description="Add support for LTO with the gold linker plugin",
+ )
+ variant(
+ "split_dwarf",
+ default=False,
+ description="Build with split dwarf information",
+ )
+ variant(
+ "shared_libs",
+ default=False,
+ description="Build all components as shared libraries, faster, "
+ "less memory to build, less stable",
+ )
+ variant(
+ "llvm_dylib",
+ default=False,
+ description="Build LLVM shared library, containing all "
+ "components in a single shared library",
+ )
+ variant(
+ "link_llvm_dylib",
+ default=False,
+ description="Link LLVM tools against the LLVM shared library",
+ )
+ variant(
+ "all_targets",
+ default=False,
+ description="Build all supported targets, default targets "
+ "<current arch>,NVPTX,AMDGPU,CppBackend",
+ )
+ variant(
+ "build_type",
+ default="Release",
+ description="CMake build type",
+ values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
+ )
+ variant(
+ "omp_tsan",
+ default=False,
+ description="Build with OpenMP capable thread sanitizer",
+ )
+ variant(
+ "omp_as_runtime",
+ default=True,
+ description="Build OpenMP runtime via ENABLE_RUNTIME by just-built Clang",
+ )
+ variant('code_signing', default=False,
+ description="Enable code-signing on macOS")
+ variant("python", default=False, description="Install python bindings")
+
+ variant('version_suffix', default='none', description="Add a symbol suffix")
+ variant('z3', default=False, description='Use Z3 for the clang static analyzer')
+
+ extends("python", when="+python")
+
+ # Build dependency
+ depends_on("cmake@3.4.3:", type="build")
+ depends_on('cmake@3.13.4:', type='build', when='@12:')
+ depends_on("ninja", type="build")
+ depends_on("python@2.7:2.8", when="@:4 ~python", type="build")
+ depends_on("python", when="@5: ~python", type="build")
+ depends_on("pkgconfig", type="build")
+
+ # Universal dependency
+ depends_on("python@2.7:2.8", when="@:4+python")
+ depends_on("python", when="@5:+python")
+ depends_on('z3', when='@8:+clang+z3')
+
+ # openmp dependencies
+ depends_on("perl-data-dumper", type=("build"))
+ depends_on("hwloc")
+ depends_on("libelf", when="+cuda") # libomptarget
+ depends_on("libffi", when="+cuda") # libomptarget
+
+ # ncurses dependency
+ depends_on("ncurses+termlib")
+
+ # lldb dependencies
+ depends_on("swig", when="+lldb")
+ depends_on("libedit", when="+lldb")
+ depends_on("py-six", when="@5.0.0: +lldb +python")
+
+ # gold support, required for some features
+ depends_on("binutils+gold+ld+plugins", when="+gold")
+
+ # polly plugin
+ depends_on("gmp", when="@:3.6 +polly")
+ depends_on("isl", when="@:3.6 +polly")
+
+ conflicts("+llvm_dylib", when="+shared_libs")
+ conflicts("+link_llvm_dylib", when="~llvm_dylib")
+ conflicts("+lldb", when="~clang")
+ conflicts("+libcxx", when="~clang")
+ conflicts("+internal_unwind", when="~clang")
+ conflicts("+compiler-rt", when="~clang")
+ conflicts("+flang", when="~clang")
+ # Introduced in version 11 as a part of LLVM and not a separate package.
+ conflicts("+flang", when="@:10")
+
+ conflicts('~mlir', when='+flang', msg='Flang requires MLIR')
+
+ # Older LLVM do not build with newer compilers, and vice versa
+ conflicts("%gcc@8:", when="@:5")
+ conflicts("%gcc@:5.0", when="@8:")
+ # clang/lib: a lambda parameter cannot shadow an explicitly captured entity
+ conflicts("%clang@8:", when="@:4")
+
+ # When these versions are concretized, but not explicitly with +libcxx, these
+ # conflicts will enable clingo to set ~libcxx, making the build successful:
+
+ # libc++ of LLVM13, see https://libcxx.llvm.org/#platform-and-compiler-support
+ # @13 does not support %gcc@:10 https://bugs.llvm.org/show_bug.cgi?id=51359#c1
+ # GCC 11 - latest stable release per GCC release page
+ # Clang: 11, 12 - latest two stable releases per LLVM release page
+ # AppleClang 12 - latest stable release per Xcode release page
+ conflicts("%gcc@:10", when="@13:+libcxx")
+ conflicts("%clang@:10", when="@13:+libcxx")
+ conflicts("%apple-clang@:11", when="@13:+libcxx")
+
+ # libcxx-4 and compiler-rt-4 fail to build with "newer" clang and gcc versions:
+ conflicts('%gcc@7:', when='@:4+libcxx')
+ conflicts('%clang@6:', when='@:4+libcxx')
+ conflicts('%apple-clang@6:', when='@:4+libcxx')
+ conflicts('%gcc@7:', when='@:4+compiler-rt')
+ conflicts('%clang@6:', when='@:4+compiler-rt')
+ conflicts('%apple-clang@6:', when='@:4+compiler-rt')
+
+ # OMP TSAN exists in > 5.x
+ conflicts("+omp_tsan", when="@:5")
+
+ # OpenMP via ENABLE_RUNTIME restrictions
+ conflicts("+omp_as_runtime", when="~clang", msg="omp_as_runtime requires clang being built.")
+ conflicts("+omp_as_runtime", when="@:11.1", msg="omp_as_runtime works since LLVM 12.")
+
+ # cuda_arch value must be specified
+ conflicts("cuda_arch=none", when="+cuda", msg="A value for cuda_arch must be specified.")
+
+ # MLIR exists in > 10.x
+ conflicts("+mlir", when="@:9")
+
+    # code signing is only necessary on macOS
+ conflicts('+code_signing', when='platform=linux')
+ conflicts('+code_signing', when='platform=cray')
+
+ conflicts(
+ '+code_signing',
+ when='~lldb platform=darwin',
+ msg="code signing is only necessary for building the "
+ "in-tree debug server on macOS. Turning this variant "
+ "off enables a build of llvm with lldb that uses the "
+ "system debug server",
+ )
+
+ # LLVM bug https://bugs.llvm.org/show_bug.cgi?id=48234
+ # CMake bug: https://gitlab.kitware.com/cmake/cmake/-/issues/21469
+ # Fixed in upstream versions of both
+ conflicts('^cmake@3.19.0', when='@6.0.0:11.0.0')
+
+ # Github issue #4986
+ patch("llvm_gcc7.patch", when="@4.0.0:4.0.1+lldb %gcc@7.0:")
+
+ # sys/ustat.h has been removed in favour of statfs from glibc-2.28. Use fixed sizes:
+ patch('llvm5-sanitizer-ustat.patch', when="@4:6+compiler-rt")
+
+ # Fix lld templates: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=230463
+ patch('llvm4-lld-ELF-Symbols.patch', when="@4+lld%clang@6:")
+ patch('llvm5-lld-ELF-Symbols.patch', when="@5+lld%clang@7:")
+
+    # Fix missing std::size_t in 'llvm@4:5' when built with '%clang@7:'
+ patch('xray_buffer_queue-cstddef.patch', when="@4:5+compiler-rt%clang@7:")
+
+ # https://github.com/llvm/llvm-project/commit/947f9692440836dcb8d88b74b69dd379d85974ce
+ patch('sanitizer-ipc_perm_mode.patch', when="@5:7+compiler-rt%clang@11:")
+ patch('sanitizer-ipc_perm_mode.patch', when="@5:9+compiler-rt%gcc@9:")
+
+ # github.com/spack/spack/issues/24270 and MicrosoftDemangle: %gcc@10: and %clang@13:
+ patch('missing-includes.patch', when='@8:11')
+
+ # Backport from llvm master + additional fix
+ # see https://bugs.llvm.org/show_bug.cgi?id=39696
+ # for a bug report about this problem in llvm master.
+ patch("constexpr_longdouble.patch", when="@6:8+libcxx")
+ patch("constexpr_longdouble_9.0.patch", when="@9:10.0.0+libcxx")
+
+ # Backport from llvm master; see
+ # https://bugs.llvm.org/show_bug.cgi?id=38233
+ # for a bug report about this problem in llvm master.
+ patch("llvm_py37.patch", when="@4:6 ^python@3.7:")
+
+ # https://bugs.llvm.org/show_bug.cgi?id=39696
+ patch("thread-p9.patch", when="@develop+libcxx")
+
+ # https://github.com/spack/spack/issues/19625,
+ # merged in llvm-11.0.0_rc2, but not found in 11.0.1
+ patch("lldb_external_ncurses-10.patch", when="@10.0.0:11.0.1+lldb")
+
+ # https://github.com/spack/spack/issues/19908
+ # merged in llvm main prior to 12.0.0
+ patch("llvm_python_path.patch", when="@11.0.0")
+
+ # Workaround for issue https://github.com/spack/spack/issues/18197
+ patch('llvm7_intel.patch', when='@7 %intel@18.0.2,19.0.4')
+
+ # Remove cyclades support to build against newer kernel headers
+ # https://reviews.llvm.org/D102059
+ patch('no_cyclades.patch', when='@10:12.0.0')
+ patch('no_cyclades9.patch', when='@6:9')
+
+ # The functions and attributes below implement external package
+ # detection for LLVM. See:
+ #
+ # https://spack.readthedocs.io/en/latest/packaging_guide.html#making-a-package-discoverable-with-spack-external-find
+ executables = ['clang', 'flang', 'ld.lld', 'lldb']
+
+ @classmethod
+ def filter_detected_exes(cls, prefix, exes_in_prefix):
+ result = []
+ for exe in exes_in_prefix:
+            # Executables like lldb-vscode-X are daemons listening
+ # on some port and would hang Spack during detection.
+ # clang-cl and clang-cpp are dev tools that we don't
+ # need to test
+ if any(x in exe for x in ('vscode', 'cpp', '-cl', '-gpu')):
+ continue
+ result.append(exe)
+ return result
+
+ @classmethod
+ def determine_version(cls, exe):
+ version_regex = re.compile(
+ # Normal clang compiler versions are left as-is
+ r'clang version ([^ )\n]+)-svn[~.\w\d-]*|'
+ # Don't include hyphenated patch numbers in the version
+ # (see https://github.com/spack/spack/pull/14365 for details)
+ r'clang version ([^ )\n]+?)-[~.\w\d-]*|'
+ r'clang version ([^ )\n]+)|'
+ # LLDB
+ r'lldb version ([^ )\n]+)|'
+ # LLD
+ r'LLD ([^ )\n]+) \(compatible with GNU linkers\)'
+ )
+ try:
+ compiler = Executable(exe)
+ output = compiler('--version', output=str, error=str)
+ if 'Apple' in output:
+ return None
+ match = version_regex.search(output)
+ if match:
+ return match.group(match.lastindex)
+ except spack.util.executable.ProcessError:
+ pass
+ except Exception as e:
+ tty.debug(e)
+
+ return None
+
+ @classmethod
+ def determine_variants(cls, exes, version_str):
+ variants, compilers = ['+clang'], {}
+ lld_found, lldb_found = False, False
+ for exe in exes:
+ if 'clang++' in exe:
+ compilers['cxx'] = exe
+ elif 'clang' in exe:
+ compilers['c'] = exe
+ elif 'flang' in exe:
+ variants.append('+flang')
+ compilers['fc'] = exe
+ compilers['f77'] = exe
+ elif 'ld.lld' in exe:
+ lld_found = True
+ compilers['ld'] = exe
+ elif 'lldb' in exe:
+ lldb_found = True
+ compilers['lldb'] = exe
+
+ variants.append('+lld' if lld_found else '~lld')
+ variants.append('+lldb' if lldb_found else '~lldb')
+
+ return ''.join(variants), {'compilers': compilers}
+
+ @classmethod
+ def validate_detected_spec(cls, spec, extra_attributes):
+ # For LLVM 'compilers' is a mandatory attribute
+ msg = ('the extra attribute "compilers" must be set for '
+ 'the detected spec "{0}"'.format(spec))
+ assert 'compilers' in extra_attributes, msg
+ compilers = extra_attributes['compilers']
+ for key in ('c', 'cxx'):
+ msg = '{0} compiler not found for {1}'
+ assert key in compilers, msg.format(key, spec)
+
+ @property
+ def cc(self):
+ msg = "cannot retrieve C compiler [spec is not concrete]"
+ assert self.spec.concrete, msg
+ if self.spec.external:
+ return self.spec.extra_attributes['compilers'].get('c', None)
+ result = None
+ if '+clang' in self.spec:
+ result = os.path.join(self.spec.prefix.bin, 'clang')
+ return result
+
+ @property
+ def cxx(self):
+ msg = "cannot retrieve C++ compiler [spec is not concrete]"
+ assert self.spec.concrete, msg
+ if self.spec.external:
+ return self.spec.extra_attributes['compilers'].get('cxx', None)
+ result = None
+ if '+clang' in self.spec:
+ result = os.path.join(self.spec.prefix.bin, 'clang++')
+ return result
+
+ @property
+ def fc(self):
+ msg = "cannot retrieve Fortran compiler [spec is not concrete]"
+ assert self.spec.concrete, msg
+ if self.spec.external:
+ return self.spec.extra_attributes['compilers'].get('fc', None)
+ result = None
+ if '+flang' in self.spec:
+ result = os.path.join(self.spec.prefix.bin, 'flang')
+ return result
+
+ @property
+ def f77(self):
+ msg = "cannot retrieve Fortran 77 compiler [spec is not concrete]"
+ assert self.spec.concrete, msg
+ if self.spec.external:
+ return self.spec.extra_attributes['compilers'].get('f77', None)
+ result = None
+ if '+flang' in self.spec:
+ result = os.path.join(self.spec.prefix.bin, 'flang')
+ return result
+
+ @run_before('cmake')
+ def codesign_check(self):
+ if self.spec.satisfies("+code_signing"):
+ codesign = which('codesign')
+ mkdir('tmp')
+ llvm_check_file = join_path('tmp', 'llvm_check')
+ copy('/usr/bin/false', llvm_check_file)
+ try:
+ codesign('-f', '-s', 'lldb_codesign', '--dryrun',
+ llvm_check_file)
+
+ except ProcessError:
+ # Newer LLVM versions have a simple script that sets up
+                # code signing automatically when run with sudo privileges
+ setup = Executable("./lldb/scripts/macos-setup-codesign.sh")
+ try:
+ setup()
+ except Exception:
+ raise RuntimeError(
+                        'spack was unable to either find or set up '
+                        'code-signing on your system. Please refer to '
+                        'https://lldb.llvm.org/resources/build.html#'
+                        'code-signing-on-macos for details on how to '
+                        'create this identity.'
+ )
+
+ def flag_handler(self, name, flags):
+ if name == 'cxxflags':
+ flags.append(self.compiler.cxx11_flag)
+            return (None, flags, None)
+ elif name == 'ldflags' and self.spec.satisfies('%intel'):
+ flags.append('-shared-intel')
+            return (None, flags, None)
+        return (flags, None, None)
+
+ def setup_build_environment(self, env):
+ """When using %clang, add only its ld.lld-$ver and/or ld.lld to our PATH"""
+ if self.compiler.name in ['clang', 'apple-clang']:
+ for lld in 'ld.lld-{0}'.format(self.compiler.version.version[0]), 'ld.lld':
+ bin = os.path.join(os.path.dirname(self.compiler.cc), lld)
+ sym = os.path.join(self.stage.path, 'ld.lld')
+ if os.path.exists(bin) and not os.path.exists(sym):
+ mkdirp(self.stage.path)
+ os.symlink(bin, sym)
+ env.prepend_path('PATH', self.stage.path)
+
+ def setup_run_environment(self, env):
+ if "+clang" in self.spec:
+ env.set("CC", join_path(self.spec.prefix.bin, "clang"))
+ env.set("CXX", join_path(self.spec.prefix.bin, "clang++"))
+ if "+flang" in self.spec:
+ env.set("FC", join_path(self.spec.prefix.bin, "flang"))
+ env.set("F77", join_path(self.spec.prefix.bin, "flang"))
+
+ root_cmakelists_dir = "llvm"
+
+ def cmake_args(self):
+ spec = self.spec
+ define = CMakePackage.define
+ from_variant = self.define_from_variant
+
+ python = spec['python']
+ cmake_args = [
+ define("LLVM_REQUIRES_RTTI", True),
+ define("LLVM_ENABLE_RTTI", True),
+ define("LLVM_ENABLE_EH", True),
+ define("CLANG_DEFAULT_OPENMP_RUNTIME", "libomp"),
+ define("PYTHON_EXECUTABLE", python.command.path),
+ define("LIBOMP_USE_HWLOC", True),
+ define("LIBOMP_HWLOC_INSTALL_DIR", spec["hwloc"].prefix),
+ ]
+
+ version_suffix = spec.variants['version_suffix'].value
+ if version_suffix != 'none':
+ cmake_args.append(define('LLVM_VERSION_SUFFIX', version_suffix))
+
+ if python.version >= Version("3"):
+ cmake_args.append(define("Python3_EXECUTABLE", python.command.path))
+ else:
+ cmake_args.append(define("Python2_EXECUTABLE", python.command.path))
+
+ projects = []
+ runtimes = []
+
+ if "+cuda" in spec:
+ cmake_args.extend([
+ define("CUDA_TOOLKIT_ROOT_DIR", spec["cuda"].prefix),
+ define("LIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES",
+ ",".join(spec.variants["cuda_arch"].value)),
+ define("CLANG_OPENMP_NVPTX_DEFAULT_ARCH",
+ "sm_{0}".format(spec.variants["cuda_arch"].value[-1])),
+ ])
+ if "+omp_as_runtime" in spec:
+ cmake_args.extend([
+ define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
+ # work around bad libelf detection in libomptarget
+ define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR",
+ spec["libelf"].prefix.include),
+ ])
+ else:
+ # still build libomptarget but disable cuda
+ cmake_args.extend([
+ define("CUDA_TOOLKIT_ROOT_DIR", "IGNORE"),
+ define("CUDA_SDK_ROOT_DIR", "IGNORE"),
+ define("CUDA_NVCC_EXECUTABLE", "IGNORE"),
+ define("LIBOMPTARGET_DEP_CUDA_DRIVER_LIBRARIES", "IGNORE"),
+ ])
+
+ cmake_args.append(from_variant("LIBOMPTARGET_ENABLE_DEBUG", "omp_debug"))
+
+ if "+lldb" in spec:
+ if spec.version >= Version('10'):
+ cmake_args.append(from_variant("LLDB_ENABLE_PYTHON", 'python'))
+ else:
+ cmake_args.append(define("LLDB_DISABLE_PYTHON",
+ '~python' in spec))
+ if spec.satisfies("@5.0.0: +python"):
+ cmake_args.append(define("LLDB_USE_SYSTEM_SIX", True))
+
+ if "+gold" in spec:
+ cmake_args.append(
+ define("LLVM_BINUTILS_INCDIR", spec["binutils"].prefix.include)
+ )
+
+ if "+clang" in spec:
+ projects.append("clang")
+ projects.append("clang-tools-extra")
+ if "+omp_as_runtime" in spec:
+ runtimes.append("openmp")
+ else:
+ projects.append("openmp")
+
+ if self.spec.satisfies("@8"):
+ cmake_args.append(define('CLANG_ANALYZER_ENABLE_Z3_SOLVER',
+ self.spec.satisfies('@8+z3')))
+ if self.spec.satisfies("@9:"):
+ cmake_args.append(define('LLVM_ENABLE_Z3_SOLVER',
+ self.spec.satisfies('@9:+z3')))
+
+ if "+flang" in spec:
+ projects.append("flang")
+ if "+lldb" in spec:
+ projects.append("lldb")
+ if "+lld" in spec:
+ projects.append("lld")
+ if "+compiler-rt" in spec:
+ projects.append("compiler-rt")
+ if "+libcxx" in spec:
+ projects.append("libcxx")
+ projects.append("libcxxabi")
+ if "+mlir" in spec:
+ projects.append("mlir")
+ if "+internal_unwind" in spec:
+ projects.append("libunwind")
+ if "+polly" in spec:
+ projects.append("polly")
+ cmake_args.append(define("LINK_POLLY_INTO_TOOLS", True))
+
+ cmake_args.extend([
+ from_variant("BUILD_SHARED_LIBS", "shared_libs"),
+ from_variant("LLVM_BUILD_LLVM_DYLIB", "llvm_dylib"),
+ from_variant("LLVM_LINK_LLVM_DYLIB", "link_llvm_dylib"),
+ from_variant("LLVM_USE_SPLIT_DWARF", "split_dwarf"),
+            # By default on Linux, libc++.so is an ld script, and CMake fails to add
+            # CMAKE_INSTALL_RPATH to it. Statically link libc++abi.a into libc++.so
+            # so that linking with -lc++ or -stdlib=libc++ is enough.
+ define('LIBCXX_ENABLE_STATIC_ABI_LIBRARY', True)
+ ])
+
+ if "+all_targets" not in spec: # all is default on cmake
+
+ targets = ["NVPTX", "AMDGPU"]
+ if spec.version < Version("3.9.0"):
+ # Starting in 3.9.0 CppBackend is no longer a target (see
+ # LLVM_ALL_TARGETS in llvm's top-level CMakeLists.txt for
+ # the complete list of targets)
+ targets.append("CppBackend")
+
+ if spec.target.family in ("x86", "x86_64"):
+ targets.append("X86")
+ elif spec.target.family == "arm":
+ targets.append("ARM")
+ elif spec.target.family == "aarch64":
+ targets.append("AArch64")
+ elif spec.target.family in ("sparc", "sparc64"):
+ targets.append("Sparc")
+ elif spec.target.family in ("ppc64", "ppc64le", "ppc", "ppcle"):
+ targets.append("PowerPC")
+
+ cmake_args.append(define("LLVM_TARGETS_TO_BUILD", targets))
+
+ cmake_args.append(from_variant("LIBOMP_TSAN_SUPPORT", "omp_tsan"))
+
+ if self.compiler.name == "gcc":
+ compiler = Executable(self.compiler.cc)
+ gcc_output = compiler('-print-search-dirs', output=str, error=str)
+
+ for line in gcc_output.splitlines():
+ if line.startswith("install:"):
+ # Get path and strip any whitespace
+ # (causes oddity with ancestor)
+ gcc_prefix = line.split(":")[1].strip()
+ gcc_prefix = ancestor(gcc_prefix, 4)
+ break
+ cmake_args.append(define("GCC_INSTALL_PREFIX", gcc_prefix))
+
+ if self.spec.satisfies("~code_signing platform=darwin"):
+ cmake_args.append(define('LLDB_USE_SYSTEM_DEBUGSERVER', True))
+
+        # Semicolon-separated list of projects to enable
+ cmake_args.append(define("LLVM_ENABLE_PROJECTS", projects))
+
+        # Semicolon-separated list of runtimes to enable
+ if runtimes:
+ cmake_args.append(define("LLVM_ENABLE_RUNTIMES", runtimes))
+
+ return cmake_args
+
+ @run_after("install")
+ def post_install(self):
+ spec = self.spec
+ define = CMakePackage.define
+
+ # unnecessary if we build openmp via LLVM_ENABLE_RUNTIMES
+ if "+cuda ~omp_as_runtime" in self.spec:
+ ompdir = "build-bootstrapped-omp"
+ prefix_paths = spack.build_environment.get_cmake_prefix_path(self)
+ prefix_paths.append(str(spec.prefix))
+ # rebuild libomptarget to get bytecode runtime library files
+ with working_dir(ompdir, create=True):
+ cmake_args = [
+ '-G', 'Ninja',
+ define('CMAKE_BUILD_TYPE', spec.variants['build_type'].value),
+ define("CMAKE_C_COMPILER", spec.prefix.bin + "/clang"),
+ define("CMAKE_CXX_COMPILER", spec.prefix.bin + "/clang++"),
+ define("CMAKE_INSTALL_PREFIX", spec.prefix),
+ define('CMAKE_PREFIX_PATH', prefix_paths)
+ ]
+ cmake_args.extend(self.cmake_args())
+ cmake_args.extend([
+ define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
+ define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR",
+ spec["libelf"].prefix.include),
+ self.stage.source_path + "/openmp",
+ ])
+
+ cmake(*cmake_args)
+ ninja()
+ ninja("install")
+ if "+python" in self.spec:
+ install_tree("llvm/bindings/python", site_packages_dir)
+
+ if "+clang" in self.spec:
+ install_tree("clang/bindings/python", site_packages_dir)
+
+ with working_dir(self.build_directory):
+ install_tree("bin", join_path(self.prefix, "libexec", "llvm"))
diff --git a/lib/spack/spack/test/data/unparse/mfem.txt b/lib/spack/spack/test/data/unparse/mfem.txt
new file mode 100644
index 0000000000..bb074f9c5c
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/mfem.txt
@@ -0,0 +1,931 @@
+# -*- python -*-
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""This is an unparser test package.
+
+``mfem`` was chosen because it's one of the most complex packages in Spack, because it
+uses ``@when`` functions, because it has ``configure()`` calls with star-args in
+different locations, and because it has a function with embedded unicode that needs to
+be unparsed consistently between Python versions.
+
+"""
+
+import os
+import shutil
+import sys
+
+from spack import *
+
+
+class Mfem(Package, CudaPackage, ROCmPackage):
+ """Free, lightweight, scalable C++ library for finite element methods."""
+
+ tags = ['fem', 'finite-elements', 'high-order', 'amr', 'hpc', 'radiuss', 'e4s']
+
+ homepage = 'http://www.mfem.org'
+ git = 'https://github.com/mfem/mfem.git'
+
+ maintainers = ['v-dobrev', 'tzanio', 'acfisher',
+ 'goxberry', 'markcmiller86']
+
+ test_requires_compiler = True
+
+ # Recommended mfem builds to test when updating this file: see the shell
+ # script 'test_builds.sh' in the same directory as this file.
+
+ # mfem is downloaded from a URL shortener at request of upstream
+ # author Tzanio Kolev <tzanio@llnl.gov>. See here:
+ # https://github.com/mfem/mfem/issues/53
+ #
+ # The following procedure should be used to verify security when a
+ # new version is added:
+ #
+ # 1. Verify that no checksums on old versions have changed.
+ #
+ # 2. Verify that the shortened URL for the new version is listed at:
+ # https://mfem.org/download/
+ #
+ # 3. Use http://getlinkinfo.com or similar to verify that the
+    #    underlying download link for the latest version has the
+ # prefix: http://mfem.github.io/releases
+ #
+ # If this quick verification procedure fails, additional discussion
+ # will be required to verify the new version.
+
+ # 'develop' is a special version that is always larger (or newer) than any
+ # other version.
+ version('develop', branch='master')
+
+ version('4.3.0',
+ sha256='3a495602121b986049286ea0b23512279cdbdfb43c15c42a1511b521051fbe38',
+ url='https://bit.ly/mfem-4-3', extension='tar.gz')
+
+ version('4.2.0',
+ '4352a225b55948d2e73a5ee88cece0e88bdbe7ba6726a23d68b2736d3221a86d',
+ url='https://bit.ly/mfem-4-2', extension='tar.gz')
+
+ version('4.1.0',
+ '4c83fdcf083f8e2f5b37200a755db843cdb858811e25a8486ad36b2cbec0e11d',
+ url='https://bit.ly/mfem-4-1', extension='tar.gz')
+
+ # Tagged development version used by xSDK
+ version('4.0.1-xsdk', commit='c55c80d17b82d80de04b849dd526e17044f8c99a')
+
+ version('4.0.0',
+ 'df5bdac798ea84a263979f6fbf79de9013e1c55562f95f98644c3edcacfbc727',
+ url='https://bit.ly/mfem-4-0', extension='tar.gz')
+
+ # Tagged development version used by the laghos package:
+ version('3.4.1-laghos-v2.0', tag='laghos-v2.0')
+
+ version('3.4.0',
+ sha256='4e73e4fe0482636de3c5dc983cd395839a83cb16f6f509bd88b053e8b3858e05',
+ url='https://bit.ly/mfem-3-4', extension='tar.gz')
+
+ version('3.3.2',
+ sha256='b70fa3c5080b9ec514fc05f4a04ff74322b99ac4ecd6d99c229f0ed5188fc0ce',
+ url='https://goo.gl/Kd7Jk8', extension='tar.gz')
+
+ # Tagged development version used by the laghos package:
+ version('3.3.1-laghos-v1.0', tag='laghos-v1.0')
+
+ version('3.3',
+ sha256='b17bd452593aada93dc0fee748fcfbbf4f04ce3e7d77fdd0341cc9103bcacd0b',
+ url='http://goo.gl/Vrpsns', extension='tar.gz')
+
+ version('3.2',
+ sha256='2938c3deed4ec4f7fd5b5f5cfe656845282e86e2dcd477d292390058b7b94340',
+ url='http://goo.gl/Y9T75B', extension='tar.gz')
+
+ version('3.1',
+ sha256='841ea5cf58de6fae4de0f553b0e01ebaab9cd9c67fa821e8a715666ecf18fc57',
+ url='http://goo.gl/xrScXn', extension='tar.gz')
+
+ variant('static', default=True,
+ description='Build static library')
+ variant('shared', default=False,
+ description='Build shared library')
+ variant('mpi', default=True,
+ description='Enable MPI parallelism')
+    # Can we make the default value for 'metis' depend on the 'mpi' value?
+ variant('metis', default=True,
+ description='Enable METIS support')
+ variant('openmp', default=False,
+ description='Enable OpenMP parallelism')
+ # Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
+ # Note: '+rocm' and 'amdgpu_target' variants are added by the ROCmPackage
+ variant('occa', default=False, description='Enable OCCA backend')
+ variant('raja', default=False, description='Enable RAJA backend')
+ variant('libceed', default=False, description='Enable libCEED backend')
+ variant('umpire', default=False, description='Enable Umpire support')
+ variant('amgx', default=False, description='Enable NVIDIA AmgX solver support')
+
+ variant('threadsafe', default=False,
+ description=('Enable thread safe features.'
+ ' Required for OpenMP.'
+ ' May cause minor performance issues.'))
+ variant('superlu-dist', default=False,
+ description='Enable MPI parallel, sparse direct solvers')
+ variant('strumpack', default=False,
+ description='Enable support for STRUMPACK')
+ variant('suite-sparse', default=False,
+ description='Enable serial, sparse direct solvers')
+ variant('petsc', default=False,
+ description='Enable PETSc solvers, preconditioners, etc.')
+ variant('slepc', default=False,
+ description='Enable SLEPc integration')
+ variant('sundials', default=False,
+ description='Enable Sundials time integrators')
+ variant('pumi', default=False,
+ description='Enable functionality based on PUMI')
+ variant('gslib', default=False,
+ description='Enable functionality based on GSLIB')
+ variant('mpfr', default=False,
+ description='Enable precise, 1D quadrature rules')
+ variant('lapack', default=False,
+ description='Use external blas/lapack routines')
+ variant('debug', default=False,
+ description='Build debug instead of optimized version')
+ variant('netcdf', default=False,
+ description='Enable Cubit/Genesis reader')
+ variant('conduit', default=False,
+ description='Enable binary data I/O using Conduit')
+ variant('zlib', default=True,
+ description='Support zip\'d streams for I/O')
+ variant('gnutls', default=False,
+ description='Enable secure sockets using GnuTLS')
+ variant('libunwind', default=False,
+ description='Enable backtrace on error support using Libunwind')
+ # TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
+ variant('timer', default='auto',
+ values=('auto', 'std', 'posix', 'mac', 'mpi'),
+ description='Timing functions to use in mfem::StopWatch')
+ variant('examples', default=False,
+ description='Build and install examples')
+ variant('miniapps', default=False,
+ description='Build and install miniapps')
+
+ conflicts('+shared', when='@:3.3.2')
+ conflicts('~static~shared')
+ conflicts('~threadsafe', when='@:3+openmp')
+
+ conflicts('+cuda', when='@:3')
+ conflicts('+rocm', when='@:4.1')
+ conflicts('+cuda+rocm')
+ conflicts('+netcdf', when='@:3.1')
+ conflicts('+superlu-dist', when='@:3.1')
+    # STRUMPACK support was added in mfem v3.3.2; however, here we allow only
+    # strumpack v3+, support for which is available starting with mfem v4.0:
+ conflicts('+strumpack', when='@:3')
+ conflicts('+gnutls', when='@:3.1')
+ conflicts('+zlib', when='@:3.2')
+ conflicts('+mpfr', when='@:3.2')
+ conflicts('+petsc', when='@:3.2')
+ conflicts('+slepc', when='@:4.1')
+ conflicts('+sundials', when='@:3.2')
+ conflicts('+pumi', when='@:3.3.2')
+ conflicts('+gslib', when='@:4.0')
+ conflicts('timer=mac', when='@:3.3.0')
+ conflicts('timer=mpi', when='@:3.3.0')
+ conflicts('~metis+mpi', when='@:3.3.0')
+ conflicts('+metis~mpi', when='@:3.3.0')
+ conflicts('+conduit', when='@:3.3.2')
+ conflicts('+occa', when='mfem@:3')
+ conflicts('+raja', when='mfem@:3')
+ conflicts('+libceed', when='mfem@:4.0')
+ conflicts('+umpire', when='mfem@:4.0')
+ conflicts('+amgx', when='mfem@:4.1')
+ conflicts('+amgx', when='~cuda')
+ conflicts('+mpi~cuda ^hypre+cuda')
+
+ conflicts('+superlu-dist', when='~mpi')
+ conflicts('+strumpack', when='~mpi')
+ conflicts('+petsc', when='~mpi')
+ conflicts('+slepc', when='~petsc')
+ conflicts('+pumi', when='~mpi')
+ conflicts('timer=mpi', when='~mpi')
+
+ depends_on('mpi', when='+mpi')
+ depends_on('hypre@2.10.0:2.13', when='@:3.3+mpi')
+ depends_on('hypre@:2.20.0', when='@3.4:4.2+mpi')
+ depends_on('hypre@:2.23.0', when='@4.3.0+mpi')
+ depends_on('hypre', when='+mpi')
+
+ depends_on('metis', when='+metis')
+ depends_on('blas', when='+lapack')
+ depends_on('lapack@3.0:', when='+lapack')
+
+ depends_on('sundials@2.7.0', when='@:3.3.0+sundials~mpi')
+ depends_on('sundials@2.7.0+mpi+hypre', when='@:3.3.0+sundials+mpi')
+ depends_on('sundials@2.7.0:', when='@3.3.2:+sundials~mpi')
+ depends_on('sundials@2.7.0:+mpi+hypre', when='@3.3.2:+sundials+mpi')
+ depends_on('sundials@5.0.0:', when='@4.0.1-xsdk:+sundials~mpi')
+ depends_on('sundials@5.0.0:+mpi+hypre', when='@4.0.1-xsdk:+sundials+mpi')
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('sundials@5.4.0:+cuda cuda_arch={0}'.format(sm_),
+ when='@4.2.0:+sundials+cuda cuda_arch={0}'.format(sm_))
+ depends_on('pumi@2.2.3:', when='@4.2.0:+pumi')
+ depends_on('pumi', when='+pumi~shared')
+ depends_on('pumi+shared', when='+pumi+shared')
+ depends_on('gslib@1.0.5:+mpi', when='+gslib+mpi')
+ depends_on('gslib@1.0.5:~mpi~mpiio', when='+gslib~mpi')
+ depends_on('suite-sparse', when='+suite-sparse')
+ depends_on('superlu-dist', when='+superlu-dist')
+ depends_on('strumpack@3.0.0:', when='+strumpack~shared')
+ depends_on('strumpack@3.0.0:+shared', when='+strumpack+shared')
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('strumpack+cuda cuda_arch={0}'.format(sm_),
+ when='+strumpack+cuda cuda_arch={0}'.format(sm_))
+ # The PETSc tests in MFEM will fail if PETSc is not configured with
+ # SuiteSparse and MUMPS. On the other hand, if we require the variants
+ # '+suite-sparse+mumps' of PETSc, the xsdk package concretization fails.
+ depends_on('petsc@3.8:+mpi+double+hypre', when='+petsc')
+ depends_on('slepc@3.8.0:', when='+slepc')
+ # Recommended when building outside of xsdk:
+ # depends_on('petsc@3.8:+mpi+double+hypre+suite-sparse+mumps',
+ # when='+petsc')
+ depends_on('mpfr', when='+mpfr')
+ depends_on('netcdf-c@4.1.3:', when='+netcdf')
+ depends_on('unwind', when='+libunwind')
+ depends_on('zlib', when='+zlib')
+ depends_on('gnutls', when='+gnutls')
+ depends_on('conduit@0.3.1:,master:', when='+conduit')
+ depends_on('conduit+mpi', when='+conduit+mpi')
+
+ # The MFEM 4.0.0 SuperLU interface fails when using hypre@2.16.0 and
+ # superlu-dist@6.1.1. See https://github.com/mfem/mfem/issues/983.
+ # This issue was resolved in v4.1.
+ conflicts('+superlu-dist',
+ when='mfem@:4.0 ^hypre@2.16.0: ^superlu-dist@6:')
+ # The STRUMPACK v3 interface in MFEM seems to be broken as of MFEM v4.1
+ # when using hypre version >= 2.16.0.
+ # This issue is resolved in v4.2.
+ conflicts('+strumpack', when='mfem@4.0.0:4.1 ^hypre@2.16.0:')
+ conflicts('+strumpack ^strumpack+cuda', when='~cuda')
+
+ depends_on('occa@1.0.8:', when='@:4.1+occa')
+ depends_on('occa@1.1.0:', when='@4.2.0:+occa')
+ depends_on('occa+cuda', when='+occa+cuda')
+ # TODO: propagate '+rocm' variant to occa when it is supported
+
+ depends_on('raja@0.10.0:', when='@4.0.1:+raja')
+ depends_on('raja@0.7.0:0.9.0', when='@4.0.0+raja')
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('raja+cuda cuda_arch={0}'.format(sm_),
+ when='+raja+cuda cuda_arch={0}'.format(sm_))
+ for gfx in ROCmPackage.amdgpu_targets:
+ depends_on('raja+rocm amdgpu_target={0}'.format(gfx),
+ when='+raja+rocm amdgpu_target={0}'.format(gfx))
+
+ depends_on('libceed@0.6:', when='@:4.1+libceed')
+ depends_on('libceed@0.7:', when='@4.2.0:+libceed')
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('libceed+cuda cuda_arch={0}'.format(sm_),
+ when='+libceed+cuda cuda_arch={0}'.format(sm_))
+ for gfx in ROCmPackage.amdgpu_targets:
+ depends_on('libceed+rocm amdgpu_target={0}'.format(gfx),
+ when='+libceed+rocm amdgpu_target={0}'.format(gfx))
+
+ depends_on('umpire@2.0.0:', when='+umpire')
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('umpire+cuda cuda_arch={0}'.format(sm_),
+ when='+umpire+cuda cuda_arch={0}'.format(sm_))
+ for gfx in ROCmPackage.amdgpu_targets:
+ depends_on('umpire+rocm amdgpu_target={0}'.format(gfx),
+ when='+umpire+rocm amdgpu_target={0}'.format(gfx))
+
+ # AmgX: propagate the cuda_arch and mpi settings:
+ for sm_ in CudaPackage.cuda_arch_values:
+ depends_on('amgx+mpi cuda_arch={0}'.format(sm_),
+ when='+amgx+mpi cuda_arch={0}'.format(sm_))
+ depends_on('amgx~mpi cuda_arch={0}'.format(sm_),
+ when='+amgx~mpi cuda_arch={0}'.format(sm_))
+
+ patch('mfem_ppc_build.patch', when='@3.2:3.3.0 arch=ppc64le')
+ patch('mfem-3.4.patch', when='@3.4.0')
+ patch('mfem-3.3-3.4-petsc-3.9.patch',
+ when='@3.3.0:3.4.0 +petsc ^petsc@3.9.0:')
+ patch('mfem-4.2-umpire.patch', when='@4.2.0+umpire')
+ patch('mfem-4.2-slepc.patch', when='@4.2.0+slepc')
+ patch('mfem-4.2-petsc-3.15.0.patch', when='@4.2.0+petsc ^petsc@3.15.0:')
+ patch('mfem-4.3-hypre-2.23.0.patch', when='@4.3.0')
+ patch('mfem-4.3-cusparse-11.4.patch', when='@4.3.0+cuda')
+
+ # Patch to fix MFEM makefile syntax error. See
+ # https://github.com/mfem/mfem/issues/1042 for the bug report and
+ # https://github.com/mfem/mfem/pull/1043 for the bugfix contributed
+ # upstream.
+ patch('mfem-4.0.0-makefile-syntax-fix.patch', when='@4.0.0')
+ phases = ['configure', 'build', 'install']
+
+ def setup_build_environment(self, env):
+ env.unset('MFEM_DIR')
+ env.unset('MFEM_BUILD_DIR')
+
+ #
+    # Note: Although MFEM does support CMake configuration, the MFEM
+    # development team indicates that vanilla GNU Make is the
+    # preferred mode of configuration for MFEM and the mode most
+    # likely to be up to date in supporting *all* of MFEM's
+    # configuration options. So, don't use CMake.
+ #
+ def configure(self, spec, prefix):
+
+ def yes_no(varstr):
+ return 'YES' if varstr in self.spec else 'NO'
+
+ # See also find_system_libraries in lib/spack/llnl/util/filesystem.py
+ # where the same list of paths is used.
+ sys_lib_paths = [
+ '/lib64',
+ '/lib',
+ '/usr/lib64',
+ '/usr/lib',
+ '/usr/local/lib64',
+ '/usr/local/lib']
+
+ def is_sys_lib_path(dir):
+ return dir in sys_lib_paths
+
+ xcompiler = ''
+ xlinker = '-Wl,'
+ if '+cuda' in spec:
+ xcompiler = '-Xcompiler='
+ xlinker = '-Xlinker='
+ cuda_arch = None if '~cuda' in spec else spec.variants['cuda_arch'].value
+
+ # We need to add rpaths explicitly to allow proper export of link flags
+ # from within MFEM.
+
+ # Similar to spec[pkg].libs.ld_flags but prepends rpath flags too.
+ # Also does not add system library paths as defined by 'sys_lib_paths'
+ # above -- this is done to avoid issues like this:
+ # https://github.com/mfem/mfem/issues/1088.
+ def ld_flags_from_library_list(libs_list):
+ flags = ['%s-rpath,%s' % (xlinker, dir)
+ for dir in libs_list.directories
+ if not is_sys_lib_path(dir)]
+ flags += ['-L%s' % dir for dir in libs_list.directories
+ if not is_sys_lib_path(dir)]
+ flags += [libs_list.link_flags]
+ return ' '.join(flags)
+
+ def ld_flags_from_dirs(pkg_dirs_list, pkg_libs_list):
+ flags = ['%s-rpath,%s' % (xlinker, dir) for dir in pkg_dirs_list
+ if not is_sys_lib_path(dir)]
+ flags += ['-L%s' % dir for dir in pkg_dirs_list
+ if not is_sys_lib_path(dir)]
+ flags += ['-l%s' % lib for lib in pkg_libs_list]
+ return ' '.join(flags)
+
+ def find_optional_library(name, prefix):
+ for shared in [True, False]:
+ for path in ['lib64', 'lib']:
+ lib = find_libraries(name, join_path(prefix, path),
+ shared=shared, recursive=False)
+ if lib:
+ return lib
+ return LibraryList([])
+
+ # Determine how to run MPI tests, e.g. when using '--test=root', when
+ # Spack is run inside a batch system job.
+ mfem_mpiexec = 'mpirun'
+ mfem_mpiexec_np = '-np'
+ if 'SLURM_JOBID' in os.environ:
+ mfem_mpiexec = 'srun'
+ mfem_mpiexec_np = '-n'
+ elif 'LSB_JOBID' in os.environ:
+ if 'LLNL_COMPUTE_NODES' in os.environ:
+ mfem_mpiexec = 'lrun'
+ mfem_mpiexec_np = '-n'
+ else:
+ mfem_mpiexec = 'jsrun'
+ mfem_mpiexec_np = '-p'
+
+ metis5_str = 'NO'
+ if ('+metis' in spec) and spec['metis'].satisfies('@5:'):
+ metis5_str = 'YES'
+
+ zlib_var = 'MFEM_USE_ZLIB' if (spec.satisfies('@4.1.0:')) else \
+ 'MFEM_USE_GZSTREAM'
+
+ options = [
+ 'PREFIX=%s' % prefix,
+ 'MFEM_USE_MEMALLOC=YES',
+ 'MFEM_DEBUG=%s' % yes_no('+debug'),
+ # NOTE: env['CXX'] is the spack c++ compiler wrapper. The real
+ # compiler is defined by env['SPACK_CXX'].
+ 'CXX=%s' % env['CXX'],
+ 'MFEM_USE_LIBUNWIND=%s' % yes_no('+libunwind'),
+ '%s=%s' % (zlib_var, yes_no('+zlib')),
+ 'MFEM_USE_METIS=%s' % yes_no('+metis'),
+ 'MFEM_USE_METIS_5=%s' % metis5_str,
+ 'MFEM_THREAD_SAFE=%s' % yes_no('+threadsafe'),
+ 'MFEM_USE_MPI=%s' % yes_no('+mpi'),
+ 'MFEM_USE_LAPACK=%s' % yes_no('+lapack'),
+ 'MFEM_USE_SUPERLU=%s' % yes_no('+superlu-dist'),
+ 'MFEM_USE_STRUMPACK=%s' % yes_no('+strumpack'),
+ 'MFEM_USE_SUITESPARSE=%s' % yes_no('+suite-sparse'),
+ 'MFEM_USE_SUNDIALS=%s' % yes_no('+sundials'),
+ 'MFEM_USE_PETSC=%s' % yes_no('+petsc'),
+ 'MFEM_USE_SLEPC=%s' % yes_no('+slepc'),
+ 'MFEM_USE_PUMI=%s' % yes_no('+pumi'),
+ 'MFEM_USE_GSLIB=%s' % yes_no('+gslib'),
+ 'MFEM_USE_NETCDF=%s' % yes_no('+netcdf'),
+ 'MFEM_USE_MPFR=%s' % yes_no('+mpfr'),
+ 'MFEM_USE_GNUTLS=%s' % yes_no('+gnutls'),
+ 'MFEM_USE_OPENMP=%s' % yes_no('+openmp'),
+ 'MFEM_USE_CONDUIT=%s' % yes_no('+conduit'),
+ 'MFEM_USE_CUDA=%s' % yes_no('+cuda'),
+ 'MFEM_USE_HIP=%s' % yes_no('+rocm'),
+ 'MFEM_USE_OCCA=%s' % yes_no('+occa'),
+ 'MFEM_USE_RAJA=%s' % yes_no('+raja'),
+ 'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
+ 'MFEM_USE_CEED=%s' % yes_no('+libceed'),
+ 'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
+ 'MFEM_MPIEXEC=%s' % mfem_mpiexec,
+ 'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
+
+ cxxflags = spec.compiler_flags['cxxflags']
+
+ if cxxflags:
+ # Add opt/debug flags if they are not present in global cxx flags
+ opt_flag_found = any(f in self.compiler.opt_flags
+ for f in cxxflags)
+ debug_flag_found = any(f in self.compiler.debug_flags
+ for f in cxxflags)
+
+ if '+debug' in spec:
+ if not debug_flag_found:
+ cxxflags.append('-g')
+ if not opt_flag_found:
+ cxxflags.append('-O0')
+ else:
+ if not opt_flag_found:
+ cxxflags.append('-O2')
+
+ cxxflags = [(xcompiler + flag) for flag in cxxflags]
+ if '+cuda' in spec:
+ cxxflags += [
+ '-x=cu --expt-extended-lambda -arch=sm_%s' % cuda_arch,
+ '-ccbin %s' % (spec['mpi'].mpicxx if '+mpi' in spec
+ else env['CXX'])]
+ if self.spec.satisfies('@4.0.0:'):
+ cxxflags.append(self.compiler.cxx11_flag)
+ # The cxxflags are set by the spack c++ compiler wrapper. We also
+ # set CXXFLAGS explicitly, for clarity, and to properly export the
+ # cxxflags in the variable MFEM_CXXFLAGS in config.mk.
+ options += ['CXXFLAGS=%s' % ' '.join(cxxflags)]
+
+ if '~static' in spec:
+ options += ['STATIC=NO']
+ if '+shared' in spec:
+ options += [
+ 'SHARED=YES',
+ 'PICFLAG=%s' % (xcompiler + self.compiler.cxx_pic_flag)]
+
+ if '+mpi' in spec:
+ options += ['MPICXX=%s' % spec['mpi'].mpicxx]
+ hypre = spec['hypre']
+ # The hypre package always links with 'blas' and 'lapack'.
+ all_hypre_libs = hypre.libs + hypre['lapack'].libs + \
+ hypre['blas'].libs
+ options += [
+ 'HYPRE_OPT=-I%s' % hypre.prefix.include,
+ 'HYPRE_LIB=%s' % ld_flags_from_library_list(all_hypre_libs)]
+
+ if '+metis' in spec:
+ options += [
+ 'METIS_OPT=-I%s' % spec['metis'].prefix.include,
+ 'METIS_LIB=%s' %
+ ld_flags_from_library_list(spec['metis'].libs)]
+
+ if '+lapack' in spec:
+ lapack_blas = spec['lapack'].libs + spec['blas'].libs
+ options += [
+ # LAPACK_OPT is not used
+ 'LAPACK_LIB=%s' % ld_flags_from_library_list(lapack_blas)]
+
+ if '+superlu-dist' in spec:
+ lapack_blas = spec['lapack'].libs + spec['blas'].libs
+ options += [
+ 'SUPERLU_OPT=-I%s -I%s' %
+ (spec['superlu-dist'].prefix.include,
+ spec['parmetis'].prefix.include),
+ 'SUPERLU_LIB=%s %s' %
+ (ld_flags_from_dirs([spec['superlu-dist'].prefix.lib,
+ spec['parmetis'].prefix.lib],
+ ['superlu_dist', 'parmetis']),
+ ld_flags_from_library_list(lapack_blas))]
+
+ if '+strumpack' in spec:
+ strumpack = spec['strumpack']
+ sp_opt = ['-I%s' % strumpack.prefix.include]
+ sp_lib = [ld_flags_from_library_list(strumpack.libs)]
+ # Parts of STRUMPACK use fortran, so we need to link with the
+ # fortran library and also the MPI fortran library:
+ if '~shared' in strumpack:
+ if os.path.basename(env['FC']) == 'gfortran':
+ gfortran = Executable(env['FC'])
+ libext = 'dylib' if sys.platform == 'darwin' else 'so'
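+                # Ask gfortran itself where its runtime library lives via
+                # '-print-file-name=libgfortran.<ext>'.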
+ libfile = os.path.abspath(gfortran(
+ '-print-file-name=libgfortran.%s' % libext,
+ output=str).strip())
+ gfortran_lib = LibraryList(libfile)
+ sp_lib += [ld_flags_from_library_list(gfortran_lib)]
+ if ('^mpich' in strumpack) or ('^mvapich2' in strumpack):
+ sp_lib += ['-lmpifort']
+ elif '^openmpi' in strumpack:
+ sp_lib += ['-lmpi_mpifh']
+ elif '^spectrum-mpi' in strumpack:
+ sp_lib += ['-lmpi_ibm_mpifh']
+ if '+openmp' in strumpack:
+ # The '+openmp' in the spec means strumpack will TRY to find
+ # OpenMP; if not found, we should not add any flags -- how do
+ # we figure out if strumpack found OpenMP?
+ if not self.spec.satisfies('%apple-clang'):
+ sp_opt += [xcompiler + self.compiler.openmp_flag]
+ if '^parmetis' in strumpack:
+ parmetis = strumpack['parmetis']
+ sp_opt += [parmetis.headers.cpp_flags]
+ sp_lib += [ld_flags_from_library_list(parmetis.libs)]
+ if '^netlib-scalapack' in strumpack:
+ scalapack = strumpack['scalapack']
+ sp_opt += ['-I%s' % scalapack.prefix.include]
+ sp_lib += [ld_flags_from_dirs([scalapack.prefix.lib],
+ ['scalapack'])]
+ elif '^scalapack' in strumpack:
+ scalapack = strumpack['scalapack']
+ sp_opt += [scalapack.headers.cpp_flags]
+ sp_lib += [ld_flags_from_library_list(scalapack.libs)]
+ if '+butterflypack' in strumpack:
+ bp = strumpack['butterflypack']
+ sp_opt += ['-I%s' % bp.prefix.include]
+ sp_lib += [ld_flags_from_dirs([bp.prefix.lib],
+ ['dbutterflypack',
+ 'zbutterflypack'])]
+ if '+zfp' in strumpack:
+ zfp = strumpack['zfp']
+ sp_opt += ['-I%s' % zfp.prefix.include]
+ sp_lib += [ld_flags_from_dirs([zfp.prefix.lib], ['zfp'])]
+ if '+cuda' in strumpack:
+ # assuming also ('+cuda' in spec)
+ sp_lib += ['-lcusolver', '-lcublas']
+ options += [
+ 'STRUMPACK_OPT=%s' % ' '.join(sp_opt),
+ 'STRUMPACK_LIB=%s' % ' '.join(sp_lib)]
+
+ if '+suite-sparse' in spec:
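+            # Request only the SuiteSparse components MFEM needs (see the
+            # 'suitesparse_components' property below).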
+ ss_spec = 'suite-sparse:' + self.suitesparse_components
+ options += [
+ 'SUITESPARSE_OPT=-I%s' % spec[ss_spec].prefix.include,
+ 'SUITESPARSE_LIB=%s' %
+ ld_flags_from_library_list(spec[ss_spec].libs)]
+
+ if '+sundials' in spec:
+ sun_spec = 'sundials:' + self.sundials_components
+ options += [
+ 'SUNDIALS_OPT=%s' % spec[sun_spec].headers.cpp_flags,
+ 'SUNDIALS_LIB=%s' %
+ ld_flags_from_library_list(spec[sun_spec].libs)]
+
+ if '+petsc' in spec:
+ petsc = spec['petsc']
+ if '+shared' in petsc:
+ options += [
+ 'PETSC_OPT=%s' % petsc.headers.cpp_flags,
+ 'PETSC_LIB=%s' % ld_flags_from_library_list(petsc.libs)]
+ else:
+ options += ['PETSC_DIR=%s' % petsc.prefix]
+
+ if '+slepc' in spec:
+ slepc = spec['slepc']
+ options += [
+ 'SLEPC_OPT=%s' % slepc.headers.cpp_flags,
+ 'SLEPC_LIB=%s' % ld_flags_from_library_list(slepc.libs)]
+
+ if '+pumi' in spec:
+ pumi_libs = ['pumi', 'crv', 'ma', 'mds', 'apf', 'pcu', 'gmi',
+ 'parma', 'lion', 'mth', 'apf_zoltan', 'spr']
+ options += [
+ 'PUMI_OPT=-I%s' % spec['pumi'].prefix.include,
+ 'PUMI_LIB=%s' %
+ ld_flags_from_dirs([spec['pumi'].prefix.lib], pumi_libs)]
+
+ if '+gslib' in spec:
+ options += [
+ 'GSLIB_OPT=-I%s' % spec['gslib'].prefix.include,
+ 'GSLIB_LIB=%s' %
+ ld_flags_from_dirs([spec['gslib'].prefix.lib], ['gs'])]
+
+ if '+netcdf' in spec:
+ lib_flags = ld_flags_from_dirs([spec['netcdf-c'].prefix.lib],
+ ['netcdf'])
+ hdf5 = spec['hdf5:hl']
+ if hdf5.satisfies('~shared'):
+ hdf5_libs = hdf5.libs
+ hdf5_libs += LibraryList(find_system_libraries('libdl'))
+ lib_flags += " " + ld_flags_from_library_list(hdf5_libs)
+ options += [
+ 'NETCDF_OPT=-I%s' % spec['netcdf-c'].prefix.include,
+ 'NETCDF_LIB=%s' % lib_flags]
+
+ if '+zlib' in spec:
+ if "@:3.3.2" in spec:
+ options += ['ZLIB_DIR=%s' % spec['zlib'].prefix]
+ else:
+ options += [
+ 'ZLIB_OPT=-I%s' % spec['zlib'].prefix.include,
+ 'ZLIB_LIB=%s' %
+ ld_flags_from_library_list(spec['zlib'].libs)]
+
+ if '+mpfr' in spec:
+ options += [
+ 'MPFR_OPT=-I%s' % spec['mpfr'].prefix.include,
+ 'MPFR_LIB=%s' %
+ ld_flags_from_dirs([spec['mpfr'].prefix.lib], ['mpfr'])]
+
+ if '+gnutls' in spec:
+ options += [
+ 'GNUTLS_OPT=-I%s' % spec['gnutls'].prefix.include,
+ 'GNUTLS_LIB=%s' %
+ ld_flags_from_dirs([spec['gnutls'].prefix.lib], ['gnutls'])]
+
+ if '+libunwind' in spec:
+ libunwind = spec['unwind']
+ headers = find_headers('libunwind', libunwind.prefix.include)
+ headers.add_macro('-g')
+ libs = find_optional_library('libunwind', libunwind.prefix)
+ # When mfem uses libunwind, it also needs 'libdl'.
+ libs += LibraryList(find_system_libraries('libdl'))
+ options += [
+ 'LIBUNWIND_OPT=%s' % headers.cpp_flags,
+ 'LIBUNWIND_LIB=%s' % ld_flags_from_library_list(libs)]
+
+ if '+openmp' in spec:
+ options += [
+ 'OPENMP_OPT=%s' % (xcompiler + self.compiler.openmp_flag)]
+
+ if '+cuda' in spec:
+ options += [
+ 'CUDA_CXX=%s' % join_path(spec['cuda'].prefix, 'bin', 'nvcc'),
+ 'CUDA_ARCH=sm_%s' % cuda_arch]
+
+ if '+rocm' in spec:
+ amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
+ options += [
+ 'HIP_CXX=%s' % spec['hip'].hipcc,
+ 'HIP_ARCH=%s' % amdgpu_target]
+
+ if '+occa' in spec:
+ options += ['OCCA_OPT=-I%s' % spec['occa'].prefix.include,
+ 'OCCA_LIB=%s' %
+ ld_flags_from_dirs([spec['occa'].prefix.lib],
+ ['occa'])]
+
+ if '+raja' in spec:
+ options += ['RAJA_OPT=-I%s' % spec['raja'].prefix.include,
+ 'RAJA_LIB=%s' %
+ ld_flags_from_dirs([spec['raja'].prefix.lib],
+ ['RAJA'])]
+
+ if '+amgx' in spec:
+ amgx = spec['amgx']
+ if '+shared' in amgx:
+ options += ['AMGX_OPT=-I%s' % amgx.prefix.include,
+ 'AMGX_LIB=%s' %
+ ld_flags_from_library_list(amgx.libs)]
+ else:
+ options += ['AMGX_DIR=%s' % amgx.prefix]
+
+ if '+libceed' in spec:
+ options += ['CEED_OPT=-I%s' % spec['libceed'].prefix.include,
+ 'CEED_LIB=%s' %
+ ld_flags_from_dirs([spec['libceed'].prefix.lib],
+ ['ceed'])]
+
+ if '+umpire' in spec:
+ options += ['UMPIRE_OPT=-I%s' % spec['umpire'].prefix.include,
+ 'UMPIRE_LIB=%s' %
+ ld_flags_from_library_list(spec['umpire'].libs)]
+
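+        # Map the 'timer' variant values to the corresponding MFEM_TIMER_TYPE
+        # ids passed to the MFEM build system.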
+ timer_ids = {'std': '0', 'posix': '2', 'mac': '4', 'mpi': '6'}
+ timer = spec.variants['timer'].value
+ if timer != 'auto':
+ options += ['MFEM_TIMER_TYPE=%s' % timer_ids[timer]]
+
+ if '+conduit' in spec:
+ conduit = spec['conduit']
+ headers = HeaderList(find(conduit.prefix.include, 'conduit.hpp',
+ recursive=True))
+ conduit_libs = ['libconduit', 'libconduit_relay',
+ 'libconduit_blueprint']
+ libs = find_libraries(conduit_libs, conduit.prefix.lib,
+ shared=('+shared' in conduit))
+ libs += LibraryList(find_system_libraries('libdl'))
+ if '+hdf5' in conduit:
+ hdf5 = conduit['hdf5']
+ headers += find_headers('hdf5', hdf5.prefix.include)
+ libs += hdf5.libs
+
+ ##################
+ # cyrush note:
+ ##################
+ # spack's HeaderList is applying too much magic, undermining us:
+ #
+ # It applies a regex to strip back to the last "include" dir
+ # in the path. In our case we need to pass the following
+ # as part of the CONDUIT_OPT flags:
+ #
+ # -I<install_path>/include/conduit
+ #
+ # I tried several ways to present this path to the HeaderList,
+            # but the regex always kills the trailing conduit dir,
+            # breaking the build.
+ #
+ # To resolve the issue, we simply join our own string with
+ # the headers results (which are important b/c they handle
+ # hdf5 paths when enabled).
+ ##################
+
+ # construct proper include path
+ conduit_include_path = conduit.prefix.include.conduit
+ # add this path to the found flags
+ conduit_opt_flags = "-I{0} {1}".format(conduit_include_path,
+ headers.cpp_flags)
+
+ options += [
+ 'CONDUIT_OPT=%s' % conduit_opt_flags,
+ 'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
+
+ make('config', *options, parallel=False)
+ make('info', parallel=False)
+
+ def build(self, spec, prefix):
+ make('lib')
+
+ @run_after('build')
+ def check_or_test(self):
+ # Running 'make check' or 'make test' may fail if MFEM_MPIEXEC or
+ # MFEM_MPIEXEC_NP are not set appropriately.
+ if not self.run_tests:
+ # check we can build ex1 (~mpi) or ex1p (+mpi).
+ make('-C', 'examples', 'ex1p' if ('+mpi' in self.spec) else 'ex1',
+ parallel=False)
+ # make('check', parallel=False)
+ else:
+ make('all')
+ make('test', parallel=False)
+
+ def install(self, spec, prefix):
+ make('install', parallel=False)
+
+ # TODO: The way the examples and miniapps are being installed is not
+ # perfect. For example, the makefiles do not work.
+
+ install_em = ('+examples' in spec) or ('+miniapps' in spec)
+ if install_em and ('+shared' in spec):
+ make('examples/clean', 'miniapps/clean')
+ # This is a hack to get the examples and miniapps to link with the
+ # installed shared mfem library:
+ with working_dir('config'):
+ os.rename('config.mk', 'config.mk.orig')
+ copy(str(self.config_mk), 'config.mk')
+ shutil.copystat('config.mk.orig', 'config.mk')
+
+ prefix_share = join_path(prefix, 'share', 'mfem')
+
+ if '+examples' in spec:
+ make('examples')
+ install_tree('examples', join_path(prefix_share, 'examples'))
+
+ if '+miniapps' in spec:
+ make('miniapps')
+ install_tree('miniapps', join_path(prefix_share, 'miniapps'))
+
+ if install_em:
+ install_tree('data', join_path(prefix_share, 'data'))
+
+ examples_src_dir = 'examples'
+ examples_data_dir = 'data'
+
+ @run_after('install')
+ def cache_test_sources(self):
+ """Copy the example source files after the package is installed to an
+ install test subdirectory for use during `spack test run`."""
+ self.cache_extra_test_sources([self.examples_src_dir,
+ self.examples_data_dir])
+
+ def test(self):
+ test_dir = join_path(
+ self.test_suite.current_test_cache_dir,
+ self.examples_src_dir
+ )
+
+        # MFEM has many examples that can serve as a suitable smoke check.
+        # ex10 was chosen arbitrarily among the examples that work both
+        # with and without MPI.
+ test_exe = 'ex10p' if ('+mpi' in self.spec) else 'ex10'
+ self.run_test(
+ 'make',
+ [
+ 'CONFIG_MK={0}/share/mfem/config.mk'.format(self.prefix),
+ test_exe,
+ 'parallel=False'
+ ],
+ purpose='test: building {0}'.format(test_exe),
+ skip_missing=False,
+ work_dir=test_dir,
+ )
+
+ self.run_test(
+ './{0}'.format(test_exe),
+ [
+ '--mesh',
+ '../{0}/beam-quad.mesh'.format(self.examples_data_dir)
+ ],
+ [],
+ installed=False,
+ purpose='test: running {0}'.format(test_exe),
+ skip_missing=False,
+ work_dir=test_dir,
+ )
+
+ # this patch is only needed for mfem 4.1, where a few
+ # released files include byte order marks
+ @when('@4.1.0')
+ def patch(self):
+ # Remove the byte order mark since it messes with some compilers
+ files_with_bom = [
+ 'fem/gslib.hpp', 'fem/gslib.cpp', 'linalg/hiop.hpp',
+ 'miniapps/gslib/field-diff.cpp', 'miniapps/gslib/findpts.cpp',
+ 'miniapps/gslib/pfindpts.cpp']
+ bom = '\xef\xbb\xbf' if sys.version_info < (3,) else u'\ufeff'
+ for f in files_with_bom:
+ filter_file(bom, '', f)
+
+ @property
+ def suitesparse_components(self):
+ """Return the SuiteSparse components needed by MFEM."""
+ ss_comps = 'umfpack,cholmod,colamd,amd,camd,ccolamd,suitesparseconfig'
+ if self.spec.satisfies('@3.2:'):
+ ss_comps = 'klu,btf,' + ss_comps
+ return ss_comps
+
+ @property
+ def sundials_components(self):
+ """Return the SUNDIALS components needed by MFEM."""
+ spec = self.spec
+ sun_comps = 'arkode,cvodes,nvecserial,kinsol'
+ if '+mpi' in spec:
+ if spec.satisfies('@4.2:'):
+ sun_comps += ',nvecparallel,nvecmpiplusx'
+ else:
+ sun_comps += ',nvecparhyp,nvecparallel'
+ if '+cuda' in spec and '+cuda' in spec['sundials']:
+ sun_comps += ',nveccuda'
+ return sun_comps
+
+ @property
+ def headers(self):
+ """Export the main mfem header, mfem.hpp.
+ """
+ hdrs = HeaderList(find(self.prefix.include, 'mfem.hpp',
+ recursive=False))
+ return hdrs or None
+
+ @property
+ def libs(self):
+ """Export the mfem library file.
+ """
+ libs = find_libraries('libmfem', root=self.prefix.lib,
+ shared=('+shared' in self.spec), recursive=False)
+ return libs or None
+
+ @property
+ def config_mk(self):
+ """Export the location of the config.mk file.
+ This property can be accessed using spec['mfem'].package.config_mk
+ """
+ dirs = [self.prefix, self.prefix.share.mfem]
+ for d in dirs:
+ f = join_path(d, 'config.mk')
+ if os.access(f, os.R_OK):
+ return FileList(f)
+ return FileList(find(self.prefix, 'config.mk', recursive=True))
+
+ @property
+ def test_mk(self):
+ """Export the location of the test.mk file.
+ This property can be accessed using spec['mfem'].package.test_mk.
+ In version 3.3.2 and newer, the location of test.mk is also defined
+        inside config.mk, in the variable MFEM_TEST_MK.
+ """
+ dirs = [self.prefix, self.prefix.share.mfem]
+ for d in dirs:
+ f = join_path(d, 'test.mk')
+ if os.access(f, os.R_OK):
+ return FileList(f)
+ return FileList(find(self.prefix, 'test.mk', recursive=True))
diff --git a/lib/spack/spack/test/data/unparse/py-torch.txt b/lib/spack/spack/test/data/unparse/py-torch.txt
new file mode 100644
index 0000000000..b3a1c99b40
--- /dev/null
+++ b/lib/spack/spack/test/data/unparse/py-torch.txt
@@ -0,0 +1,448 @@
+# -*- python -*-
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+"""This is an unparser test package.
+
+``py-torch`` was chosen for its complexity and because it has an ``@when`` function
+that can be removed statically, as well as several decorated ``@run_after`` functions
+that should be preserved.
+
+"""
+
+import os
+import sys
+
+from spack import *
+
+
+class PyTorch(PythonPackage, CudaPackage):
+ """Tensors and Dynamic neural networks in Python
+ with strong GPU acceleration."""
+
+ homepage = "https://pytorch.org/"
+ git = "https://github.com/pytorch/pytorch.git"
+
+ maintainers = ['adamjstewart']
+
+    # The exact set of modules is version- and variant-specific; just attempt to import
+    # the core libraries to ensure that the package was successfully installed.
+ import_modules = ['torch', 'torch.autograd', 'torch.nn', 'torch.utils']
+
+ version('master', branch='master', submodules=True)
+ version('1.10.1', tag='v1.10.1', submodules=True)
+ version('1.10.0', tag='v1.10.0', submodules=True)
+ version('1.9.1', tag='v1.9.1', submodules=True)
+ version('1.9.0', tag='v1.9.0', submodules=True)
+ version('1.8.2', tag='v1.8.2', submodules=True)
+ version('1.8.1', tag='v1.8.1', submodules=True)
+ version('1.8.0', tag='v1.8.0', submodules=True)
+ version('1.7.1', tag='v1.7.1', submodules=True)
+ version('1.7.0', tag='v1.7.0', submodules=True)
+ version('1.6.0', tag='v1.6.0', submodules=True)
+ version('1.5.1', tag='v1.5.1', submodules=True)
+ version('1.5.0', tag='v1.5.0', submodules=True)
+ version('1.4.1', tag='v1.4.1', submodules=True)
+ version('1.4.0', tag='v1.4.0', submodules=True, deprecated=True,
+ submodules_delete=['third_party/fbgemm'])
+ version('1.3.1', tag='v1.3.1', submodules=True)
+ version('1.3.0', tag='v1.3.0', submodules=True)
+ version('1.2.0', tag='v1.2.0', submodules=True)
+ version('1.1.0', tag='v1.1.0', submodules=True)
+ version('1.0.1', tag='v1.0.1', submodules=True)
+ version('1.0.0', tag='v1.0.0', submodules=True)
+ version('0.4.1', tag='v0.4.1', submodules=True, deprecated=True,
+ submodules_delete=['third_party/nervanagpu'])
+ version('0.4.0', tag='v0.4.0', submodules=True, deprecated=True)
+ version('0.3.1', tag='v0.3.1', submodules=True, deprecated=True)
+
+ is_darwin = sys.platform == 'darwin'
+
+ # All options are defined in CMakeLists.txt.
+ # Some are listed in setup.py, but not all.
+ variant('caffe2', default=True, description='Build Caffe2')
+ variant('test', default=False, description='Build C++ test binaries')
+ variant('cuda', default=not is_darwin, description='Use CUDA')
+ variant('rocm', default=False, description='Use ROCm')
+ variant('cudnn', default=not is_darwin, description='Use cuDNN')
+ variant('fbgemm', default=True, description='Use FBGEMM (quantized 8-bit server operators)')
+ variant('kineto', default=True, description='Use Kineto profiling library')
+ variant('magma', default=not is_darwin, description='Use MAGMA')
+ variant('metal', default=is_darwin, description='Use Metal for Caffe2 iOS build')
+ variant('nccl', default=not is_darwin, description='Use NCCL')
+ variant('nnpack', default=True, description='Use NNPACK')
+ variant('numa', default=not is_darwin, description='Use NUMA')
+ variant('numpy', default=True, description='Use NumPy')
+ variant('openmp', default=True, description='Use OpenMP for parallel code')
+ variant('qnnpack', default=True, description='Use QNNPACK (quantized 8-bit operators)')
+ variant('valgrind', default=not is_darwin, description='Use Valgrind')
+ variant('xnnpack', default=True, description='Use XNNPACK')
+ variant('mkldnn', default=True, description='Use MKLDNN')
+ variant('distributed', default=not is_darwin, description='Use distributed')
+ variant('mpi', default=not is_darwin, description='Use MPI for Caffe2')
+ variant('gloo', default=not is_darwin, description='Use Gloo')
+ variant('tensorpipe', default=not is_darwin, description='Use TensorPipe')
+ variant('onnx_ml', default=True, description='Enable traditional ONNX ML API')
+ variant('breakpad', default=True, description='Enable breakpad crash dump library')
+
+ conflicts('+cuda', when='+rocm')
+ conflicts('+cudnn', when='~cuda')
+ conflicts('+magma', when='~cuda')
+ conflicts('+nccl', when='~cuda~rocm')
+ conflicts('+nccl', when='platform=darwin')
+ conflicts('+numa', when='platform=darwin', msg='Only available on Linux')
+ conflicts('+valgrind', when='platform=darwin', msg='Only available on Linux')
+ conflicts('+mpi', when='~distributed')
+ conflicts('+gloo', when='~distributed')
+ conflicts('+tensorpipe', when='~distributed')
+ conflicts('+kineto', when='@:1.7')
+ conflicts('+valgrind', when='@:1.7')
+ conflicts('~caffe2', when='@0.4.0:1.6') # no way to disable caffe2?
+ conflicts('+caffe2', when='@:0.3.1') # caffe2 did not yet exist?
+ conflicts('+tensorpipe', when='@:1.5')
+ conflicts('+xnnpack', when='@:1.4')
+ conflicts('~onnx_ml', when='@:1.4') # no way to disable ONNX?
+ conflicts('+rocm', when='@:0.4')
+ conflicts('+cudnn', when='@:0.4')
+ conflicts('+fbgemm', when='@:0.4,1.4.0')
+ conflicts('+qnnpack', when='@:0.4')
+ conflicts('+mkldnn', when='@:0.4')
+ conflicts('+breakpad', when='@:1.9') # Option appeared in 1.10.0
+ conflicts('+breakpad', when='target=ppc64:', msg='Unsupported')
+ conflicts('+breakpad', when='target=ppc64le:', msg='Unsupported')
+
+ conflicts('cuda_arch=none', when='+cuda',
+ msg='Must specify CUDA compute capabilities of your GPU, see '
+ 'https://developer.nvidia.com/cuda-gpus')
+
+ # Required dependencies
+ depends_on('cmake@3.5:', type='build')
+    # Use the Ninja generator to speed up build times; it is used automatically if found
+ depends_on('ninja@1.5:', when='@1.1.0:', type='build')
+ # See python_min_version in setup.py
+ depends_on('python@3.6.2:', when='@1.7.1:', type=('build', 'link', 'run'))
+ depends_on('python@3.6.1:', when='@1.6.0:1.7.0', type=('build', 'link', 'run'))
+ depends_on('python@3.5:', when='@1.5.0:1.5', type=('build', 'link', 'run'))
+ depends_on('python@2.7:2.8,3.5:', when='@1.4.0:1.4', type=('build', 'link', 'run'))
+ depends_on('python@2.7:2.8,3.5:3.7', when='@:1.3', type=('build', 'link', 'run'))
+ depends_on('py-setuptools', type=('build', 'run'))
+ depends_on('py-future', when='@1.5:', type=('build', 'run'))
+ depends_on('py-future', when='@1.1: ^python@:2', type=('build', 'run'))
+ depends_on('py-pyyaml', type=('build', 'run'))
+ depends_on('py-typing', when='@0.4: ^python@:3.4', type=('build', 'run'))
+ depends_on('py-typing-extensions', when='@1.7:', type=('build', 'run'))
+ depends_on('py-pybind11@2.6.2', when='@1.8.0:', type=('build', 'link', 'run'))
+ depends_on('py-pybind11@2.3.0', when='@1.1.0:1.7', type=('build', 'link', 'run'))
+ depends_on('py-pybind11@2.2.4', when='@1.0.0:1.0', type=('build', 'link', 'run'))
+ depends_on('py-pybind11@2.2.2', when='@0.4.0:0.4', type=('build', 'link', 'run'))
+ depends_on('py-dataclasses', when='@1.7: ^python@3.6.0:3.6', type=('build', 'run'))
+ depends_on('py-tqdm', type='run')
+ depends_on('py-protobuf', when='@0.4:', type=('build', 'run'))
+ depends_on('protobuf', when='@0.4:')
+ depends_on('blas')
+ depends_on('lapack')
+ depends_on('eigen', when='@0.4:')
+ # https://github.com/pytorch/pytorch/issues/60329
+ # depends_on('cpuinfo@2020-12-17', when='@1.8.0:')
+ # depends_on('cpuinfo@2020-06-11', when='@1.6.0:1.7')
+ # https://github.com/shibatch/sleef/issues/427
+ # depends_on('sleef@3.5.1_2020-12-22', when='@1.8.0:')
+ # https://github.com/pytorch/pytorch/issues/60334
+ # depends_on('sleef@3.4.0_2019-07-30', when='@1.6.0:1.7')
+ # https://github.com/Maratyszcza/FP16/issues/18
+ # depends_on('fp16@2020-05-14', when='@1.6.0:')
+ depends_on('pthreadpool@2021-04-13', when='@1.9.0:')
+ depends_on('pthreadpool@2020-10-05', when='@1.8.0:1.8')
+ depends_on('pthreadpool@2020-06-15', when='@1.6.0:1.7')
+ depends_on('psimd@2020-05-17', when='@1.6.0:')
+ depends_on('fxdiv@2020-04-17', when='@1.6.0:')
+ depends_on('benchmark', when='@1.6:+test')
+
+ # Optional dependencies
+ depends_on('cuda@7.5:', when='+cuda', type=('build', 'link', 'run'))
+ depends_on('cuda@9:', when='@1.1:+cuda', type=('build', 'link', 'run'))
+ depends_on('cuda@9.2:', when='@1.6:+cuda', type=('build', 'link', 'run'))
+ depends_on('cudnn@6.0:7', when='@:1.0+cudnn')
+ depends_on('cudnn@7.0:7', when='@1.1.0:1.5+cudnn')
+ depends_on('cudnn@7.0:', when='@1.6.0:+cudnn')
+ depends_on('magma', when='+magma')
+ depends_on('nccl', when='+nccl')
+ depends_on('numactl', when='+numa')
+ depends_on('py-numpy', when='+numpy', type=('build', 'run'))
+ depends_on('llvm-openmp', when='%apple-clang +openmp')
+ depends_on('valgrind', when='+valgrind')
+ # https://github.com/pytorch/pytorch/issues/60332
+ # depends_on('xnnpack@2021-02-22', when='@1.8.0:+xnnpack')
+ # depends_on('xnnpack@2020-03-23', when='@1.6.0:1.7+xnnpack')
+ depends_on('mpi', when='+mpi')
+ # https://github.com/pytorch/pytorch/issues/60270
+ # depends_on('gloo@2021-05-04', when='@1.9.0:+gloo')
+ # depends_on('gloo@2020-09-18', when='@1.7.0:1.8+gloo')
+ # depends_on('gloo@2020-03-17', when='@1.6.0:1.6+gloo')
+ # https://github.com/pytorch/pytorch/issues/60331
+ # depends_on('onnx@1.8.0_2020-11-03', when='@1.8.0:+onnx_ml')
+ # depends_on('onnx@1.7.0_2020-05-31', when='@1.6.0:1.7+onnx_ml')
+ depends_on('mkl', when='+mkldnn')
+
+ # Test dependencies
+ depends_on('py-hypothesis', type='test')
+ depends_on('py-six', type='test')
+ depends_on('py-psutil', type='test')
+
+ # Fix BLAS being overridden by MKL
+ # https://github.com/pytorch/pytorch/issues/60328
+ patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch',
+ sha256='e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9',
+ when='@1.2.0:')
+
+ # Fixes build on older systems with glibc <2.12
+ patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch',
+ sha256='e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395',
+ when='@1.1.0:1.8.1')
+
+ # Fixes CMake configuration error when XNNPACK is disabled
+ # https://github.com/pytorch/pytorch/pull/35607
+ # https://github.com/pytorch/pytorch/pull/37865
+ patch('xnnpack.patch', when='@1.5.0:1.5')
+
+ # Fixes build error when ROCm is enabled for pytorch-1.5 release
+ patch('rocm.patch', when='@1.5.0:1.5+rocm')
+
+ # Fixes fatal error: sleef.h: No such file or directory
+ # https://github.com/pytorch/pytorch/pull/35359
+ # https://github.com/pytorch/pytorch/issues/26555
+ # patch('sleef.patch', when='@1.0.0:1.5')
+
+ # Fixes compilation with Clang 9.0.0 and Apple Clang 11.0.3
+ # https://github.com/pytorch/pytorch/pull/37086
+ patch('https://github.com/pytorch/pytorch/commit/e921cd222a8fbeabf5a3e74e83e0d8dfb01aa8b5.patch',
+ sha256='17561b16cd2db22f10c0fe1fdcb428aecb0ac3964ba022a41343a6bb8cba7049',
+ when='@1.1:1.5')
+
+ # Removes duplicate definition of getCusparseErrorString
+ # https://github.com/pytorch/pytorch/issues/32083
+ patch('cusparseGetErrorString.patch', when='@0.4.1:1.0^cuda@10.1.243:')
+
+ # Fixes 'FindOpenMP.cmake'
+ # to detect openmp settings used by Fujitsu compiler.
+ patch('detect_omp_of_fujitsu_compiler.patch', when='%fj')
+
+ # Fix compilation of +distributed~tensorpipe
+ # https://github.com/pytorch/pytorch/issues/68002
+ patch('https://github.com/pytorch/pytorch/commit/c075f0f633fa0136e68f0a455b5b74d7b500865c.patch',
+ sha256='e69e41b5c171bfb00d1b5d4ee55dd5e4c8975483230274af4ab461acd37e40b8', when='@1.10.0+distributed~tensorpipe')
+
+ # Both build and install run cmake/make/make install
+ # Only run once to speed up build times
+ phases = ['install']
+
+ @property
+ def libs(self):
+ root = join_path(self.prefix, self.spec['python'].package.site_packages_dir,
+ 'torch', 'lib')
+ return find_libraries('libtorch', root)
+
+ @property
+ def headers(self):
+ root = join_path(self.prefix, self.spec['python'].package.site_packages_dir,
+ 'torch', 'include')
+ headers = find_all_headers(root)
+ headers.directories = [root]
+ return headers
+
+ @when('@1.5.0:')
+ def patch(self):
+ # https://github.com/pytorch/pytorch/issues/52208
+ filter_file('torch_global_deps PROPERTIES LINKER_LANGUAGE C',
+ 'torch_global_deps PROPERTIES LINKER_LANGUAGE CXX',
+ 'caffe2/CMakeLists.txt')
+
+ def setup_build_environment(self, env):
+ """Set environment variables used to control the build.
+
+ PyTorch's ``setup.py`` is a thin wrapper around ``cmake``.
+ In ``tools/setup_helpers/cmake.py``, you can see that all
+ environment variables that start with ``BUILD_``, ``USE_``,
+ or ``CMAKE_``, plus a few more explicitly specified variable
+ names, are passed directly to the ``cmake`` call. Therefore,
+ most flags defined in ``CMakeLists.txt`` can be specified as
+ environment variables.
+ """
+ def enable_or_disable(variant, keyword='USE', var=None, newer=False):
+ """Set environment variable to enable or disable support for a
+ particular variant.
+
+ Parameters:
+ variant (str): the variant to check
+ keyword (str): the prefix to use for enabling/disabling
+ var (str): CMake variable to set. Defaults to variant.upper()
+ newer (bool): newer variants that never used NO_*
+ """
+ if var is None:
+ var = variant.upper()
+
+ # Version 1.1.0 switched from NO_* to USE_* or BUILD_*
+ # But some newer variants have always used USE_* or BUILD_*
+ if self.spec.satisfies('@1.1:') or newer:
+ if '+' + variant in self.spec:
+ env.set(keyword + '_' + var, 'ON')
+ else:
+ env.set(keyword + '_' + var, 'OFF')
+ else:
+ if '+' + variant in self.spec:
+ env.unset('NO_' + var)
+ else:
+ env.set('NO_' + var, 'ON')
+
+ # Build in parallel to speed up build times
+ env.set('MAX_JOBS', make_jobs)
+
+ # Spack logs have trouble handling colored output
+ env.set('COLORIZE_OUTPUT', 'OFF')
+
+ if self.spec.satisfies('@0.4:'):
+ enable_or_disable('test', keyword='BUILD')
+
+ if self.spec.satisfies('@1.7:'):
+ enable_or_disable('caffe2', keyword='BUILD')
+
+ enable_or_disable('cuda')
+ if '+cuda' in self.spec:
+ # cmake/public/cuda.cmake
+ # cmake/Modules_CUDA_fix/upstream/FindCUDA.cmake
+ env.unset('CUDA_ROOT')
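+            # Convert Spack's cuda_arch values (e.g. '70') to the 'X.Y' form
+            # (e.g. '7.0') expected by TORCH_CUDA_ARCH_LIST.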
+            torch_cuda_arch = ';'.join(
+                '{0:.1f}'.format(float(i) / 10.0)
+                for i in self.spec.variants['cuda_arch'].value)
+ env.set('TORCH_CUDA_ARCH_LIST', torch_cuda_arch)
+
+ enable_or_disable('rocm')
+
+ enable_or_disable('cudnn')
+ if '+cudnn' in self.spec:
+ # cmake/Modules_CUDA_fix/FindCUDNN.cmake
+ env.set('CUDNN_INCLUDE_DIR', self.spec['cudnn'].prefix.include)
+ env.set('CUDNN_LIBRARY', self.spec['cudnn'].libs[0])
+
+ enable_or_disable('fbgemm')
+ if self.spec.satisfies('@1.8:'):
+ enable_or_disable('kineto')
+ enable_or_disable('magma')
+ enable_or_disable('metal')
+ if self.spec.satisfies('@1.10:'):
+ enable_or_disable('breakpad')
+
+ enable_or_disable('nccl')
+ if '+nccl' in self.spec:
+ env.set('NCCL_LIB_DIR', self.spec['nccl'].libs.directories[0])
+ env.set('NCCL_INCLUDE_DIR', self.spec['nccl'].prefix.include)
+
+ # cmake/External/nnpack.cmake
+ enable_or_disable('nnpack')
+
+ enable_or_disable('numa')
+ if '+numa' in self.spec:
+ # cmake/Modules/FindNuma.cmake
+ env.set('NUMA_ROOT_DIR', self.spec['numactl'].prefix)
+
+ # cmake/Modules/FindNumPy.cmake
+ enable_or_disable('numpy')
+ # cmake/Modules/FindOpenMP.cmake
+ enable_or_disable('openmp', newer=True)
+ enable_or_disable('qnnpack')
+ if self.spec.satisfies('@1.3:'):
+ enable_or_disable('qnnpack', var='PYTORCH_QNNPACK')
+ if self.spec.satisfies('@1.8:'):
+ enable_or_disable('valgrind')
+ if self.spec.satisfies('@1.5:'):
+ enable_or_disable('xnnpack')
+ enable_or_disable('mkldnn')
+ enable_or_disable('distributed')
+ enable_or_disable('mpi')
+ # cmake/Modules/FindGloo.cmake
+ enable_or_disable('gloo', newer=True)
+ if self.spec.satisfies('@1.6:'):
+ enable_or_disable('tensorpipe')
+
+ if '+onnx_ml' in self.spec:
+ env.set('ONNX_ML', 'ON')
+ else:
+ env.set('ONNX_ML', 'OFF')
+
+ if not self.spec.satisfies('@master'):
+ env.set('PYTORCH_BUILD_VERSION', self.version)
+ env.set('PYTORCH_BUILD_NUMBER', 0)
+
+ # BLAS to be used by Caffe2
+ # Options defined in cmake/Dependencies.cmake and cmake/Modules/FindBLAS.cmake
+ if self.spec['blas'].name == 'atlas':
+ env.set('BLAS', 'ATLAS')
+ env.set('WITH_BLAS', 'atlas')
+ elif self.spec['blas'].name in ['blis', 'amdblis']:
+ env.set('BLAS', 'BLIS')
+ env.set('WITH_BLAS', 'blis')
+ elif self.spec['blas'].name == 'eigen':
+ env.set('BLAS', 'Eigen')
+ elif self.spec['lapack'].name in ['libflame', 'amdlibflame']:
+ env.set('BLAS', 'FLAME')
+ env.set('WITH_BLAS', 'FLAME')
+ elif self.spec['blas'].name in [
+ 'intel-mkl', 'intel-parallel-studio', 'intel-oneapi-mkl']:
+ env.set('BLAS', 'MKL')
+ env.set('WITH_BLAS', 'mkl')
+ elif self.spec['blas'].name == 'openblas':
+ env.set('BLAS', 'OpenBLAS')
+ env.set('WITH_BLAS', 'open')
+ elif self.spec['blas'].name == 'veclibfort':
+ env.set('BLAS', 'vecLib')
+ env.set('WITH_BLAS', 'veclib')
+ else:
+ env.set('BLAS', 'Generic')
+ env.set('WITH_BLAS', 'generic')
+
+        # Don't use vendored third-party libraries when system versions are available
+ env.set('BUILD_CUSTOM_PROTOBUF', 'OFF')
+ env.set('USE_SYSTEM_NCCL', 'ON')
+ env.set('USE_SYSTEM_EIGEN_INSTALL', 'ON')
+ if self.spec.satisfies('@0.4:'):
+ env.set('pybind11_DIR', self.spec['py-pybind11'].prefix)
+ env.set('pybind11_INCLUDE_DIR',
+ self.spec['py-pybind11'].prefix.include)
+ if self.spec.satisfies('@1.10:'):
+ env.set('USE_SYSTEM_PYBIND11', 'ON')
+ # https://github.com/pytorch/pytorch/issues/60334
+ # if self.spec.satisfies('@1.8:'):
+ # env.set('USE_SYSTEM_SLEEF', 'ON')
+ if self.spec.satisfies('@1.6:'):
+ # env.set('USE_SYSTEM_LIBS', 'ON')
+ # https://github.com/pytorch/pytorch/issues/60329
+ # env.set('USE_SYSTEM_CPUINFO', 'ON')
+ # https://github.com/pytorch/pytorch/issues/60270
+ # env.set('USE_SYSTEM_GLOO', 'ON')
+ # https://github.com/Maratyszcza/FP16/issues/18
+ # env.set('USE_SYSTEM_FP16', 'ON')
+ env.set('USE_SYSTEM_PTHREADPOOL', 'ON')
+ env.set('USE_SYSTEM_PSIMD', 'ON')
+ env.set('USE_SYSTEM_FXDIV', 'ON')
+ env.set('USE_SYSTEM_BENCHMARK', 'ON')
+ # https://github.com/pytorch/pytorch/issues/60331
+ # env.set('USE_SYSTEM_ONNX', 'ON')
+ # https://github.com/pytorch/pytorch/issues/60332
+ # env.set('USE_SYSTEM_XNNPACK', 'ON')
+
+ @run_before('install')
+ def build_amd(self):
+ if '+rocm' in self.spec:
+ python(os.path.join('tools', 'amd_build', 'build_amd.py'))
+
+ @run_after('install')
+ @on_package_attributes(run_tests=True)
+ def install_test(self):
+ with working_dir('test'):
+ python('run_test.py')
+
+ # Tests need to be re-added since `phases` was overridden
+ run_after('install')(
+ PythonPackage._run_default_install_time_test_callbacks)
+ run_after('install')(PythonPackage.sanity_check_prefix)
diff --git a/lib/spack/spack/test/util/package_hash.py b/lib/spack/spack/test/util/package_hash.py
index 395007f41e..f76fe71812 100644
--- a/lib/spack/spack/test/util/package_hash.py
+++ b/lib/spack/spack/test/util/package_hash.py
@@ -4,6 +4,9 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import ast
+import os
+
+import pytest
import spack.directives
import spack.paths
@@ -11,6 +14,8 @@ import spack.util.package_hash as ph
from spack.spec import Spec
from spack.util.unparse import unparse
+datadir = os.path.join(spack.paths.test_path, "data", "unparse")
+
def test_hash(tmpdir, mock_packages, config):
ph.package_hash("hash-test1@1.2")
@@ -152,6 +157,38 @@ def test_remove_directives():
assert name not in unparsed
+@pytest.mark.parametrize("package_spec,expected_hash", [
+ ("amdfftw", "nfrk76xyu6wxs4xb4nyichm3om3kb7yp"),
+ ("grads", "rrlmwml3f2frdnqavmro3ias66h5b2ce"),
+ ("llvm", "ngact4ds3xwgsbn5bruxpfs6f4u4juba"),
+ # has @when("@4.1.0")
+ ("mfem", "65xryd5zxarwzqlh2pojq7ykohpod4xz"),
+ ("mfem@4.0.0", "65xryd5zxarwzqlh2pojq7ykohpod4xz"),
+ ("mfem@4.1.0", "2j655nix3oe57iwvs2mlgx2mresk7czl"),
+ # has @when("@1.5.0:")
+ ("py-torch", "lnwmqk4wadtlsc2badrt7foid5tl5vaw"),
+ ("py-torch@1.0", "lnwmqk4wadtlsc2badrt7foid5tl5vaw"),
+ ("py-torch@1.6", "5nwndnknxdfs5or5nrl4pecvw46xc5i2"),
+])
+def test_package_hash_consistency(package_spec, expected_hash):
+ """Ensure that that package hash is consistent python version to version.
+
+ We assume these tests run across all supported Python versions in CI, and we ensure
+ consistency with recorded hashes for some well known inputs.
+
+    If this fails, then something about the way the Python AST works has likely changed.
+    If Spack is running in a new Python version, we might need to modify the unparser to
+ handle it. If not, then something has become inconsistent about the way we unparse
+ Python code across versions.
+
+ """
+ spec = Spec(package_spec)
+ filename = os.path.join(datadir, "%s.txt" % spec.name)
+ print(ph.canonical_source(spec, filename))
+ h = ph.canonical_source_hash(spec, filename)
+ assert expected_hash == h
+
+
many_multimethods = """\
class Pkg:
def foo(self):