author     Thomas-Ulrich <ulrich@geophysik.uni-muenchen.de>   2024-10-24 14:36:37 +0200
committer  GitHub <noreply@github.com>                        2024-10-24 14:36:37 +0200
commit     f09ce00fe1e034ca17d38672f6c3d9c880ffac3b (patch)
tree       52390734851f90dde9ed83f04696f83df5d26228
parent     cdde7c3ccf61de94d1770dcb8c0b0e09bbd82732 (diff)
seissol: new package (#41176)
Co-authored-by: Bernhard Kaindl <bernhardkaindl7@gmail.com>
5 files changed, 414 insertions(+), 1 deletion(-)
diff --git a/var/spack/repos/builtin/packages/easi/package.py b/var/spack/repos/builtin/packages/easi/package.py
index e13180b684..fbe2163509 100644
--- a/var/spack/repos/builtin/packages/easi/package.py
+++ b/var/spack/repos/builtin/packages/easi/package.py
@@ -32,6 +32,7 @@ class Easi(CMakePackage):
 
     variant("python", default=True, description="Install python bindings")
     extends("python", when="+python")
+    depends_on("mpi", when="+python")
 
     variant("asagi", default=True, description="build with ASAGI support")
     variant(
diff --git a/var/spack/repos/builtin/packages/libxsmm/package.py b/var/spack/repos/builtin/packages/libxsmm/package.py
index 16fb0dbeff..14e287a918 100644
--- a/var/spack/repos/builtin/packages/libxsmm/package.py
+++ b/var/spack/repos/builtin/packages/libxsmm/package.py
@@ -28,7 +28,11 @@ class Libxsmm(MakefilePackage):
     version("main-2023-11", commit="0d9be905527ba575c14ca5d3b4c9673916c868b2")
     version("main", branch="main")
     version("1.17-cp2k", commit="6f883620f58afdeebab28039fc9cf580e76a5ec6")
-    version("1.17", sha256="8b642127880e92e8a75400125307724635ecdf4020ca4481e5efe7640451bb92")
+    version(
+        "1.17",
+        sha256="8b642127880e92e8a75400125307724635ecdf4020ca4481e5efe7640451bb92",
+        preferred=True,
+    )
     version("1.16.3", sha256="e491ccadebc5cdcd1fc08b5b4509a0aba4e2c096f53d7880062a66b82a0baf84")
     version("1.16.2", sha256="bdc7554b56b9e0a380fc9c7b4f4394b41be863344858bc633bc9c25835c4c64e")
     version("1.16.1", sha256="93dc7a3ec40401988729ddb2c6ea2294911261f7e6cd979cf061b5c3691d729d")
diff --git a/var/spack/repos/builtin/packages/py-chainforgecodegen/package.py b/var/spack/repos/builtin/packages/py-chainforgecodegen/package.py
new file mode 100644
index 0000000000..2734f053d3
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-chainforgecodegen/package.py
@@ -0,0 +1,30 @@
+# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+
+from spack.package import *
+
+
+class PyChainforgecodegen(PythonPackage):
+    """A code generator that fuses subsequent batched matrix multiplications (GEMMs)
+    into a single GPU kernel, holding intermediate results in shared memory as long as necessary.
+    """
+
+    git = "https://github.com/SeisSol/chainforge.git"
+
+    maintainers("davschneller", "Thomas-Ulrich")
+    license("BSD-3-Clause")
+
+    version("master", branch="master")
+    depends_on("py-numpy")
+    depends_on("py-graphviz", type=("build", "run"))
+    depends_on("py-jinja2", type=("build", "run"))
+    depends_on("py-lark", type=("build", "run"))
+    depends_on("py-pyyaml", type=("build", "run"))
+    depends_on("py-setuptools", type="build")
+
+    def setup_run_environment(self, env):
+        env.prepend_path("PATH", self.spec.prefix)
+        env.prepend_path("PYTHONPATH", self.spec.prefix)
diff --git a/var/spack/repos/builtin/packages/py-gemmforge/package.py b/var/spack/repos/builtin/packages/py-gemmforge/package.py
new file mode 100644
index 0000000000..d368ca717b
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-gemmforge/package.py
@@ -0,0 +1,27 @@
+# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+
+from spack.package import *
+
+
+class PyGemmforge(PythonPackage):
+    """GPU-GEMM generator for the Discontinuous Galerkin method"""
+
+    homepage = "https://github.com/SeisSol/gemmforge/blob/master/README.md"
+    git = "https://github.com/SeisSol/gemmforge.git"
+
+    maintainers("davschneller", "Thomas-Ulrich")
+    license("BSD-3-Clause")
+
+    version("master", branch="master")
+    depends_on("py-numpy")
+    depends_on("py-jinja2", type=("build", "run"))
+    depends_on("py-pyyaml", type=("build", "run"))
+    depends_on("py-setuptools", type="build")
+
+    def setup_run_environment(self, env):
+        env.prepend_path("PATH", self.spec.prefix)
+        env.prepend_path("PYTHONPATH", self.spec.prefix)
diff --git a/var/spack/repos/builtin/packages/seissol/package.py b/var/spack/repos/builtin/packages/seissol/package.py
new file mode 100644
index 0000000000..1cd4f787fc
--- /dev/null
+++ b/var/spack/repos/builtin/packages/seissol/package.py
@@ -0,0 +1,351 @@
+# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+
+from spack.package import *
+
+
+class Seissol(CMakePackage, CudaPackage, ROCmPackage):
+    """Seissol - A scientific software for the numerical simulation
+    of seismic wave phenomena and earthquake dynamics.
+    """
+
+    homepage = "http://www.seissol.org"
+    git = "https://github.com/SeisSol/SeisSol.git"
+    version("master", branch="master", submodules=True)
+    # we cannot use the tar.gz file because it does not contains submodules
+    version(
+        "1.2.0", tag="v1.2.0", commit="2057e6e81965e0789128c6d177592800bcf956e1", submodules=True
+    )
+    version(
+        "1.1.4", tag="v1.1.4", commit="6d301757378ad8446173e0a12c095a695a708aaf", submodules=True
+    )
+    version(
+        "1.1.3", tag="v1.1.3", commit="01ae1b127fcc6f766b819d2e797df6a3547d730a", submodules=True
+    )
+    version(
+        "1.1.2", tag="v1.1.2", commit="71002c1c1498ebd6f50a954731da68fa4f9d436b", submodules=True
+    )
+
+    maintainers("Thomas-Ulrich", "davschneller", "vikaskurapati")
+
+    variant("asagi", default=True, description="Use ASAGI for material input")
+    variant(
+        "convergence_order",
+        default="4",
+        description="polynomial degree plus one",
+        values=(str(v) for v in range(2, 9)),
+        multi=False,
+    )
+    variant(
+        "precision",
+        default="double",
+        description="float numerical precision",
+        values=("single", "double"),
+        multi=False,
+    )
+    variant(
+        "dr_quad_rule",
+        default="stroud",
+        description="dynamic rupture quadrature rule",
+        values=("stroud", "dunavant"),
+        multi=False,
+    )
+    variant(
+        "plasticity_method",
+        default="nb",
+        description="Plasticity method",
+        values=("nb", "ib"),
+        multi=False,
+    )
+    variant(
+        "equations",
+        default="elastic",
+        description="equation set used",
+        values=("elastic", "anisotropic", "viscoelastic2", "poroelastic"),
+        multi=False,
+    )
+    variant(
+        "number_of_mechanisms", default="3", description="number of mechanisms for viscoelasticity"
+    )
+    variant("netcdf", default=True, description="Enable Netcdf")
+    variant(
+        "graph_partitioning_libs",
+        default="parmetis",
+        description="graph partitioning library for mesh partitioning",
+        values=("none", "parmetis", "ptscotch", "parhip"),
+        multi=True,
+    )
+
+    # GPU options
+    variant("intel_gpu", default=False, description="Compile for Intel GPUs")
+    variant(
+        "intel_gpu_arch",
+        default="none",
+        values=("none", "bdw", "skl", "pvc"),
+        description="The Intel GPU to compile for",
+        when="+intel_gpu",
+    )
+
+    forwarded_variants = ["cuda", "intel_gpu", "rocm"]
+    for v in forwarded_variants:
+        variant(
+            "sycl_backend",
+            default="acpp",
+            description="SYCL backend to use for DR and point sources",
+            values=("acpp", "oneapi"),
+            when=f"+{v}",
+        )
+        variant(
+            "sycl_gemm",
+            default=False,
+            description="Use SYCL also for the wave propagation part (default for Intel GPUs)",
+            when=f"+{v}",
+        )
+
+    requires(
+        "-cuda -rocm -intel_gpu",
+        "+cuda",
+        "+rocm",
+        "+intel_gpu",
+        policy="one_of",
+        msg="You may either compile for one GPU backend, or for CPU.",
+    )
+
+    requires("%oneapi", when="sycl_backend=oneapi")
+
+    depends_on("hipsycl@0.9.3: +cuda", when="+cuda sycl_backend=acpp")
+
+    # TODO: this one needs to be +rocm as well--but that's not implemented yet
+    depends_on("hipsycl@develop", when="+rocm sycl_backend=acpp")
+
+    # TODO: extend as soon as level zero is available
+    depends_on("hipsycl@develop", when="+intel_gpu sycl_backend=acpp")
+
+    # TODO: once adaptivecpp supports NVHPC, forward that (SYCL_USE_NVHPC)
+
+    # GPU architecture requirements
+    conflicts(
+        "cuda_arch=none",
+        when="+cuda",
+        msg="A value for cuda_arch must be specified. Add cuda_arch=XX",
+    )
+
+    conflicts(
+        "amdgpu_target=none",
+        when="+rocm",
+        msg="A value for amdgpu_arch must be specified. Add amdgpu_arch=XX",
+    )
+
+    conflicts(
+        "intel_gpu_arch=none",
+        when="+intel_gpu",
+        msg="A value for intel_gpu_arch must be specified. Add intel_gpu_arch=XX",
+    )
+
+    variant(
+        "gemm_tools_list",
+        default="LIBXSMM,PSpaMM",
+        description="gemm toolkit(s) for the (CPU) code generator",
+        values=("LIBXSMM", "MKL", "OpenBLAS", "BLIS", "PSpaMM", "Eigen", "LIBXSMM_JIT"),
+        multi=True,
+    )
+
+    variant("memkind", default=True, description="Use memkind library for hbw memory support")
+
+    depends_on("mpi")
+
+    with when("+cuda"):
+        for var in ["openmpi", "mpich", "mvapich", "mvapich2", "mvapich2-gdr"]:
+            depends_on(f"{var} +cuda", when=f"^[virtuals=mpi] {var}")
+
+    with when("+rocm"):
+        for var in ["mpich", "mvapich2-gdr"]:
+            depends_on(f"{var} +rocm", when=f"^[virtuals=mpi] {var}")
+
+    # with cuda 12 and llvm 14:15, we have the issue: "error: no template named 'texture"
+    # https://github.com/llvm/llvm-project/issues/61340
+    conflicts("cuda@12", when="+cuda ^llvm@14:15")
+    depends_on("cuda@11:", when="+cuda")
+    depends_on("hip", when="+rocm")
+
+    # graph partitioning
+    with when("graph_partitioning_libs=parmetis"):
+        depends_on("parmetis +int64 +shared")
+        depends_on("metis +int64 +shared")
+
+    depends_on(
+        "scotch +mpi +mpi_thread +shared +threads +int64", when="graph_partitioning_libs=ptscotch"
+    )
+    depends_on("kahip", when="graph_partitioning_libs=parhip")
+
+    depends_on("hdf5 +shared +threadsafe +hl +mpi")
+
+    depends_on("netcdf-c@4.6: +shared +mpi", when="+netcdf")
+
+    depends_on("asagi +mpi +mpi3", when="+asagi")
+
+    depends_on("easi ~asagi jit=impalajit,lua", when="~asagi")
+    depends_on("easi +asagi jit=impalajit,lua", when="+asagi")
+
+    depends_on("intel-mkl threads=none", when="gemm_tools_list=MKL")
+    depends_on("blis threads=none", when="gemm_tools_list=BLIS")
+    depends_on("openblas threads=none", when="gemm_tools_list=OpenBLAS")
+    depends_on("libxsmm@main", when="gemm_tools_list=LIBXSMM_JIT")
+
+    conflicts("gemm_tools_list=LIBXSMM", when="gemm_tools_list=LIBXSMM_JIT")
+
+    depends_on("memkind", when="+memkind target=x86_64:")
+
+    depends_on("yaml-cpp@0.6.2")
+    depends_on("eigen@3.4.0")
+
+    # build dependencies (code generation)
+    with default_args(type="build"):
+        # https://seissol.readthedocs.io/en/latest/installing-dependencies.html
+        depends_on("cmake@3.20:")
+        depends_on("python@3.9:")
+        depends_on("py-setuptools")
+        depends_on("py-numpy@1.12:")
+        depends_on("py-scipy")
+        depends_on("py-matplotlib")
+        depends_on("py-pspamm", when="gemm_tools_list=PSpaMM")
+
+        forwarded_variants = ["cuda", "intel_gpu", "rocm"]
+        for v in forwarded_variants:
+            depends_on("py-gemmforge", when=f"+{v}")
+            depends_on("py-chainforgecodegen", when=f"+{v}")
+
+        depends_on("libxsmm@=1.17 +generator", when="gemm_tools_list=LIBXSMM target=x86_64:")
+
+    def cmake_args(self):
+        args = [
+            "-DMPI=ON",
+            self.define_from_variant("ASAGI", "asagi"),
+            self.define_from_variant("PRECISION", "precision"),
+            self.define_from_variant("PLASTICITY_METHOD", "plasticity_method"),
+            self.define_from_variant("DR_QUAD_RULE", "dr_quad_rule"),
+            self.define_from_variant("ORDER", "convergence_order"),
+            self.define_from_variant("EQUATIONS", "equations"),
+            self.define_from_variant("NETCDF", "netcdf"),
+        ]
+
+        gemm_tools_list = ",".join(self.spec.variants["gemm_tools_list"].value)
+        args.append(f"-DGEMM_TOOLS_LIST={gemm_tools_list}")
+
+        graph_partitioning_libs = ",".join(self.spec.variants["graph_partitioning_libs"].value)
+        args.append(f"-DGRAPH_PARTITIONING_LIBS={graph_partitioning_libs}")
+
+        if self.spec.variants["equations"].value != "viscoelastic2":
+            args.append("-DNUMBER_OF_MECHANISMS=0")
+        else:
+            args.append(self.define_from_variant("NUMBER_OF_MECHANISMS", "number_of_mechanisms"))
+
+        with_gpu = (
+            self.spec.satisfies("+cuda")
+            or self.spec.satisfies("+rocm")
+            or self.spec.satisfies("+intel_gpu")
+        )
+
+        if with_gpu:
+            # Nvidia GPUs
+            if self.spec.satisfies("+cuda"):
+                cuda_arch = self.spec.variants["cuda_arch"].value[0]
+                args.append(f"-DDEVICE_ARCH=sm_{cuda_arch}")
+                args.append("-DUSE_GRAPH_CAPTURING=ON -DENABLE_PROFILING_MARKERS=ON")
+                if self.spec.satisfies("~sycl_gemm"):
+                    args.append("-DDEVICE_BACKEND=cuda")
+
+            # ROCm/AMD GPUs
+            if self.spec.satisfies("+rocm"):
+                amdgpu_target = self.spec.variants["amdgpu_target"].value[0]
+                args.append(f"-DDEVICE_ARCH={amdgpu_target}")
+                args.append("-DENABLE_PROFILING_MARKERS=ON")
+
+                if self.spec.satisfies("+rocm@:5.6"):
+                    args.append("-DUSE_GRAPH_CAPTURING=OFF")
+                else:
+                    args.append("-DUSE_GRAPH_CAPTURING=ON")
+
+                if self.spec.satisfies("~sycl_gemm"):
+                    args.append("-DDEVICE_BACKEND=hip")
+
+            # Intel GPUs
+            if self.spec.satisfies("+intel_gpu"):
+                assert self.spec.variants["intel_gpu_arch"].value != "none"
+                intel_gpu_arch = self.spec.variants["intel_gpu_arch"].value
+                if self.spec.satisfies("@:1.1.3"):
+                    args.append("-DUSE_GRAPH_CAPTURING=OFF")
+                else:
+                    args.append("-DUSE_GRAPH_CAPTURING=ON")
+                args.append(f"-DDEVICE_ARCH={intel_gpu_arch}")
+
+            # SYCL
+            sycl_backends = {"acpp": "hipsycl", "oneapi": "oneapi"}
+            syclcc_backends = {"acpp": "hipsycl", "oneapi": "dpcpp"}
+
+            sycl_backend = self.spec.variants["sycl_backend"].value
+            args.append(f"-DSYCLCC={syclcc_backends[sycl_backend]}")
+            if self.spec.satisfies("+sycl_gemm"):
+                args.append(f"-DDEVICE_BACKEND={sycl_backends[sycl_backend]}")
+
+        # CPU arch
+
+        # cf. https://spack.readthedocs.io/en/latest/basic_usage.html#support-for-specific-microarchitectures
+
+        # basic family matching
+        hostarch = "noarch"
+        if str(self.spec.target) == "aarch64":
+            hostarch = "neon"
+        if str(self.spec.target) == "x86_64":
+            # pure x86_64v1 doesn't support anything above SSE3
+            hostarch = "noarch"
+        if str(self.spec.target) == "x86_64_v2":
+            # AVX is only required for x86_64v3 and upwards
+            hostarch = "wsm"
+        if str(self.spec.target) == "x86_64_v3":
+            hostarch = "hsw"
+        if str(self.spec.target) == "x86_64_v4":
+            hostarch = "skx"
+
+        # specific architecture matching
+        if self.spec.target >= "westmere":
+            hostarch = "wsm"
+        if self.spec.target >= "sandybridge":
+            hostarch = "snb"
+        if self.spec.target >= "haswell":
+            hostarch = "hsw"
+        if self.spec.target >= "mic_knl":
+            hostarch = "knl"
+        if self.spec.target >= "skylake_avx512":
+            hostarch = "skx"
+        if self.spec.target >= "zen":
+            hostarch = "naples"
+        if self.spec.target >= "zen2":
+            hostarch = "rome"
+        if self.spec.target >= "zen3":
+            hostarch = "milan"
+        if self.spec.target >= "zen4":
+            hostarch = "bergamo"
+        if self.spec.target >= "thunderx2":
+            hostarch = "thunderx2t99"
+        if self.spec.target >= "power9":
+            hostarch = "power9"
+        if self.spec.target >= "m1":
+            hostarch = "apple-m1"
+        if self.spec.target >= "m2":
+            hostarch = "apple-m2"
+        if self.spec.target >= "a64fx":
+            hostarch = "a64fx"
+
+        args.append(f"-DHOST_ARCH={hostarch}")
+
+        args.append(self.define("PYTHON_EXECUTABLE", self.spec["python"].command.path))
+
+        return args
+
+    def setup_run_environment(self, env):
+        # for seissol-launch
+        env.prepend_path("PATH", self.prefix.share)
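As a quick usage sketch for the new package (not part of this commit; the version, variant values, and GPU architecture below are illustrative assumptions, not defaults introduced here), SeisSol could then be installed and loaded with a spec along these lines:

    # hypothetical example spec -- pick variants and arch to match your machine
    spack install seissol@1.2.0 precision=single convergence_order=4 +cuda cuda_arch=86
    spack load seissol

Running `spack info seissol` prints the full set of variants defined in the package above.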