author     Sergey Kosukhin <sergey.kosukhin@mpimet.mpg.de>   2023-04-17 17:59:10 +0200
committer  GitHub <noreply@github.com>                       2023-04-17 17:59:10 +0200
commit     275bfc15b449c6835e498527552b5a1ce603d278 (patch)
tree       22eeeb3472d12f2cc5cb8c47df2f7b76865b7e0b
parent     036695ac94bc767f7986add2824262ccee2b47f6 (diff)
netcdf-c: major refactoring, new variants and versions (#36485)
-rw-r--r--  var/spack/repos/builtin/packages/netcdf-c/4.7.3-spectrum-mpi-pnetcdf-detect.patch   44
-rw-r--r--  var/spack/repos/builtin/packages/netcdf-c/4.8.1-no-strict-aliasing-config.patch     31
-rw-r--r--  var/spack/repos/builtin/packages/netcdf-c/4.9.0-no-mpi-yes-pnetcdf.patch            11
-rw-r--r--  var/spack/repos/builtin/packages/netcdf-c/package.py                               456
4 files changed, 310 insertions, 232 deletions
diff --git a/var/spack/repos/builtin/packages/netcdf-c/4.7.3-spectrum-mpi-pnetcdf-detect.patch b/var/spack/repos/builtin/packages/netcdf-c/4.7.3-spectrum-mpi-pnetcdf-detect.patch
deleted file mode 100644
index 0e04164ff6..0000000000
--- a/var/spack/repos/builtin/packages/netcdf-c/4.7.3-spectrum-mpi-pnetcdf-detect.patch
+++ /dev/null
@@ -1,44 +0,0 @@
---- a/configure 2019-11-20 18:59:25.000000000 -0500
-+++ b/configure 13:20:48.088873402 -0500
-@@ -18459,12 +18459,12 @@
- $as_echo_n "checking Is libpnetcdf version 1.6.0 or later?... " >&6; }
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
- /* end confdefs.h. */
-+#include <pnetcdf.h>
-
- int
- main ()
- {
-
--#include <pnetcdf.h>
- #if (PNETCDF_VERSION_MAJOR*1000 + PNETCDF_VERSION_MINOR < 1006)
- choke me
- #endif
-@@ -18584,12 +18584,12 @@
- $as_echo_n "checking if erange-fill is enabled in PnetCDF... " >&6; }
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
- /* end confdefs.h. */
-+#include <pnetcdf.h>
-
- int
- main ()
- {
-
--#include <pnetcdf.h>
- #if !defined(PNETCDF_ERANGE_FILL) || PNETCDF_ERANGE_FILL == 0
- choke me
- #endif
-@@ -18619,12 +18619,12 @@
- $as_echo_n "checking if relax-coord-bound is enabled in PnetCDF... " >&6; }
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
- /* end confdefs.h. */
-+#include <pnetcdf.h>
-
- int
- main ()
- {
-
--#include <pnetcdf.h>
- #if !defined(PNETCDF_RELAX_COORD_BOUND) || PNETCDF_RELAX_COORD_BOUND == 0
- choke me
- #endif
diff --git a/var/spack/repos/builtin/packages/netcdf-c/4.8.1-no-strict-aliasing-config.patch b/var/spack/repos/builtin/packages/netcdf-c/4.8.1-no-strict-aliasing-config.patch
deleted file mode 100644
index 3a2e0972c5..0000000000
--- a/var/spack/repos/builtin/packages/netcdf-c/4.8.1-no-strict-aliasing-config.patch
+++ /dev/null
@@ -1,31 +0,0 @@
---- a/configure
-+++ b/configure
-@@ -3307,8 +3307,6 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
- ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
- ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
--CFLAGS="${CFLAGS} -fno-strict-aliasing"
--
- # Expand $ac_aux_dir to an absolute path.
- am_aux_dir=`cd "$ac_aux_dir" && pwd`
-
-@@ -4161,6 +4159,9 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-+
-+CFLAGS="${CFLAGS} -fno-strict-aliasing"
-+
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
- /* end confdefs.h. */
-
---- a/configure.ac
-+++ b/configure.ac
-@@ -61,6 +61,7 @@ AC_CONFIG_HEADERS([config.h])
- ##
- SAVE_CFLAGS="${CFLAGS}"
- AC_LANG_PUSH([C])
-+AC_LANG_COMPILER_REQUIRE
- CFLAGS="${CFLAGS} -fno-strict-aliasing"
-
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([
diff --git a/var/spack/repos/builtin/packages/netcdf-c/4.9.0-no-mpi-yes-pnetcdf.patch b/var/spack/repos/builtin/packages/netcdf-c/4.9.0-no-mpi-yes-pnetcdf.patch
deleted file mode 100644
index de4b34c986..0000000000
--- a/var/spack/repos/builtin/packages/netcdf-c/4.9.0-no-mpi-yes-pnetcdf.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/libhdf5/hdf5internal.c
-+++ b/libhdf5/hdf5internal.c
-@@ -163,7 +163,7 @@ find_var_dim_max_length(NC_GRP_INFO_T *grp, int varid, int dimid,
- if (var->dimids[d] == dimid)
- *maxlen = *maxlen > h5dimlen[d] ? *maxlen : h5dimlen[d];
-
--#ifdef USE_PARALLEL
-+#ifdef USE_PARALLEL4
- /* If we are doing parallel I/O in collective mode (with
- * either pnetcdf or HDF5), then communicate with all
- * other tasks in the collective and find out which has
diff --git a/var/spack/repos/builtin/packages/netcdf-c/package.py b/var/spack/repos/builtin/packages/netcdf-c/package.py
index 25d93a52cf..b837eca71e 100644
--- a/var/spack/repos/builtin/packages/netcdf-c/package.py
+++ b/var/spack/repos/builtin/packages/netcdf-c/package.py
@@ -3,12 +3,16 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import itertools
import os
import sys
+from llnl.util.lang import dedupe
+
import spack.builder
from spack.build_systems import autotools, cmake
from spack.package import *
+from spack.util.environment import filter_system_paths
class NetcdfC(CMakePackage, AutotoolsPackage):
@@ -23,6 +27,7 @@ class NetcdfC(CMakePackage, AutotoolsPackage):
maintainers("skosukhin", "WardF")
version("main", branch="main")
+ version("4.9.2", sha256="bc104d101278c68b303359b3dc4192f81592ae8640f1aee486921138f7f88cb7")
version("4.9.0", sha256="9f4cb864f3ab54adb75409984c6202323d2fc66c003e5308f3cdf224ed41c0a6")
version("4.8.1", sha256="bc018cc30d5da402622bf76462480664c6668b55eb16ba205a0dfb8647161dd0")
version("4.8.0", sha256="aff58f02b1c3e91dc68f989746f652fe51ff39e6270764e484920cb8db5ad092")
@@ -46,36 +51,53 @@ class NetcdfC(CMakePackage, AutotoolsPackage):
version("4.3.3.1", sha256="f2ee78eb310637c007f001e7c18e2d773d23f3455242bde89647137b7344c2e2")
version("4.3.3", sha256="3f16e21bc3dfeb3973252b9addf5defb48994f84fc9c9356081f871526a680e7")
- # configure fails if curl is not installed.
- # See https://github.com/Unidata/netcdf-c/issues/1390
- patch(
- "https://github.com/Unidata/netcdf-c/commit/e5315da1e748dc541d50796fb05233da65e86b6b.patch?full_index=1",
- sha256="c551ca2f5b6bcefa07dd7f8b7bac426a5df9861e091df1ab99167d8d401f963f",
- when="@4.7.0",
- )
- # fix headers
- patch(
- "https://github.com/Unidata/netcdf-c/pull/1505.patch?full_index=1",
- sha256="495b3e5beb7f074625bcec2ca76aebd339e42719e9c5ccbedbdcc4ffb81a7450",
- when="@4.7.2",
- )
- patch(
- "https://github.com/Unidata/netcdf-c/pull/1508.patch?full_index=1",
- sha256="19e7f31b96536928621b1c29bb6d1a57bcb7aa672cea8719acf9ac934cdd2a3e",
- when="@4.7.2",
- )
+ with when("build_system=cmake"):
+ # TODO: document why we need to revert https://github.com/Unidata/netcdf-c/pull/1731
+ # with the following patch:
+ patch("4.8.1-win-hdf5-with-zlib.patch", when="@4.8.1: platform=windows")
- patch("4.8.1-win-hdf5-with-zlib.patch", when="@4.8.1: platform=windows")
+ # TODO: fetch from the upstream repo once https://github.com/Unidata/netcdf-c/pull/2595
+ # is accepted:
+ patch("netcdfc-mpi-win-support.patch", when="platform=windows")
- patch("netcdfc-mpi-win-support.patch", when="platform=windows")
- # See https://github.com/Unidata/netcdf-c/pull/1752
- patch("4.7.3-spectrum-mpi-pnetcdf-detect.patch", when="@4.7.3:4.7.4 +parallel-netcdf")
-
- # See https://github.com/Unidata/netcdf-c/pull/2293
- patch("4.8.1-no-strict-aliasing-config.patch", when="@4.8.1")
+ # Some of the patches touch configure.ac and, therefore, require forcing the autoreconf stage:
+ _force_autoreconf_when = []
+ with when("build_system=autotools"):
+ # See https://github.com/Unidata/netcdf-c/pull/1752
+ patch(
+ "https://github.com/Unidata/netcdf-c/commit/386e2695286702156eba27ab7c68816efb192230.patch?full_index=1",
+ sha256="cb928a91f87c1615a0788f95b95d7a2e3df91dc16822f8b8a34a85d4e926c0de",
+ when="@4.7.3:4.7.4 +parallel-netcdf",
+ )
+ _force_autoreconf_when.append("@4.7.3:4.7.4 +parallel-netcdf")
+
+ # See https://github.com/Unidata/netcdf-c/pull/2293
+ patch(
+ "https://github.com/Unidata/netcdf-c/commit/a7ea050ebb3c412a99cc352859d5176a9b5ef986.patch?full_index=1",
+ sha256="38d34de38bad99737d3308867071196f20a3fb39b936de7bfcfbc85eb0c7ef54",
+ when="@4.8.1",
+ )
+ _force_autoreconf_when.append("@4.8.1")
+
+ with when("@4.7.2"):
+ # Fix headers
+ # See https://github.com/Unidata/netcdf-c/pull/1505
+ patch(
+ "https://github.com/Unidata/netcdf-c/pull/1505.patch?full_index=1",
+ sha256="495b3e5beb7f074625bcec2ca76aebd339e42719e9c5ccbedbdcc4ffb81a7450",
+ )
+ # See https://github.com/Unidata/netcdf-c/pull/1508
+ patch(
+ "https://github.com/Unidata/netcdf-c/pull/1508.patch?full_index=1",
+ sha256="19e7f31b96536928621b1c29bb6d1a57bcb7aa672cea8719acf9ac934cdd2a3e",
+ )
# See https://github.com/Unidata/netcdf-c/pull/2618
- patch("4.9.0-no-mpi-yes-pnetcdf.patch", when="@4.9.0: ~mpi+parallel-netcdf")
+ patch(
+ "https://github.com/Unidata/netcdf-c/commit/00a722b253bae186bba403d0f92ff1eba719591f.patch?full_index=1",
+ sha256="25b83de1e081f020efa9e21c94c595220849f78c125ad43d8015631d453dfcb9",
+ when="@4.9.0:4.9.1~mpi+parallel-netcdf",
+ )
variant("mpi", default=True, description="Enable parallel I/O for netcdf-4")
variant("parallel-netcdf", default=False, description="Enable parallel I/O for classic files")
@@ -83,31 +105,58 @@ class NetcdfC(CMakePackage, AutotoolsPackage):
variant("pic", default=True, description="Produce position-independent code (for shared libs)")
variant("shared", default=True, description="Enable shared library")
variant("dap", default=False, description="Enable DAP support")
+ variant("byterange", default=False, description="Enable byte-range I/O")
variant("jna", default=False, description="Enable JNA support")
variant("fsync", default=False, description="Enable fsync support")
- variant("zstd", default=True, description="Enable ZStandard compression", when="@4.9.0:")
+ variant("nczarr_zip", default=False, description="Enable NCZarr zipfile format storage")
variant("optimize", default=True, description="Enable -O2 for a more optimized lib")
- # It's unclear if cdmremote can be enabled if '--enable-netcdf-4' is passed
- # to the configure script. Since netcdf-4 support is mandatory we comment
- # this variant out.
- # variant('cdmremote', default=False,
- # description='Enable CDM Remote support')
+ variant("szip", default=True, description="Enable Szip compression plugin")
+ variant("blosc", default=True, description="Enable Blosc compression plugin")
+ variant("zstd", default=True, description="Enable Zstandard compression plugin")
+
+ with when("build_system=cmake"):
+ # Based on the versions required by the root CMakeLists.txt:
+ depends_on("cmake@2.8.12:", type="build", when="@4.3.3:4.3")
+ depends_on("cmake@2.8.11:", type="build", when="@4.4.0:")
+ depends_on("cmake@3.6.1:", type="build", when="@4.5.0:")
+ depends_on("cmake@3.12:", type="build", when="@4.9.0:")
+ # Starting version 4.9.1, nczarr_test/CMakeLists.txt relies on the FILE_PERMISSIONS feature
+ # of the configure_file command, which is only available starting CMake 3.20:
+ depends_on("cmake@3.20:", type="test", when="@4.9.1:")
- # The patch for 4.7.0 touches configure.ac. See force_autoreconf below.
with when("build_system=autotools"):
- depends_on("autoconf", type="build", when="@4.7.0,main")
- depends_on("automake", type="build", when="@4.7.0,main")
- depends_on("libtool", type="build", when="@4.7.0,main")
- # CMake system can use m4, but Windows does not yet support
- depends_on("m4", type="build", when=sys.platform != "win32")
+ for __s in itertools.chain(["@main"], _force_autoreconf_when):
+ with when(__s):
+ depends_on("autoconf", type="build")
+ depends_on("automake", type="build")
+ depends_on("libtool", type="build")
+ depends_on("m4", type="build")
+ del __s
+
+ # M4 is also needed for the source and man file generation. All the generated source files are
+ # included in the release tarballs starting at least the oldest supported version:
+ depends_on("m4", type="build", when="@main")
+
+ # The man files are included in the release tarballs starting version 4.5.0 but they are not
+ # needed for the Windows platform:
+ for __p in ["darwin", "cray", "linux"]:
+ with when("platform={0}".format(__p)):
+ # It is possible to install the package with CMake and without M4 on a non-Windows
+ # platform but some of the man files will not be installed in that case (even if they
+ # are in the release tarball):
+ depends_on("m4", type="build", when="build_system=cmake")
+ # Apart from the redundant configure-time check, which we suppress below, M4 is not
+ # needed when building with Autotools if the man files are in the release tarball:
+ depends_on("m4", type="build", when="@:4.4 build_system=autotools")
+ del __p
depends_on("hdf~netcdf", when="+hdf4")
# curl 7.18.0 or later is required:
- # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
+ # https://docs.unidata.ucar.edu/nug/current/getting_and_building_netcdf.html
depends_on("curl@7.18.0:", when="+dap")
- # depends_on("curl@7.18.0:", when='+cdmremote')
+ depends_on("curl@7.18.0:", when="+byterange")
# Need to include libxml2 when using DAP in 4.9.0 and newer to build
# https://github.com/Unidata/netcdf-c/commit/53464e89635a43b812b5fec5f7abb6ff34b9be63
@@ -117,16 +166,15 @@ class NetcdfC(CMakePackage, AutotoolsPackage):
# We need to build with MPI wrappers if any of the two
# parallel I/O features is enabled:
- # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html#build_parallel
+ # https://docs.unidata.ucar.edu/nug/current/getting_and_building_netcdf.html#build_parallel
depends_on("mpi", when="+mpi")
depends_on("mpi", when="+parallel-netcdf")
- # zlib 1.2.5 or later is required for netCDF-4 compression:
- # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
- depends_on("zlib@1.2.5:")
+ # We also need to use MPI wrappers when building against static MPI-enabled HDF5:
+ depends_on("mpi", when="^hdf5+mpi~shared")
# High-level API of HDF5 1.8.9 or later is required for netCDF-4 support:
- # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
+ # https://docs.unidata.ucar.edu/nug/current/getting_and_building_netcdf.html
depends_on("hdf5@1.8.9:+hl")
# Starting version 4.4.0, it became possible to disable parallel I/O even
@@ -141,31 +189,94 @@ class NetcdfC(CMakePackage, AutotoolsPackage):
# https://github.com/Unidata/netcdf-c/issues/250
depends_on("hdf5@:1.8", when="@:4.4.0")
+ # NetCDF 4.7.4 and prior require HDF5 1.10 or older
+ # https://github.com/Unidata/netcdf-c/pull/1671
+ depends_on("hdf5@:1.10", when="@:4.7.3")
+
+ # Although NetCDF 4.8.0 builds and passes the respective tests against HDF5 1.12.0 with the
+ # default API (i.e. the problem reported in https://github.com/Unidata/netcdf-c/issues/1965 is
+ # not reproducible), the configure script fails if HDF5 1.12.0 is built without api=v18
+ # (according to the error message emitted by the configure script) or api=v110 (according to
+ # the comments in the configure script and its implementation). The check that led to the
+ # failure was removed in version 4.8.1 (https://github.com/Unidata/netcdf-c/pull/2044). To
+ # keep it simple, we require HDF5 1.10.x or older:
+ depends_on("hdf5@:1.10", when="@4.8.0")
+
+ with when("+byterange"):
+ # HDF5 implements H5allocate_memory starting version 1.8.15:
+ depends_on("hdf5@1.8.15:")
+ # HDF5 defines H5FD_FEAT_DEFAULT_VFD_COMPATIBLE (required when version 1.10.x is used)
+ # starting version 1.10.2:
+ depends_on("hdf5@:1.9,1.10.2:")
+ # The macro usage was adjusted (required when versions 1.8.23+, 1.10.8+, 1.12.1+ and
+ # 1.13.0+ of HDF5 are used) in NetCDF 4.8.1
+ # (see https://github.com/Unidata/netcdf-c/pull/2034):
+ depends_on("hdf5@:1.8.22,1.10.0:1.10.7,1.12.0,1.13:", when="@:4.8.0")
+ # Compatibility with HDF5 1.14.x was introduced in NetCDF 4.9.2
+ # (see https://github.com/Unidata/netcdf-c/pull/2615):
+ depends_on("hdf5@:1.12", when="@:4.9.1")
+
+ depends_on("libzip", when="+nczarr_zip")
+
+ # According to the documentation (see
+ # https://docs.unidata.ucar.edu/nug/current/getting_and_building_netcdf.html), zlib 1.2.5 or
+ # later is required for netCDF-4 compression. However, zlib became a direct dependency only
+ # starting NetCDF 4.9.0 (for the deflate plugin):
+ depends_on("zlib@1.2.5:", when="@4.9.0:+shared")
+
+ # Use the vendored bzip2 on Windows:
+ for __p in ["darwin", "cray", "linux"]:
+ depends_on("bzip2", when="@4.9.0:+shared platform={0}".format(__p))
+ del __p
+
+ depends_on("szip", when="+szip")
+ depends_on("c-blosc", when="+blosc")
depends_on("zstd", when="+zstd")
- # The feature was introduced in version 4.1.2
- # and was removed in version 4.4.0
- # conflicts('+cdmremote', when='@:4.1.1,4.4:')
+ # Byte-range I/O was added in version 4.7.0:
+ conflicts("+byterange", when="@:4.6")
+
+ # NCZarr was added in version 4.8.0 as an experimental feature and became a supported one in
+ # version 4.8.1:
+ conflicts("+nczarr_zip", when="@:4.8.0")
+
+ # The features were introduced in version 4.9.0:
+ with when("@:4.8"):
+ conflicts("+szip")
+ conflicts("+blosc")
+ conflicts("+zstd")
- # The features were introduced in version 4.1.0
- conflicts("+parallel-netcdf", when="@:4.0")
- conflicts("+hdf4", when="@:4.0")
+ # The plugins are not built when the shared libraries are disabled:
+ with when("~shared"):
+ conflicts("+szip")
+ conflicts("+blosc")
+ conflicts("+zstd")
default_build_system = "cmake" if sys.platform == "win32" else "autotools"
build_system("cmake", "autotools", default=default_build_system)
def setup_run_environment(self, env):
- if "+zstd" in self.spec:
+ if self.spec.satisfies("@4.9.0:+shared"):
+ # Both HDF5 and NCZarr backends honor the same environment variable:
env.append_path("HDF5_PLUGIN_PATH", self.prefix.plugins)
+ def flag_handler(self, name, flags):
+ if self.builder.build_system == "autotools":
+ if name == "cflags":
+ if "+pic" in self.spec:
+ flags.append(self.compiler.cc_pic_flag)
+ if "+optimize" in self.spec:
+ flags.append("-O2")
+ return flags, None, None
+
@property
def libs(self):
shared = "+shared" in self.spec
return find_libraries("libnetcdf", root=self.prefix, shared=shared, recursive=True)
-class Setup:
+class BaseBuilder(metaclass=spack.builder.PhaseCallbacksMeta):
def setup_dependent_build_environment(self, env, dependent_spec):
self.pkg.setup_run_environment(env)
# Some packages, e.g. ncview, refuse to build if the compiler path returned by nc-config
@@ -175,8 +286,6 @@ class Setup:
if os.path.exists(self._nc_config_backup_dir):
env.prepend_path("PATH", self._nc_config_backup_dir)
-
-class BackupStep(metaclass=spack.builder.PhaseCallbacksMeta):
@property
def _nc_config_backup_dir(self):
return join_path(self.pkg.metadata_dir, "spack-nc-config")
@@ -192,14 +301,14 @@ class BackupStep(metaclass=spack.builder.PhaseCallbacksMeta):
filter_compiler_wrappers("nc-config", relative_root="bin")
-class CMakeBuilder(BackupStep, Setup, cmake.CMakeBuilder):
+class CMakeBuilder(BaseBuilder, cmake.CMakeBuilder):
def cmake_args(self):
base_cmake_args = [
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
+ self.define_from_variant("ENABLE_BYTERANGE", "byterange"),
self.define("BUILD_UTILITIES", True),
self.define("ENABLE_NETCDF_4", True),
self.define_from_variant("ENABLE_DAP", "dap"),
- self.define("CMAKE_INSTALL_PREFIX", self.prefix),
self.define_from_variant("ENABLE_HDF4", "hdf4"),
self.define("ENABLE_PARALLEL_TESTS", False),
self.define_from_variant("ENABLE_FSYNC", "fsync"),
@@ -209,25 +318,23 @@ class CMakeBuilder(BackupStep, Setup, cmake.CMakeBuilder):
base_cmake_args.append(self.define("ENABLE_PNETCDF", True))
if self.pkg.spec.satisfies("@4.3.1:"):
base_cmake_args.append(self.define("ENABLE_DYNAMIC_LOADING", True))
+ if "platform=windows" in self.pkg.spec:
+ # Enforce the usage of the vendored version of bzip2 on Windows:
+ base_cmake_args.append(self.define("Bz2_INCLUDE_DIRS", ""))
return base_cmake_args
-class AutotoolsBuilder(BackupStep, Setup, autotools.AutotoolsBuilder):
+class AutotoolsBuilder(BaseBuilder, autotools.AutotoolsBuilder):
@property
def force_autoreconf(self):
- # The patch for 4.7.0 touches configure.ac.
- return self.pkg.spec.satisfies("@4.7.0")
+ return any(self.spec.satisfies(s) for s in self.pkg._force_autoreconf_when)
+ @when("@4.6.3:")
def autoreconf(self, pkg, spec, prefix):
if not os.path.exists(self.configure_abs_path):
Executable("./bootstrap")()
def configure_args(self):
- cflags = []
- cppflags = []
- ldflags = []
- libs = []
-
config_args = [
"--enable-v2",
"--enable-utilities",
@@ -236,105 +343,162 @@ class AutotoolsBuilder(BackupStep, Setup, autotools.AutotoolsBuilder):
"--enable-netcdf-4",
]
- if "+optimize" in self.pkg.spec:
- cflags.append("-O2")
+ # NCZarr was added in version 4.8.0 as an experimental feature and became a supported one
+ # in version 4.8.1:
+ if self.spec.satisfies("@4.8.1:"):
+ config_args.append("--enable-nczarr")
+ elif self.spec.satisfies("@4.8.0"):
+ config_args.append("--disable-nczarr")
+
+ if self.spec.satisfies("@4.9.0:+shared"):
+ # The plugins are not built when the shared libraries are disabled:
+ config_args.extend(
+ ["--enable-plugins", "--with-plugin-dir={0}".format(self.prefix.plugins)]
+ )
+
+ # The option was introduced in version 4.3.1 and does nothing starting version 4.6.1:
+ if self.spec.satisfies("@4.3.1:4.6.0"):
+ config_args.append("--enable-dynamic-loading")
- config_args.extend(self.enable_or_disable("fsync"))
+ if self.spec.satisfies("@4.4:"):
+ config_args += self.enable_or_disable("parallel4", variant="mpi")
- # The flag was introduced in version 4.3.1
- if self.pkg.spec.satisfies("@4.3.1:"):
- config_args.append("--enable-dynamic-loading")
+ config_args += self.enable_or_disable("pnetcdf", variant="parallel-netcdf")
- config_args += self.enable_or_disable("shared")
+ config_args += self.enable_or_disable("hdf4")
- if "+pic" in self.pkg.spec:
- cflags.append(self.pkg.compiler.cc_pic_flag)
+ config_args += self.enable_or_disable("shared")
config_args += self.enable_or_disable("dap")
- # config_args += self.enable_or_disable('cdmremote')
-
- # if '+dap' in self.pkg.spec or '+cdmremote' in self.pkg.spec:
- if "+dap" in self.pkg.spec:
- # Make sure Netcdf links against Spack's curl, otherwise it may
- # pick up system's curl, which can give link errors, e.g.:
- # undefined reference to `SSL_CTX_use_certificate_chain_file
- curl = self.pkg.spec["curl"]
- curl_libs = curl.libs
- libs.append(curl_libs.link_flags)
- ldflags.append(curl_libs.search_flags)
- # TODO: figure out how to get correct flags via headers.cpp_flags
- cppflags.append("-I" + curl.prefix.include)
- elif self.pkg.spec.satisfies("@4.8.0:"):
- # Prevent overlinking to a system installation of libcurl:
- config_args.append("ac_cv_lib_curl_curl_easy_setopt=no")
+ if self.spec.satisfies("@4.9.0:"):
+ # Prevent linking to system libxml2:
+ config_args += self.enable_or_disable("libxml2", variant="dap")
- if self.pkg.spec.satisfies("@4.4:"):
- if "+mpi" in self.pkg.spec:
- config_args.append("--enable-parallel4")
- else:
- config_args.append("--disable-parallel4")
+ if "+byterange" in self.spec:
+ config_args.append("--enable-byterange")
+ elif self.spec.satisfies("@4.7.0:"):
+ config_args.append("--disable-byterange")
- if self.pkg.spec.satisfies("@4.3.2:"):
+ if self.spec.satisfies("@4.3.2:"):
config_args += self.enable_or_disable("jna")
- # Starting version 4.1.3, --with-hdf5= and other such configure options
- # are removed. Variables CPPFLAGS, LDFLAGS, and LD_LIBRARY_PATH must be
- # used instead.
- hdf5_hl = self.pkg.spec["hdf5:hl"]
- cppflags.append(hdf5_hl.headers.cpp_flags)
- ldflags.append(hdf5_hl.libs.search_flags)
-
- if "+parallel-netcdf" in self.pkg.spec:
- config_args.append("--enable-pnetcdf")
- pnetcdf = self.pkg.spec["parallel-netcdf"]
- cppflags.append(pnetcdf.headers.cpp_flags)
- # TODO: change to pnetcdf.libs.search_flags once 'parallel-netcdf'
- # package gets custom implementation of 'libs'
- ldflags.append("-L" + pnetcdf.prefix.lib)
+ config_args += self.enable_or_disable("fsync")
+
+ if any(self.spec.satisfies(s) for s in ["+mpi", "+parallel-netcdf", "^hdf5+mpi~shared"]):
+ config_args.append("CC={0}".format(self.spec["mpi"].mpicc))
+
+ # In general, we rely on the compiler wrapper to inject the required CPPFLAGS and LDFLAGS.
+ # However, the injected LDFLAGS are invisible for the configure script and are added
+ # neither to the pkg-config nor to the nc-config files. Therefore, we generate LDFLAGS
+ # based on the contents of the following list and pass them to the configure script:
+ lib_search_dirs = []
+
+ # In general, we rely on the configure script to generate the required linker flags in the
+ # right order. However, the configure script does not know and does not check for several
+ # possible transitive dependencies and we have to pass them as the LIBS argument. The list
+ # is generated based on the contents of the following list:
+ extra_libs = []
+
+ if "+parallel-netcdf" in self.spec:
+ lib_search_dirs.extend(self.spec["parallel-netcdf"].libs.directories)
+
+ if "+hdf4" in self.spec:
+ hdf = self.spec["hdf"]
+ lib_search_dirs.extend(hdf.libs.directories)
+ # The configure script triggers unavoidable overlinking to jpeg:
+ lib_search_dirs.extend(hdf["jpeg"].libs.directories)
+ if "~shared" in hdf:
+ # We do not use self.spec["hdf:transitive"].libs to avoid even more duplicates
+ # introduced by the configure script:
+ if "+szip" in hdf:
+ extra_libs.append(hdf["szip"].libs)
+ if "+external-xdr" in hdf:
+ extra_libs.append(hdf["rpc"].libs)
+ extra_libs.append(hdf["zlib"].libs)
+
+ hdf5 = self.spec["hdf5:hl"]
+ lib_search_dirs.extend(hdf5.libs.directories)
+ if "~shared" in hdf5:
+ if "+szip" in hdf5:
+ extra_libs.append(hdf5["szip"].libs)
+ extra_libs.append(hdf5["zlib"].libs)
+
+ if self.spec.satisfies("@4.9.0:+shared"):
+ lib_search_dirs.extend(self.spec["zlib"].libs.directories)
else:
- config_args.append("--disable-pnetcdf")
-
- if "+mpi" in self.pkg.spec or "+parallel-netcdf" in self.pkg.spec:
- config_args.append("CC=%s" % self.pkg.spec["mpi"].mpicc)
+ # Prevent overlinking to zlib:
+ config_args.append("ac_cv_search_deflate=")
+
+ if "+nczarr_zip" in self.spec:
+ lib_search_dirs.extend(self.spec["libzip"].libs.directories)
+ elif self.spec.satisfies("@4.9.2:"):
+ # Prevent linking to libzip to disable the feature:
+ config_args.append("ac_cv_search_zip_open=no")
+ elif self.spec.satisfies("@4.8.0:"):
+ # Prevent linking to libzip to disable the feature:
+ config_args.append("ac_cv_lib_zip_zip_open=no")
+
+ if "+szip" in self.spec:
+ lib_search_dirs.extend(self.spec["szip"].libs.directories)
+ elif self.spec.satisfies("@4.9.0:"):
+ # Prevent linking to szip to disable the plugin:
+ config_args.append("ac_cv_lib_sz_SZ_BufftoBuffCompress=no")
+
+ if self.spec.satisfies("@4.9.0:"):
+ if "+shared" in self.spec:
+ lib_search_dirs.extend(self.spec["bzip2"].libs.directories)
+ else:
+ # Prevent redundant entries mentioning system bzip2 in nc-config and pkg-config
+ # files:
+ config_args.append("ac_cv_lib_bz2_BZ2_bzCompress=no")
- config_args += self.enable_or_disable("hdf4")
- if "+hdf4" in self.pkg.spec:
- hdf4 = self.pkg.spec["hdf"]
- cppflags.append(hdf4.headers.cpp_flags)
- # TODO: change to hdf4.libs.search_flags once 'hdf'
- # package gets custom implementation of 'libs' property.
- ldflags.append("-L" + hdf4.prefix.lib)
- # TODO: change to self.pkg.spec['jpeg'].libs.link_flags once the
- # implementations of 'jpeg' virtual package get 'jpeg_libs'
- # property.
- libs.append("-ljpeg")
- if "+szip" in hdf4:
- # This should also come from hdf4.libs
- libs.append("-lsz")
- if "+external-xdr" in hdf4 and hdf4["rpc"].name != "libc":
- libs.append(hdf4["rpc"].libs.link_flags)
-
- if "+zstd" in self.pkg.spec:
- zstd = self.pkg.spec["zstd"]
- cppflags.append(zstd.headers.cpp_flags)
- ldflags.append(zstd.libs.search_flags)
- config_args.append("--with-plugin-dir={}".format(self.prefix.plugins))
- elif "~zstd" in self.pkg.spec:
- # Prevent linking to system zstd.
- # There is no explicit option to disable zstd.
+ if "+zstd" in self.spec:
+ lib_search_dirs.extend(self.spec["zstd"].libs.directories)
+ elif self.spec.satisfies("@4.9.0:"):
+ # Prevent linking to system zstd:
config_args.append("ac_cv_lib_zstd_ZSTD_compress=no")
- # Fortran support
- # In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
- # Use the netcdf-fortran package to install Fortran support.
+ if "+blosc" in self.spec:
+ lib_search_dirs.extend(self.spec["c-blosc"].libs.directories)
+ elif self.spec.satisfies("@4.9.0:"):
+ # Prevent linking to system c-blosc:
+ config_args.append("ac_cv_lib_blosc_blosc_init=no")
+
+ if self.spec.satisfies("@:4.7~dap+byterange"):
+ extra_libs.append(self.spec["curl"].libs)
+ elif "+dap" in self.spec or "+byterange" in self.spec:
+ lib_search_dirs.extend(self.spec["curl"].libs.directories)
+ elif self.spec.satisfies("@4.7.0"):
+ # This particular version fails if curl is not found, even if it is not needed
+ # (see https://github.com/Unidata/netcdf-c/issues/1390). Note that the following does
+ # not trigger linking to system curl for this version because DAP support is disabled:
+ config_args.append("ac_cv_lib_curl_curl_easy_setopt=yes")
+ else:
+ # Prevent linking to system curl (for versions 4.8.0 and newer) and the redundant check
+ # for curl (for older versions):
+ config_args.append("ac_cv_lib_curl_curl_easy_setopt=no")
+
+ if not self.spec.satisfies("@:4.4,main"):
+ # Suppress the redundant check for m4:
+ config_args.append("ac_cv_prog_NC_M4=false")
+
+ lib_search_dirs.extend(d for libs in extra_libs for d in libs.directories)
+ # Remove duplicates and system prefixes:
+ lib_search_dirs = filter_system_paths(dedupe(lib_search_dirs))
+ config_args.append(
+ "LDFLAGS={0}".format(" ".join("-L{0}".format(d) for d in lib_search_dirs))
+ )
- config_args.append("CFLAGS=" + " ".join(cflags))
- config_args.append("CPPFLAGS=" + " ".join(cppflags))
- config_args.append("LDFLAGS=" + " ".join(ldflags))
- config_args.append("LIBS=" + " ".join(libs))
+ extra_lib_names = [n for libs in extra_libs for n in libs.names]
+ # Remove duplicates in the reversed order:
+ extra_lib_names = reversed(list(dedupe(reversed(extra_lib_names))))
+ config_args.append("LIBS={0}".format(" ".join("-l{0}".format(n) for n in extra_lib_names)))
return config_args
+ # It looks like the issues with running the tests in parallel were fixed around version 4.6.0
+ # (see https://github.com/Unidata/netcdf-c/commit/812c2fd4d108cca927582c0d84049c0f271bb9e0):
+ @when("@:4.5.0")
def check(self):
# h5_test fails when run in parallel
make("check", parallel=False)
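
The package.py changes above replace the hard-coded "@4.7.0" check with a _force_autoreconf_when list that is filled in next to the patch() directives and later consulted by AutotoolsBuilder.force_autoreconf. The following is a minimal standalone sketch of that pattern in plain Python (not a real Spack package; FakeSpec is a hypothetical stand-in for a concrete spec, which in Spack provides satisfies()):

    class FakeSpec:
        """Stand-in for a concrete spec; the real object implements satisfies()."""

        def __init__(self, matching):
            self._matching = set(matching)

        def satisfies(self, constraint):
            return constraint in self._matching


    # Collected while the patch() directives are declared in the package class:
    _force_autoreconf_when = ["@4.7.3:4.7.4 +parallel-netcdf", "@4.8.1"]


    def force_autoreconf(spec):
        # Mirrors AutotoolsBuilder.force_autoreconf in the diff above: force the
        # autoreconf stage whenever any patch that touches configure.ac applies.
        return any(spec.satisfies(s) for s in _force_autoreconf_when)


    print(force_autoreconf(FakeSpec(["@4.8.1"])))  # True
    print(force_autoreconf(FakeSpec(["@4.9.2"])))  # False

This keeps the patch conditions and the autoreconf trigger in one place, so adding another configure.ac-touching patch only requires appending its condition to the list.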
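The new configure_args() also drops the old per-dependency CPPFLAGS/LDFLAGS handling in favor of two collected lists (lib_search_dirs and extra_libs) that are deduplicated and turned into the LDFLAGS and LIBS arguments at the end. A minimal sketch of that final step, with simplified stand-ins for llnl.util.lang.dedupe and spack.util.environment.filter_system_paths and made-up example paths and library names:

    def dedupe(items):
        # Keep the first occurrence of each item, preserving order.
        seen, result = set(), []
        for item in items:
            if item not in seen:
                seen.add(item)
                result.append(item)
        return result


    def filter_system_paths(paths):
        # Drop default system prefixes so they do not end up in LDFLAGS.
        system_dirs = ("/usr/lib", "/usr/lib64", "/lib", "/lib64")
        return [p for p in paths if not p.startswith(system_dirs)]


    lib_search_dirs = ["/opt/spack/hdf5/lib", "/usr/lib64", "/opt/spack/hdf5/lib"]
    extra_lib_names = ["sz", "z", "sz"]

    lib_search_dirs = filter_system_paths(dedupe(lib_search_dirs))
    ldflags = "LDFLAGS={0}".format(" ".join("-L{0}".format(d) for d in lib_search_dirs))

    # Duplicate library names are removed from the right so the last occurrence
    # wins, preserving the link order needed for static libraries:
    extra_lib_names = reversed(list(dedupe(reversed(extra_lib_names))))
    libs = "LIBS={0}".format(" ".join("-l{0}".format(n) for n in extra_lib_names))

    print(ldflags)  # LDFLAGS=-L/opt/spack/hdf5/lib
    print(libs)     # LIBS=-lz -lsz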