author    | Harmen Stoppels <harmenstoppels@gmail.com> | 2023-09-07 13:16:51 +0200
committer | GitHub <noreply@github.com>                | 2023-09-07 13:16:51 +0200
commit    | 7bd95f6ad3f0d379df582b0ed5b292dae143b3b9
tree      | 62946cb2718dbdcb3126f37f39339bda78ce3c3c /lib
parent    | 4429e17db053fcf7ec591ce3abe8ffc7733c0a25
move determine_number_of_jobs into spack.util.cpus, use it in concretize (#37620)
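This commit moves `determine_number_of_jobs` from `spack.build_environment` into `spack.util.cpus`, makes its parameters keyword-only, and has it read `config:build_jobs` from a `spack.config.Configuration` object (defaulting to the global config); the environment concretization code now uses it to cap its process pool (see the `environment.py` hunk). A minimal before/after sketch of a call site, based only on the imports and calls visible in the diff below and assuming it runs with Spack's libraries on `sys.path` (e.g. under `spack python`):

```python
# Before this commit: positional argument, imported from spack.build_environment
#   from spack.build_environment import determine_number_of_jobs
#   jobs = determine_number_of_jobs(parallel)

# After this commit: keyword-only arguments, imported from spack.util.cpus
from spack.util.cpus import determine_number_of_jobs

jobs = determine_number_of_jobs(parallel=True)    # min(cpus_available(), config:build_jobs), or the command-line override
serial = determine_number_of_jobs(parallel=False)  # sequential packages always get exactly 1 job
```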
Diffstat (limited to 'lib')
-rw-r--r-- | lib/spack/spack/bootstrap/environment.py   |  4
-rw-r--r-- | lib/spack/spack/build_environment.py       | 35
-rw-r--r-- | lib/spack/spack/build_systems/racket.py    |  5
-rw-r--r-- | lib/spack/spack/environment/environment.py |  2
-rw-r--r-- | lib/spack/spack/package.py                 |  1
-rw-r--r-- | lib/spack/spack/test/build_environment.py  | 49
-rw-r--r-- | lib/spack/spack/util/cpus.py               | 36
7 files changed, 86 insertions, 46 deletions
diff --git a/lib/spack/spack/bootstrap/environment.py b/lib/spack/spack/bootstrap/environment.py
index a2086160f6..2a2fc37b45 100644
--- a/lib/spack/spack/bootstrap/environment.py
+++ b/lib/spack/spack/bootstrap/environment.py
@@ -15,9 +15,9 @@ import archspec.cpu
 
 from llnl.util import tty
 
-import spack.build_environment
 import spack.environment
 import spack.tengine
+import spack.util.cpus
 import spack.util.executable
 from spack.environment import depfile
 
@@ -137,7 +137,7 @@ class BootstrapEnvironment(spack.environment.Environment):
             "-C",
             str(self.environment_root()),
             "-j",
-            str(spack.build_environment.determine_number_of_jobs(parallel=True)),
+            str(spack.util.cpus.determine_number_of_jobs(parallel=True)),
             **kwargs,
         )
 
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 7ecde73b8a..723f398ae6 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -68,7 +68,7 @@ import spack.util.pattern
 from spack.error import NoHeadersError, NoLibrariesError
 from spack.install_test import spack_install_test_log
 from spack.installer import InstallError
-from spack.util.cpus import cpus_available
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import (
     SYSTEM_DIRS,
     EnvironmentModifications,
@@ -537,39 +537,6 @@ def set_wrapper_variables(pkg, env):
     env.set(SPACK_RPATH_DIRS, ":".join(rpath_dirs))
 
 
-def determine_number_of_jobs(
-    parallel=False, command_line=None, config_default=None, max_cpus=None
-):
-    """
-    Packages that require sequential builds need 1 job. Otherwise we use the
-    number of jobs set on the command line. If not set, then we use the config
-    defaults (which is usually set through the builtin config scope), but we
-    cap to the number of CPUs available to avoid oversubscription.
-
-    Parameters:
-        parallel (bool or None): true when package supports parallel builds
-        command_line (int or None): command line override
-        config_default (int or None): config default number of jobs
-        max_cpus (int or None): maximum number of CPUs available. When None, this
-            value is automatically determined.
-    """
-    if not parallel:
-        return 1
-
-    if command_line is None and "command_line" in spack.config.scopes():
-        command_line = spack.config.get("config:build_jobs", scope="command_line")
-
-    if command_line is not None:
-        return command_line
-
-    max_cpus = max_cpus or cpus_available()
-
-    # in some rare cases _builtin config may not be set, so default to max 16
-    config_default = config_default or spack.config.get("config:build_jobs", 16)
-
-    return min(max_cpus, config_default)
-
-
 def set_module_variables_for_package(pkg):
     """Populate the Python module of a package with some useful global names.
     This makes things easier for package writers.
diff --git a/lib/spack/spack/build_systems/racket.py b/lib/spack/spack/build_systems/racket.py
index 8a07773a29..7dd0b23b01 100644
--- a/lib/spack/spack/build_systems/racket.py
+++ b/lib/spack/spack/build_systems/racket.py
@@ -10,9 +10,10 @@ import llnl.util.lang as lang
 import llnl.util.tty as tty
 
 import spack.builder
-from spack.build_environment import SPACK_NO_PARALLEL_MAKE, determine_number_of_jobs
+from spack.build_environment import SPACK_NO_PARALLEL_MAKE
 from spack.directives import build_system, extends, maintainers
 from spack.package_base import PackageBase
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import env_flag
 from spack.util.executable import Executable, ProcessError
 
@@ -92,7 +93,7 @@ class RacketBuilder(spack.builder.Builder):
             "--copy",
             "-i",
             "-j",
-            str(determine_number_of_jobs(parallel)),
+            str(determine_number_of_jobs(parallel=parallel)),
             "--",
             os.getcwd(),
         ]
diff --git a/lib/spack/spack/environment/environment.py b/lib/spack/spack/environment/environment.py
index 8eb7edab40..bf61444f9a 100644
--- a/lib/spack/spack/environment/environment.py
+++ b/lib/spack/spack/environment/environment.py
@@ -1504,7 +1504,7 @@ class Environment:
         start = time.time()
         max_processes = min(
             len(arguments),  # Number of specs
-            spack.config.get("config:build_jobs"),  # Cap on build jobs
+            spack.util.cpus.determine_number_of_jobs(parallel=True),
         )
 
         # TODO: revisit this print as soon as darwin is parallel too
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 1e0a9eb655..b73d82e256 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -96,6 +96,7 @@ from spack.package_base import (
     on_package_attributes,
 )
 from spack.spec import InvalidSpecDetected, Spec
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.executable import *
 from spack.variant import (
     any_combination_of,
diff --git a/lib/spack/spack/test/build_environment.py b/lib/spack/spack/test/build_environment.py
index 0ee5a345df..90f3fb378e 100644
--- a/lib/spack/spack/test/build_environment.py
+++ b/lib/spack/spack/test/build_environment.py
@@ -16,8 +16,9 @@ import spack.config
 import spack.package_base
 import spack.spec
 import spack.util.spack_yaml as syaml
-from spack.build_environment import _static_to_shared_library, determine_number_of_jobs, dso_suffix
+from spack.build_environment import _static_to_shared_library, dso_suffix
 from spack.paths import build_env_path
+from spack.util.cpus import determine_number_of_jobs
 from spack.util.environment import EnvironmentModifications
 from spack.util.executable import Executable
 from spack.util.path import Path, convert_to_platform_path
@@ -442,7 +443,7 @@ def test_parallel_false_is_not_propagating(default_mock_concretization):
     spack.build_environment.set_module_variables_for_package(s["b"].package)
 
     assert s["b"].package.module.make_jobs == spack.build_environment.determine_number_of_jobs(
-        s["b"].package.parallel
+        parallel=s["b"].package.parallel
     )
 
 
@@ -474,28 +475,62 @@ def test_setting_dtags_based_on_config(config_setting, expected_flag, config, mock_packages):
 
 
 def test_build_jobs_sequential_is_sequential():
     assert (
-        determine_number_of_jobs(parallel=False, command_line=8, config_default=8, max_cpus=8) == 1
+        determine_number_of_jobs(
+            parallel=False,
+            max_cpus=8,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 8}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 8}}),
+            ),
+        )
+        == 1
     )
 
 
 def test_build_jobs_command_line_overrides():
     assert (
-        determine_number_of_jobs(parallel=True, command_line=10, config_default=1, max_cpus=1)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=1,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}}),
+            ),
+        )
         == 10
     )
     assert (
-        determine_number_of_jobs(parallel=True, command_line=10, config_default=100, max_cpus=100)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=100,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}}),
+            ),
+        )
         == 10
     )
 
 
 def test_build_jobs_defaults():
     assert (
-        determine_number_of_jobs(parallel=True, command_line=None, config_default=1, max_cpus=10)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=10,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}})
+            ),
+        )
         == 1
     )
     assert (
-        determine_number_of_jobs(parallel=True, command_line=None, config_default=100, max_cpus=10)
+        determine_number_of_jobs(
+            parallel=True,
+            max_cpus=10,
+            config=spack.config.Configuration(
+                spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}})
+            ),
+        )
         == 10
     )
diff --git a/lib/spack/spack/util/cpus.py b/lib/spack/spack/util/cpus.py
index acb6569b1d..b8b371ebb0 100644
--- a/lib/spack/spack/util/cpus.py
+++ b/lib/spack/spack/util/cpus.py
@@ -5,6 +5,9 @@
 
 import multiprocessing
 import os
+from typing import Optional
+
+import spack.config
 
 
 def cpus_available():
@@ -18,3 +21,36 @@ def cpus_available():
         return len(os.sched_getaffinity(0))  # novermin
     except Exception:
         return multiprocessing.cpu_count()
+
+
+def determine_number_of_jobs(
+    *,
+    parallel: bool = False,
+    max_cpus: int = cpus_available(),
+    config: Optional["spack.config.Configuration"] = None,
+) -> int:
+    """
+    Packages that require sequential builds need 1 job. Otherwise we use the
+    number of jobs set on the command line. If not set, then we use the config
+    defaults (which is usually set through the builtin config scope), but we
+    cap to the number of CPUs available to avoid oversubscription.
+
+    Parameters:
+        parallel: true when package supports parallel builds
+        max_cpus: maximum number of CPUs to use (defaults to cpus_available())
+        config: configuration object (defaults to global config)
+    """
+    if not parallel:
+        return 1
+
+    cfg = config or spack.config.CONFIG
+
+    # Command line overrides all
+    try:
+        command_line = cfg.get("config:build_jobs", default=None, scope="command_line")
+        if command_line is not None:
+            return command_line
+    except ValueError:
+        pass
+
+    return min(max_cpus, cfg.get("config:build_jobs", 16))
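The new unit tests above drive the relocated function with an explicit `spack.config.Configuration` built from `InternalConfigScope` objects. The snippet below condenses those cases into a standalone illustration of the resolution order (sequential builds first, then the command-line scope, then the capped default); it reuses only names that appear in the diff and assumes it runs under `spack python`:

```python
import spack.config
from spack.util.cpus import determine_number_of_jobs

# Non-parallel packages always build with a single job, regardless of configuration.
assert determine_number_of_jobs(parallel=False) == 1

# A build_jobs value in the command_line scope wins, even above max_cpus.
cfg = spack.config.Configuration(
    spack.config.InternalConfigScope("command_line", {"config": {"build_jobs": 10}}),
    spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 1}}),
)
assert determine_number_of_jobs(parallel=True, max_cpus=1, config=cfg) == 10

# Otherwise the configured default is capped at max_cpus to avoid oversubscription.
cfg = spack.config.Configuration(
    spack.config.InternalConfigScope("defaults", {"config": {"build_jobs": 100}})
)
assert determine_number_of_jobs(parallel=True, max_cpus=10, config=cfg) == 10
```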