summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/spack/docs/config_yaml.rst15
-rw-r--r--lib/spack/docs/packaging_guide.rst11
-rw-r--r--lib/spack/spack/cmd/common/arguments.py5
-rw-r--r--lib/spack/spack/config.py2
-rw-r--r--lib/spack/spack/test/cmd/common/arguments.py32
5 files changed, 49 insertions, 16 deletions
diff --git a/lib/spack/docs/config_yaml.rst b/lib/spack/docs/config_yaml.rst
index 54a793b53f..7e2759349b 100644
--- a/lib/spack/docs/config_yaml.rst
+++ b/lib/spack/docs/config_yaml.rst
@@ -178,16 +178,23 @@ set ``dirty`` to ``true`` to skip the cleaning step and make all builds
"dirty" by default. Be aware that this will reduce the reproducibility
of builds.
+.. _build-jobs:
+
--------------
``build_jobs``
--------------
Unless overridden in a package or on the command line, Spack builds all
-packages in parallel. For a build system that uses Makefiles, this means
-running ``make -j<build_jobs>``, where ``build_jobs`` is the number of
-threads to use.
+packages in parallel. The default parallelism is equal to the number of
+cores on your machine, up to 16. Parallelism cannot exceed the number of
+cores available on the host. For a build system that uses Makefiles, this
+means running:
+
+- ``make -j<build_jobs>``, when ``build_jobs`` is less than the number of
+ cores on the machine
+- ``make -j<ncores>``, when ``build_jobs`` is greater than or equal to the
+ number of cores on the machine
-The default parallelism is equal to the number of cores on your machine.
If you work on a shared login node or have a strict ulimit, it may be
necessary to set the default to a lower value. By setting ``build_jobs``
to 4, for example, commands like ``spack install`` will run ``make -j4``
diff --git a/lib/spack/docs/packaging_guide.rst b/lib/spack/docs/packaging_guide.rst
index a8ec76e6cd..204a5a3d0e 100644
--- a/lib/spack/docs/packaging_guide.rst
+++ b/lib/spack/docs/packaging_guide.rst
@@ -1713,12 +1713,11 @@ RPATHs in Spack are handled in one of three ways:
Parallel builds
---------------
-By default, Spack will invoke ``make()`` with a ``-j <njobs>``
-argument, so that builds run in parallel. It figures out how many
-jobs to run by determining how many cores are on the host machine.
-Specifically, it uses the number of CPUs reported by Python's
-`multiprocessing.cpu_count()
-<http://docs.python.org/library/multiprocessing.html#multiprocessing.cpu_count>`_.
+By default, Spack will invoke ``make()``, or any other similar tool,
+with a ``-j <njobs>`` argument, so that builds run in parallel.
+The parallelism is determined by the value of the ``build_jobs`` entry
+in ``config.yaml`` (see :ref:`here <build-jobs>` for more details on
+how this value is computed).
If a package does not build properly in parallel, you can override
this setting by adding ``parallel = False`` to your package. For
diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py
index b228c16071..1428b8da36 100644
--- a/lib/spack/spack/cmd/common/arguments.py
+++ b/lib/spack/spack/cmd/common/arguments.py
@@ -5,6 +5,7 @@
import argparse
+import multiprocessing
import spack.cmd
import spack.config
@@ -86,6 +87,7 @@ class SetParallelJobs(argparse.Action):
'[expected a positive integer, got "{1}"]'
raise ValueError(msg.format(option_string, jobs))
+ jobs = min(jobs, multiprocessing.cpu_count())
spack.config.set('config:build_jobs', jobs, scope='command_line')
setattr(namespace, 'jobs', jobs)
@@ -94,7 +96,8 @@ class SetParallelJobs(argparse.Action):
def default(self):
# This default is coded as a property so that look-up
# of this value is done only on demand
- return spack.config.get('config:build_jobs')
+ return min(spack.config.get('config:build_jobs'),
+ multiprocessing.cpu_count())
@default.setter
def default(self, value):
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 3aa91d6349..8114ec21b4 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -100,7 +100,7 @@ config_defaults = {
'verify_ssl': True,
'checksum': True,
'dirty': False,
- 'build_jobs': multiprocessing.cpu_count(),
+ 'build_jobs': min(16, multiprocessing.cpu_count()),
}
}
diff --git a/lib/spack/spack/test/cmd/common/arguments.py b/lib/spack/spack/test/cmd/common/arguments.py
index f7ab281dac..2d32342c1b 100644
--- a/lib/spack/spack/test/cmd/common/arguments.py
+++ b/lib/spack/spack/test/cmd/common/arguments.py
@@ -20,14 +20,38 @@ def parser():
yield p
# Cleanup the command line scope if it was set during tests
if 'command_line' in spack.config.config.scopes:
- spack.config.config.remove_scope('command_line')
+ spack.config.config.scopes['command_line'].clear()
-@pytest.mark.parametrize('cli_args,expected', [
+@pytest.fixture(params=[1, 2, 4, 8, 16, 32])
+def ncores(monkeypatch, request):
+ """Mocks having a machine with n cores for the purpose of
+ computing config:build_jobs.
+ """
+ def _cpu_count():
+ return request.param
+
+ # Patch multiprocessing.cpu_count() to return the value we need
+ monkeypatch.setattr(multiprocessing, 'cpu_count', _cpu_count)
+ # Patch the configuration parts that have been cached already
+ monkeypatch.setitem(spack.config.config_defaults['config'],
+ 'build_jobs', min(16, request.param))
+ monkeypatch.setitem(
+ spack.config.config.scopes, '_builtin',
+ spack.config.InternalConfigScope(
+ '_builtin', spack.config.config_defaults
+ ))
+ return request.param
+
+
+@pytest.mark.parametrize('cli_args,requested', [
(['-j', '24'], 24),
- ([], multiprocessing.cpu_count())
+ # Here we report the default if we have enough cores, as the cap
+ # on the available number of cores will be taken care of in the test
+ ([], 16)
])
-def test_setting_parallel_jobs(parser, cli_args, expected):
+def test_setting_parallel_jobs(parser, cli_args, ncores, requested):
+ expected = min(requested, ncores)
namespace = parser.parse_args(cli_args)
assert namespace.jobs == expected
assert spack.config.get('config:build_jobs') == expected