Diffstat (limited to 'lib')
 lib/spack/docs/containers.rst             |   4
 lib/spack/docs/contribution_guide.rst     |  21
 lib/spack/docs/developer_guide.rst        | 390
 lib/spack/docs/getting_started.rst        |   2
 lib/spack/docs/images/pr-commit.png       | Bin 0 -> 44592 bytes
 lib/spack/docs/images/projects.png        | Bin 0 -> 69075 bytes
 lib/spack/docs/pipelines.rst              |   4
 lib/spack/docs/workflows.rst              |   7
 lib/spack/llnl/util/tty/pty.py            | 150
 lib/spack/spack/binary_distribution.py    |  31
 lib/spack/spack/build_systems/cuda.py     |  30
 lib/spack/spack/ci.py                     |   2
 lib/spack/spack/cmd/checksum.py           |   2
 lib/spack/spack/cmd/create.py             |   8
 lib/spack/spack/main.py                   |   8
 lib/spack/spack/package.py                |  34
 lib/spack/spack/relocate.py               |   8
 lib/spack/spack/test/bindist.py           | 471
 lib/spack/spack/test/llnl/util/lock.py    |   2
 lib/spack/spack/test/llnl/util/tty/log.py |  42
 lib/spack/spack/test/util/executable.py   |  30
 lib/spack/spack/util/compression.py       |   4
 lib/spack/spack/util/crypto.py            |   2
 lib/spack/spack/util/executable.py        |   9
 lib/spack/spack/util/gpg.py               |   2
 25 files changed, 1118 insertions(+), 145 deletions(-)
diff --git a/lib/spack/docs/containers.rst b/lib/spack/docs/containers.rst
index fe678fd76d..b215507701 100644
--- a/lib/spack/docs/containers.rst
+++ b/lib/spack/docs/containers.rst
@@ -45,7 +45,7 @@ Environments:
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
- RUN cd /opt/spack-environment && spack install && spack gc -y
+ RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
@@ -267,7 +267,7 @@ following ``Dockerfile``:
&& echo " view: /opt/view") > /opt/spack-environment/spack.yaml
# Install the software, remove unnecessary deps
- RUN cd /opt/spack-environment && spack install && spack gc -y
+ RUN cd /opt/spack-environment && spack env activate . && spack install && spack gc -y
# Strip all the binaries
RUN find -L /opt/view/* -type f -exec readlink -f '{}' \; | \
diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst
index 9935ec0c83..10c0875e85 100644
--- a/lib/spack/docs/contribution_guide.rst
+++ b/lib/spack/docs/contribution_guide.rst
@@ -27,11 +27,22 @@ correspond to one feature/bugfix/extension/etc. One can create PRs with
changes relevant to different ideas; however, reviewing such PRs becomes
tedious and error-prone. If possible, try to follow the **one-PR-one-package/feature** rule.
-Spack uses a rough approximation of the `Git Flow <http://nvie.com/posts/a-successful-git-branching-model/>`_
-branching model. The develop branch contains the latest contributions, and
-master is always tagged and points to the latest stable release. Therefore, when
-you send your request, make ``develop`` the destination branch on the
-`Spack repository <https://github.com/spack/spack>`_.
+--------
+Branches
+--------
+
+Spack's ``develop`` branch has the latest contributions. Nearly all pull
+requests should start from ``develop`` and target ``develop``.
+
+There is a branch for each major release series. Release branches
+originate from ``develop`` and have tags for each point release in the
+series. For example, ``releases/v0.14`` has tags for ``0.14.0``,
+``0.14.1``, ``0.14.2``, etc. versions of Spack. We backport important bug
+fixes to these branches, but we do not advance the package versions or
+make other changes that would change the way Spack concretizes
+dependencies. Currently, the maintainers manage these branches by
+cherry-picking from ``develop``. See :ref:`releases` for more
+information.
----------------------
Continuous Integration
diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst
index de2fe80f85..284690bd6f 100644
--- a/lib/spack/docs/developer_guide.rst
+++ b/lib/spack/docs/developer_guide.rst
@@ -495,3 +495,393 @@ The bottom of the output shows the top most time consuming functions,
slowest on top. The profiling support is from Python's built-in tool,
`cProfile
<https://docs.python.org/2/library/profile.html#module-cProfile>`_.
+
+.. _releases:
+
+--------
+Releases
+--------
+
+This section documents Spack's release process. It is intended for
+project maintainers, as the tasks described here require maintainer
+privileges on the Spack repository. For others, we hope this section at
+least provides some insight into how the Spack project works.
+
+.. _release-branches:
+
+^^^^^^^^^^^^^^^^
+Release branches
+^^^^^^^^^^^^^^^^
+
+There are currently two types of Spack releases: :ref:`major releases
+<major-releases>` (``0.13.0``, ``0.14.0``, etc.) and :ref:`point releases
+<point-releases>` (``0.13.1``, ``0.13.2``, ``0.13.3``, etc.). Here is a
+diagram of how Spack release branches work::
+
+ o branch: develop (latest version)
+ |
+ o merge v0.14.1 into develop
+ |\
+ | o branch: releases/v0.14, tag: v0.14.1
+ o | merge v0.14.0 into develop
+ |\|
+ | o tag: v0.14.0
+ |/
+ o merge v0.13.2 into develop
+ |\
+ | o branch: releases/v0.13, tag: v0.13.2
+ o | merge v0.13.1 into develop
+ |\|
+ | o tag: v0.13.1
+ o | merge v0.13.0 into develop
+ |\|
+ | o tag: v0.13.0
+ o |
+ | o
+ |/
+ o
+
+The ``develop`` branch has the latest contributions, and nearly all pull
+requests target ``develop``.
+
+Each Spack release series also has a corresponding branch, e.g.
+``releases/v0.14`` has ``0.14.x`` versions of Spack, and
+``releases/v0.13`` has ``0.13.x`` versions. A major release is the first
+tagged version on a release branch. Point releases are back-ported from
+``develop`` onto release branches. This is typically done by
+cherry-picking bugfix commits off of ``develop``.
+
+To avoid version churn for users of a release series, point releases
+should **not** make changes that would change the concretization of
+packages. They should generally only contain fixes to the Spack core.
+
+Both major and point releases are tagged. After each release, we merge
+the release branch back into ``develop`` so that the version bump and any
+other release-specific changes are visible in the mainline. As a
+convenience, we also tag the latest release as ``releases/latest``,
+so that users can easily check it out to get the latest
+stable version. See :ref:`merging-releases` for more details.
+
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Scheduling work for releases
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We schedule work for releases by creating `GitHub projects
+<https://github.com/spack/spack/projects>`_. At any time, there may be
+several open release projects. For example, here are two releases (from
+some past version of the page linked above):
+
+.. image:: images/projects.png
+
+Here, there's one release in progress for ``0.15.1`` and another for
+``0.16.0``. Each of these releases has a project board containing issues
+and pull requests. GitHub shows a status bar with completed work in
+green, work in progress in purple, and work not started yet in gray, so
+it's fairly easy to see progress.
+
+Spack's project boards are not firm commitments, and we move work between
+releases frequently. If we need to make a release and some tasks are not
+yet done, we will simply move them to next minor or major release, rather
+than delaying the release to complete them.
+
+For more on using GitHub project boards, see `GitHub's documentation
+<https://docs.github.com/en/github/managing-your-work-on-github/about-project-boards>`_.
+
+.. _major-releases:
+
+^^^^^^^^^^^^^^^^^^^^^
+Making Major Releases
+^^^^^^^^^^^^^^^^^^^^^
+
+Assuming you've already created a project board and completed the work
+for a major release, the steps to make the release are as follows:
+
+#. Create two new project boards:
+
+ * One for the next major release
+ * One for the next point release
+
+#. Move any tasks that aren't done yet to one of the new project boards.
+ Small bugfixes should go to the next point release. Major features,
+ refactors, and changes that could affect concretization should go in
+ the next major release.
+
+#. Create a branch for the release, based on ``develop``:
+
+ .. code-block:: console
+
+ $ git checkout -b releases/v0.15 develop
+
+ For a version ``vX.Y.Z``, the branch's name should be
+ ``releases/vX.Y``. That is, you should create a ``releases/vX.Y``
+ branch if you are preparing the ``X.Y.0`` release.
+
+#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.13.0
+ <https://github.com/spack/spack/commit/8eeb64096c98b8a43d1c587f13ece743c864fba9>`_
+
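+   As a rough sketch of what the bump amounts to (the exact variable
+   layout in ``__init__.py`` may differ between releases, so treat this
+   as a hypothetical illustration):
+
+   .. code-block:: python
+
+      #: hypothetical layout -- bump this tuple for the new release
+      spack_version_info = (0, 15, 0)
+
+      #: string version, derived from the tuple above
+      spack_version = '.'.join(str(v) for v in spack_version_info)
+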
+#. Update the release version lists in these files to include the new version:
+
+ * ``lib/spack/spack/schema/container.py``
+ * ``lib/spack/spack/container/images.json``
+
+ **TODO**: We should get rid of this step in some future release.
+
+#. Update ``CHANGELOG.md`` with major highlights in bullet form. Use
+ proper markdown formatting, like `this example from 0.15.0
+ <https://github.com/spack/spack/commit/d4bf70d9882fcfe88507e9cb444331d7dd7ba71c>`_.
+
+#. Push the release branch to GitHub.
+
+#. Make sure CI passes on the release branch, including:
+
+   * Regular unit tests
+   * Build tests
+   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
+
+ If CI is not passing, submit pull requests to ``develop`` as normal
+ and keep rebasing the release branch on ``develop`` until CI passes.
+
+#. Follow the steps in :ref:`publishing-releases`.
+
+#. Follow the steps in :ref:`merging-releases`.
+
+#. Follow the steps in :ref:`announcing-releases`.
+
+
+.. _point-releases:
+
+^^^^^^^^^^^^^^^^^^^^^
+Making Point Releases
+^^^^^^^^^^^^^^^^^^^^^
+
+This assumes you've already created a project board for a point release
+and completed the work to be done for the release. To make a point
+release:
+
+#. Create one new project board for the next point release.
+
+#. Move any cards that aren't done yet to the next project board.
+
+#. Check out the release branch (it should already exist). For the
+ ``X.Y.Z`` release, the release branch is called ``releases/vX.Y``. For
+ ``v0.15.1``, you would check out ``releases/v0.15``:
+
+ .. code-block:: console
+
+ $ git checkout releases/v0.15
+
+#. Cherry-pick each pull request in the ``Done`` column of the release
+ project onto the release branch.
+
+ This is **usually** fairly simple since we squash the commits from the
+ vast majority of pull requests, which means there is only one commit
+ per pull request to cherry-pick. For example, `this pull request
+ <https://github.com/spack/spack/pull/15777>`_ has three commits, but
+ they were squashed into a single commit on merge. You can see the
+ commit that was created here:
+
+ .. image:: images/pr-commit.png
+
+ You can easily cherry-pick it like this (assuming you already have the
+ release branch checked out):
+
+ .. code-block:: console
+
+ $ git cherry-pick 7e46da7
+
+ For pull requests that were rebased, you'll need to cherry-pick each
+ rebased commit individually. There have not been any rebased PRs like
+ this in recent point releases.
+
+ .. warning::
+
+ It is important to cherry-pick commits in the order they happened,
+ otherwise you can get conflicts while cherry-picking. When
+ cherry-picking onto a point release, look at the merge date,
+ **not** the number of the pull request or the date it was opened.
+
+ Sometimes you may **still** get merge conflicts even if you have
+ cherry-picked all the commits in order. This generally means there
+ is some other intervening pull request that the one you're trying
+ to pick depends on. In these cases, you'll need to make a judgment
+ call:
+
+ 1. If the dependency is small, you might just cherry-pick it, too.
+ If you do this, add it to the release board.
+
+ 2. If it is large, then you may decide that this fix is not worth
+ including in a point release, in which case you should remove it
+ from the release project.
+
+ 3. You can always decide to manually back-port the fix to the release
+ branch if neither of the above options makes sense, but this can
+ require a lot of work. It's seldom the right choice.
+
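+   To double-check the order, a small script can list the commits that
+   landed on ``develop`` since the last tag, oldest first. This is just
+   a sketch (not part of Spack; for squash-merged PRs, the commit date
+   is the merge date):
+
+   .. code-block:: python
+
+      import subprocess
+
+      def commits_since(tag, branch='develop'):
+          """Commits on ``branch`` since ``tag``, oldest first -- the
+          order in which they should be cherry-picked."""
+          out = subprocess.check_output(
+              ['git', 'log', '--oneline', '--reverse',
+               '{0}..{1}'.format(tag, branch)],
+              universal_newlines=True)
+          return [line.split(' ', 1) for line in out.splitlines()]
+
+      for sha, subject in commits_since('v0.14.0'):
+          print(sha, subject)
+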
+#. Bump the version in ``lib/spack/spack/__init__.py``. See `this example from 0.14.1
+ <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
+
+#. Update the release version lists in these files to include the new version:
+
+ * ``lib/spack/spack/schema/container.py``
+ * ``lib/spack/spack/container/images.json``
+
+ **TODO**: We should get rid of this step in some future release.
+
+#. Update ``CHANGELOG.md`` with a list of bugfixes. This is typically just a
+ summary of the commits you cherry-picked onto the release branch. See
+ `the changelog from 0.14.1
+ <https://github.com/spack/spack/commit/ff0abb9838121522321df2a054d18e54b566b44a>`_.
+
+#. Push the release branch to GitHub.
+
+#. Make sure CI passes on the release branch, including:
+
+   * Regular unit tests
+   * Build tests
+   * The E4S pipeline at `gitlab.spack.io <https://gitlab.spack.io>`_
+
+ If CI does not pass, you'll need to figure out why, and make changes
+ to the release branch until it does. You can make more commits, modify
+ or remove cherry-picked commits, or cherry-pick **more** from
+ ``develop`` to make this happen.
+
+#. Follow the steps in :ref:`publishing-releases`.
+
+#. Follow the steps in :ref:`merging-releases`.
+
+#. Follow the steps in :ref:`announcing-releases`.
+
+
+.. _publishing-releases:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Publishing a release on GitHub
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+#. Go to `github.com/spack/spack/releases
+ <https://github.com/spack/spack/releases>`_ and click ``Draft a new
+ release``. Set the following:
+
+ * ``Tag version`` should start with ``v`` and contain *all three*
+ parts of the version, e.g. ``v0.15.1``. This is the name of the tag
+ that will be created.
+
+ * ``Target`` should be the ``releases/vX.Y`` branch (e.g., ``releases/v0.15``).
+
+ * ``Release title`` should be ``vX.Y.Z`` (to match the tag, e.g., ``v0.15.1``).
+
+ * For the text, paste the latest release markdown from your ``CHANGELOG.md``.
+
+ You can save the draft and keep coming back to this as you prepare the release.
+
+#. When you are done, click ``Publish release``.
+
+#. Immediately after publishing, go back to
+ `github.com/spack/spack/releases
+ <https://github.com/spack/spack/releases>`_ and download the
+ auto-generated ``.tar.gz`` file for the release. It's the ``Source
+ code (tar.gz)`` link.
+
+#. Click ``Edit`` on the release you just published and attach the downloaded
+ release tarball as a binary. This does two things:
+
+ #. Makes sure that the hash of our releases doesn't change over time.
+ GitHub sometimes annoyingly changes the way it generates
+ tarballs, and then hashes can change if you rely on the
+ auto-generated tarball links.
+
+ #. Gets us download counts on releases visible through the GitHub
+ API. GitHub tracks downloads of artifacts, but *not* the source
+ links. See the `releases
+ page <https://api.github.com/repos/spack/spack/releases>`_ and search
+ for ``download_count`` to see this.
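+
+      As a quick check, those counts can be pulled from the endpoint
+      above with a few lines of Python (a sketch using only GitHub's
+      documented ``tag_name``, ``assets``, and ``download_count`` fields):
+
+      .. code-block:: python
+
+         import json
+         from urllib.request import urlopen
+
+         url = 'https://api.github.com/repos/spack/spack/releases'
+         for release in json.load(urlopen(url)):
+             for asset in release['assets']:
+                 print(release['tag_name'], asset['name'],
+                       asset['download_count'])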
+
+
+.. _merging-releases:
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Updating ``releases/latest`` and ``develop``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the new release is the **highest** Spack release yet, you should
+also tag it as ``releases/latest``. For example, suppose the highest
+release is currently ``0.15.3``:
+
+ * If you are releasing ``0.15.4`` or ``0.16.0``, then you should tag
+ it with ``releases/latest``, as these are higher than ``0.15.3``.
+
+ * If you are making a new release of an **older** major version of
+ Spack, e.g. ``0.14.4``, then you should not tag it as
+ ``releases/latest`` (as there are newer major versions).
+
+ To tag ``releases/latest``, do this:
+
+ .. code-block:: console
+
+ $ git checkout releases/vX.Y # vX.Y is the new release's branch
+ $ git tag --force releases/latest
+ $ git push --tags
+
+ The ``--force`` argument makes ``git`` overwrite the existing
+ ``releases/latest`` tag with the new one.
+
+We also merge each release that we tag as ``releases/latest`` into ``develop``.
+Make sure to do this with a merge commit:
+
+.. code-block:: console
+
+ $ git checkout develop
+ $ git merge --no-ff vX.Y.Z # vX.Y.Z is the new release's tag
+ $ git push
+
+We merge back to ``develop`` because it:
+
+ * updates the version and ``CHANGELOG.md`` on ``develop``.
+ * ensures that your release tag is reachable from the head of
+ ``develop``
+
+We *must* use a real merge commit (via the ``--no-ff`` option) because it
+ensures that the release tag is reachable from the tip of ``develop``.
+This is necessary for ``spack -V`` to work properly -- it uses ``git
+describe --tags`` to find the last reachable tag in the repository and
+reports how far we are from it. For example:
+
+.. code-block:: console
+
+ $ spack -V
+ 0.14.2-1486-b80d5e74e5
+
+This says that we are at commit ``b80d5e74e5``, which is 1,486 commits
+ahead of the ``0.14.2`` release.
+
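+Roughly, that string can be derived from the ``git describe`` output like
+this (a sketch, not Spack's actual implementation; it assumes we are at
+least one commit past the tag):
+
+.. code-block:: python
+
+   import subprocess
+
+   desc = subprocess.check_output(
+       ['git', 'describe', '--tags'],
+       universal_newlines=True).strip()
+   # e.g. 'v0.14.2-1486-gb80d5e74e5'
+   tag, ahead, gsha = desc.rsplit('-', 2)
+   print('{0}-{1}-{2}'.format(tag.lstrip('v'), ahead, gsha[1:]))
+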
+We put this step last in the process because it's best to do it only once
+the release is complete and tagged. If you do it before you've tagged the
+release and later decide you want to tag some later commit, you'll need
+to merge again.
+
+.. _announcing-releases:
+
+^^^^^^^^^^^^^^^^^^^^
+Announcing a release
+^^^^^^^^^^^^^^^^^^^^
+
+We announce releases in all of the major Spack communication channels.
+Publishing the release takes care of GitHub. The remaining channels are
+Twitter, Slack, and the mailing list. Here are the steps:
+
+#. Make a tweet to announce the release. It should link to the release's
+ page on GitHub. You can base it on `this example tweet
+ <https://twitter.com/spackpm/status/1231761858182307840>`_.
+
+#. Ping ``@channel`` in ``#general`` on Slack (`spackpm.slack.com
+ <https://spackpm.slack.com>`_) with a link to the tweet. The tweet
+ will be shown inline so that you do not have to retype your release
+ announcement.
+
+#. Email the Spack mailing list to let them know about the release. As
+ with the tweet, you likely want to link to the release's page on
+ GitHub. It's also helpful to include some information directly in the
+ email. You can base yours on this `example email
+ <https://groups.google.com/forum/#!topic/spack/WT4CT9i_X4s>`_.
+
+Once you've announced the release, congratulations, you're done! You've
+finished making the release!
diff --git a/lib/spack/docs/getting_started.rst b/lib/spack/docs/getting_started.rst
index 226b1f0883..7b908465f5 100644
--- a/lib/spack/docs/getting_started.rst
+++ b/lib/spack/docs/getting_started.rst
@@ -818,7 +818,7 @@ Git
Some Spack packages use ``git`` to download, which might not work on
some computers. For example, the following error was
-encountered on a Macintosh during ``spack install julia-master``:
+encountered on a Macintosh during ``spack install julia@master``:
.. code-block:: console
diff --git a/lib/spack/docs/images/pr-commit.png b/lib/spack/docs/images/pr-commit.png
new file mode 100644
index 0000000000..a87c800ef5
--- /dev/null
+++ b/lib/spack/docs/images/pr-commit.png
Binary files differ
diff --git a/lib/spack/docs/images/projects.png b/lib/spack/docs/images/projects.png
new file mode 100644
index 0000000000..bd2971ff62
--- /dev/null
+++ b/lib/spack/docs/images/projects.png
Binary files differ
diff --git a/lib/spack/docs/pipelines.rst b/lib/spack/docs/pipelines.rst
index 34ff9cfc6d..342024d94c 100644
--- a/lib/spack/docs/pipelines.rst
+++ b/lib/spack/docs/pipelines.rst
@@ -82,9 +82,9 @@ or Amazon Elastic Kubernetes Service (`EKS <https://aws.amazon.com/eks>`_), thou
topics are outside the scope of this document.
Spack's pipelines are now making use of the
-`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>` syntax to run
+`trigger <https://docs.gitlab.com/12.9/ee/ci/yaml/README.html#trigger>`_ syntax to run
dynamically generated
-`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`.
+`child pipelines <https://docs.gitlab.com/12.9/ee/ci/parent_child_pipelines.html>`_.
Note that the use of dynamic child pipelines requires running Gitlab version
``>= 12.9``.
diff --git a/lib/spack/docs/workflows.rst b/lib/spack/docs/workflows.rst
index 17ca695082..2f215f8209 100644
--- a/lib/spack/docs/workflows.rst
+++ b/lib/spack/docs/workflows.rst
@@ -1405,11 +1405,12 @@ The main points that are implemented below:
- export CXXFLAGS="-std=c++11"
install:
- - if ! which spack >/dev/null; then
+ - |
+ if ! which spack >/dev/null; then
mkdir -p $SPACK_ROOT &&
git clone --depth 50 https://github.com/spack/spack.git $SPACK_ROOT &&
- echo -e "config:""\n build_jobs:"" 2" > $SPACK_ROOT/etc/spack/config.yaml **
- echo -e "packages:""\n all:""\n target:"" ['x86_64']"
+ printf "config:\n build_jobs: 2\n" > $SPACK_ROOT/etc/spack/config.yaml &&
+ printf "packages:\n all:\n target: ['x86_64']\n" \
> $SPACK_ROOT/etc/spack/packages.yaml;
fi
- travis_wait spack install cmake@3.7.2~openssl~ncurses
diff --git a/lib/spack/llnl/util/tty/pty.py b/lib/spack/llnl/util/tty/pty.py
index ef5d40ea57..84c272a6e2 100644
--- a/lib/spack/llnl/util/tty/pty.py
+++ b/lib/spack/llnl/util/tty/pty.py
@@ -31,17 +31,17 @@ from spack.util.executable import which
class ProcessController(object):
"""Wrapper around some fundamental process control operations.
- This allows one process to drive another similar to the way a shell
- would, by sending signals and I/O.
+ This allows one process (the controller) to drive another (the
+ minion) similar to the way a shell would, by sending signals and I/O.
"""
- def __init__(self, pid, master_fd,
+ def __init__(self, pid, controller_fd,
timeout=1, sleep_time=1e-1, debug=False):
"""Create a controller to manipulate the process with id ``pid``
Args:
pid (int): id of process to control
- master_fd (int): master file descriptor attached to pid's stdin
+ controller_fd (int): controller fd attached to pid's stdin
timeout (int): time in seconds for wait operations to time out
(default 1 second)
sleep_time (int): time to sleep after signals, to control the
@@ -58,7 +58,7 @@ class ProcessController(object):
"""
self.pid = pid
self.pgid = os.getpgid(pid)
- self.master_fd = master_fd
+ self.controller_fd = controller_fd
self.timeout = timeout
self.sleep_time = sleep_time
self.debug = debug
@@ -67,8 +67,8 @@ class ProcessController(object):
self.ps = which("ps", required=True)
def get_canon_echo_attrs(self):
- """Get echo and canon attributes of the terminal of master_fd."""
- cfg = termios.tcgetattr(self.master_fd)
+ """Get echo and canon attributes of the terminal of controller_fd."""
+ cfg = termios.tcgetattr(self.controller_fd)
return (
bool(cfg[3] & termios.ICANON),
bool(cfg[3] & termios.ECHO),
@@ -82,7 +82,7 @@ class ProcessController(object):
)
def status(self):
- """Print debug message with status info for the child."""
+ """Print debug message with status info for the minion."""
if self.debug:
canon, echo = self.get_canon_echo_attrs()
sys.stderr.write("canon: %s, echo: %s\n" % (
@@ -94,12 +94,12 @@ class ProcessController(object):
sys.stderr.write("\n")
def input_on(self):
- """True if keyboard input is enabled on the master_fd pty."""
+ """True if keyboard input is enabled on the controller_fd pty."""
return self.get_canon_echo_attrs() == (False, False)
def background(self):
- """True if pgid is in a background pgroup of master_fd's terminal."""
- return self.pgid != os.tcgetpgrp(self.master_fd)
+ """True if pgid is in a background pgroup of controller_fd's tty."""
+ return self.pgid != os.tcgetpgrp(self.controller_fd)
def tstp(self):
"""Send SIGTSTP to the controlled process."""
@@ -115,18 +115,18 @@ class ProcessController(object):
def fg(self):
self.horizontal_line("fg")
with log.ignore_signal(signal.SIGTTOU):
- os.tcsetpgrp(self.master_fd, os.getpgid(self.pid))
+ os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
time.sleep(self.sleep_time)
def bg(self):
self.horizontal_line("bg")
with log.ignore_signal(signal.SIGTTOU):
- os.tcsetpgrp(self.master_fd, os.getpgrp())
+ os.tcsetpgrp(self.controller_fd, os.getpgrp())
time.sleep(self.sleep_time)
def write(self, byte_string):
self.horizontal_line("write '%s'" % byte_string.decode("utf-8"))
- os.write(self.master_fd, byte_string)
+ os.write(self.controller_fd, byte_string)
def wait(self, condition):
start = time.time()
@@ -156,50 +156,51 @@ class ProcessController(object):
class PseudoShell(object):
- """Sets up master and child processes with a PTY.
+ """Sets up controller and minion processes with a PTY.
You can create a ``PseudoShell`` if you want to test how some
function responds to terminal input. This is a pseudo-shell from a
- job control perspective; ``master_function`` and ``child_function``
- are set up with a pseudoterminal (pty) so that the master can drive
- the child through process control signals and I/O.
+ job control perspective; ``controller_function`` and ``minion_function``
+ are set up with a pseudoterminal (pty) so that the controller can drive
+ the minion through process control signals and I/O.
The two functions should have signatures like this::
- def master_function(proc, ctl, **kwargs)
- def child_function(**kwargs)
+ def controller_function(proc, ctl, **kwargs)
+ def minion_function(**kwargs)
- ``master_function`` is spawned in its own process and passed three
+ ``controller_function`` is spawned in its own process and passed three
arguments:
proc
- the ``multiprocessing.Process`` object representing the child
+ the ``multiprocessing.Process`` object representing the minion
ctl
- a ``ProcessController`` object tied to the child
+ a ``ProcessController`` object tied to the minion
kwargs
keyword arguments passed from ``PseudoShell.start()``.
- ``child_function`` is only passed ``kwargs`` delegated from
+ ``minion_function`` is only passed ``kwargs`` delegated from
``PseudoShell.start()``.
- The ``ctl.master_fd`` will have its ``master_fd`` connected to
- ``sys.stdin`` in the child process. Both processes will share the
+ The ``ctl.controller_fd`` will have its ``controller_fd`` connected to
+ ``sys.stdin`` in the minion process. Both processes will share the
same ``sys.stdout`` and ``sys.stderr`` as the process instantiating
``PseudoShell``.
Here are the relationships between processes created::
._________________________________________________________.
- | Child Process | pid 2
- | - runs child_function | pgroup 2
+ | Minion Process | pid 2
+ | - runs minion_function | pgroup 2
|_________________________________________________________| session 1
^
- | create process with master_fd connected to stdin
+ | create process with controller_fd connected to stdin
| stdout, stderr are the same as caller
._________________________________________________________.
- | Master Process | pid 1
- | - runs master_function | pgroup 1
- | - uses ProcessController and master_fd to control child | session 1
+ | Controller Process | pid 1
+ | - runs controller_function | pgroup 1
+ | - uses ProcessController and controller_fd to | session 1
+ | control minion |
|_________________________________________________________|
^
| create process
@@ -207,51 +208,51 @@ class PseudoShell(object):
._________________________________________________________.
| Caller | pid 0
| - Constructs, starts, joins PseudoShell | pgroup 0
- | - provides master_function, child_function | session 0
+ | - provides controller_function, minion_function | session 0
|_________________________________________________________|
"""
- def __init__(self, master_function, child_function):
+ def __init__(self, controller_function, minion_function):
self.proc = None
- self.master_function = master_function
- self.child_function = child_function
+ self.controller_function = controller_function
+ self.minion_function = minion_function
# these can be optionally set to change defaults
self.controller_timeout = 1
self.sleep_time = 0
def start(self, **kwargs):
- """Start the master and child processes.
+ """Start the controller and minion processes.
Arguments:
kwargs (dict): arbitrary keyword arguments that will be
- passed to master and child functions
+ passed to controller and minion functions
- The master process will create the child, then call
- ``master_function``. The child process will call
- ``child_function``.
+ The controller process will create the minion, then call
+ ``controller_function``. The minion process will call
+ ``minion_function``.
"""
self.proc = multiprocessing.Process(
- target=PseudoShell._set_up_and_run_master_function,
- args=(self.master_function, self.child_function,
+ target=PseudoShell._set_up_and_run_controller_function,
+ args=(self.controller_function, self.minion_function,
self.controller_timeout, self.sleep_time),
kwargs=kwargs,
)
self.proc.start()
def join(self):
- """Wait for the child process to finish, and return its exit code."""
+ """Wait for the minion process to finish, and return its exit code."""
self.proc.join()
return self.proc.exitcode
@staticmethod
- def _set_up_and_run_child_function(
- tty_name, stdout_fd, stderr_fd, ready, child_function, **kwargs):
- """Child process wrapper for PseudoShell.
+ def _set_up_and_run_minion_function(
+ tty_name, stdout_fd, stderr_fd, ready, minion_function, **kwargs):
+ """Minion process wrapper for PseudoShell.
Handles the mechanics of setting up a PTY, then calls
- ``child_function``.
+ ``minion_function``.
"""
# new process group, like a command or pipeline launched by a shell
@@ -266,45 +267,45 @@ class PseudoShell(object):
if kwargs.get("debug"):
sys.stderr.write(
- "child: stdin.isatty(): %s\n" % sys.stdin.isatty())
+ "minion: stdin.isatty(): %s\n" % sys.stdin.isatty())
# tell the parent that we're really running
if kwargs.get("debug"):
- sys.stderr.write("child: ready!\n")
+ sys.stderr.write("minion: ready!\n")
ready.value = True
try:
- child_function(**kwargs)
+ minion_function(**kwargs)
except BaseException:
traceback.print_exc()
@staticmethod
- def _set_up_and_run_master_function(
- master_function, child_function, controller_timeout, sleep_time,
- **kwargs):
- """Set up a pty, spawn a child process, and execute master_function.
+ def _set_up_and_run_controller_function(
+ controller_function, minion_function, controller_timeout,
+ sleep_time, **kwargs):
+ """Set up a pty, spawn a minion process, execute controller_function.
Handles the mechanics of setting up a PTY, then calls
- ``master_function``.
+ ``controller_function``.
"""
os.setsid() # new session; this process is the controller
- master_fd, child_fd = os.openpty()
- pty_name = os.ttyname(child_fd)
+ controller_fd, minion_fd = os.openpty()
+ pty_name = os.ttyname(minion_fd)
# take controlling terminal
pty_fd = os.open(pty_name, os.O_RDWR)
os.close(pty_fd)
ready = multiprocessing.Value('i', False)
- child_process = multiprocessing.Process(
- target=PseudoShell._set_up_and_run_child_function,
+ minion_process = multiprocessing.Process(
+ target=PseudoShell._set_up_and_run_minion_function,
args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(),
- ready, child_function),
+ ready, minion_function),
kwargs=kwargs,
)
- child_process.start()
+ minion_process.start()
# wait for subprocess to be running and connected.
while not ready.value:
@@ -315,30 +316,31 @@ class PseudoShell(object):
sys.stderr.write("pid: %d\n" % os.getpid())
sys.stderr.write("pgid: %d\n" % os.getpgrp())
sys.stderr.write("sid: %d\n" % os.getsid(0))
- sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(master_fd))
+ sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd))
sys.stderr.write("\n")
- child_pgid = os.getpgid(child_process.pid)
- sys.stderr.write("child pid: %d\n" % child_process.pid)
- sys.stderr.write("child pgid: %d\n" % child_pgid)
- sys.stderr.write("child sid: %d\n" % os.getsid(child_process.pid))
+ minion_pgid = os.getpgid(minion_process.pid)
+ sys.stderr.write("minion pid: %d\n" % minion_process.pid)
+ sys.stderr.write("minion pgid: %d\n" % minion_pgid)
+ sys.stderr.write(
+ "minion sid: %d\n" % os.getsid(minion_process.pid))
sys.stderr.write("\n")
sys.stderr.flush()
- # set up master to ignore SIGTSTP, like a shell
+ # set up controller to ignore SIGTSTP, like a shell
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
- # call the master function once the child is ready
+ # call the controller function once the minion is ready
try:
controller = ProcessController(
- child_process.pid, master_fd, debug=kwargs.get("debug"))
+ minion_process.pid, controller_fd, debug=kwargs.get("debug"))
controller.timeout = controller_timeout
controller.sleep_time = sleep_time
- error = master_function(child_process, controller, **kwargs)
+ error = controller_function(minion_process, controller, **kwargs)
except BaseException:
error = 1
traceback.print_exc()
- child_process.join()
+ minion_process.join()
- # return whether either the parent or child failed
- return error or child_process.exitcode
+ # return whether either the parent or minion failed
+ return error or minion_process.exitcode
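
With the renamed API, typical usage looks like the following hedged sketch
(the two functions are examples, not from this diff; it assumes Spack's
``lib`` directories are on ``sys.path``):

.. code-block:: python

   from llnl.util.tty.pty import PseudoShell

   def minion_function(**kwargs):
       # runs in its own session, with stdin attached to the pty
       print('minion read: {0}'.format(input()))

   def controller_function(proc, ctl, **kwargs):
       ctl.status()             # prints pty debug info when debug=True
       ctl.write(b'hello\n')    # drive the minion's stdin
       return 0                 # nonzero would mark the run as failed

   shell = PseudoShell(controller_function, minion_function)
   shell.start(debug=False)
   exit_code = shell.join()     # 0 only if both processes succeeded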
diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py
index ccfe614720..f53501cbd6 100644
--- a/lib/spack/spack/binary_distribution.py
+++ b/lib/spack/spack/binary_distribution.py
@@ -498,6 +498,7 @@ def download_tarball(spec):
# stage the tarball into standard place
stage = Stage(url, name="build_cache", keep=True)
+ stage.create()
try:
stage.fetch()
return stage.save_filename
@@ -602,15 +603,11 @@ def relocate_package(spec, allow_root):
if not is_backup_file(text_name):
text_names.append(text_name)
-# If we are installing back to the same location don't replace anything
+# If we are not installing back to the same install tree, do the relocation
if old_layout_root != new_layout_root:
- paths_to_relocate = [old_spack_prefix, old_layout_root]
- paths_to_relocate.extend(prefix_to_hash.keys())
- files_to_relocate = list(filter(
- lambda pathname: not relocate.file_is_relocatable(
- pathname, paths_to_relocate=paths_to_relocate),
- map(lambda filename: os.path.join(workdir, filename),
- buildinfo['relocate_binaries'])))
+ files_to_relocate = [os.path.join(workdir, filename)
+ for filename in buildinfo.get('relocate_binaries')
+ ]
# If the buildcache was not created with relativized rpaths
# do the relocation of path in binaries
if (spec.architecture.platform == 'darwin' or
@@ -646,6 +643,13 @@ def relocate_package(spec, allow_root):
new_spack_prefix,
prefix_to_prefix)
+ paths_to_relocate = [old_prefix, old_layout_root]
+ paths_to_relocate.extend(prefix_to_hash.keys())
+ files_to_relocate = list(filter(
+ lambda pathname: not relocate.file_is_relocatable(
+ pathname, paths_to_relocate=paths_to_relocate),
+ map(lambda filename: os.path.join(workdir, filename),
+ buildinfo['relocate_binaries'])))
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate,
old_prefix, new_prefix,
@@ -653,6 +657,17 @@ def relocate_package(spec, allow_root):
new_spack_prefix,
prefix_to_prefix)
+# If we are installing back to the same location
+# relocate the sbang location if the spack directory changed
+ else:
+ if old_spack_prefix != new_spack_prefix:
+ relocate.relocate_text(text_names,
+ old_layout_root, new_layout_root,
+ old_prefix, new_prefix,
+ old_spack_prefix,
+ new_spack_prefix,
+ prefix_to_prefix)
+
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
diff --git a/lib/spack/spack/build_systems/cuda.py b/lib/spack/spack/build_systems/cuda.py
index cbee710049..ef1b0266f8 100644
--- a/lib/spack/spack/build_systems/cuda.py
+++ b/lib/spack/spack/build_systems/cuda.py
@@ -12,8 +12,9 @@ import spack.variant
class CudaPackage(PackageBase):
"""Auxiliary class which contains CUDA variant, dependencies and conflicts
and is meant to unify and facilitate its usage.
+
+ Maintainers: ax3l, svenevs
"""
- maintainers = ['ax3l', 'svenevs']
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
# https://developer.nvidia.com/cuda-gpus
@@ -25,6 +26,7 @@ class CudaPackage(PackageBase):
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
+ '80',
]
# FIXME: keep cuda and cuda_arch separate to make usage easier until
@@ -48,6 +50,7 @@ class CudaPackage(PackageBase):
# CUDA version vs Architecture
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
+ # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
depends_on('cuda@:6.0', when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
@@ -58,8 +61,8 @@ class CudaPackage(PackageBase):
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
- depends_on('cuda@5.0:10.2', when='cuda_arch=35')
- depends_on('cuda@6.5:10.2', when='cuda_arch=37')
+ depends_on('cuda@5.0:', when='cuda_arch=35')
+ depends_on('cuda@6.5:', when='cuda_arch=37')
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
@@ -73,6 +76,8 @@ class CudaPackage(PackageBase):
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
+ depends_on('cuda@11.0:', when='cuda_arch=80')
+
# There are at least three cases to be aware of for compiler conflicts
# 1. Linux x86_64
# 2. Linux ppc64le
@@ -88,12 +93,15 @@ class CudaPackage(PackageBase):
conflicts('%gcc@7:', when='+cuda ^cuda@:9.1' + arch_platform)
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+ conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27' + arch_platform)
conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5' + arch_platform)
conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8' + arch_platform)
conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1' + arch_platform)
- conflicts('%pgi@:16', when='+cuda ^cuda@9.2.88:10' + arch_platform)
- conflicts('%pgi@:17', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10' + arch_platform)
+ conflicts('%pgi@:17,20:',
+ when='+cuda ^cuda@10.1.105:10.2.89' + arch_platform)
+ conflicts('%pgi@:17,20.2:', when='+cuda ^cuda@11.0.2' + arch_platform)
conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5' + arch_platform)
conflicts('%clang@:3.7,4:',
when='+cuda ^cuda@8.0:9.0' + arch_platform)
@@ -104,7 +112,8 @@ class CudaPackage(PackageBase):
conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105' + arch_platform)
conflicts('%clang@:3.7,8.1:',
when='+cuda ^cuda@10.1.105:10.1.243' + arch_platform)
- conflicts('%clang@:3.2,9.0:', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89' + arch_platform)
+ conflicts('%clang@:5,10:', when='+cuda ^cuda@11.0.2' + arch_platform)
# x86_64 vs. ppc64le differ according to NVidia docs
# Linux ppc64le compiler conflicts from Table from the docs below:
@@ -119,6 +128,8 @@ class CudaPackage(PackageBase):
conflicts('%gcc@6:', when='+cuda ^cuda@:9' + arch_platform)
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130' + arch_platform)
conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243' + arch_platform)
+ # officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
+ conflicts('%gcc@:4,10:', when='+cuda ^cuda@:11.0.2' + arch_platform)
conflicts('%pgi', when='+cuda ^cuda@:8' + arch_platform)
conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185' + arch_platform)
conflicts('%pgi@:17', when='+cuda ^cuda@:10' + arch_platform)
@@ -128,6 +139,7 @@ class CudaPackage(PackageBase):
conflicts('%clang@7:', when='+cuda ^cuda@10.0.130' + arch_platform)
conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105' + arch_platform)
conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89' + arch_platform)
+ conflicts('%clang@:5,10.0:', when='+cuda ^cuda@11.0.2' + arch_platform)
# Intel is mostly relevant for x86_64 Linux, even though it also
# exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
@@ -141,11 +153,13 @@ class CudaPackage(PackageBase):
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
+ conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
+ conflicts('%intel@19.2:', when='+cuda ^cuda@:11.0.2')
# XL is mostly relevant for ppc64le Linux
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
- conflicts('%xl@17:', when='+cuda ^cuda@:10.2.89')
+ conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.0.2')
# Mac OS X
# platform = ' platform=darwin'
@@ -156,7 +170,7 @@ class CudaPackage(PackageBase):
# `clang-apple@x.y.z as a possible fix.
# Compiler conflicts will eventually be taken from here:
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#abstract
- conflicts('platform=darwin', when='+cuda ^cuda@11.0:')
+ conflicts('platform=darwin', when='+cuda ^cuda@11.0.2:')
# Make sure cuda_arch can not be used without +cuda
for value in cuda_arch_values:
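
For context, a package opts into these variants, dependencies, and
conflicts simply by mixing in ``CudaPackage``. A hypothetical example
(``MySolver`` is not a real package, and the CMake flag is illustrative):

.. code-block:: python

   from spack import *


   class MySolver(CMakePackage, CudaPackage):
       """Hypothetical solver that can target the new cuda_arch=80."""

       version('1.0', sha256='0' * 64)  # placeholder checksum

       def cmake_args(self):
           args = []
           if '+cuda' in self.spec:
               archs = self.spec.variants['cuda_arch'].value
               # cuda_arch=80 now concretizes with cuda@11.0: per the
               # dependency table above
               args.append('-DGPU_ARCHS={0}'.format(';'.join(archs)))
           return args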
diff --git a/lib/spack/spack/ci.py b/lib/spack/spack/ci.py
index 22ba60235a..283fcab3a1 100644
--- a/lib/spack/spack/ci.py
+++ b/lib/spack/spack/ci.py
@@ -493,7 +493,7 @@ def generate_gitlab_ci_yaml(env, print_summary, output_file,
after_script = None
if custom_spack_repo:
if not custom_spack_ref:
- custom_spack_ref = 'master'
+ custom_spack_ref = 'develop'
before_script = [
('git clone "{0}"'.format(custom_spack_repo)),
'pushd ./spack && git checkout "{0}" && popd'.format(
diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py
index 97e7833af0..188deb1149 100644
--- a/lib/spack/spack/cmd/checksum.py
+++ b/lib/spack/spack/cmd/checksum.py
@@ -65,7 +65,7 @@ def checksum(parser, args):
version_lines = spack.stage.get_checksums_for_versions(
url_dict, pkg.name, keep_stage=args.keep_stage,
- batch=(args.batch or len(args.versions) > 0),
+ batch=(args.batch or len(args.versions) > 0 or len(url_dict) == 1),
fetch_options=pkg.fetch_options)
print()
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index 7d12dc98a7..48326868ae 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -445,6 +445,9 @@ def setup_parser(subparser):
subparser.add_argument(
'--skip-editor', action='store_true',
help="skip the edit session for the package (e.g., automation)")
+ subparser.add_argument(
+ '-b', '--batch', action='store_true',
+ help="don't ask which versions to checksum")
class BuildSystemGuesser:
@@ -511,7 +514,7 @@ class BuildSystemGuesser:
# Determine the build system based on the files contained
# in the archive.
for pattern, bs in clues:
- if any(re.search(pattern, l) for l in lines):
+ if any(re.search(pattern, line) for line in lines):
self.build_system = bs
break
@@ -629,7 +632,8 @@ def get_versions(args, name):
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
- keep_stage=args.keep_stage, batch=True)
+ keep_stage=args.keep_stage,
+ batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
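
The heuristic added to both commands is the same at heart: skip the
interactive version prompt whenever there is nothing useful to ask. A
sketch of ``checksum``'s form (``create`` omits the explicit-versions
clause):

.. code-block:: python

   def should_batch(batch_flag, requested_versions, url_dict):
       # no prompt when the user asked for batch mode, gave explicit
       # versions, or there is only one version to checksum anyway
       return batch_flag or len(requested_versions) > 0 or len(url_dict) == 1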
diff --git a/lib/spack/spack/main.py b/lib/spack/spack/main.py
index b15e99f08b..1bab26c2e1 100644
--- a/lib/spack/spack/main.py
+++ b/lib/spack/spack/main.py
@@ -702,16 +702,16 @@ def main(argv=None):
if stored_var_name in os.environ:
os.environ[var] = os.environ[stored_var_name]
+ # make spack.config aware of any command line configuration scopes
+ if args.config_scopes:
+ spack.config.command_line_scopes = args.config_scopes
+
# activate an environment if one was specified on the command line
if not args.no_env:
env = ev.find_environment(args)
if env:
ev.activate(env, args.use_env_repo, add_view=False)
- # make spack.config aware of any command line configuration scopes
- if args.config_scopes:
- spack.config.command_line_scopes = args.config_scopes
-
if args.print_shell_vars:
print_setup_info(*args.print_shell_vars.split(','))
return 0
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 4febfb1b47..c5cae4f9b0 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -22,6 +22,7 @@ import shutil
import sys
import textwrap
import time
+import traceback
from six import StringIO
from six import string_types
from six import with_metaclass
@@ -1747,7 +1748,23 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
with spack.store.db.prefix_write_lock(spec):
if pkg is not None:
- spack.hooks.pre_uninstall(spec)
+ try:
+ spack.hooks.pre_uninstall(spec)
+ except Exception as error:
+ if force:
+ error_msg = (
+ "One or more pre_uninstall hooks have failed"
+ " for {0}, but Spack is continuing with the"
+ " uninstall".format(str(spec)))
+ if isinstance(error, spack.error.SpackError):
+ error_msg += (
+ "\n\nError message: {0}".format(str(error)))
+ tty.warn(error_msg)
+ # Note that if the uninstall succeeds then we won't be
+ # seeing this error again and won't have another chance
+ # to run the hook.
+ else:
+ raise
# Uninstalling in Spack only requires removing the prefix.
if not spec.external:
@@ -1768,7 +1785,20 @@ class PackageBase(with_metaclass(PackageMeta, PackageViewMixin, object)):
spack.store.db.remove(spec)
if pkg is not None:
- spack.hooks.post_uninstall(spec)
+ try:
+ spack.hooks.post_uninstall(spec)
+ except Exception:
+ # If there is a failure here, this is our only chance to do
+ # something about it: at this point the Spec has been removed
+ # from the DB and prefix, so the post-uninstallation hooks
+ # will not have another chance to run.
+ error_msg = (
+ "One or more post-uninstallation hooks failed for"
+ " {0}, but the prefix has been removed (if it is not"
+ " external).".format(str(spec)))
+ tb_msg = traceback.format_exc()
+ error_msg += "\n\nThe error:\n\n{0}".format(tb_msg)
+ tty.warn(error_msg)
tty.msg('Successfully uninstalled {0}'.format(spec.short_spec))
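
The pre-uninstall hunk follows a guard pattern that can be isolated as a
sketch (``warn`` stands in for ``tty.warn``); the post-uninstall variant
always warns instead of re-raising, since those hooks get no second chance:

.. code-block:: python

   import traceback

   def run_hook_guarded(hook, spec, force, warn):
       """Run ``hook(spec)``; on failure, re-raise unless ``force``."""
       try:
           hook(spec)
       except Exception:
           if not force:
               raise
           warn('hook failed for {0}, continuing anyway:\n{1}'.format(
               spec, traceback.format_exc()))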
diff --git a/lib/spack/spack/relocate.py b/lib/spack/spack/relocate.py
index 56e7c6632c..e299f1c5c1 100644
--- a/lib/spack/spack/relocate.py
+++ b/lib/spack/spack/relocate.py
@@ -804,15 +804,17 @@ def relocate_text(
where they should be relocated
"""
# TODO: reduce the number of arguments (8 seems too much)
- sbang_regex = r'#!/bin/bash {0}/bin/sbang'.format(orig_spack)
- new_sbang = r'#!/bin/bash {0}/bin/sbang'.format(new_spack)
+ orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(orig_spack)
+ new_sbang = '#!/bin/bash {0}/bin/sbang'.format(new_spack)
for file in files:
_replace_prefix_text(file, orig_install_prefix, new_install_prefix)
for orig_dep_prefix, new_dep_prefix in new_prefixes.items():
_replace_prefix_text(file, orig_dep_prefix, new_dep_prefix)
_replace_prefix_text(file, orig_layout_root, new_layout_root)
- _replace_prefix_text(file, sbang_regex, new_sbang)
+ # relocate the sbang location only if the spack directory changed
+ if orig_spack != new_spack:
+ _replace_prefix_text(file, orig_sbang, new_sbang)
def relocate_text_bin(
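
In isolation, the guard's effect looks like this sketch (assuming
``_replace_prefix_text`` does a plain textual replacement on each file's
contents):

.. code-block:: python

   def rewrite_sbang(text, orig_spack, new_spack):
       if orig_spack == new_spack:
           return text  # same Spack prefix: leave sbang lines untouched
       old = '#!/bin/bash {0}/bin/sbang'.format(orig_spack)
       new = '#!/bin/bash {0}/bin/sbang'.format(new_spack)
       return text.replace(old, new)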
diff --git a/lib/spack/spack/test/bindist.py b/lib/spack/spack/test/bindist.py
new file mode 100644
index 0000000000..f561077edd
--- /dev/null
+++ b/lib/spack/spack/test/bindist.py
@@ -0,0 +1,471 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+"""
+This test checks creating and installing buildcaches
+"""
+import os
+import py
+import pytest
+import argparse
+import platform
+import spack.caches
+import spack.compilers
+import spack.config
+import spack.fetch_strategy
+import spack.paths
+import spack.repo
+import spack.stage
+import spack.store
+import spack.binary_distribution as bindist
+import spack.cmd.buildcache as buildcache
+import spack.cmd.install as install
+import spack.cmd.uninstall as uninstall
+import spack.cmd.mirror as mirror
+from spack.spec import Spec
+from spack.directory_layout import YamlDirectoryLayout
+
+
+def_install_path_scheme = '${ARCHITECTURE}/${COMPILERNAME}-${COMPILERVER}/${PACKAGE}-${VERSION}-${HASH}' # noqa: E501
+ndef_install_path_scheme = '${PACKAGE}/${VERSION}/${ARCHITECTURE}-${COMPILERNAME}-${COMPILERVER}-${HASH}' # noqa: E501
+
+mirror_path_def = None
+mirror_path_rel = None
+
+
+@pytest.fixture(scope='function')
+def cache_directory(tmpdir):
+ old_cache_path = spack.caches.fetch_cache
+ tmpdir.ensure('fetch_cache', dir=True)
+ fsc = spack.fetch_strategy.FsCache(str(tmpdir.join('fetch_cache')))
+ spack.config.caches = fsc
+ yield spack.config.caches
+ tmpdir.join('fetch_cache').remove()
+ spack.config.caches = old_cache_path
+
+
+@pytest.fixture(scope='session')
+def session_mirror_def(tmpdir_factory):
+ dir = tmpdir_factory.mktemp('mirror')
+ global mirror_path_def
+ mirror_path_def = dir
+ dir.ensure('build_cache', dir=True)
+ yield dir
+ dir.join('build_cache').remove()
+
+
+@pytest.fixture(scope='function')
+def mirror_directory_def(session_mirror_def):
+ yield str(session_mirror_def)
+
+
+@pytest.fixture(scope='session')
+def session_mirror_rel(tmpdir_factory):
+ dir = tmpdir_factory.mktemp('mirror')
+ global mirror_path_rel
+ mirror_path_rel = dir
+ dir.ensure('build_cache', dir=True)
+ yield dir
+ dir.join('build_cache').remove()
+
+
+@pytest.fixture(scope='function')
+def mirror_directory_rel(session_mirror_rel):
+ yield str(session_mirror_rel)
+
+
+@pytest.fixture(scope='session')
+def config_directory(tmpdir_factory):
+ tmpdir = tmpdir_factory.mktemp('test_configs')
+ # restore some sane defaults for packages and config
+ config_path = py.path.local(spack.paths.etc_path)
+ modules_yaml = config_path.join('spack', 'defaults', 'modules.yaml')
+ os_modules_yaml = config_path.join('spack', 'defaults', '%s' %
+ platform.system().lower(),
+ 'modules.yaml')
+ packages_yaml = config_path.join('spack', 'defaults', 'packages.yaml')
+ config_yaml = config_path.join('spack', 'defaults', 'config.yaml')
+ repos_yaml = config_path.join('spack', 'defaults', 'repos.yaml')
+ tmpdir.ensure('site', dir=True)
+ tmpdir.ensure('user', dir=True)
+ tmpdir.ensure('site/%s' % platform.system().lower(), dir=True)
+ modules_yaml.copy(tmpdir.join('site', 'modules.yaml'))
+ os_modules_yaml.copy(tmpdir.join('site/%s' % platform.system().lower(),
+ 'modules.yaml'))
+ packages_yaml.copy(tmpdir.join('site', 'packages.yaml'))
+ config_yaml.copy(tmpdir.join('site', 'config.yaml'))
+ repos_yaml.copy(tmpdir.join('site', 'repos.yaml'))
+ yield tmpdir
+ tmpdir.remove()
+
+
+@pytest.fixture(scope='function')
+def default_config(tmpdir_factory, config_directory, monkeypatch):
+
+ mutable_dir = tmpdir_factory.mktemp('mutable_config').join('tmp')
+ config_directory.copy(mutable_dir)
+
+ cfg = spack.config.Configuration(
+ *[spack.config.ConfigScope(name, str(mutable_dir))
+ for name in ['site/%s' % platform.system().lower(),
+ 'site', 'user']])
+
+ monkeypatch.setattr(spack.config, 'config', cfg)
+
+ # This is essential, otherwise the cache will create weird side effects
+ # that will compromise subsequent tests if compilers.yaml is modified
+ monkeypatch.setattr(spack.compilers, '_cache_config_file', [])
+ njobs = spack.config.get('config:build_jobs')
+ if not njobs:
+ spack.config.set('config:build_jobs', 4, scope='user')
+ extensions = spack.config.get('config:template_dirs')
+ if not extensions:
+ spack.config.set('config:template_dirs',
+ [os.path.join(spack.paths.share_path, 'templates')],
+ scope='user')
+
+ mutable_dir.ensure('build_stage', dir=True)
+ build_stage = spack.config.get('config:build_stage')
+ if not build_stage:
+ spack.config.set('config:build_stage',
+ [str(mutable_dir.join('build_stage'))], scope='user')
+ timeout = spack.config.get('config:connect_timeout')
+ if not timeout:
+ spack.config.set('config:connect_timeout', 10, scope='user')
+ yield spack.config.config
+ mutable_dir.remove()
+
+
+@pytest.fixture(scope='function')
+def install_dir_default_layout(tmpdir):
+ """Hooks a fake install directory with a default layout"""
+ real_store = spack.store.store
+ real_layout = spack.store.layout
+ spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
+ spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
+ path_scheme=def_install_path_scheme) # noqa: E501
+ yield spack.store
+ spack.store.store = real_store
+ spack.store.layout = real_layout
+
+
+@pytest.fixture(scope='function')
+def install_dir_non_default_layout(tmpdir):
+ """Hooks a fake install directory with a non-default layout"""
+ real_store = spack.store.store
+ real_layout = spack.store.layout
+ spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
+ spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
+ path_scheme=ndef_install_path_scheme) # noqa: E501
+ yield spack.store
+ spack.store.store = real_store
+ spack.store.layout = real_layout
+
+
+@pytest.mark.requires_executables(
+ '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.disable_clean_stage_check
+@pytest.mark.maybeslow
+@pytest.mark.usefixtures('default_config', 'cache_directory',
+ 'install_dir_default_layout')
+def test_default_rpaths_create_install_default_layout(tmpdir,
+ mirror_directory_def,
+ install_mockery):
+ """
+ Test the creation and installation of buildcaches with default rpaths
+ into the default directory layout scheme.
+ """
+
+ gspec = Spec('garply')
+ gspec.concretize()
+ cspec = Spec('corge')
+ cspec.concretize()
+
+ # Install patchelf needed for relocate in linux test environment
+ iparser = argparse.ArgumentParser()
+ install.setup_parser(iparser)
+ # Install some packages with dependent packages
+ iargs = iparser.parse_args(['--no-cache', cspec.name])
+ install.install(iparser, iargs)
+
+ global mirror_path_def
+ mirror_path_def = mirror_directory_def
+ mparser = argparse.ArgumentParser()
+ mirror.setup_parser(mparser)
+ margs = mparser.parse_args(
+ ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
+ mirror.mirror(mparser, margs)
+ margs = mparser.parse_args(['list'])
+ mirror.mirror(mparser, margs)
+
+ # setup argument parser
+ parser = argparse.ArgumentParser()
+ buildcache.setup_parser(parser)
+
+ # Set default buildcache args
+ create_args = ['create', '-a', '-u', '-d', str(mirror_path_def),
+ cspec.name]
+ install_args = ['install', '-a', '-u', cspec.name]
+
+ # Create a buildcache
+ args = parser.parse_args(create_args)
+ buildcache.buildcache(parser, args)
+ # Test force overwrite create buildcache
+ create_args.insert(create_args.index('-a'), '-f')
+ args = parser.parse_args(create_args)
+ buildcache.buildcache(parser, args)
+ # create mirror index
+ args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_def)])
+ buildcache.buildcache(parser, args)
+ # list the buildcaches in the mirror
+ args = parser.parse_args(['list', '-a', '-l', '-v'])
+ buildcache.buildcache(parser, args)
+
+ # Uninstall the package and deps
+ uparser = argparse.ArgumentParser()
+ uninstall.setup_parser(uparser)
+ uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
+ uninstall.uninstall(uparser, uargs)
+
+ # test install
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+ # This gives warning that spec is already installed
+ buildcache.buildcache(parser, args)
+
+ # test overwrite install
+ install_args.insert(install_args.index('-a'), '-f')
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+ args = parser.parse_args(['keys', '-f'])
+ buildcache.buildcache(parser, args)
+
+ args = parser.parse_args(['list'])
+ buildcache.buildcache(parser, args)
+
+ args = parser.parse_args(['list', '-a'])
+ buildcache.buildcache(parser, args)
+
+ args = parser.parse_args(['list', '-l', '-v'])
+ buildcache.buildcache(parser, args)
+ bindist._cached_specs = set()
+ spack.stage.purge()
+ margs = mparser.parse_args(
+ ['rm', '--scope', 'site', 'test-mirror-def'])
+ mirror.mirror(mparser, margs)
+
+
+@pytest.mark.requires_executables(
+ '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.disable_clean_stage_check
+@pytest.mark.maybeslow
+@pytest.mark.nomockstage
+@pytest.mark.usefixtures('default_config', 'cache_directory',
+ 'install_dir_non_default_layout')
+def test_default_rpaths_install_nondefault_layout(tmpdir,
+ install_mockery):
+ """
+ Test the installation of buildcaches with default rpaths
+ into the non-default directory layout scheme.
+ """
+
+ gspec = Spec('garply')
+ gspec.concretize()
+ cspec = Spec('corge')
+ cspec.concretize()
+
+ global mirror_path_def
+ mparser = argparse.ArgumentParser()
+ mirror.setup_parser(mparser)
+ margs = mparser.parse_args(
+ ['add', '--scope', 'site', 'test-mirror-def', 'file://%s' % mirror_path_def])
+ mirror.mirror(mparser, margs)
+
+ # setup argument parser
+ parser = argparse.ArgumentParser()
+ buildcache.setup_parser(parser)
+
+ # Set default buildcache args
+ install_args = ['install', '-a', '-u', '%s' % cspec.name]
+
+    # test buildcache install in the non-default install path scheme
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+ # test force install in non-default install path scheme
+ install_args.insert(install_args.index('-a'), '-f')
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+ bindist._cached_specs = set()
+ spack.stage.purge()
+ margs = mparser.parse_args(
+ ['rm', '--scope', 'site', 'test-mirror-def'])
+ mirror.mirror(mparser, margs)
+
+
+@pytest.mark.requires_executables(
+ '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.disable_clean_stage_check
+@pytest.mark.maybeslow
+@pytest.mark.nomockstage
+@pytest.mark.usefixtures('default_config', 'cache_directory',
+ 'install_dir_default_layout')
+def test_relative_rpaths_create_default_layout(tmpdir,
+ mirror_directory_rel,
+ install_mockery):
+ """
+    Test the creation of buildcaches with relative rpaths in the
+    default directory layout scheme.
+ """
+
+ gspec = Spec('garply')
+ gspec.concretize()
+ cspec = Spec('corge')
+ cspec.concretize()
+
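+    # Share the relative-rpath mirror path with the later install tests via a module-level global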
+ global mirror_path_rel
+ mirror_path_rel = mirror_directory_rel
+ iparser = argparse.ArgumentParser()
+ install.setup_parser(iparser)
+ # Install some packages with dependent packages
+ iargs = iparser.parse_args(['--no-cache', cspec.name])
+ install.install(iparser, iargs)
+
+ # setup argument parser
+ parser = argparse.ArgumentParser()
+ buildcache.setup_parser(parser)
+
+ # set default buildcache args
+ create_args = ['create', '-a', '-u', '-r', '-d',
+ str(mirror_path_rel),
+ cspec.name]
+
+    # create build cache with relativized rpaths
+ args = parser.parse_args(create_args)
+ buildcache.buildcache(parser, args)
+ # create mirror index
+ args = parser.parse_args(['update-index', '-d', 'file://%s' % str(mirror_path_rel)])
+ buildcache.buildcache(parser, args)
+ # Uninstall the package and deps
+ uparser = argparse.ArgumentParser()
+ uninstall.setup_parser(uparser)
+ uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
+ uninstall.uninstall(uparser, uargs)
+
+ bindist._cached_specs = set()
+ spack.stage.purge()
+
+
+@pytest.mark.requires_executables(
+ '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.disable_clean_stage_check
+@pytest.mark.maybeslow
+@pytest.mark.nomockstage
+@pytest.mark.usefixtures('default_config', 'cache_directory',
+ 'install_dir_default_layout')
+def test_relative_rpaths_install_default_layout(tmpdir,
+ install_mockery):
+ """
+    Test installation of buildcaches with relative rpaths into the
+    default directory layout scheme.
+ """
+
+ gspec = Spec('garply')
+ gspec.concretize()
+ cspec = Spec('corge')
+ cspec.concretize()
+
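+    # Reuses the relative-rpath mirror populated by the preceding create test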
+ global mirror_path_rel
+ mparser = argparse.ArgumentParser()
+ mirror.setup_parser(mparser)
+ margs = mparser.parse_args(
+ ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
+ mirror.mirror(mparser, margs)
+
+ # setup argument parser
+ parser = argparse.ArgumentParser()
+ buildcache.setup_parser(parser)
+
+ # set default buildcache args
+ install_args = ['install', '-a', '-u',
+ cspec.name]
+
+ # install buildcache created with relativized rpaths
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+    # This gives a warning that the spec is already installed
+ buildcache.buildcache(parser, args)
+
+ # Uninstall the package and deps
+ uparser = argparse.ArgumentParser()
+ uninstall.setup_parser(uparser)
+ uargs = uparser.parse_args(['-y', '--dependents', gspec.name])
+ uninstall.uninstall(uparser, uargs)
+
+    # Reinstall from the build cache after the uninstall
+ buildcache.buildcache(parser, args)
+
+ # test overwrite install
+ install_args.insert(install_args.index('-a'), '-f')
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+ bindist._cached_specs = set()
+ spack.stage.purge()
+ margs = mparser.parse_args(
+ ['rm', '--scope', 'site', 'test-mirror-rel'])
+ mirror.mirror(mparser, margs)
+
+
+@pytest.mark.requires_executables(
+ '/usr/bin/gcc', 'patchelf', 'strings', 'file')
+@pytest.mark.disable_clean_stage_check
+@pytest.mark.maybeslow
+@pytest.mark.nomockstage
+@pytest.mark.usefixtures('default_config', 'cache_directory',
+ 'install_dir_non_default_layout')
+def test_relative_rpaths_install_nondefault(tmpdir,
+ install_mockery):
+ """
+ Test the installation of buildcaches with relativized rpaths
+ into the non-default directory layout scheme.
+ """
+
+ gspec = Spec('garply')
+ gspec.concretize()
+ cspec = Spec('corge')
+ cspec.concretize()
+
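+    # Reuses the relative-rpath mirror path set by the preceding create test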
+ global mirror_path_rel
+
+ mparser = argparse.ArgumentParser()
+ mirror.setup_parser(mparser)
+ margs = mparser.parse_args(
+ ['add', '--scope', 'site', 'test-mirror-rel', 'file://%s' % mirror_path_rel])
+ mirror.mirror(mparser, margs)
+
+ # setup argument parser
+ parser = argparse.ArgumentParser()
+ buildcache.setup_parser(parser)
+
+ # Set default buildcache args
+    install_args = ['install', '-a', '-u', cspec.name]
+
+ # test install in non-default install path scheme and relative path
+ args = parser.parse_args(install_args)
+ buildcache.buildcache(parser, args)
+
+ bindist._cached_specs = set()
+ spack.stage.purge()
+ margs = mparser.parse_args(
+ ['rm', '--scope', 'site', 'test-mirror-rel'])
+ mirror.mirror(mparser, margs)
diff --git a/lib/spack/spack/test/llnl/util/lock.py b/lib/spack/spack/test/llnl/util/lock.py
index a959ea0c73..b2b7cf85ac 100644
--- a/lib/spack/spack/test/llnl/util/lock.py
+++ b/lib/spack/spack/test/llnl/util/lock.py
@@ -1143,8 +1143,6 @@ def test_nested_reads(lock_path):
assert vals['read'] == 1
-@pytest.mark.skipif('macos' in os.environ.get('GITHUB_WORKFLOW', ''),
- reason="Skip failing test for GA on MacOS")
def test_lock_debug_output(lock_path):
host = socket.getfqdn()
diff --git a/lib/spack/spack/test/llnl/util/tty/log.py b/lib/spack/spack/test/llnl/util/tty/log.py
index f23f663713..97950e8324 100644
--- a/lib/spack/spack/test/llnl/util/tty/log.py
+++ b/lib/spack/spack/test/llnl/util/tty/log.py
@@ -111,7 +111,7 @@ def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
# Tests below use a pseudoterminal to test llnl.util.tty.log
#
def simple_logger(**kwargs):
- """Mock logger (child) process for testing log.keyboard_input."""
+ """Mock logger (minion) process for testing log.keyboard_input."""
def handler(signum, frame):
running[0] = False
signal.signal(signal.SIGUSR1, handler)
@@ -125,7 +125,7 @@ def simple_logger(**kwargs):
def mock_shell_fg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_enabled()
@@ -134,7 +134,7 @@ def mock_shell_fg(proc, ctl, **kwargs):
def mock_shell_fg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_disabled_fg()
@@ -143,7 +143,7 @@ def mock_shell_fg_no_termios(proc, ctl, **kwargs):
def mock_shell_bg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -152,7 +152,7 @@ def mock_shell_bg(proc, ctl, **kwargs):
def mock_shell_tstp_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -163,7 +163,7 @@ def mock_shell_tstp_cont(proc, ctl, **kwargs):
def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -177,7 +177,7 @@ def mock_shell_tstp_tstp_cont(proc, ctl, **kwargs):
def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.tstp()
ctl.wait_stopped()
@@ -194,7 +194,7 @@ def mock_shell_tstp_tstp_cont_cont(proc, ctl, **kwargs):
def mock_shell_bg_fg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -207,7 +207,7 @@ def mock_shell_bg_fg(proc, ctl, **kwargs):
def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.bg()
ctl.status()
ctl.wait_disabled()
@@ -220,7 +220,7 @@ def mock_shell_bg_fg_no_termios(proc, ctl, **kwargs):
def mock_shell_fg_bg(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_enabled()
@@ -233,7 +233,7 @@ def mock_shell_fg_bg(proc, ctl, **kwargs):
def mock_shell_fg_bg_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background."""
+ """PseudoShell controller function for test_foreground_background."""
ctl.fg()
ctl.status()
ctl.wait_disabled_fg()
@@ -299,7 +299,7 @@ def test_foreground_background(test_fn, termios_on_or_off, tmpdir):
def synchronized_logger(**kwargs):
- """Mock logger (child) process for testing log.keyboard_input.
+ """Mock logger (minion) process for testing log.keyboard_input.
This logger synchronizes with the parent process to test that 'v' can
toggle output. It is used in ``test_foreground_background_output`` below.
@@ -330,7 +330,7 @@ def synchronized_logger(**kwargs):
def mock_shell_v_v(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background_output."""
+ """Controller function for test_foreground_background_output."""
write_lock = kwargs["write_lock"]
v_lock = kwargs["v_lock"]
@@ -357,7 +357,7 @@ def mock_shell_v_v(proc, ctl, **kwargs):
def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
- """PseudoShell master function for test_foreground_background_output."""
+ """Controller function for test_foreground_background_output."""
write_lock = kwargs["write_lock"]
v_lock = kwargs["v_lock"]
@@ -395,9 +395,9 @@ def test_foreground_background_output(
shell = PseudoShell(test_fn, synchronized_logger)
log_path = str(tmpdir.join("log.txt"))
- # Locks for synchronizing with child
- write_lock = multiprocessing.Lock() # must be held by child to write
- v_lock = multiprocessing.Lock() # held while master is in v mode
+ # Locks for synchronizing with minion
+ write_lock = multiprocessing.Lock() # must be held by minion to write
+ v_lock = multiprocessing.Lock() # held while controller is in v mode
with termios_on_or_off():
shell.start(
@@ -423,16 +423,16 @@ def test_foreground_background_output(
with open(log_path) as log:
log = log.read().strip().split("\n")
- # Master and child process coordinate with locks such that the child
+ # Controller and minion process coordinate with locks such that the minion
# writes "off" when echo is off, and "on" when echo is on. The
# output should contain mostly "on" lines, but may contain an "off"
- # or two. This is because the master toggles echo by sending "v" on
- # stdin to the child, but this is not synchronized with our locks.
+ # or two. This is because the controller toggles echo by sending "v" on
+ # stdin to the minion, but this is not synchronized with our locks.
# It's good enough for a test, though. We allow at most 2 "off"'s in
# the output to account for the race.
assert (
['forced output', 'on'] == uniq(output) or
- output.count("off") <= 2 # if master_fd is a bit slow
+ output.count("off") <= 2 # if controller_fd is a bit slow
)
# log should be off for a while, then on, then off
diff --git a/lib/spack/spack/test/util/executable.py b/lib/spack/spack/test/util/executable.py
index 5e8795f4bf..ae2859ea4b 100644
--- a/lib/spack/spack/test/util/executable.py
+++ b/lib/spack/spack/test/util/executable.py
@@ -40,6 +40,36 @@ print(u'\\xc3')
assert u'\xc3' == script(output=str).strip()
+def test_which_relative_path_with_slash(tmpdir, working_env):
+ tmpdir.ensure('exe')
+ path = str(tmpdir.join('exe'))
+ os.environ['PATH'] = ''
+
+ with tmpdir.as_cwd():
+ no_exe = ex.which('./exe')
+ assert no_exe is None
+
+ fs.set_executable(path)
+ exe = ex.which('./exe')
+ assert exe.path == path
+
+
+def test_which_with_slash_ignores_path(tmpdir, working_env):
+ tmpdir.ensure('exe')
+ tmpdir.ensure('bin{0}exe'.format(os.path.sep))
+
+ path = str(tmpdir.join('exe'))
+ wrong_path = str(tmpdir.join('bin', 'exe'))
+ os.environ['PATH'] = os.path.dirname(wrong_path)
+
+ fs.set_executable(path)
+ fs.set_executable(wrong_path)
+
+ with tmpdir.as_cwd():
+ exe = ex.which('./exe')
+ assert exe.path == path
+
+
def test_which(tmpdir):
os.environ["PATH"] = str(tmpdir)
assert ex.which("spack-test-exe") is None
diff --git a/lib/spack/spack/util/compression.py b/lib/spack/spack/util/compression.py
index 1688b49f1b..ebbe0519d0 100644
--- a/lib/spack/spack/util/compression.py
+++ b/lib/spack/spack/util/compression.py
@@ -14,7 +14,7 @@ EXTS = ["gz", "bz2", "xz", "Z"]
NOTAR_EXTS = ["zip", "tgz", "tbz2", "txz"]
# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
-ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
+ALLOWED_ARCHIVE_TYPES = [".".join(ext) for ext in product(
PRE_EXTS, EXTS)] + PRE_EXTS + EXTS + NOTAR_EXTS
@@ -36,7 +36,7 @@ def decompressor_for(path, extension=None):
bunzip2 = which('bunzip2', required=True)
return bunzip2
tar = which('tar', required=True)
- tar.add_default_arg('-xf')
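+    # '-o' (GNU tar's --no-same-owner) extracts files owned by the current user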
+ tar.add_default_arg('-oxf')
return tar
diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py
index 566e99da21..74a6ee06bd 100644
--- a/lib/spack/spack/util/crypto.py
+++ b/lib/spack/spack/util/crypto.py
@@ -133,7 +133,7 @@ class Checker(object):
@property
def hash_name(self):
"""Get the name of the hash function this Checker is using."""
- return self.hash_fun().name
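+        # hash names are not guaranteed to be lower-case everywhere (e.g. older hashlib); normalize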
+ return self.hash_fun().name.lower()
def check(self, filename):
"""Read the file with the specified name and check its checksum
diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py
index 28656b0a32..097da3337e 100644
--- a/lib/spack/spack/util/executable.py
+++ b/lib/spack/spack/util/executable.py
@@ -233,10 +233,15 @@ def which_string(*args, **kwargs):
path = path.split(os.pathsep)
for name in args:
- for directory in path:
- exe = os.path.join(directory, name)
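+        # A name containing a path separator is resolved relative to the working directory, bypassing PATH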
+ if os.path.sep in name:
+ exe = os.path.abspath(name)
if os.path.isfile(exe) and os.access(exe, os.X_OK):
return exe
+ else:
+ for directory in path:
+ exe = os.path.join(directory, name)
+ if os.path.isfile(exe) and os.access(exe, os.X_OK):
+ return exe
if required:
raise CommandNotFoundError(
diff --git a/lib/spack/spack/util/gpg.py b/lib/spack/spack/util/gpg.py
index 29b2add852..e6b7f56741 100644
--- a/lib/spack/spack/util/gpg.py
+++ b/lib/spack/spack/util/gpg.py
@@ -13,7 +13,7 @@ from spack.util.executable import which
_gnupg_version_re = r"^gpg \(GnuPG\) (.*)$"
-GNUPGHOME = spack.paths.gpg_path
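+# The GnuPG home defaults to Spack's gpg path but can be overridden via SPACK_GNUPGHOME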
+GNUPGHOME = os.getenv('SPACK_GNUPGHOME', spack.paths.gpg_path)
def parse_keys_output(output):