| author | Todd Gamblin <tgamblin@llnl.gov> | 2015-11-04 18:09:59 -0800 |
|---|---|---|
| committer | Todd Gamblin <tgamblin@llnl.gov> | 2015-11-12 15:16:06 -0800 |
| commit | 55662eca696352b5bbe2a0c31868b6ebb33e1c99 (patch) | |
| tree | aff710de7b2017e7ee71b8c7a8fcc630b63e124e /lib | |
| parent | 6dff42be0973c5949d836eab51cfdffb0eda0a69 (diff) | |
| parent | 1c4ef2996219a00e774baafa558880c83ae3d859 (diff) | |
| download | spack-55662eca696352b5bbe2a0c31868b6ebb33e1c99.tar.gz spack-55662eca696352b5bbe2a0c31868b6ebb33e1c99.tar.bz2 spack-55662eca696352b5bbe2a0c31868b6ebb33e1c99.tar.xz spack-55662eca696352b5bbe2a0c31868b6ebb33e1c99.zip | |
Merge branch 'develop' into mplegendre-multi_pkgsrc_roots
Conflicts:
lib/spack/spack/__init__.py
lib/spack/spack/directives.py
lib/spack/spack/packages.py
Diffstat (limited to 'lib')
37 files changed, 2076 insertions, 172 deletions
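The heart of this merge is a new installed-package database (`lib/spack/spack/database.py`) guarded by a POSIX file lock (`lib/spack/llnl/util/lock.py`); commands such as `find`, `install`, `uninstall`, and `module` are rewired from `spack.db.installed_package_specs()` to `spack.installed_db.query()`. As a reading aid before the diff itself, here is a minimal sketch of how that API is used below; the spec name is a hypothetical example and error handling is elided.

```python
import spack

# Queries take their own read lock internally (Database.query wraps a
# read_transaction), so a plain lookup needs no explicit locking.
# 'mpileaks' is a hypothetical installed package.
matching = spack.installed_db.query('mpileaks')

# Anything that mutates the install tree now wraps the whole operation
# in a write transaction, as the reworked `spack install`, `spack diy`,
# and `spack uninstall` commands do in the diff below.
with spack.installed_db.write_transaction():
    for spec in matching:
        spec.package.do_uninstall(force=False)
```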
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py index 7303d7fef6..bce9ef0e94 100644 --- a/lib/spack/docs/conf.py +++ b/lib/spack/docs/conf.py @@ -149,7 +149,7 @@ html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = [('show_copyright', False)] +html_theme_options = { 'logo_only' : True } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_themes"] @@ -163,12 +163,12 @@ html_theme_path = ["_themes"] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +html_logo = '../../../share/spack/logo/spack-logo-white-text-48.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +html_favicon = '../../../share/spack/logo/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py index 029a7536df..03f25d3dff 100644 --- a/lib/spack/llnl/util/filesystem.py +++ b/lib/spack/llnl/util/filesystem.py @@ -222,7 +222,7 @@ def working_dir(dirname, **kwargs): def touch(path): """Creates an empty file at the specified path.""" - with closing(open(path, 'a')) as file: + with open(path, 'a') as file: os.utime(path, None) diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py index be6dad867e..108fa98b36 100644 --- a/lib/spack/llnl/util/lang.py +++ b/lib/spack/llnl/util/lang.py @@ -88,10 +88,7 @@ def index_by(objects, *funcs): result = {} for o in objects: key = f(o) - if key not in result: - result[key] = [o] - else: - result[key].append(o) + result.setdefault(key, []).append(o) for key, objects in result.items(): result[key] = index_by(objects, *funcs[1:]) diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py new file mode 100644 index 0000000000..ac3684bd55 --- /dev/null +++ b/lib/spack/llnl/util/lock.py @@ -0,0 +1,175 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import os +import fcntl +import errno +import time +import socket + +# Default timeout in seconds, after which locks will raise exceptions. +_default_timeout = 60 + +# Sleep time per iteration in spin loop (in seconds) +_sleep_time = 1e-5 + + +class Lock(object): + def __init__(self, file_path): + self._file_path = file_path + self._fd = None + self._reads = 0 + self._writes = 0 + + + def _lock(self, op, timeout): + """This takes a lock using POSIX locks (``fcntl.lockf``). + + The lock is implemented as a spin lock using a nonblocking + call to lockf(). + + On acquiring an exclusive lock, the lock writes this process's + pid and host to the lock file, in case the holding process + needs to be killed later. + + If the lock times out, it raises a ``LockError``. + """ + start_time = time.time() + while (time.time() - start_time) < timeout: + try: + if self._fd is None: + self._fd = os.open(self._file_path, os.O_RDWR) + + fcntl.lockf(self._fd, op | fcntl.LOCK_NB) + if op == fcntl.LOCK_EX: + os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn())) + return + + except IOError as error: + if error.errno == errno.EAGAIN or error.errno == errno.EACCES: + pass + else: + raise + time.sleep(_sleep_time) + + raise LockError("Timed out waiting for lock.") + + + def _unlock(self): + """Releases a lock using POSIX locks (``fcntl.lockf``) + + Releases the lock regardless of mode. Note that read locks may + be masquerading as write locks, but this removes either. + + """ + fcntl.lockf(self._fd, fcntl.LOCK_UN) + os.close(self._fd) + self._fd = None + + + def acquire_read(self, timeout=_default_timeout): + """Acquires a recursive, shared lock for reading. + + Read and write locks can be acquired and released in arbitrary + order, but the POSIX lock is held until all local read and + write locks are released. + + Returns True if it is the first acquire and actually acquires + the POSIX lock, False if it is a nested transaction. + + """ + if self._reads == 0 and self._writes == 0: + self._lock(fcntl.LOCK_SH, timeout) # can raise LockError. + self._reads += 1 + return True + else: + self._reads += 1 + return False + + + def acquire_write(self, timeout=_default_timeout): + """Acquires a recursive, exclusive lock for writing. + + Read and write locks can be acquired and released in arbitrary + order, but the POSIX lock is held until all local read and + write locks are released. + + Returns True if it is the first acquire and actually acquires + the POSIX lock, False if it is a nested transaction. + + """ + if self._writes == 0: + self._lock(fcntl.LOCK_EX, timeout) # can raise LockError. + self._writes += 1 + return True + else: + self._writes += 1 + return False + + + def release_read(self): + """Releases a read lock. + + Returns True if the last recursive lock was released, False if + there are still outstanding locks. + + Does limited correctness checking: if a read lock is released + when none are held, this will raise an assertion error. + + """ + assert self._reads > 0 + + if self._reads == 1 and self._writes == 0: + self._unlock() # can raise LockError. + self._reads -= 1 + return True + else: + self._reads -= 1 + return False + + + def release_write(self): + """Releases a write lock.
+ + Returns True if the last recursive lock was released, False if + there are still outstanding locks. + + Does limited correctness checking: if a write lock is released + when none are held, this will raise an assertion error. + + """ + assert self._writes > 0 + + if self._writes == 1 and self._reads == 0: + self._unlock() # can raise LockError. + self._writes -= 1 + return True + else: + self._writes -= 1 + return False + + +class LockError(Exception): + """Raised when an attempt to acquire a lock times out.""" + pass diff --git a/lib/spack/llnl/util/tty/colify.py b/lib/spack/llnl/util/tty/colify.py index acf64c1e13..0c5227c6bd 100644 --- a/lib/spack/llnl/util/tty/colify.py +++ b/lib/spack/llnl/util/tty/colify.py @@ -33,8 +33,7 @@ import struct from StringIO import StringIO from llnl.util.tty import terminal_size -from llnl.util.tty.color import clen - +from llnl.util.tty.color import clen, cextra class ColumnConfig: def __init__(self, cols): @@ -42,7 +41,6 @@ class ColumnConfig: self.line_length = 0 self.valid = True self.widths = [0] * cols # does not include ansi colors - self.cwidths = [0] * cols # includes ansi colors def __repr__(self): attrs = [(a,getattr(self, a)) for a in dir(self) if not a.startswith("__")] @@ -66,8 +64,6 @@ def config_variable_cols(elts, console_width, padding, cols=0): # Get a bound on the most columns we could possibly have. # 'clen' ignores length of ansi color sequences. lengths = [clen(e) for e in elts] - clengths = [len(e) for e in elts] - max_cols = max(1, console_width / (min(lengths) + padding)) max_cols = min(len(elts), max_cols) @@ -85,7 +81,6 @@ def config_variable_cols(elts, console_width, padding, cols=0): if conf.widths[col] < (length + p): conf.line_length += length + p - conf.widths[col] conf.widths[col] = length + p - conf.cwidths[col] = clengths[i] + p conf.valid = (conf.line_length < console_width) try: @@ -118,7 +113,6 @@ def config_uniform_cols(elts, console_width, padding, cols=0): config = ColumnConfig(cols) config.widths = [max_len] * cols - config.cwidths = [max_clen] * cols return config @@ -147,9 +141,6 @@ def colify(elts, **options): method=<string> Method to use to fit columns. Options are variable or uniform. Variable-width columns are tighter, uniform columns are all the same width and fit less data on the screen. - - len=<func> Function to use for calculating string length. - Useful for ignoring ansi color. Default is 'len'.
""" # Get keyword arguments or set defaults cols = options.pop("cols", 0) @@ -199,9 +190,6 @@ def colify(elts, **options): raise ValueError("method must be one of: " + allowed_methods) cols = config.cols - formats = ["%%-%ds" % width for width in config.cwidths[:-1]] - formats.append("%s") # last column has no trailing space - rows = (len(elts) + cols - 1) / cols rows_last_col = len(elts) % rows @@ -209,7 +197,9 @@ def colify(elts, **options): output.write(" " * indent) for col in xrange(cols): elt = col * rows + row - output.write(formats[col] % elts[elt]) + width = config.widths[col] + cextra(elts[elt]) + fmt = '%%-%ds' % width + output.write(fmt % elts[elt]) output.write("\n") row += 1 diff --git a/lib/spack/llnl/util/tty/color.py b/lib/spack/llnl/util/tty/color.py index 22080a7b37..0d09303da0 100644 --- a/lib/spack/llnl/util/tty/color.py +++ b/lib/spack/llnl/util/tty/color.py @@ -158,6 +158,11 @@ def clen(string): return len(re.sub(r'\033[^m]*m', '', string)) +def cextra(string): + """"Length of extra color characters in a string""" + return len(''.join(re.findall(r'\033[^m]*m', string))) + + def cwrite(string, stream=sys.stdout, color=None): """Replace all color expressions in string with ANSI control codes and write the result to the stream. If color is diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index 71e3ac3715..eccec12d3b 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -29,25 +29,27 @@ from llnl.util.filesystem import * import llnl.util.tty as tty # This lives in $prefix/lib/spack/spack/__file__ -prefix = ancestor(__file__, 4) +spack_root = ancestor(__file__, 4) # The spack script itself -spack_file = join_path(prefix, "bin", "spack") +spack_file = join_path(spack_root, "bin", "spack") # spack directory hierarchy -etc_path = join_path(prefix, "etc") -lib_path = join_path(prefix, "lib", "spack") +lib_path = join_path(spack_root, "lib", "spack") build_env_path = join_path(lib_path, "env") module_path = join_path(lib_path, "spack") compilers_path = join_path(module_path, "compilers") test_path = join_path(module_path, "test") hooks_path = join_path(module_path, "hooks") -var_path = join_path(prefix, "var", "spack") +var_path = join_path(spack_root, "var", "spack") stage_path = join_path(var_path, "stage") packages_path = join_path(var_path, "packages") +share_path = join_path(spack_root, "share", "spack") + +prefix = spack_root opt_path = join_path(prefix, "opt") install_path = join_path(opt_path, "spack") -share_path = join_path(prefix, "share", "spack") +etc_path = join_path(prefix, "etc") # # Setup the spack.repos namespace @@ -66,6 +68,12 @@ db = spack.packages.PackageFinder(*_repo_paths) sys.meta_path.append(db) # +# Set up the installed packages database +# +from spack.database import Database +installed_db = Database(install_path) + +# # Paths to mock files for testing. 
# mock_packages_path = join_path(var_path, "mock_packages") diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py index 0c4b605e91..6c874e30be 100644 --- a/lib/spack/spack/architecture.py +++ b/lib/spack/spack/architecture.py @@ -23,13 +23,13 @@ # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -import platform as py_platform +import re +import platform from llnl.util.lang import memoized import spack import spack.error as serr -from spack.version import Version class InvalidSysTypeError(serr.SpackError): @@ -59,14 +59,11 @@ def get_sys_type_from_environment(): return os.environ.get('SYS_TYPE') -def get_mac_sys_type(): - """Return a Mac OS SYS_TYPE or None if this isn't a mac.""" - mac_ver = py_platform.mac_ver()[0] - if not mac_ver: - return None - - return "macosx_%s_%s" % ( - Version(mac_ver).up_to(2), py_platform.machine()) +def get_sys_type_from_platform(): + """Return the architecture from Python's platform module.""" + sys_type = platform.system() + '-' + platform.machine() + sys_type = re.sub(r'[^\w-]', '_', sys_type) + return sys_type.lower() @memoized @@ -74,7 +71,7 @@ def sys_type(): """Returns a SysType for the current machine.""" methods = [get_sys_type_from_spack_globals, get_sys_type_from_environment, - get_mac_sys_type] + get_sys_type_from_platform] # search for a method that doesn't return None sys_type = None diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py index 03a4930259..96033edc20 100644 --- a/lib/spack/spack/build_environment.py +++ b/lib/spack/spack/build_environment.py @@ -317,4 +317,9 @@ def fork(pkg, function): # message. Just make the parent exit with an error code. pid, returncode = os.waitpid(pid, 0) if returncode != 0: - sys.exit(1) + raise InstallError("Installation process had nonzero exit code: {0}".format(str(returncode))) + + +class InstallError(spack.error.SpackError): + """Raised when a package fails to install""" diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index b96ac5af51..6ce6fa0960 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -124,7 +124,7 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): - matching_specs = spack.db.get_installed(spec) + matching_specs = spack.installed_db.query(spec) if not matching_specs: tty.die("Spec '%s' matches no installed packages."
% spec) diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index e44be41029..1f0e303cdf 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -54,7 +54,7 @@ def deactivate(parser, args): if args.all: if pkg.extendable: tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec) - ext_pkgs = spack.db.installed_extensions_for(spec) + ext_pkgs = spack.installed_db.installed_extensions_for(spec) for ext_pkg in ext_pkgs: ext_pkg.spec.normalize() diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py index 6e7f10fba6..9f8a6d39db 100644 --- a/lib/spack/spack/cmd/diy.py +++ b/lib/spack/spack/cmd/diy.py @@ -58,36 +58,38 @@ def diy(self, args): if len(specs) > 1: tty.die("spack diy only takes one spec.") - spec = specs[0] - if not spack.db.exists(spec.name): - tty.warn("No such package: %s" % spec.name) - create = tty.get_yes_or_no("Create this package?", default=False) - if not create: - tty.msg("Exiting without creating.") - sys.exit(1) - else: - tty.msg("Running 'spack edit -f %s'" % spec.name) - edit_package(spec.name, True) - return + # Take a write lock before checking for existence. + with spack.installed_db.write_transaction(): + spec = specs[0] + if not spack.db.exists(spec.name): + tty.warn("No such package: %s" % spec.name) + create = tty.get_yes_or_no("Create this package?", default=False) + if not create: + tty.msg("Exiting without creating.") + sys.exit(1) + else: + tty.msg("Running 'spack edit -f %s'" % spec.name) + edit_package(spec.name, True) + return - if not spec.version.concrete: - tty.die("spack diy spec must have a single, concrete version.") + if not spec.version.concrete: + tty.die("spack diy spec must have a single, concrete version.") - spec.concretize() - package = spack.db.get(spec) + spec.concretize() + package = spack.db.get(spec) - if package.installed: - tty.error("Already installed in %s" % package.prefix) - tty.msg("Uninstall or try adding a version suffix for this DIY build.") - sys.exit(1) + if package.installed: + tty.error("Already installed in %s" % package.prefix) + tty.msg("Uninstall or try adding a version suffix for this DIY build.") + sys.exit(1) - # Forces the build to run out of the current directory. - package.stage = DIYStage(os.getcwd()) + # Forces the build to run out of the current directory. + package.stage = DIYStage(os.getcwd()) - # TODO: make this an argument, not a global. - spack.do_checksum = False + # TODO: make this an argument, not a global. + spack.do_checksum = False - package.do_install( - keep_prefix=args.keep_prefix, - ignore_deps=args.ignore_deps, - keep_stage=True) # don't remove source dir for DIY. + package.do_install( + keep_prefix=args.keep_prefix, + ignore_deps=args.ignore_deps, + keep_stage=True) # don't remove source dir for DIY. diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index fc8e6842c3..7cadc424b0 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -80,7 +80,7 @@ def extensions(parser, args): colify(ext.name for ext in extensions) # List specs of installed extensions. 
- installed = [s.spec for s in spack.db.installed_extensions_for(spec)] + installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: tty.msg("None installed.") diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index 3c993990b1..0b0dd6ef6f 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -55,6 +55,16 @@ def setup_parser(subparser): help='Show dependency hashes as well as versions.') subparser.add_argument( + '-u', '--unknown', action='store_true', dest='unknown', + help='Show only specs Spack does not have a package for.') + subparser.add_argument( + '-m', '--missing', action='store_true', dest='missing', + help='Show missing dependencies as well as installed specs.') + subparser.add_argument( + '-M', '--only-missing', action='store_true', dest='only_missing', + help='Show only missing dependencies.') + + subparser.add_argument( 'query_specs', nargs=argparse.REMAINDER, help='optional specs to filter results') @@ -113,6 +123,7 @@ def display_specs(specs, **kwargs): if hashes: string += gray_hash(s, hlen) + ' ' string += s.format('$-_$@$+', color=True) + return string colify(fmt(s) for s in specs) @@ -136,11 +147,21 @@ def find(parser, args): if not query_specs: return + # Set up query arguments. + installed, known = True, any + if args.only_missing: + installed = False + elif args.missing: + installed = any + if args.unknown: + known = False + q_args = { 'installed' : installed, 'known' : known } + # Get all the specs the user asked for if not query_specs: - specs = set(spack.db.installed_package_specs()) + specs = set(spack.installed_db.query(**q_args)) else: - results = [set(spack.db.get_installed(qs)) for qs in query_specs] + results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs] specs = set.union(*results) if not args.mode: diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py index c6209523f0..085e4db44d 100644 --- a/lib/spack/spack/cmd/info.py +++ b/lib/spack/spack/cmd/info.py @@ -65,11 +65,21 @@ def print_text_info(pkg): print "None" else: pad = padder(pkg.variants, 4) + + maxv = max(len(v) for v in sorted(pkg.variants)) + fmt = "%%-%ss%%-10s%%s" % (maxv + 4) + + print " " + fmt % ('Name', 'Default', 'Description') + print for name in sorted(pkg.variants): v = pkg.variants[name] - print " %s%s" % ( - pad(('+' if v.default else '-') + name + ':'), - "\n".join(textwrap.wrap(v.description))) + default = 'on' if v.default else 'off' + + lines = textwrap.wrap(v.description) + lines[1:] = [" " + (" " * maxv) + l for l in lines[1:]] + desc = "\n".join(lines) + + print " " + fmt % (name, default, desc) print print "Dependencies:" diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index acb688a092..836a6260c8 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -71,10 +71,11 @@ def install(parser, args): specs = spack.cmd.parse_specs(args.packages, concretize=True) for spec in specs: package = spack.db.get(spec) - package.do_install( - keep_prefix=args.keep_prefix, - keep_stage=args.keep_stage, - ignore_deps=args.ignore_deps, - make_jobs=args.jobs, - verbose=args.verbose, - fake=args.fake) + with spack.installed_db.write_transaction(): + package.do_install( + keep_prefix=args.keep_prefix, + keep_stage=args.keep_stage, + ignore_deps=args.ignore_deps, + make_jobs=args.jobs, + verbose=args.verbose, + fake=args.fake) diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py index 
34f0855a50..654b0cb2fa 100644 --- a/lib/spack/spack/cmd/module.py +++ b/lib/spack/spack/cmd/module.py @@ -65,7 +65,7 @@ def module_find(mtype, spec_array): tty.die("You can only pass one spec.") spec = specs[0] - specs = [s for s in spack.db.installed_package_specs() if s.satisfies(spec)] + specs = spack.installed_db.query(spec) if len(specs) == 0: tty.die("No installed packages match spec %s" % spec) @@ -86,7 +86,7 @@ def module_find(mtype, spec_array): def module_refresh(): """Regenerate all module files for installed packages known to spack (some packages may no longer exist).""" - specs = [s for s in spack.db.installed_known_package_specs()] + specs = [s for s in spack.installed_db.query(installed=True, known=True)] for name, cls in module_types.items(): tty.msg("Regenerating %s module files." % name) diff --git a/lib/spack/spack/cmd/reindex.py b/lib/spack/spack/cmd/reindex.py new file mode 100644 index 0000000000..b584729ea4 --- /dev/null +++ b/lib/spack/spack/cmd/reindex.py @@ -0,0 +1,31 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from external import argparse +import spack + +description = "Rebuild Spack's package database." + +def reindex(parser, args): + spack.installed_db.reindex(spack.install_layout) diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py new file mode 100644 index 0000000000..68b761d5dc --- /dev/null +++ b/lib/spack/spack/cmd/test-install.py @@ -0,0 +1,211 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from external import argparse +import xml.etree.ElementTree as ET +import itertools +import re +import os +import codecs + +import llnl.util.tty as tty +from llnl.util.filesystem import * + +import spack +from spack.build_environment import InstallError +from spack.fetch_strategy import FetchError +import spack.cmd + +description = "Treat package installations as unit tests and output formatted test results" + +def setup_parser(subparser): + subparser.add_argument( + '-j', '--jobs', action='store', type=int, + help="Explicitly set number of make jobs. Default is #cpus.") + + subparser.add_argument( + '-n', '--no-checksum', action='store_true', dest='no_checksum', + help="Do not check packages against checksum") + + subparser.add_argument( + '-o', '--output', action='store', help="test output goes in this file") + + subparser.add_argument( + 'package', nargs=argparse.REMAINDER, help="spec of package to install") + + +class JunitResultFormat(object): + def __init__(self): + self.root = ET.Element('testsuite') + self.tests = [] + + def add_test(self, buildId, testResult, buildInfo=None): + self.tests.append((buildId, testResult, buildInfo)) + + def write_to(self, stream): + self.root.set('tests', '{0}'.format(len(self.tests))) + for buildId, testResult, buildInfo in self.tests: + testcase = ET.SubElement(self.root, 'testcase') + testcase.set('classname', buildId.name) + testcase.set('name', buildId.stringId()) + if testResult == TestResult.FAILED: + failure = ET.SubElement(testcase, 'failure') + failure.set('type', "Build Error") + failure.text = buildInfo + elif testResult == TestResult.SKIPPED: + skipped = ET.SubElement(testcase, 'skipped') + skipped.set('type', "Skipped Build") + skipped.text = buildInfo + ET.ElementTree(self.root).write(stream) + + +class TestResult(object): + PASSED = 0 + FAILED = 1 + SKIPPED = 2 + + +class BuildId(object): + def __init__(self, spec): + self.name = spec.name + self.version = spec.version + self.hashId = spec.dag_hash() + + def stringId(self): + return "-".join(str(x) for x in (self.name, self.version, self.hashId)) + + def __hash__(self): + return hash((self.name, self.version, self.hashId)) + + def __eq__(self, other): + if not isinstance(other, BuildId): + return False + + return ((self.name, self.version, self.hashId) == + (other.name, other.version, other.hashId)) + + +def fetch_log(path): + if not os.path.exists(path): + return list() + with codecs.open(path, 'rb', 'utf-8') as F: + return list(line.strip() for line in F.readlines()) + + +def failed_dependencies(spec): + return set(childSpec for childSpec in spec.dependencies.itervalues() if not + spack.db.get(childSpec).installed) + + +def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log): + # Post-order traversal is not strictly required but it makes sense to output + # tests for dependencies first. 
+ for spec in topSpec.traverse(order='post'): + if spec not in newInstalls: + continue + + failedDeps = failed_dependencies(spec) + package = spack.db.get(spec) + if failedDeps: + result = TestResult.SKIPPED + dep = iter(failedDeps).next() + depBID = BuildId(dep) + errOutput = "Skipped due to failed dependency: {0}".format( + depBID.stringId()) + elif (not package.installed) and (not package.stage.source_path): + result = TestResult.FAILED + errOutput = "Failure to fetch package resources." + elif not package.installed: + result = TestResult.FAILED + lines = getLogFunc(package.build_log_path) + errMessages = list(line for line in lines if + re.search('error:', line, re.IGNORECASE)) + errOutput = errMessages if errMessages else lines[-10:] + errOutput = '\n'.join(itertools.chain( + [spec.to_yaml(), "Errors:"], errOutput, + ["Build Log:", package.build_log_path])) + else: + result = TestResult.PASSED + errOutput = None + + bId = BuildId(spec) + output.add_test(bId, result, errOutput) + + +def test_install(parser, args): + if not args.package: + tty.die("install requires a package argument") + + if args.jobs is not None: + if args.jobs <= 0: + tty.die("The -j option must be a positive integer!") + + if args.no_checksum: + spack.do_checksum = False # TODO: remove this global. + + specs = spack.cmd.parse_specs(args.package, concretize=True) + if len(specs) > 1: + tty.die("Only 1 top-level package can be specified") + topSpec = iter(specs).next() + + newInstalls = set() + for spec in topSpec.traverse(): + package = spack.db.get(spec) + if not package.installed: + newInstalls.add(spec) + + if not args.output: + bId = BuildId(topSpec) + outputDir = join_path(os.getcwd(), "test-output") + if not os.path.exists(outputDir): + os.mkdir(outputDir) + outputFpath = join_path(outputDir, "test-{0}.xml".format(bId.stringId())) + else: + outputFpath = args.output + + for spec in topSpec.traverse(order='post'): + # Calling do_install for the top-level package would be sufficient but + # this attempts to keep going if any package fails (other packages which + # are not dependents may succeed) + package = spack.db.get(spec) + if (not failed_dependencies(spec)) and (not package.installed): + try: + package.do_install( + keep_prefix=False, + keep_stage=True, + ignore_deps=False, + make_jobs=args.jobs, + verbose=True, + fake=False) + except InstallError: + pass + except FetchError: + pass + + jrf = JunitResultFormat() + handled = {} + create_test_output(topSpec, newInstalls, jrf) + + with open(outputFpath, 'wb') as F: + jrf.write_to(F) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index aa62510fed..e80f2d2636 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -53,51 +53,52 @@ def uninstall(parser, args): if not args.packages: tty.die("uninstall requires at least one package argument.") - specs = spack.cmd.parse_specs(args.packages) + with spack.installed_db.write_transaction(): + specs = spack.cmd.parse_specs(args.packages) - # For each spec provided, make sure it refers to only one package. - # Fail and ask user to be unambiguous if it doesn't - pkgs = [] - for spec in specs: - matching_specs = spack.db.get_installed(spec) - if not args.all and len(matching_specs) > 1: - tty.error("%s matches multiple packages:" % spec) - print - display_specs(matching_specs, long=True) - print - print "You can either:" - print " a) Use a more specific spec, or" - print " b) use spack uninstall -a to uninstall ALL matching specs." 
- sys.exit(1) + # For each spec provided, make sure it refers to only one package. + # Fail and ask user to be unambiguous if it doesn't + pkgs = [] + for spec in specs: + matching_specs = spack.installed_db.query(spec) + if not args.all and len(matching_specs) > 1: + tty.error("%s matches multiple packages:" % spec) + print + display_specs(matching_specs, long=True) + print + print "You can either:" + print " a) Use a more specific spec, or" + print " b) use spack uninstall -a to uninstall ALL matching specs." + sys.exit(1) - if len(matching_specs) == 0: - if args.force: continue - tty.die("%s does not match any installed packages." % spec) + if len(matching_specs) == 0: + if args.force: continue + tty.die("%s does not match any installed packages." % spec) - for s in matching_specs: - try: - # should work if package is known to spack - pkgs.append(s.package) + for s in matching_specs: + try: + # should work if package is known to spack + pkgs.append(s.package) - except spack.packages.UnknownPackageError, e: - # The package.py file has gone away -- but still want to uninstall. - spack.Package(s).do_uninstall(force=True) + except spack.packages.UnknownPackageError, e: + # The package.py file has gone away -- but still want to uninstall. + spack.Package(s).do_uninstall(force=True) - # Sort packages to be uninstalled by the number of installed dependents - # This ensures we do things in the right order - def num_installed_deps(pkg): - return len(pkg.installed_dependents) - pkgs.sort(key=num_installed_deps) + # Sort packages to be uninstalled by the number of installed dependents + # This ensures we do things in the right order + def num_installed_deps(pkg): + return len(pkg.installed_dependents) + pkgs.sort(key=num_installed_deps) - # Uninstall packages in order now. - for pkg in pkgs: - try: - pkg.do_uninstall(force=args.force) - except PackageStillNeededError, e: - tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True)) - print - print "The following packages depend on it:" - display_specs(e.dependents, long=True) - print - print "You can use spack uninstall -f to force this action." - sys.exit(1) + # Uninstall packages in order now. + for pkg in pkgs: + try: + pkg.do_uninstall(force=args.force) + except PackageStillNeededError, e: + tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True)) + print + print "The following packages depend on it:" + display_specs(e.dependents, long=True) + print + print "You can use spack uninstall -f to force this action." + sys.exit(1) diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py index 646050d267..1e800a8979 100644 --- a/lib/spack/spack/compiler.py +++ b/lib/spack/spack/compiler.py @@ -227,14 +227,32 @@ class Compiler(object): for d in dicts: all_keys.update(d) - compilers = [] + compilers = {} for k in all_keys: ver, pre, suf = k + + # Skip compilers with unknown version. + if ver == 'unknown': + continue + paths = tuple(pn[k] if k in pn else None for pn in dicts) spec = spack.spec.CompilerSpec(cls.name, ver) - compilers.append(cls(spec, *paths)) - return compilers + if ver in compilers: + prev = compilers[ver] + + # prefer the one with more compilers. + prev_paths = [prev.cc, prev.cxx, prev.f77, prev.fc] + newcount = len([p for p in paths if p is not None]) + prevcount = len([p for p in prev_paths if p is not None]) + + # Don't add if it's not an improvement over prev compiler. 
+ if newcount <= prevcount: + continue + + compilers[ver] = cls(spec, *paths) + + return list(compilers.values()) def __repr__(self): diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py new file mode 100644 index 0000000000..e0c14a0455 --- /dev/null +++ b/lib/spack/spack/database.py @@ -0,0 +1,628 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +"""Spack's installation tracking database. + +The database serves two purposes: + + 1. It implements a cache on top of a potentially very large Spack + directory hierarchy, speeding up many operations that would + otherwise require filesystem access. + + 2. It will allow us to track external installations as well as lost + packages and their dependencies. + +Prior ot the implementation of this store, a direcotry layout served +as the authoritative database of packages in Spack. This module +provides a cache and a sanity checking mechanism for what is in the +filesystem. + +""" +import os +import time +import socket + +from external import yaml +from external.yaml.error import MarkedYAMLError, YAMLError + +import llnl.util.tty as tty +from llnl.util.filesystem import * +from llnl.util.lock import * + +import spack.spec +from spack.version import Version +from spack.spec import Spec +from spack.error import SpackError + +# DB goes in this directory underneath the root +_db_dirname = '.spack-db' + +# DB version. This is stuck in the DB file to track changes in format. +_db_version = Version('0.9') + +# Default timeout for spack database locks is 5 min. +_db_lock_timeout = 60 + + +def _autospec(function): + """Decorator that automatically converts the argument of a single-arg + function to a Spec.""" + def converter(self, spec_like, *args, **kwargs): + if not isinstance(spec_like, spack.spec.Spec): + spec_like = spack.spec.Spec(spec_like) + return function(self, spec_like, *args, **kwargs) + return converter + + +class InstallRecord(object): + """A record represents one installation in the DB. + + The record keeps track of the spec for the installation, its + install path, AND whether or not it is installed. 
We need the + installed flag in case a user either: + + a) blew away a directory, or + b) used spack uninstall -f to get rid of it + + If, in either case, the package was removed but others still + depend on it, we still need to track its spec, so we don't + actually remove from the database until a spec has no installed + dependents left. + + """ + def __init__(self, spec, path, installed, ref_count=0): + self.spec = spec + self.path = str(path) + self.installed = bool(installed) + self.ref_count = ref_count + + def to_dict(self): + return { 'spec' : self.spec.to_node_dict(), + 'path' : self.path, + 'installed' : self.installed, + 'ref_count' : self.ref_count } + + @classmethod + def from_dict(cls, spec, dictionary): + d = dictionary + return InstallRecord(spec, d['path'], d['installed'], d['ref_count']) + + +class Database(object): + def __init__(self, root, db_dir=None): + """Create a Database for Spack installations under ``root``. + + A Database is a cache of Specs data from ``$prefix/spec.yaml`` + files in Spack installation directories. + + By default, Database files (data and lock files) are stored + under ``root/.spack-db``, which is created if it does not + exist. This is the ``db_dir``. + + The Database will attempt to read an ``index.yaml`` file in + ``db_dir``. If it does not find one, it will be created when + needed by scanning the entire Database root for ``spec.yaml`` + files according to Spack's ``DirectoryLayout``. + + Caller may optionally provide a custom ``db_dir`` parameter + where data will be stored. This is intended to be used for + testing the Database class. + + """ + self.root = root + + if db_dir is None: + # If the db_dir is not provided, default to within the db root. + self._db_dir = join_path(self.root, _db_dirname) + else: + # Allow customizing the database directory location for testing. + self._db_dir = db_dir + + # Set up layout of database files within the db dir + self._index_path = join_path(self._db_dir, 'index.yaml') + self._lock_path = join_path(self._db_dir, 'lock') + + # Create needed directories and files + if not os.path.exists(self._db_dir): + mkdirp(self._db_dir) + + if not os.path.exists(self._lock_path): + touch(self._lock_path) + + # initialize rest of state. + self.lock = Lock(self._lock_path) + self._data = {} + + + def write_transaction(self, timeout=_db_lock_timeout): + """Get a write lock context manager for use in a `with` block.""" + return WriteTransaction(self, self._read, self._write, timeout) + + + def read_transaction(self, timeout=_db_lock_timeout): + """Get a read lock context manager for use in a `with` block.""" + return ReadTransaction(self, self._read, None, timeout) + + + def _write_to_yaml(self, stream): + """Write out the database to a YAML file. + + This function does not do any locking or transactions. + """ + # map from per-spec hash code to installation record. + installs = dict((k, v.to_dict()) for k, v in self._data.items()) + + # database includes installation list and version. + + # NOTE: this DB version does not handle multiple installs of + # the same spec well. If there are 2 identical specs with + # different paths, it can't differentiate. + # TODO: fix this before we support multiple install locations.
+ database = { + 'database' : { + 'installs' : installs, + 'version' : str(_db_version) + } + } + + try: + return yaml.dump(database, stream=stream, default_flow_style=False) + except YAMLError as e: + raise SpackYAMLError("error writing YAML database:", str(e)) + + + def _read_spec_from_yaml(self, hash_key, installs, parent_key=None): + """Recursively construct a spec from a hash in a YAML database. + + Does not do any locking. + """ + if hash_key not in installs: + parent = read_spec(installs[parent_key]['path']) + + spec_dict = installs[hash_key]['spec'] + + # Build spec from dict first. + spec = Spec.from_node_dict(spec_dict) + + # Add dependencies from other records in the install DB to + # form a full spec. + for dep_hash in spec_dict[spec.name]['dependencies'].values(): + child = self._read_spec_from_yaml(dep_hash, installs, hash_key) + spec._add_dependency(child) + + return spec + + + def _read_from_yaml(self, stream): + """ + Fill database from YAML, do not maintain old data + Translate the spec portions from node-dict form to spec form + + Does not do any locking. + """ + try: + if isinstance(stream, basestring): + with open(stream, 'r') as f: + yfile = yaml.load(f) + else: + yfile = yaml.load(stream) + + except MarkedYAMLError as e: + raise SpackYAMLError("error parsing YAML database:", str(e)) + + if yfile is None: + return + + def check(cond, msg): + if not cond: raise CorruptDatabaseError(self._index_path, msg) + + check('database' in yfile, "No 'database' attribute in YAML.") + + # High-level file checks + db = yfile['database'] + check('installs' in db, "No 'installs' in YAML DB.") + check('version' in db, "No 'version' in YAML DB.") + + # TODO: better version checking semantics. + version = Version(db['version']) + if version != _db_version: + raise InvalidDatabaseVersionError(_db_version, version) + + # Iterate through database and check each record. + installs = db['installs'] + data = {} + for hash_key, rec in installs.items(): + try: + # This constructs a spec DAG from the list of all installs + spec = self._read_spec_from_yaml(hash_key, installs) + + # Validate the spec by ensuring the stored and actual + # hashes are the same. + spec_hash = spec.dag_hash() + if not spec_hash == hash_key: + tty.warn("Hash mismatch in database: %s -> spec with hash %s" + % (hash_key, spec_hash)) + continue # TODO: is skipping the right thing to do? + + # Insert the brand new spec in the database. Each + # spec has its own copies of its dependency specs. + # TODO: would a more immutable spec implementation simplify this? + data[hash_key] = InstallRecord.from_dict(spec, rec) + + except Exception as e: + tty.warn("Invalid database record:", + "file: %s" % self._index_path, + "hash: %s" % hash_key, + "cause: %s" % str(e)) + raise + + self._data = data + + + def reindex(self, directory_layout): + """Build database index from scratch based on a directory layout. + + Locks the DB if it isn't locked already. + + """ + with self.write_transaction(): + old_data = self._data + try: + self._data = {} + + # Ask the directory layout to traverse the filesystem. + for spec in directory_layout.all_specs(): + # Create a spec for each known package and add it. + path = directory_layout.path_for_spec(spec) + self._add(spec, path, directory_layout) + + self._check_ref_counts() + + except: + # If anything explodes, restore old data, skip write. + self._data = old_data + raise + + + def _check_ref_counts(self): + """Ensure consistency of reference counts in the DB.
+ + Raise an AssertionError if something is amiss. + + Does no locking. + """ + counts = {} + for key, rec in self._data.items(): + counts.setdefault(key, 0) + for dep in rec.spec.dependencies.values(): + dep_key = dep.dag_hash() + counts.setdefault(dep_key, 0) + counts[dep_key] += 1 + + for rec in self._data.values(): + key = rec.spec.dag_hash() + expected = counts[key] + found = rec.ref_count + if not expected == found: + raise AssertionError( + "Invalid ref_count: %s: %d (expected %d), in DB %s." + % (key, found, expected, self._index_path)) + + + def _write(self): + """Write the in-memory database index to its file path. + + Does no locking. + + """ + temp_file = self._index_path + ( + '.%s.%s.temp' % (socket.getfqdn(), os.getpid())) + + # Write a temporary database file, then move it into place + try: + with open(temp_file, 'w') as f: + self._write_to_yaml(f) + os.rename(temp_file, self._index_path) + except: + # Clean up temp file if something goes wrong. + if os.path.exists(temp_file): + os.remove(temp_file) + raise + + + def _read(self): + """Re-read Database from the data in the set location. + + This does no locking. + """ + if os.path.isfile(self._index_path): + # Read from YAML file if a database exists + self._read_from_yaml(self._index_path) + + else: + # The file doesn't exist, try to traverse the directory. + # reindex() takes its own write lock, so no lock here. + self.reindex(spack.install_layout) + + + def _add(self, spec, path, directory_layout=None): + """Add an install record for spec at path to the database. + + This assumes that the spec is not already installed. It + updates the ref counts on dependencies of the spec in the DB. + + This operation is in-memory, and does not lock the DB. + + """ + key = spec.dag_hash() + if key in self._data: + rec = self._data[key] + rec.installed = True + + # TODO: this overwrites a previous install path (when path != + # self._data[key].path), and the old path still has a + # dependent in the DB. We could consider re-RPATH-ing the + # dependents. This case is probably infrequent and may not be + # worth fixing, but this is where we can discover it. + rec.path = path + + else: + self._data[key] = InstallRecord(spec, path, True) + for dep in spec.dependencies.values(): + self._increment_ref_count(dep, directory_layout) + + + def _increment_ref_count(self, spec, directory_layout=None): + """Recursively examine dependencies and update their DB entries.""" + key = spec.dag_hash() + if key not in self._data: + installed = False + path = None + if directory_layout: + path = directory_layout.path_for_spec(spec) + installed = os.path.isdir(path) + + self._data[key] = InstallRecord(spec.copy(), path, installed) + + for dep in spec.dependencies.values(): + self._increment_ref_count(dep) + + self._data[key].ref_count += 1 + + @_autospec + def add(self, spec, path): + """Add spec at path to database, locking and reading DB to sync. + + ``add()`` will lock and read from the DB on disk. + + """ + # TODO: ensure that spec is concrete? + # Entire add is transactional. + with self.write_transaction(): + self._add(spec, path) + + + def _get_matching_spec_key(self, spec, **kwargs): + """Get the exact spec OR get a single spec that matches.""" + key = spec.dag_hash() + if not key in self._data: + match = self.query_one(spec, **kwargs) + if match: + return match.dag_hash() + raise KeyError("No such spec in database!
%s" % spec) + return key + + + @_autospec + def get_record(self, spec, **kwargs): + key = self._get_matching_spec_key(spec, **kwargs) + return self._data[key] + + + def _decrement_ref_count(self, spec): + key = spec.dag_hash() + + if not key in self._data: + # TODO: print something here? DB is corrupt, but + # not much we can do. + return + + rec = self._data[key] + rec.ref_count -= 1 + + if rec.ref_count == 0 and not rec.installed: + del self._data[key] + for dep in spec.dependencies.values(): + self._decrement_ref_count(dep) + + + def _remove(self, spec): + """Non-locking version of remove(); does real work. + """ + key = self._get_matching_spec_key(spec) + rec = self._data[key] + + if rec.ref_count > 0: + rec.installed = False + return rec.spec + + del self._data[key] + for dep in rec.spec.dependencies.values(): + self._decrement_ref_count(dep) + + # Returns the concrete spec so we know it in the case where a + # query spec was passed in. + return rec.spec + + + @_autospec + def remove(self, spec): + """Removes a spec from the database. To be called on uninstall. + + Reads the database, then: + + 1. Marks the spec as not installed. + 2. Removes the spec if it has no more dependents. + 3. If removed, recursively updates dependencies' ref counts + and remvoes them if they are no longer needed. + + """ + # Take a lock around the entire removal. + with self.write_transaction(): + return self._remove(spec) + + + @_autospec + def installed_extensions_for(self, extendee_spec): + """ + Return the specs of all packages that extend + the given spec + """ + for s in self.query(): + try: + if s.package.extends(extendee_spec): + yield s.package + except UnknownPackageError as e: + continue + # skips unknown packages + # TODO: conditional way to do this instead of catching exceptions + + + def query(self, query_spec=any, known=any, installed=True): + """Run a query on the database. + + ``query_spec`` + Queries iterate through specs in the database and return + those that satisfy the supplied ``query_spec``. If + query_spec is `any`, This will match all specs in the + database. If it is a spec, we'll evaluate + ``spec.satisfies(query_spec)``. + + The query can be constrained by two additional attributes: + + ``known`` + Possible values: True, False, any + + Specs that are "known" are those for which Spack can + locate a ``package.py`` file -- i.e., Spack "knows" how to + install them. Specs that are unknown may represent + packages that existed in a previous version of Spack, but + have since either changed their name or been removed. + + ``installed`` + Possible values: True, False, any + + Specs for which a prefix exists are "installed". A spec + that is NOT installed will be in the database if some + other spec depends on it but its installation has gone + away since Spack installed it. + + TODO: Specs are a lot like queries. Should there be a + wildcard spec object, and should specs have attributes + like installed and known that can be queried? Or are + these really special cases that only belong here? + + """ + with self.read_transaction(): + results = [] + for key, rec in self._data.items(): + if installed is not any and rec.installed != installed: + continue + if known is not any and spack.db.exists(rec.spec.name) != known: + continue + if query_spec is any or rec.spec.satisfies(query_spec): + results.append(rec.spec) + + return sorted(results) + + + def query_one(self, query_spec, known=any, installed=True): + """Query for exactly one spec that matches the query spec. 
+ + Raises an assertion error if more than one spec matches the + query. Returns None if no installed package matches. + + """ + concrete_specs = self.query(query_spec, known, installed) + assert len(concrete_specs) <= 1 + return concrete_specs[0] if concrete_specs else None + + + def missing(self, spec): + with self.read_transaction(): + key = spec.dag_hash() + return key in self._data and not self._data[key].installed + + +class _Transaction(object): + """Simple nested transaction context manager that uses a file lock. + + This class can trigger actions when the lock is acquired for the + first time and released for the last. + + Timeout for lock is customizable. + """ + def __init__(self, db, acquire_fn=None, release_fn=None, + timeout=_db_lock_timeout): + self._db = db + self._timeout = timeout + self._acquire_fn = acquire_fn + self._release_fn = release_fn + + def __enter__(self): + if self._enter() and self._acquire_fn: + self._acquire_fn() + + def __exit__(self, type, value, traceback): + if self._exit() and self._release_fn: + self._release_fn() + + +class ReadTransaction(_Transaction): + def _enter(self): + return self._db.lock.acquire_read(self._timeout) + + def _exit(self): + return self._db.lock.release_read() + + +class WriteTransaction(_Transaction): + def _enter(self): + return self._db.lock.acquire_write(self._timeout) + + def _exit(self): + return self._db.lock.release_write() + + +class CorruptDatabaseError(SpackError): + def __init__(self, path, msg=''): + super(CorruptDatabaseError, self).__init__( + "Spack database is corrupt: %s. %s" %(path, msg)) + + +class InvalidDatabaseVersionError(SpackError): + def __init__(self, expected, found): + super(InvalidDatabaseVersionError, self).__init__( + "Expected database version %s but found version %s" + % (expected, found)) diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py index aa2cfd2846..3937aef450 100644 --- a/lib/spack/spack/directives.py +++ b/lib/spack/spack/directives.py @@ -238,13 +238,10 @@ def patch(pkg, url_or_filename, level=1, when=None): if when is None: when = pkg.name when_spec = parse_anonymous_spec(when, pkg.name) - - if when_spec not in pkg.patches: - pkg.patches[when_spec] = [Patch(pkg, pkg.name, url_or_filename, level)] - else: - # if this spec is identical to some other, then append this - # patch to the existing list. - pkg.patches[when_spec].append(Patch(pkg, pkg.name, url_or_filename, level)) + cur_patches = pkg.patches.setdefault(when_spec, []) + # if this spec is identical to some other, then append this + # patch to the existing list. 
+ cur_patches.append(Patch(pkg, url_or_filename, level)) @directive('variants') diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index e61929d8fd..da8f4187cc 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -32,7 +32,6 @@ import tempfile from external import yaml import llnl.util.tty as tty -from llnl.util.lang import memoized from llnl.util.filesystem import join_path, mkdirp from spack.spec import Spec @@ -187,14 +186,9 @@ class YamlDirectoryLayout(DirectoryLayout): def relative_path_for_spec(self, spec): _check_concrete(spec) - enabled_variants = ( - '-' + v.name for v in spec.variants.values() - if v.enabled) - - dir_name = "%s-%s%s-%s" % ( + dir_name = "%s-%s-%s" % ( spec.name, spec.version, - ''.join(enabled_variants), spec.dag_hash(self.hash_len)) path = join_path( @@ -263,7 +257,6 @@ class YamlDirectoryLayout(DirectoryLayout): self.write_spec(spec, spec_file_path) - @memoized def all_specs(self): if not os.path.isdir(self.root): return [] @@ -274,7 +267,6 @@ class YamlDirectoryLayout(DirectoryLayout): return [self.read_spec(s) for s in spec_files] - @memoized def specs_by_hash(self): by_hash = {} for spec in self.all_specs(): diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py index bfa7951a47..b3b24e6105 100644 --- a/lib/spack/spack/error.py +++ b/lib/spack/spack/error.py @@ -55,8 +55,8 @@ class SpackError(Exception): def __str__(self): msg = self.message - if self.long_message: - msg += "\n %s" % self.long_message + if self._long_message: + msg += "\n %s" % self._long_message return msg class UnsupportedPlatformError(SpackError): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 090349685b..39d71bb4b9 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -438,9 +438,16 @@ class Package(object): raise ValueError("Can only get a stage for a concrete package.") if self._stage is None: + # Construct a mirror path (TODO: get this out of package.py) mp = spack.mirror.mirror_archive_path(self.spec) - self._stage = Stage( - self.fetcher, mirror_path=mp, name=self.spec.short_spec) + + # Construct a path where the stage should build.. + s = self.spec + stage_name = "%s-%s-%s" % (s.name, s.version, s.dag_hash()) + + # Build the stage + self._stage = Stage(self.fetcher, mirror_path=mp, name=stage_name) + return self._stage @@ -563,9 +570,12 @@ class Package(object): @property def installed_dependents(self): """Return a list of the specs of all installed packages that depend - on this one.""" + on this one. + + TODO: move this method to database.py? + """ dependents = [] - for spec in spack.db.installed_package_specs(): + for spec in spack.installed_db.query(): if self.name == spec.name: continue for dep in spec.traverse(): @@ -785,6 +795,7 @@ class Package(object): "Manually remove this directory to fix:", self.prefix) + def real_work(): try: tty.msg("Building %s." % self.name) @@ -844,6 +855,10 @@ class Package(object): # Do the build. spack.build_environment.fork(self, real_work) + # note: PARENT of the build process adds the new package to + # the database, so that we don't need to re-read from file. 
+ spack.installed_db.add(self.spec, self.prefix) + # Once everything else is done, run post install hooks spack.hooks.post_install(self) @@ -863,6 +878,14 @@ class Package(object): @property + def build_log_path(self): + if self.installed: + return spack.install_layout.build_log_path(self.spec) + else: + return join_path(self.stage.source_path, 'spack-build.out') + + + @property def module(self): """Use this to add variables to the class's module's scope. This lets us use custom syntax in the install method. @@ -916,6 +939,7 @@ class Package(object): # Uninstalling in Spack only requires removing the prefix. self.remove_prefix() + spack.installed_db.remove(self.spec) tty.msg("Successfully uninstalled %s." % self.spec.short_spec) # Once everything else is done, run post install hooks diff --git a/lib/spack/spack/packages.py b/lib/spack/spack/packages.py index b21316ebf7..6005523bc0 100644 --- a/lib/spack/spack/packages.py +++ b/lib/spack/spack/packages.py @@ -363,6 +363,11 @@ class PackageDB(object): return self._instances[spec] + def purge(self): + """Clear entire package instance cache.""" + self._instances.clear() + + @_autospec def providers_for(self, vpkg_spec): if self._provider_index is None: @@ -396,6 +401,7 @@ class PackageDB(object): """ validate_module_name(pkg_name) pkg_dir = self.dirname_for_package_name(pkg_name) + return join_path(pkg_dir, package_file_name) diff --git a/lib/spack/spack/patch.py b/lib/spack/spack/patch.py index e89cf11b2f..da5fa1646b 100644 --- a/lib/spack/spack/patch.py +++ b/lib/spack/spack/patch.py @@ -41,12 +41,8 @@ class Patch(object): """This class describes a patch to be applied to some expanded source code.""" - def __init__(self, pkg, pkg_name, path_or_url, level): - print pkg, pkg.name, type(pkg) - print "pkg:", dir(pkg.__module__) - print "NAMESPACE", pkg.namespace() - - self.pkg_name = pkg_name + def __init__(self, pkg, path_or_url, level): + self.pkg_name = pkg.name self.path_or_url = path_or_url self.path = None self.url = None diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 0d49b1fa95..5e59f240a4 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -641,7 +641,9 @@ class Spec(object): def dag_hash(self, length=None): - """Return a hash of the entire spec DAG, including connectivity.""" + """ + Return a hash of the entire spec DAG, including connectivity. 
+ """ yaml_text = yaml.dump( self.to_node_dict(), default_flow_style=True, width=sys.maxint) sha = hashlib.sha1(yaml_text) @@ -711,7 +713,7 @@ class Spec(object): try: yfile = yaml.load(stream) except MarkedYAMLError, e: - raise SpackYAMLError("error parsing YMAL spec:", str(e)) + raise SpackYAMLError("error parsing YAML spec:", str(e)) for node in yfile['spec']: name = next(iter(node)) @@ -2012,4 +2014,4 @@ class UnsatisfiableDependencySpecError(UnsatisfiableSpecError): class SpackYAMLError(spack.error.SpackError): def __init__(self, msg, yaml_error): - super(SpackError, self).__init__(msg, str(yaml_error)) + super(SpackYAMLError, self).__init__(msg, str(yaml_error)) diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py index 008c5f0429..78930ecb5b 100644 --- a/lib/spack/spack/stage.py +++ b/lib/spack/spack/stage.py @@ -261,7 +261,8 @@ class Stage(object): tty.debug(e) continue else: - tty.die("All fetchers failed for %s" % self.name) + errMessage = "All fetchers failed for %s" % self.name + raise fs.FetchError(errMessage, None) def check(self): diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 6b3715be6f..0f776bfea4 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -56,7 +56,10 @@ test_names = ['versions', 'spec_yaml', 'optional_deps', 'make_executable', - 'configure_guess'] + 'configure_guess', + 'unit_install', + 'lock', + 'database'] def list_tests(): @@ -76,7 +79,7 @@ def run(names, verbose=False): if test not in test_names: tty.error("%s is not a valid spack test name." % test, "Valid names are:") - colify(test_names, indent=4) + colify(sorted(test_names), indent=4) sys.exit(1) runner = unittest.TextTestRunner(verbosity=verbosity) diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py new file mode 100644 index 0000000000..8416143f2d --- /dev/null +++ b/lib/spack/spack/test/database.py @@ -0,0 +1,352 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+These tests check the database is functioning properly,
+both in memory and in its file
+"""
+import tempfile
+import shutil
+import multiprocessing
+
+from llnl.util.lock import *
+from llnl.util.filesystem import join_path
+
+import spack
+from spack.database import Database
+from spack.directory_layout import YamlDirectoryLayout
+from spack.test.mock_packages_test import *
+
+from llnl.util.tty.colify import colify
+
+def _print_ref_counts():
+    """Print out all ref counts for the graph used here, for debugging"""
+    recs = []
+
+    def add_rec(spec):
+        cspecs = spack.installed_db.query(spec, installed=any)
+
+        if not cspecs:
+            recs.append("[ %-7s ] %-20s-" % ('', spec))
+        else:
+            key = cspecs[0].dag_hash()
+            rec = spack.installed_db.get_record(cspecs[0])
+            recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))
+
+    with spack.installed_db.read_transaction():
+        add_rec('mpileaks ^mpich')
+        add_rec('callpath ^mpich')
+        add_rec('mpich')
+
+        add_rec('mpileaks ^mpich2')
+        add_rec('callpath ^mpich2')
+        add_rec('mpich2')
+
+        add_rec('mpileaks ^zmpi')
+        add_rec('callpath ^zmpi')
+        add_rec('zmpi')
+        add_rec('fake')
+
+        add_rec('dyninst')
+        add_rec('libdwarf')
+        add_rec('libelf')
+
+    colify(recs, cols=3)
+
+
+class DatabaseTest(MockPackagesTest):
+
+    def _mock_install(self, spec):
+        s = Spec(spec)
+        pkg = spack.db.get(s.concretized())
+        pkg.do_install(fake=True)
+
+
+    def _mock_remove(self, spec):
+        specs = spack.installed_db.query(spec)
+        assert(len(specs) == 1)
+        spec = specs[0]
+        spec.package.do_uninstall(spec)
+
+
+    def setUp(self):
+        super(DatabaseTest, self).setUp()
+        #
+        # TODO: make the mockup below easier.
+        #
+
+        # Make a fake install directory
+        self.install_path = tempfile.mkdtemp()
+        self.spack_install_path = spack.install_path
+        spack.install_path = self.install_path
+
+        self.install_layout = YamlDirectoryLayout(self.install_path)
+        self.spack_install_layout = spack.install_layout
+        spack.install_layout = self.install_layout
+
+        # Make fake database and fake install directory.
+        self.installed_db = Database(self.install_path)
+        self.spack_installed_db = spack.installed_db
+        spack.installed_db = self.installed_db
+
+        # make a mock database with some packages installed. Note that
+        # the ref count for dyninst here will be 3, as it's recycled
+        # across each install.
+        #
+        # Here is what the mock DB looks like:
+        #
+        #   o  mpileaks     o  mpileaks'    o  mpileaks''
+        #   |\              |\              |\
+        #   | o  callpath   | o  callpath'  | o  callpath''
+        #   |/|             |/|             |/|
+        #   o |  mpich      o |  mpich2     o |  zmpi
+        #     |               |             o |  fake
+        #     |               |               |
+        #     |               |______________/
+        #     | .____________/
+        #     |/
+        #     o  dyninst
+        #     |\
+        #     | o  libdwarf
+        #     |/
+        #     o  libelf
+        #
+
+        # Transaction used to avoid repeated writes -- see the sketch below.
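Each installation normally acquires the database lock, reads index.yaml, applies its change, and writes the file back, so the three mock installs below would otherwise pay three full lock/read/write cycles. The surrounding write_transaction batches them into one. The shape of the idiom, with db standing in for spack.installed_db and install_one a hypothetical per-spec step:

    with db.write_transaction():   # one exclusive lock acquisition
        for s in ('mpileaks ^mpich', 'mpileaks ^mpich2', 'mpileaks ^zmpi'):
            install_one(s)         # each change applied to the in-memory DB
    # index.yaml is written back and the lock released once, here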
+        with spack.installed_db.write_transaction():
+            self._mock_install('mpileaks ^mpich')
+            self._mock_install('mpileaks ^mpich2')
+            self._mock_install('mpileaks ^zmpi')
+
+
+    def tearDown(self):
+        super(DatabaseTest, self).tearDown()
+        shutil.rmtree(self.install_path)
+        spack.install_path = self.spack_install_path
+        spack.install_layout = self.spack_install_layout
+        spack.installed_db = self.spack_installed_db
+
+
+    def test_005_db_exists(self):
+        """Make sure db cache file exists after creating."""
+        index_file = join_path(self.install_path, '.spack-db', 'index.yaml')
+        lock_file = join_path(self.install_path, '.spack-db', 'lock')
+
+        self.assertTrue(os.path.exists(index_file))
+        self.assertTrue(os.path.exists(lock_file))
+
+
+    def test_010_all_install_sanity(self):
+        """Ensure that the install layout reflects what we think it does."""
+        all_specs = spack.install_layout.all_specs()
+        self.assertEqual(len(all_specs), 13)
+
+        # query specs with multiple configurations
+        mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+        callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+        mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+
+        self.assertEqual(len(mpileaks_specs), 3)
+        self.assertEqual(len(callpath_specs), 3)
+        self.assertEqual(len(mpi_specs), 3)
+
+        # query specs with single configurations
+        dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
+        libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
+        libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
+
+        self.assertEqual(len(dyninst_specs), 1)
+        self.assertEqual(len(libdwarf_specs), 1)
+        self.assertEqual(len(libelf_specs), 1)
+
+        # Query by dependency
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)
+
+
+    def test_015_write_and_read(self):
+        # write and read DB
+        with spack.installed_db.write_transaction():
+            specs = spack.installed_db.query()
+            recs = [spack.installed_db.get_record(s) for s in specs]
+
+        for spec, rec in zip(specs, recs):
+            new_rec = spack.installed_db.get_record(spec)
+            self.assertEqual(new_rec.ref_count, rec.ref_count)
+            self.assertEqual(new_rec.spec, rec.spec)
+            self.assertEqual(new_rec.path, rec.path)
+            self.assertEqual(new_rec.installed, rec.installed)
+
+
+    def _check_db_sanity(self):
+        """Utility function to check db against install layout."""
+        expected = sorted(spack.install_layout.all_specs())
+        actual = sorted(self.installed_db.query())
+
+        self.assertEqual(len(expected), len(actual))
+        for e, a in zip(expected, actual):
+            self.assertEqual(e, a)
+
+
+    def test_020_db_sanity(self):
+        """Make sure query() returns what's actually in the db."""
+        self._check_db_sanity()
+
+
+    def test_030_db_sanity_from_another_process(self):
+        def read_and_modify():
+            self._check_db_sanity()  # check that other process can read DB
+            with self.installed_db.write_transaction():
+                self._mock_remove('mpileaks ^zmpi')
+
+        p = multiprocessing.Process(target=read_and_modify, args=())
+        p.start()
+        p.join()
+
+        # ensure child process change is visible in parent process
+        with self.installed_db.read_transaction():
+            self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)
+
+
+    def test_040_ref_counts(self):
+        """Ensure that we got ref counts right when we read the DB."""
+        self.installed_db._check_ref_counts()
+
+
+    def test_050_basic_query(self):
+        """Ensure that querying the database is consistent with what is installed."""
+        # query everything
+        self.assertEqual(len(spack.installed_db.query()), 13)
+
+        # query specs with multiple configurations
+        mpileaks_specs = self.installed_db.query('mpileaks')
+        callpath_specs = self.installed_db.query('callpath')
+        mpi_specs = self.installed_db.query('mpi')
+
+        self.assertEqual(len(mpileaks_specs), 3)
+        self.assertEqual(len(callpath_specs), 3)
+        self.assertEqual(len(mpi_specs), 3)
+
+        # query specs with single configurations
+        dyninst_specs = self.installed_db.query('dyninst')
+        libdwarf_specs = self.installed_db.query('libdwarf')
+        libelf_specs = self.installed_db.query('libelf')
+
+        self.assertEqual(len(dyninst_specs), 1)
+        self.assertEqual(len(libdwarf_specs), 1)
+        self.assertEqual(len(libelf_specs), 1)
+
+        # Query by dependency
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich')), 1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1)
+
+
+    def _check_remove_and_add_package(self, spec):
+        """Remove a spec from the DB, then add it and make sure everything's
+        still ok once it is added.  This checks that it was
+        removed, that it's back when added again, and that ref
+        counts are consistent.
+        """
+        original = self.installed_db.query()
+        self.installed_db._check_ref_counts()
+
+        # Remove spec
+        concrete_spec = self.installed_db.remove(spec)
+        self.installed_db._check_ref_counts()
+        remaining = self.installed_db.query()
+
+        # ensure spec we removed is gone
+        self.assertEqual(len(original) - 1, len(remaining))
+        self.assertTrue(all(s in original for s in remaining))
+        self.assertTrue(concrete_spec not in remaining)
+
+        # add it back and make sure everything is ok.
+        self.installed_db.add(concrete_spec, "")
+        installed = self.installed_db.query()
+        self.assertEqual(len(installed), len(original))
+
+        # sanity check against directory layout and check ref counts.
+ self._check_db_sanity() + self.installed_db._check_ref_counts() + + + def test_060_remove_and_add_root_package(self): + self._check_remove_and_add_package('mpileaks ^mpich') + + + def test_070_remove_and_add_dependency_package(self): + self._check_remove_and_add_package('dyninst') + + + def test_080_root_ref_counts(self): + rec = self.installed_db.get_record('mpileaks ^mpich') + + # Remove a top-level spec from the DB + self.installed_db.remove('mpileaks ^mpich') + + # record no longer in DB + self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) + + # record's deps have updated ref_counts + self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1) + + # put the spec back + self.installed_db.add(rec.spec, rec.path) + + # record is present again + self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1) + + # dependencies have ref counts updated + self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) + + + def test_090_non_root_ref_counts(self): + mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich') + callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich') + + # "force remove" a non-root spec from the DB + self.installed_db.remove('callpath ^mpich') + + # record still in DB but marked uninstalled + self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), []) + self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1) + + # record and its deps have same ref_counts + self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) + + # remove only dependent of uninstalled callpath record + self.installed_db.remove('mpileaks ^mpich') + + # record and parent are completely gone. + self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) + self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), []) + + # mpich ref count updated properly. + mpich_rec = self.installed_db.get_record('mpich') + self.assertEqual(mpich_rec.ref_count, 0) diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py new file mode 100644 index 0000000000..5664e71b03 --- /dev/null +++ b/lib/spack/spack/test/lock.py @@ -0,0 +1,266 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +These tests ensure that our lock works correctly. +""" +import unittest +import os +import tempfile +import shutil +from multiprocessing import Process + +from llnl.util.lock import * +from llnl.util.filesystem import join_path, touch + +from spack.util.multiproc import Barrier + +# This is the longest a failed test will take, as the barriers will +# time out and raise an exception. +barrier_timeout = 5 + + +class LockTest(unittest.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + self.lock_path = join_path(self.tempdir, 'lockfile') + touch(self.lock_path) + + + def tearDown(self): + shutil.rmtree(self.tempdir, ignore_errors=True) + + + def multiproc_test(self, *functions): + """Order some processes using simple barrier synchronization.""" + b = Barrier(len(functions), timeout=barrier_timeout) + procs = [Process(target=f, args=(b,)) for f in functions] + for p in procs: p.start() + for p in procs: + p.join() + self.assertEqual(p.exitcode, 0) + + + # + # Process snippets below can be composed into tests. + # + def acquire_write(self, barrier): + lock = Lock(self.lock_path) + lock.acquire_write() # grab exclusive lock + barrier.wait() + barrier.wait() # hold the lock until exception raises in other procs. + + def acquire_read(self, barrier): + lock = Lock(self.lock_path) + lock.acquire_read() # grab shared lock + barrier.wait() + barrier.wait() # hold the lock until exception raises in other procs. + + def timeout_write(self, barrier): + lock = Lock(self.lock_path) + barrier.wait() # wait for lock acquire in first process + self.assertRaises(LockError, lock.acquire_write, 0.1) + barrier.wait() + + def timeout_read(self, barrier): + lock = Lock(self.lock_path) + barrier.wait() # wait for lock acquire in first process + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() + + + # + # Test that exclusive locks on other processes time out when an + # exclusive lock is held. + # + def test_write_lock_timeout_on_write(self): + self.multiproc_test(self.acquire_write, self.timeout_write) + + def test_write_lock_timeout_on_write_2(self): + self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write) + + def test_write_lock_timeout_on_write_3(self): + self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write) + + + # + # Test that shared locks on other processes time out when an + # exclusive lock is held. + # + def test_read_lock_timeout_on_write(self): + self.multiproc_test(self.acquire_write, self.timeout_read) + + def test_read_lock_timeout_on_write_2(self): + self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read) + + def test_read_lock_timeout_on_write_3(self): + self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read) + + + # + # Test that exclusive locks time out when shared locks are held. 
+    #
+    def test_write_lock_timeout_on_read(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_on_read_2(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write)
+
+    def test_write_lock_timeout_on_read_3(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write)
+
+
+    #
+    # Test that exclusive locks time out while lots of shared locks are held.
+    #
+    def test_write_lock_timeout_with_multiple_readers_2_1(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_2_2(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_3_1(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_3_2(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+
+
+    #
+    # Longer test case that ensures locks are reusable.  Ordering is
+    # enforced by barriers throughout -- steps are shown with numbers.
+    #
+    def test_complex_acquire_and_release_chain(self):
+        def p1(barrier):
+            lock = Lock(self.lock_path)
+
+            lock.acquire_write()
+            barrier.wait() # ---------------------------------------- 1
+            # others test timeout
+            barrier.wait() # ---------------------------------------- 2
+            lock.release_write() # release and others acquire read
+            barrier.wait() # ---------------------------------------- 3
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 4
+            lock.release_read()
+            barrier.wait() # ---------------------------------------- 5
+
+            # p2 upgrades read to write
+            barrier.wait() # ---------------------------------------- 6
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 7
+            # p2 releases write and read
+            barrier.wait() # ---------------------------------------- 8
+
+            # p3 acquires read
+            barrier.wait() # ---------------------------------------- 9
+            # p3 upgrades read to write
+            barrier.wait() # ---------------------------------------- 10
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 11
+            # p3 releases locks
+            barrier.wait() # ---------------------------------------- 12
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 13
+            lock.release_read()
+
+
+        def p2(barrier):
+            lock = Lock(self.lock_path)
+
+            # p1 acquires write
+            barrier.wait() # ---------------------------------------- 1
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 2
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 3
+            # p1 tests shared read
+            barrier.wait() # ---------------------------------------- 4
+            # others release reads
+            barrier.wait() # ---------------------------------------- 5
+
+            lock.acquire_write() # upgrade read to write
+            barrier.wait() # ---------------------------------------- 6
+            # others test timeout
+            barrier.wait() #
---------------------------------------- 7 + lock.release_write() # release read AND write (need both) + lock.release_read() + barrier.wait() # ---------------------------------------- 8 + + # p3 acquires read + barrier.wait() # ---------------------------------------- 9 + # p3 upgrades read to write + barrier.wait() # ---------------------------------------- 10 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 11 + # p3 releases locks + barrier.wait() # ---------------------------------------- 12 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 13 + lock.release_read() + + + def p3(barrier): + lock = Lock(self.lock_path) + + # p1 acquires write + barrier.wait() # ---------------------------------------- 1 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 2 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 3 + # p1 tests shared read + barrier.wait() # ---------------------------------------- 4 + lock.release_read() + barrier.wait() # ---------------------------------------- 5 + + # p2 upgrades read to write + barrier.wait() # ---------------------------------------- 6 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 7 + # p2 releases write & read + barrier.wait() # ---------------------------------------- 8 + + lock.acquire_read() + barrier.wait() # ---------------------------------------- 9 + lock.acquire_write() + barrier.wait() # ---------------------------------------- 10 + # others test timeout + barrier.wait() # ---------------------------------------- 11 + lock.release_read() # release read AND write in opposite + lock.release_write() # order from before on p2 + barrier.wait() # ---------------------------------------- 12 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 13 + lock.release_read() + + self.multiproc_test(p1, p2, p3) diff --git a/lib/spack/spack/test/unit_install.py b/lib/spack/spack/test/unit_install.py new file mode 100644 index 0000000000..c4b9092f05 --- /dev/null +++ b/lib/spack/spack/test/unit_install.py @@ -0,0 +1,121 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import unittest +import itertools + +import spack +test_install = __import__("spack.cmd.test-install", + fromlist=["BuildId", "create_test_output", "TestResult"]) + +class MockOutput(object): + def __init__(self): + self.results = {} + + def add_test(self, buildId, passed=True, buildInfo=None): + self.results[buildId] = passed + + def write_to(self, stream): + pass + +class MockSpec(object): + def __init__(self, name, version, hashStr=None): + self.dependencies = {} + self.name = name + self.version = version + self.hash = hashStr if hashStr else hash((name, version)) + + def traverse(self, order=None): + allDeps = itertools.chain.from_iterable(i.traverse() for i in + self.dependencies.itervalues()) + return set(itertools.chain([self], allDeps)) + + def dag_hash(self): + return self.hash + + def to_yaml(self): + return "<<<MOCK YAML {0}>>>".format(test_install.BuildId(self).stringId()) + +class MockPackage(object): + def __init__(self, buildLogPath): + self.installed = False + self.build_log_path = buildLogPath + +specX = MockSpec("X", "1.2.0") +specY = MockSpec("Y", "2.3.8") +specX.dependencies['Y'] = specY +pkgX = MockPackage('logX') +pkgY = MockPackage('logY') +bIdX = test_install.BuildId(specX) +bIdY = test_install.BuildId(specY) + +class UnitInstallTest(unittest.TestCase): + """Tests test-install where X->Y""" + + def setUp(self): + super(UnitInstallTest, self).setUp() + + pkgX.installed = False + pkgY.installed = False + + pkgDb = MockPackageDb({specX:pkgX, specY:pkgY}) + spack.db = pkgDb + + def tearDown(self): + super(UnitInstallTest, self).tearDown() + + def test_installing_both(self): + mo = MockOutput() + + pkgX.installed = True + pkgY.installed = True + test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=test_fetch_log) + + self.assertEqual(mo.results, + {bIdX:test_install.TestResult.PASSED, + bIdY:test_install.TestResult.PASSED}) + + def test_dependency_already_installed(self): + mo = MockOutput() + + pkgX.installed = True + pkgY.installed = True + test_install.create_test_output(specX, [specX], mo, getLogFunc=test_fetch_log) + + self.assertEqual(mo.results, {bIdX:test_install.TestResult.PASSED}) + + #TODO: add test(s) where Y fails to install + +class MockPackageDb(object): + def __init__(self, init=None): + self.specToPkg = {} + if init: + self.specToPkg.update(init) + + def get(self, spec): + return self.specToPkg[spec] + +def test_fetch_log(path): + return [] + diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py index 58838306af..6adbfe156d 100644 --- a/lib/spack/spack/url.py +++ b/lib/spack/spack/url.py @@ -209,8 +209,8 @@ def parse_version_offset(path): # e.g. foobar-4.5.1 (r'-((\d+\.)*\d+)$', stem), - # e.g. foobar-4.5.1b - (r'-((\d+\.)*\d+\-?([a-z]|rc|RC|tp|TP)\d*)$', stem), + # e.g. foobar-4.5.1b, foobar4.5RC, foobar.v4.5.1b + (r'[-._]?v?((\d+\.)*\d+[-._]?([a-z]|rc|RC|tp|TP?)\d*)$', stem), # e.g. 
foobar-4.5.0-beta1, or foobar-4.50-beta (r'-((\d+\.)*\d+-beta(\d+)?)$', stem), diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py index 9e045a090f..21cd6f543d 100644 --- a/lib/spack/spack/util/multiproc.py +++ b/lib/spack/spack/util/multiproc.py @@ -27,9 +27,11 @@ This implements a parallel map operation but it can accept more values than multiprocessing.Pool.apply() can. For example, apply() will fail to pickle functions if they're passed indirectly as parameters. """ -from multiprocessing import Process, Pipe +from multiprocessing import Process, Pipe, Semaphore, Value from itertools import izip +__all__ = ['spawn', 'parmap', 'Barrier'] + def spawn(f): def fun(pipe,x): pipe.send(f(x)) @@ -43,3 +45,49 @@ def parmap(f,X): [p.join() for p in proc] return [p.recv() for (p,c) in pipe] + +class Barrier: + """Simple reusable semaphore barrier. + + Python 2.6 doesn't have multiprocessing barriers so we implement this. + + See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41. + """ + def __init__(self, n, timeout=None): + self.n = n + self.to = timeout + self.count = Value('i', 0) + self.mutex = Semaphore(1) + self.turnstile1 = Semaphore(0) + self.turnstile2 = Semaphore(1) + + + def wait(self): + if not self.mutex.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.count.value += 1 + if self.count.value == self.n: + if not self.turnstile2.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile1.release() + self.mutex.release() + + if not self.turnstile1.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile1.release() + + if not self.mutex.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.count.value -= 1 + if self.count.value == 0: + if not self.turnstile1.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile2.release() + self.mutex.release() + + if not self.turnstile2.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile2.release() + + +class BarrierTimeoutError: pass diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py index fa070e6bd5..c77b259d61 100644 --- a/lib/spack/spack/virtual.py +++ b/lib/spack/spack/virtual.py @@ -73,10 +73,8 @@ class ProviderIndex(object): for provided_spec, provider_spec in pkg.provided.iteritems(): if provider_spec.satisfies(spec, deps=False): provided_name = provided_spec.name - if provided_name not in self.providers: - self.providers[provided_name] = {} - provider_map = self.providers[provided_name] + provider_map = self.providers.setdefault(provided_name, {}) if not provided_spec in provider_map: provider_map[provided_spec] = set() @@ -133,9 +131,7 @@ class ProviderIndex(object): if lp_spec.name == rp_spec.name: try: const = lp_spec.copy().constrain(rp_spec,deps=False) - if constrained not in result: - result[constrained] = set() - result[constrained].add(const) + result.setdefault(constrained, set()).add(const) except spack.spec.UnsatisfiableSpecError: continue return result |
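The loosened stem pattern in the url.py hunk above now tolerates ., _, or no separator before the version, plus an optional leading v. A quick demonstration of what the new expression captures, in the same Python 2 style as the surrounding code (illustrative only; the real parse_version_offset computes offsets and tries many alternative patterns):

    import re

    # the broadened version pattern from the url.py hunk above
    version_re = re.compile(r'[-._]?v?((\d+\.)*\d+[-._]?([a-z]|rc|RC|tp|TP?)\d*)$')

    for stem in ('foobar-4.5.1b', 'foobar4.5RC', 'foobar.v4.5.1b'):
        m = version_re.search(stem)
        print stem, '->', m.group(1)   # -> 4.5.1b, 4.5RC, 4.5.1b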
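The Barrier added to multiproc.py is the reusable two-turnstile construction from the cited Downey text: turnstile1 holds everyone until the count reaches n, turnstile2 resets the barrier for the next use, and every semaphore acquire is bounded by the timeout, so a stuck participant surfaces as a BarrierTimeoutError rather than a silent hang. A short usage sketch, under the same Python 2 / multiprocessing assumptions as the lock tests above:

    from multiprocessing import Process
    from spack.util.multiproc import Barrier

    def worker(barrier):
        # per-process setup would run here
        barrier.wait()        # rendezvous: nobody passes until all 3 arrive
        # all processes continue together here

    b = Barrier(3, timeout=5)
    procs = [Process(target=worker, args=(b,)) for _ in range(3)]
    for p in procs: p.start()
    for p in procs: p.join()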