From 7aaad89ba93e3ace4bfd835306850f43218e0ef8 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 24 May 2016 11:50:01 -0700 Subject: Lazily evaluate all_package_names in repository.py - Don't need to list all packages unless we have to. - Only use the list of all packages for existence checks if we have generated it for some other purpose. --- lib/spack/spack/repository.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 2c160a5f45..ae9fd7bee6 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -104,7 +104,7 @@ class RepoPath(object): self.by_namespace = NamespaceTrie() self.by_path = {} - self._all_package_names = [] + self._all_package_names = None self._provider_index = None # If repo_dirs is empty, just use the configuration @@ -163,11 +163,6 @@ class RepoPath(object): self.by_namespace[repo.full_namespace] = repo self.by_path[repo.root] = repo - # add names to the cached name list - new_pkgs = set(repo.all_package_names()) - new_pkgs.update(set(self._all_package_names)) - self._all_package_names = sorted(new_pkgs, key=lambda n:n.lower()) - def put_first(self, repo): """Add repo first in the search path.""" @@ -214,6 +209,12 @@ class RepoPath(object): def all_package_names(self): """Return all unique package names in all repositories.""" + if self._all_package_names is None: + all_pkgs = set() + for repo in self.repos: + for name in repo.all_package_names(): + all_pkgs.add(name) + self._all_package_names = sorted(all_pkgs, key=lambda n:n.lower()) return self._all_package_names @@ -682,10 +683,16 @@ class Repo(object): def exists(self, pkg_name): """Whether a package with the supplied name exists.""" - # This does a binary search in the sorted list. - idx = bisect_left(self.all_package_names(), pkg_name) - return (idx < len(self._all_package_names) and - self._all_package_names[idx] == pkg_name) + if self._all_package_names: + # This does a binary search in the sorted list. + idx = bisect_left(self.all_package_names(), pkg_name) + return (idx < len(self._all_package_names) and + self._all_package_names[idx] == pkg_name) + + # If we haven't generated the full package list, don't. + # Just check whether the file exists. + filename = self.filename_for_package_name(pkg_name) + return os.path.exists(filename) def _get_pkg_module(self, pkg_name): -- cgit v1.2.3-70-g09d2 From 025609c63ffa42bce30b15784bd587805aef809c Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Fri, 27 May 2016 20:50:48 -0500 Subject: More compact YAML formatting for abstract specs. - Don't add empty/absent fields to Spec YAML when they're not there. 
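The serialization pattern the diff below adopts is worth spelling out: build the node dict conditionally, so empty or absent fields never appear in the emitted YAML at all, and treat every key as optional when reading it back. A minimal sketch of the idea, with illustrative names rather than Spack's actual Spec class:

    import yaml

    def to_node_dict(name, params=None, arch=None, deps=None):
        d = {}
        if params:              # omit empty parameter dicts entirely
            d['parameters'] = params
        if arch is not None:    # omit absent architecture
            d['arch'] = arch
        if deps:                # omit empty dependency maps
            d['dependencies'] = deps
        return {name: d}

    # An abstract spec with no details serializes very compactly:
    print(yaml.dump(to_node_dict('mpileaks')))              # mpileaks: {}
    print(yaml.dump(to_node_dict('mpich', arch='x86_64')))  # adds only arch

Older YAML files still load because from_node_dict() below probes each key ('hash', 'version', 'compiler', 'parameters', ...) instead of assuming it is present.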
--- lib/spack/spack/spec.py | 50 ++++++++++++++++++++------------------- lib/spack/spack/test/spec_yaml.py | 3 +-- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 8e44075f42..1300f35ca4 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -904,19 +904,25 @@ class Spec(object): return b32_hash def to_node_dict(self): + d = {} + params = dict((name, v.value) for name, v in self.variants.items()) params.update(dict((name, value) for name, value in self.compiler_flags.items())) - deps = self.dependencies_dict(deptype=('link', 'run')) - d = { - 'parameters': params, - 'arch': self.architecture, - 'dependencies': dict( + + if params: + d['parameters'] = params + + if self.architecture is not None: + d['arch'] = self.architecture + + if self.dependencies: + deps = self.dependencies_dict(deptype=('link', 'run')) + d['dependencies'] = dict( (name, { 'hash': dspec.spec.dag_hash(), 'type': [str(s) for s in dspec.deptypes]}) for name, dspec in deps.items()) - } # Older concrete specs do not have a namespace. Omit for # consistent hashing. @@ -932,9 +938,9 @@ class Spec(object): if self.compiler: d.update(self.compiler.to_dict()) - else: - d['compiler'] = None - d.update(self.versions.to_dict()) + + if self.versions: + d.update(self.versions.to_dict()) return {self.name: d} @@ -954,17 +960,17 @@ class Spec(object): spec = Spec(name) spec.namespace = node.get('namespace', None) - spec.versions = VersionList.from_dict(node) + spec._hash = node.get('hash', None) - if 'hash' in node: - spec._hash = node['hash'] + if 'version' in node or 'versions' in node: + spec.versions = VersionList.from_dict(node) spec.architecture = spack.architecture.arch_from_dict(node['arch']) - if node['compiler'] is None: - spec.compiler = None - else: + if 'compiler' in node: spec.compiler = CompilerSpec.from_dict(node) + else: + spec.compiler = None if 'parameters' in node: for name, value in node['parameters'].items(): @@ -972,14 +978,12 @@ class Spec(object): spec.compiler_flags[name] = value else: spec.variants[name] = VariantSpec(name, value) + elif 'variants' in node: for name, value in node['variants'].items(): spec.variants[name] = VariantSpec(name, value) for name in FlagMap.valid_compiler_flags(): spec.compiler_flags[name] = [] - else: - raise SpackRecordError( - "Did not find a valid format for variants in YAML file") # Don't read dependencies here; from_node_dict() is used by # from_yaml() to read the root *and* each dependency spec. @@ -1037,6 +1041,10 @@ class Spec(object): for node in nodes: # get dependency dict from the node. name = next(iter(node)) + + if 'dependencies' not in node[name]: + continue + yaml_deps = node[name]['dependencies'] for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps): # Fill in dependencies by looking them up by name in deps dict @@ -2824,12 +2832,6 @@ class SpackYAMLError(spack.error.SpackError): super(SpackYAMLError, self).__init__(msg, str(yaml_error)) -class SpackRecordError(spack.error.SpackError): - - def __init__(self, msg): - super(SpackRecordError, self).__init__(msg) - - class AmbiguousHashError(SpecError): def __init__(self, msg, *specs): diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py index 0230fc203a..fc0ce0b2f3 100644 --- a/lib/spack/spack/test/spec_yaml.py +++ b/lib/spack/spack/test/spec_yaml.py @@ -30,7 +30,7 @@ YAML format preserves DAG information in the spec.
from spack.spec import Spec from spack.test.mock_packages_test import * -class SpecDagTest(MockPackagesTest): +class SpecYamlTest(MockPackagesTest): def check_yaml_round_trip(self, spec): yaml_text = spec.to_yaml() @@ -64,7 +64,6 @@ class SpecDagTest(MockPackagesTest): def test_yaml_subdag(self): spec = Spec('mpileaks^mpich+debug') spec.concretize() - yaml_spec = Spec.from_yaml(spec.to_yaml()) for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'): -- cgit v1.2.3-70-g09d2 From bf028990e70927872cf506cf88bbf5a927ced2c4 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Fri, 27 May 2016 21:20:40 -0500 Subject: Make ProviderIndex yaml-izable. - allow a provider index to be stored and re-read. --- lib/spack/spack/repository.py | 1 + lib/spack/spack/test/__init__.py | 52 ++++++++++++++++++++++++++++++------- lib/spack/spack/test/virtual.py | 43 +++++++++++++++++++++++++++++++ lib/spack/spack/virtual.py | 55 +++++++++++++++++++++++++++++++++++++--- 4 files changed, 138 insertions(+), 13 deletions(-) create mode 100644 lib/spack/spack/test/virtual.py diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index ae9fd7bee6..bf0dac6a22 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -51,6 +51,7 @@ repo_namespace = 'spack.pkg' # These names describe how repos should be laid out in the filesystem. # repo_config_name = 'repo.yaml' # Top-level filename for repo config. +repo_index_name = 'index.yaml' # Top-level filename for repository index. packages_dir_name = 'packages' # Top-level repo directory containing pkgs. package_file_name = 'package.py' # Filename for packages in a repository. diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 3439764ee6..11f7298c04 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -33,15 +33,49 @@ from spack.test.tally_plugin import Tally """Names of tests to be included in Spack's test suite""" test_names = [ - 'architecture', 'versions', 'url_parse', 'url_substitution', 'packages', - 'stage', 'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize', - 'multimethod', 'install', 'package_sanity', 'config', 'directory_layout', - 'pattern', 'python_version', 'git_fetch', 'svn_fetch', 'hg_fetch', - 'mirror', 'modules', 'url_extrapolate', 'cc', 'link_tree', 'spec_yaml', - 'optional_deps', 'make_executable', 'build_system_guess', 'lock', - 'database', 'namespace_trie', 'yaml', 'sbang', 'environment', - 'concretize_preferences', 'cmd.find', 'cmd.uninstall', 'cmd.test_install', - 'cmd.test_compiler_cmd', 'cmd.module' + 'architecture', + 'build_system_guess', + 'cc', + 'cmd.find', + 'cmd.module', + 'cmd.test_compiler_cmd', + 'cmd.test_install', + 'cmd.uninstall', + 'concretize', + 'concretize_preferences', + 'config', + 'configure_guess', + 'database', + 'directory_layout', + 'environment', + 'git_fetch', + 'hg_fetch', + 'install', + 'link_tree', + 'lock', + 'make_executable', + 'mirror', + 'modules', + 'multimethod', + 'namespace_trie', + 'optional_deps', + 'package_sanity', + 'packages', + 'pattern', + 'python_version', + 'sbang', + 'spec_dag', + 'spec_semantics', + 'spec_syntax', + 'spec_yaml', + 'stage', + 'svn_fetch', + 'url_extrapolate', + 'url_parse', + 'url_substitution', + 'versions', + 'virtual', + 'yaml', ] diff --git a/lib/spack/spack/test/virtual.py b/lib/spack/spack/test/virtual.py new file mode 100644 index 0000000000..7699165554 --- /dev/null +++ b/lib/spack/spack/test/virtual.py @@ -0,0 +1,43 @@ 
+############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from StringIO import StringIO +import unittest + +import spack +from spack.virtual import ProviderIndex + + +class VirtualTest(unittest.TestCase): + + def test_write_and_read(self): + p = ProviderIndex(spack.repo.all_package_names()) + + ostream = StringIO () + p.to_yaml(ostream) + + istream = StringIO(ostream.getvalue()) + q = ProviderIndex.from_yaml(istream) + + self.assertTrue(p == q) diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py index bb8333f023..bf6d8227a4 100644 --- a/lib/spack/spack/virtual.py +++ b/lib/spack/spack/virtual.py @@ -25,8 +25,12 @@ """ The ``virtual`` module contains utility classes for virtual dependencies. """ -import spack.spec import itertools +import yaml +from yaml.error import MarkedYAMLError + +import spack + class ProviderIndex(object): """This is a dict of dicts used for finding providers of particular @@ -45,10 +49,11 @@ class ProviderIndex(object): Calling providers_for(spec) will find specs that provide a matching implementation of MPI. """ - def __init__(self, specs, **kwargs): + def __init__(self, specs=None, **kwargs): # TODO: come up with another name for this. 
This "restricts" values to # the verbatim impu specs (i.e., it doesn't pre-apply package's constraints, and # keeps things as broad as possible, so it's really the wrong name) + if specs is None: specs = [] self.restrict = kwargs.setdefault('restrict', False) self.providers = {} @@ -64,7 +69,7 @@ class ProviderIndex(object): def update(self, spec): - if type(spec) != spack.spec.Spec: + if not isinstance(spec, spack.spec.Spec): spec = spack.spec.Spec(spec) if not spec.name: @@ -75,7 +80,8 @@ class ProviderIndex(object): pkg = spec.package for provided_spec, provider_spec in pkg.provided.iteritems(): - provider_spec.compiler_flags = spec.compiler_flags.copy()#We want satisfaction other than flags + # We want satisfaction other than flags + provider_spec.compiler_flags = spec.compiler_flags.copy() if provider_spec.satisfies(spec, deps=False): provided_name = provided_spec.name @@ -164,3 +170,44 @@ class ProviderIndex(object): result[name] = crossed return all(c in result for c in common) + + + def to_yaml(self, stream=None): + provider_list = dict( + (name, [[vpkg.to_node_dict(), [p.to_node_dict() for p in pset]] + for vpkg, pset in pdict.items()]) + for name, pdict in self.providers.items()) + + yaml.dump({'provider_index': {'providers': provider_list}}, + stream=stream) + + + @staticmethod + def from_yaml(stream): + try: + yfile = yaml.load(stream) + except MarkedYAMLError, e: + raise spack.spec.SpackYAMLError( + "error parsing YAML ProviderIndex cache:", str(e)) + + if not isinstance(yfile, dict): + raise spack.spec.SpackYAMLError( + "YAML ProviderIndex was not a dict.") + + if not 'provider_index' in yfile: + raise spack.spec.SpackYAMLError( + "YAML ProviderIndex does not start with 'provider_index'") + + index = ProviderIndex() + providers = yfile['provider_index']['providers'] + index.providers = dict( + (name, dict((spack.spec.Spec.from_node_dict(vpkg), + set(spack.spec.Spec.from_node_dict(p) for p in plist)) + for vpkg, plist in pdict_list)) + for name, pdict_list in providers.items()) + + return index + + + def __eq__(self, other): + return self.providers == other.providers -- cgit v1.2.3-70-g09d2 From cf2f902b82a3080243eae58ca728b55189108c10 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sat, 28 May 2016 20:13:13 -0500 Subject: Make ProviderIndexes mergeable, so we can cache them per-repo. 
--- lib/spack/spack/repository.py | 31 +++++++++++++++++++++++-------- lib/spack/spack/test/virtual.py | 6 ++++++ lib/spack/spack/virtual.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 8 deletions(-) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index bf0dac6a22..6e7e95b8bc 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -224,12 +224,20 @@ class RepoPath(object): yield self.get(name) - @_autospec - def providers_for(self, vpkg_spec): + @property + def provider_index(self): + """Merged ProviderIndex from all Repos in the RepoPath.""" if self._provider_index is None: - self._provider_index = ProviderIndex(self.all_package_names()) + self._provider_index = ProviderIndex() + for repo in reversed(self.repos): + self._provider_index.merge(repo.provider_index) + + return self._provider_index - providers = self._provider_index.providers_for(vpkg_spec) + + @_autospec + def providers_for(self, vpkg_spec): + providers = self.provider_index.providers_for(vpkg_spec) if not providers: raise UnknownPackageError(vpkg_spec.name) return providers @@ -603,12 +611,19 @@ class Repo(object): self._instances.clear() - @_autospec - def providers_for(self, vpkg_spec): + @property + def provider_index(self): + """A provider index with names *specific* to this repo.""" if self._provider_index is None: - self._provider_index = ProviderIndex(self.all_package_names()) + namespaced_names = ['%s.%s' % (self.namespace, n) + for n in self.all_package_names()] + self._provider_index = ProviderIndex(namespaced_names) + return self._provider_index + - providers = self._provider_index.providers_for(vpkg_spec) + @_autospec + def providers_for(self, vpkg_spec): + providers = self.provider_index.providers_for(vpkg_spec) if not providers: raise UnknownPackageError(vpkg_spec.name) return providers diff --git a/lib/spack/spack/test/virtual.py b/lib/spack/spack/test/virtual.py index 7699165554..1923e7006f 100644 --- a/lib/spack/spack/test/virtual.py +++ b/lib/spack/spack/test/virtual.py @@ -41,3 +41,9 @@ class VirtualTest(unittest.TestCase): q = ProviderIndex.from_yaml(istream) self.assertTrue(p == q) + + + def test_copy(self): + p = ProviderIndex(spack.repo.all_package_names()) + q = p.copy() + self.assertTrue(p == q) diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py index bf6d8227a4..2c47921a3f 100644 --- a/lib/spack/spack/virtual.py +++ b/lib/spack/spack/virtual.py @@ -209,5 +209,33 @@ class ProviderIndex(object): return index + def merge(self, other): + """Merge `other` ProviderIndex into this one.""" + other = other.copy() # defensive copy. + + for pkg in other.providers: + if pkg not in self.providers: + self.providers[pkg] = other.providers[pkg] + continue + + spdict, opdict = self.providers[pkg], other.providers[pkg] + for provided_spec in opdict: + if provided_spec not in spdict: + spdict[provided_spec] = opdict[provided_spec] + continue + + spdict[provided_spec] += opdict[provided_spec] + + + def copy(self): + """Deep copy of this ProviderIndex.""" + clone = ProviderIndex() + clone.providers = dict( + (name, dict((vpkg, set((p.copy() for p in pset))) + for vpkg, pset in pdict.items())) + for name, pdict in self.providers.items()) + return clone + + def __eq__(self, other): return self.providers == other.providers -- cgit v1.2.3-70-g09d2 From faa0a0e4c3a6e1bdfc68b97b3158c8cc14356e5d Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sat, 28 May 2016 20:27:22 -0700 Subject: Add a ProviderIndex cache. 
- Spack will check if the index needs updating, and will only parse all package files if it does. - Spack tries to parse as few package files as necessary. --- .gitignore | 2 + lib/spack/spack/repository.py | 140 ++++++++++++++++++++++++++++++++++++++---- lib/spack/spack/virtual.py | 14 +++++ 3 files changed, 143 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index 960b5b0035..b1215f0c7e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ /var/spack/stage /var/spack/cache +/var/spack/repos/*/index.yaml +/var/spack/repos/*/lock *.pyc /opt *~ diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 6e7e95b8bc..63ae999ce1 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -23,6 +23,9 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os +import stat +import shutil +import errno import exceptions import sys import inspect @@ -33,6 +36,7 @@ from bisect import bisect_left import yaml import llnl.util.tty as tty +from llnl.util.lock import Lock from llnl.util.filesystem import * import spack.error @@ -394,13 +398,25 @@ class Repo(object): if not condition: raise BadRepoError(msg) # Validate repository layout. - self.config_file = join_path(self.root, repo_config_name) + self.config_file = join_path(self.root, repo_config_name) check(os.path.isfile(self.config_file), "No %s found in '%s'" % (repo_config_name, root)) + self.packages_path = join_path(self.root, packages_dir_name) check(os.path.isdir(self.packages_path), "No directory '%s' found in '%s'" % (repo_config_name, root)) + self.index_file = join_path(self.root, repo_index_name) + check(not os.path.exists(self.index_file) or + (os.path.isfile(self.index_file) and os.access(self.index_file, os.R_OK|os.W_OK)), + "Cannot access repository index file in %s" % root) + + # lock file for reading/writing the index + self._lock_path = join_path(self.root, 'lock') + if not os.path.exists(self._lock_path): + touch(self._lock_path) + self._lock = Lock(self._lock_path) + # Read configuration and validate namespace config = self._read_config() check('namespace' in config, '%s must define a namespace.' @@ -424,7 +440,14 @@ class Repo(object): self._modules = {} self._classes = {} self._instances = {} + + # list of packages that are newer than the index. + self._needs_update = [] + + # Index of virtual dependencies self._provider_index = None + + # Cached list of package names. self._all_package_names = None # make sure the namespace for packages in this repo exists. @@ -611,13 +634,56 @@ class Repo(object): self._instances.clear() + def _update_provider_index(self): + # Check modification dates of all packages + self._fast_package_check() + + def read(): + with open(self.index_file) as f: + self._provider_index = ProviderIndex.from_yaml(f) + + # Read the old ProviderIndex, or make a new one. 
+ index_existed = os.path.isfile(self.index_file) + if index_existed and not self._needs_update: + self._lock.acquire_read() + try: + read() + finally: + self._lock.release_read() + + else: + self._lock.acquire_write() + try: + if index_existed: + with open(self.index_file) as f: + self._provider_index = ProviderIndex.from_yaml(f) + else: + self._provider_index = ProviderIndex() + + for pkg_name in self._needs_update: + namespaced_name = '%s.%s' % (self.namespace, pkg_name) + self._provider_index.remove_provider(namespaced_name) + self._provider_index.update(namespaced_name) + + + tmp = self.index_file + '.tmp' + with open(tmp, 'w') as f: + self._provider_index.to_yaml(f) + os.rename(tmp, self.index_file) + + except: + shutil.rmtree(tmp, ignore_errors=True) + raise + + finally: + self._lock.release_write() + + @property def provider_index(self): """A provider index with names *specific* to this repo.""" if self._provider_index is None: - namespaced_names = ['%s.%s' % (self.namespace, n) - for n in self.all_package_names()] - self._provider_index = ProviderIndex(namespaced_names) + self._update_provider_index() return self._provider_index @@ -663,21 +729,33 @@ class Repo(object): return join_path(pkg_dir, package_file_name) - def all_package_names(self): - """Returns a sorted list of all package names in the Repo.""" + def _fast_package_check(self): + """List packages in the repo and check whether index is up to date. + + Both of these operations require checking all `package.py` + files so we do them at the same time. We list the repo + directory and look at package.py files, and we compare the + index modification date with the most recently modified package + file, storing the result. + + The implementation here should try to minimize filesystem + calls. At the moment, it is O(number of packages) and makes + about one stat call per package. This is reasonably fast, and + avoids actually importing packages in Spack, which is slow. + + """ if self._all_package_names is None: self._all_package_names = [] + # Get index modification time. + index_mtime = 0 + if os.path.exists(self.index_file): + sinfo = os.stat(self.index_file) + index_mtime = sinfo.st_mtime + for pkg_name in os.listdir(self.packages_path): # Skip non-directories in the package root. pkg_dir = join_path(self.packages_path, pkg_name) - if not os.path.isdir(pkg_dir): - continue - - # Skip directories without a package.py in them. - pkg_file = join_path(self.packages_path, pkg_name, package_file_name) - if not os.path.isfile(pkg_file): - continue # Warn about invalid names that look like packages. if not valid_module_name(pkg_name): tty.warn("Skipping package at %s. '%s' is not a valid Spack module name." % (pkg_dir, pkg_name)) continue + # construct the file name from the directory + pkg_file = join_path( + self.packages_path, pkg_name, package_file_name) + + # Use stat here to avoid lots of calls to the filesystem. + try: + sinfo = os.stat(pkg_file) + except OSError as e: + if e.errno == errno.ENOENT: + # No package.py file here. + continue + elif e.errno == errno.EACCES: + tty.warn("Can't read package file %s." % pkg_file) + continue + raise e + + # if it's not a file, skip it. + if stat.S_ISDIR(sinfo.st_mode): + continue + + # All checks passed. Add it to the list. self._all_package_names.append(pkg_name) + + # record the package if it is newer than the index.
+ if sinfo.st_mtime > index_mtime: + self._needs_update.append(pkg_name) + self._all_package_names.sort() return self._all_package_names + def all_package_names(self): + """Returns a sorted list of all package names in the Repo.""" + self._fast_package_check() + return self._all_package_names + + def all_packages(self): + """Iterator over all packages in the repository. + + Use this with care, because loading packages is slow. + + """ for name in self.all_package_names(): yield self.get(name) diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py index 2c47921a3f..785ab98918 100644 --- a/lib/spack/spack/virtual.py +++ b/lib/spack/spack/virtual.py @@ -227,6 +227,20 @@ class ProviderIndex(object): spdict[provided_spec] += opdict[provided_spec] + def remove_provider(self, pkg_name): + """Remove a provider from the ProviderIndex.""" + for pkg in self.providers: + pkg_dict = self.providers[pkg] + for provided, pset in pkg_dict.items(): + for provider in pset: + if provider.fullname == pkg_name: + pset.remove(provider) + if not pset: + del pkg_dict[provided] + if not pkg_dict: + del self.providers[pkg] + + def copy(self): """Deep copy of this ProviderIndex.""" clone = ProviderIndex() -- cgit v1.2.3-70-g09d2 From 37fc2583136f1abd5d39b15bb05d2102d182003a Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 29 May 2016 23:23:33 -0700 Subject: Remove vestigial methods from Package. --- lib/spack/spack/package.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index c916bfaaa2..43aefbf65e 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -630,50 +630,12 @@ class Package(object): exts = spack.install_layout.extension_map(self.extendee_spec) return (self.name in exts) and (exts[self.name] == self.spec) - def preorder_traversal(self, visited=None, **kwargs): - """This does a preorder traversal of the package's dependence DAG.""" - virtual = kwargs.get("virtual", False) - - if visited is None: - visited = set() - - if self.name in visited: - return - visited.add(self.name) - - if not virtual: - yield self - - for name in sorted(self.dependencies.keys()): - dep_spec = self.get_dependency(name) - spec = dep_spec.spec - - # Currently, we do not descend into virtual dependencies, as this - # makes doing a sensible traversal much harder. We just assume - # that ANY of the virtual deps will work, which might not be true - # (due to conflicts or unsatisfiable specs). For now this is ok, - # but we might want to reinvestigate if we start using a lot of - # complicated virtual dependencies - # TODO: reinvestigate this. 
- if spec.virtual: - if virtual: - yield spec - continue - - for pkg in spack.repo.get(name).preorder_traversal(visited, - **kwargs): - yield pkg - def provides(self, vpkg_name): """ True if this package provides a virtual package with the specified name """ return any(s.name == vpkg_name for s in self.provided) - def virtual_dependencies(self, visited=None): - for spec in sorted(set(self.preorder_traversal(virtual=True))): - yield spec - @property def installed(self): return os.path.isdir(self.prefix) -- cgit v1.2.3-70-g09d2 From ab049eca4129b389e7dab53d6dd475b24f8099ed Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 29 May 2016 23:51:39 -0700 Subject: Faster key in FlagMap._cmp_key --- lib/spack/spack/spec.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 1300f35ca4..b9d9d3e0a4 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -438,8 +438,7 @@ class FlagMap(HashableMap): return clone def _cmp_key(self): - return ''.join(str(key) + ' '.join(str(v) for v in value) - for key, value in sorted(self.items())) + return tuple((k, tuple(v)) for k, v in sorted(self.iteritems())) def __str__(self): sorted_keys = filter( -- cgit v1.2.3-70-g09d2 From 1f5a21decf5aa97897692501337e700c572a25f6 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Mon, 30 May 2016 00:26:49 -0700 Subject: Fix namespace support in Repo.get_pkg_class() --- lib/spack/spack/repository.py | 6 ++++++ lib/spack/spack/spec.py | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 63ae999ce1..6aa9b8dd2f 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -864,6 +864,12 @@ class Repo(object): package. Then extracts the package class from the module according to Spack's naming convention. """ + fullname = pkg_name + namespace, _, pkg_name = pkg_name.rpartition('.') + if namespace and (namespace != self.namespace): + raise InvalidNamespaceError('Invalid namespace for %s repo: %s' + % (self.namespace, namespace)) + class_name = mod_to_class(pkg_name) module = self._get_pkg_module(pkg_name) diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index b9d9d3e0a4..24459fd3b3 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -714,7 +714,7 @@ class Spec(object): """Internal package call gets only the class object for a package. Use this to just get package metadata. """ - return spack.repo.get_pkg_class(self.name) + return spack.repo.get_pkg_class(self.fullname) @property def virtual(self): @@ -1574,7 +1574,7 @@ class Spec(object): UnsupportedCompilerError. """ for spec in self.traverse(): - # Don't get a package for a virtual name. + # raise an UnknownPackageError if the spec's package isn't real. 
if (not spec.virtual) and spec.name: spack.repo.get(spec.fullname) -- cgit v1.2.3-70-g09d2 From ce6ac93abed2a7193745f7082670e1cbe2ffede2 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 5 Jun 2016 00:52:52 -0700 Subject: rename `virtual` module to `provider_index` --- lib/spack/spack/provider_index.py | 255 +++++++++++++++++++++++++++++++++ lib/spack/spack/repository.py | 2 +- lib/spack/spack/spec.py | 15 +- lib/spack/spack/test/__init__.py | 5 +- lib/spack/spack/test/provider_index.py | 49 +++++++ lib/spack/spack/test/virtual.py | 49 ------- lib/spack/spack/virtual.py | 255 --------------------------------- 7 files changed, 317 insertions(+), 313 deletions(-) create mode 100644 lib/spack/spack/provider_index.py create mode 100644 lib/spack/spack/test/provider_index.py delete mode 100644 lib/spack/spack/test/virtual.py delete mode 100644 lib/spack/spack/virtual.py diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py new file mode 100644 index 0000000000..785ab98918 --- /dev/null +++ b/lib/spack/spack/provider_index.py @@ -0,0 +1,255 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +The ``virtual`` module contains utility classes for virtual dependencies. +""" +import itertools +import yaml +from yaml.error import MarkedYAMLError + +import spack + + +class ProviderIndex(object): + """This is a dict of dicts used for finding providers of particular + virtual dependencies. The dict of dicts looks like: + + { vpkg name : + { full vpkg spec : set(packages providing spec) } } + + Callers can use this to first find which packages provide a vpkg, + then find a matching full spec. e.g., in this scenario: + + { 'mpi' : + { mpi@:1.1 : set([mpich]), + mpi@:2.3 : set([mpich2@1.9:]) } } + + Calling providers_for(spec) will find specs that provide a + matching implementation of MPI. + """ + def __init__(self, specs=None, **kwargs): + # TODO: come up with another name for this. 
This "restricts" values to + # the verbatim impu specs (i.e., it doesn't pre-apply package's constraints, and + # keeps things as broad as possible, so it's really the wrong name) + if specs is None: specs = [] + self.restrict = kwargs.setdefault('restrict', False) + + self.providers = {} + + for spec in specs: + if not isinstance(spec, spack.spec.Spec): + spec = spack.spec.Spec(spec) + + if spec.virtual: + continue + + self.update(spec) + + + def update(self, spec): + if not isinstance(spec, spack.spec.Spec): + spec = spack.spec.Spec(spec) + + if not spec.name: + # Empty specs do not have a package + return + + assert(not spec.virtual) + + pkg = spec.package + for provided_spec, provider_spec in pkg.provided.iteritems(): + # We want satisfaction other than flags + provider_spec.compiler_flags = spec.compiler_flags.copy() + if provider_spec.satisfies(spec, deps=False): + provided_name = provided_spec.name + + provider_map = self.providers.setdefault(provided_name, {}) + if not provided_spec in provider_map: + provider_map[provided_spec] = set() + + if self.restrict: + provider_set = provider_map[provided_spec] + + # If this package existed in the index before, + # need to take the old versions out, as they're + # now more constrained. + old = set([s for s in provider_set if s.name == spec.name]) + provider_set.difference_update(old) + + # Now add the new version. + provider_set.add(spec) + + else: + # Before putting the spec in the map, constrain it so that + # it provides what was asked for. + constrained = spec.copy() + constrained.constrain(provider_spec) + provider_map[provided_spec].add(constrained) + + + def providers_for(self, *vpkg_specs): + """Gives specs of all packages that provide virtual packages + with the supplied specs.""" + providers = set() + for vspec in vpkg_specs: + # Allow string names to be passed as input, as well as specs + if type(vspec) == str: + vspec = spack.spec.Spec(vspec) + + # Add all the providers that satisfy the vpkg spec. + if vspec.name in self.providers: + for provider_spec, spec_set in self.providers[vspec.name].items(): + if provider_spec.satisfies(vspec, deps=False): + providers.update(spec_set) + + # Return providers in order + return sorted(providers) + + + # TODO: this is pretty darned nasty, and inefficient, but there + # are not that many vdeps in most specs. + def _cross_provider_maps(self, lmap, rmap): + result = {} + for lspec, rspec in itertools.product(lmap, rmap): + try: + constrained = lspec.constrained(rspec) + except spack.spec.UnsatisfiableSpecError: + continue + + # lp and rp are left and right provider specs. + for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]): + if lp_spec.name == rp_spec.name: + try: + const = lp_spec.constrained(rp_spec, deps=False) + result.setdefault(constrained, set()).add(const) + except spack.spec.UnsatisfiableSpecError: + continue + return result + + + def __contains__(self, name): + """Whether a particular vpkg name is in the index.""" + return name in self.providers + + + def satisfies(self, other): + """Check that providers of virtual specs are compatible.""" + common = set(self.providers) & set(other.providers) + if not common: + return True + + # This ensures that some provider in other COULD satisfy the + # vpkg constraints on self. 
+ result = {} + for name in common: + crossed = self._cross_provider_maps(self.providers[name], + other.providers[name]) + if crossed: + result[name] = crossed + + return all(c in result for c in common) + + + def to_yaml(self, stream=None): + provider_list = dict( + (name, [[vpkg.to_node_dict(), [p.to_node_dict() for p in pset]] + for vpkg, pset in pdict.items()]) + for name, pdict in self.providers.items()) + + yaml.dump({'provider_index': {'providers': provider_list}}, + stream=stream) + + + @staticmethod + def from_yaml(stream): + try: + yfile = yaml.load(stream) + except MarkedYAMLError, e: + raise spack.spec.SpackYAMLError( + "error parsing YAML ProviderIndex cache:", str(e)) + + if not isinstance(yfile, dict): + raise spack.spec.SpackYAMLError( + "YAML ProviderIndex was not a dict.") + + if not 'provider_index' in yfile: + raise spack.spec.SpackYAMLError( + "YAML ProviderIndex does not start with 'provider_index'") + + index = ProviderIndex() + providers = yfile['provider_index']['providers'] + index.providers = dict( + (name, dict((spack.spec.Spec.from_node_dict(vpkg), + set(spack.spec.Spec.from_node_dict(p) for p in plist)) + for vpkg, plist in pdict_list)) + for name, pdict_list in providers.items()) + + return index + + + def merge(self, other): + """Merge `other` ProviderIndex into this one.""" + other = other.copy() # defensive copy. + + for pkg in other.providers: + if pkg not in self.providers: + self.providers[pkg] = other.providers[pkg] + continue + + spdict, opdict = self.providers[pkg], other.providers[pkg] + for provided_spec in opdict: + if provided_spec not in spdict: + spdict[provided_spec] = opdict[provided_spec] + continue + + spdict[provided_spec] += opdict[provided_spec] + + + def remove_provider(self, pkg_name): + """Remove a provider from the ProviderIndex.""" + for pkg in self.providers: + pkg_dict = self.providers[pkg] + for provided, pset in pkg_dict.items(): + for provider in pset: + if provider.fullname == pkg_name: + pset.remove(provider) + if not pset: + del pkg_dict[provided] + if not pkg_dict: + del self.providers[pkg] + + + def copy(self): + """Deep copy of this ProviderIndex.""" + clone = ProviderIndex() + clone.providers = dict( + (name, dict((vpkg, set((p.copy() for p in pset))) + for vpkg, pset in pdict.items())) + for name, pdict in self.providers.items()) + return clone + + + def __eq__(self, other): + return self.providers == other.providers diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 6aa9b8dd2f..3b0d3167b3 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -42,7 +42,7 @@ from llnl.util.filesystem import * import spack.error import spack.config import spack.spec -from spack.virtual import ProviderIndex +from spack.provider_index import ProviderIndex from spack.util.naming import * # diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 24459fd3b3..8a47ec95ad 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -102,23 +102,26 @@ import sys from StringIO import StringIO from operator import attrgetter +import yaml +from yaml.error import MarkedYAMLError + import llnl.util.tty as tty +from llnl.util.filesystem import join_path +from llnl.util.lang import * +from llnl.util.tty.color import * + import spack import spack.architecture import spack.compilers as compilers import spack.error import spack.parse -import yaml -from llnl.util.filesystem import join_path -from llnl.util.lang import * -from llnl.util.tty.color import * from 
spack.build_environment import get_path_from_module, load_module from spack.util.naming import mod_to_class from spack.util.prefix import Prefix from spack.util.string import * from spack.version import * -from spack.virtual import ProviderIndex -from yaml.error import MarkedYAMLError +from spack.provider_index import ProviderIndex + # Valid pattern for an identifier in Spack identifier_re = r'\w[\w-]*' diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 11f7298c04..e092a50913 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -32,6 +32,8 @@ from llnl.util.tty.colify import colify from spack.test.tally_plugin import Tally """Names of tests to be included in Spack's test suite""" +# All the tests Spack knows about. +# Keep these one per line so that it's easy to see changes in diffs. test_names = [ 'architecture', 'build_system_guess', @@ -74,11 +76,10 @@ test_names = [ 'url_parse', 'url_substitution', 'versions', - 'virtual', + 'provider_index', 'yaml', ] - def list_tests(): """Return names of all tests that can be run for Spack.""" return test_names diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py new file mode 100644 index 0000000000..15fb9acff2 --- /dev/null +++ b/lib/spack/spack/test/provider_index.py @@ -0,0 +1,49 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from StringIO import StringIO +import unittest + +import spack +from spack.provider_index import ProviderIndex + + +class ProviderIndexTest(unittest.TestCase): + + def test_write_and_read(self): + p = ProviderIndex(spack.repo.all_package_names()) + + ostream = StringIO() + p.to_yaml(ostream) + + istream = StringIO(ostream.getvalue()) + q = ProviderIndex.from_yaml(istream) + + self.assertTrue(p == q) + + + def test_copy(self): + p = ProviderIndex(spack.repo.all_package_names()) + q = p.copy() + self.assertTrue(p == q) diff --git a/lib/spack/spack/test/virtual.py b/lib/spack/spack/test/virtual.py deleted file mode 100644 index 1923e7006f..0000000000 --- a/lib/spack/spack/test/virtual.py +++ /dev/null @@ -1,49 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. 
-# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -from StringIO import StringIO -import unittest - -import spack -from spack.virtual import ProviderIndex - - -class VirtualTest(unittest.TestCase): - - def test_write_and_read(self): - p = ProviderIndex(spack.repo.all_package_names()) - - ostream = StringIO () - p.to_yaml(ostream) - - istream = StringIO(ostream.getvalue()) - q = ProviderIndex.from_yaml(istream) - - self.assertTrue(p == q) - - - def test_copy(self): - p = ProviderIndex(spack.repo.all_package_names()) - q = p.copy() - self.assertTrue(p == q) diff --git a/lib/spack/spack/virtual.py b/lib/spack/spack/virtual.py deleted file mode 100644 index 785ab98918..0000000000 --- a/lib/spack/spack/virtual.py +++ /dev/null @@ -1,255 +0,0 @@ -############################################################################## -# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# -# This file is part of Spack. -# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. -# LLNL-CODE-647188 -# -# For details, see https://github.com/llnl/spack -# Please also see the LICENSE file for our notice and the LGPL. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License (as -# published by the Free Software Foundation) version 2.1, February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and -# conditions of the GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -############################################################################## -""" -The ``virtual`` module contains utility classes for virtual dependencies. -""" -import itertools -import yaml -from yaml.error import MarkedYAMLError - -import spack - - -class ProviderIndex(object): - """This is a dict of dicts used for finding providers of particular - virtual dependencies. The dict of dicts looks like: - - { vpkg name : - { full vpkg spec : set(packages providing spec) } } - - Callers can use this to first find which packages provide a vpkg, - then find a matching full spec. 
e.g., in this scenario: - - { 'mpi' : - { mpi@:1.1 : set([mpich]), - mpi@:2.3 : set([mpich2@1.9:]) } } - - Calling providers_for(spec) will find specs that provide a - matching implementation of MPI. - """ - def __init__(self, specs=None, **kwargs): - # TODO: come up with another name for this. This "restricts" values to - # the verbatim impu specs (i.e., it doesn't pre-apply package's constraints, and - # keeps things as broad as possible, so it's really the wrong name) - if specs is None: specs = [] - self.restrict = kwargs.setdefault('restrict', False) - - self.providers = {} - - for spec in specs: - if not isinstance(spec, spack.spec.Spec): - spec = spack.spec.Spec(spec) - - if spec.virtual: - continue - - self.update(spec) - - - def update(self, spec): - if not isinstance(spec, spack.spec.Spec): - spec = spack.spec.Spec(spec) - - if not spec.name: - # Empty specs do not have a package - return - - assert(not spec.virtual) - - pkg = spec.package - for provided_spec, provider_spec in pkg.provided.iteritems(): - # We want satisfaction other than flags - provider_spec.compiler_flags = spec.compiler_flags.copy() - if provider_spec.satisfies(spec, deps=False): - provided_name = provided_spec.name - - provider_map = self.providers.setdefault(provided_name, {}) - if not provided_spec in provider_map: - provider_map[provided_spec] = set() - - if self.restrict: - provider_set = provider_map[provided_spec] - - # If this package existed in the index before, - # need to take the old versions out, as they're - # now more constrained. - old = set([s for s in provider_set if s.name == spec.name]) - provider_set.difference_update(old) - - # Now add the new version. - provider_set.add(spec) - - else: - # Before putting the spec in the map, constrain it so that - # it provides what was asked for. - constrained = spec.copy() - constrained.constrain(provider_spec) - provider_map[provided_spec].add(constrained) - - - def providers_for(self, *vpkg_specs): - """Gives specs of all packages that provide virtual packages - with the supplied specs.""" - providers = set() - for vspec in vpkg_specs: - # Allow string names to be passed as input, as well as specs - if type(vspec) == str: - vspec = spack.spec.Spec(vspec) - - # Add all the providers that satisfy the vpkg spec. - if vspec.name in self.providers: - for provider_spec, spec_set in self.providers[vspec.name].items(): - if provider_spec.satisfies(vspec, deps=False): - providers.update(spec_set) - - # Return providers in order - return sorted(providers) - - - # TODO: this is pretty darned nasty, and inefficient, but there - # are not that many vdeps in most specs. - def _cross_provider_maps(self, lmap, rmap): - result = {} - for lspec, rspec in itertools.product(lmap, rmap): - try: - constrained = lspec.constrained(rspec) - except spack.spec.UnsatisfiableSpecError: - continue - - # lp and rp are left and right provider specs. 
- for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]): - if lp_spec.name == rp_spec.name: - try: - const = lp_spec.constrained(rp_spec, deps=False) - result.setdefault(constrained, set()).add(const) - except spack.spec.UnsatisfiableSpecError: - continue - return result - - - def __contains__(self, name): - """Whether a particular vpkg name is in the index.""" - return name in self.providers - - - def satisfies(self, other): - """Check that providers of virtual specs are compatible.""" - common = set(self.providers) & set(other.providers) - if not common: - return True - - # This ensures that some provider in other COULD satisfy the - # vpkg constraints on self. - result = {} - for name in common: - crossed = self._cross_provider_maps(self.providers[name], - other.providers[name]) - if crossed: - result[name] = crossed - - return all(c in result for c in common) - - - def to_yaml(self, stream=None): - provider_list = dict( - (name, [[vpkg.to_node_dict(), [p.to_node_dict() for p in pset]] - for vpkg, pset in pdict.items()]) - for name, pdict in self.providers.items()) - - yaml.dump({'provider_index': {'providers': provider_list}}, - stream=stream) - - - @staticmethod - def from_yaml(stream): - try: - yfile = yaml.load(stream) - except MarkedYAMLError, e: - raise spack.spec.SpackYAMLError( - "error parsing YAML ProviderIndex cache:", str(e)) - - if not isinstance(yfile, dict): - raise spack.spec.SpackYAMLError( - "YAML ProviderIndex was not a dict.") - - if not 'provider_index' in yfile: - raise spack.spec.SpackYAMLError( - "YAML ProviderIndex does not start with 'provider_index'") - - index = ProviderIndex() - providers = yfile['provider_index']['providers'] - index.providers = dict( - (name, dict((spack.spec.Spec.from_node_dict(vpkg), - set(spack.spec.Spec.from_node_dict(p) for p in plist)) - for vpkg, plist in pdict_list)) - for name, pdict_list in providers.items()) - - return index - - - def merge(self, other): - """Merge `other` ProviderIndex into this one.""" - other = other.copy() # defensive copy. - - for pkg in other.providers: - if pkg not in self.providers: - self.providers[pkg] = other.providers[pkg] - continue - - spdict, opdict = self.providers[pkg], other.providers[pkg] - for provided_spec in opdict: - if provided_spec not in spdict: - spdict[provided_spec] = opdict[provided_spec] - continue - - spdict[provided_spec] += opdict[provided_spec] - - - def remove_provider(self, pkg_name): - """Remove a provider from the ProviderIndex.""" - for pkg in self.providers: - pkg_dict = self.providers[pkg] - for provided, pset in pkg_dict.items(): - for provider in pset: - if provider.fullname == pkg_name: - pset.remove(provider) - if not pset: - del pkg_dict[provided] - if not pkg_dict: - del self.providers[pkg] - - - def copy(self): - """Deep copy of this ProviderIndex.""" - clone = ProviderIndex() - clone.providers = dict( - (name, dict((vpkg, set((p.copy() for p in pset))) - for vpkg, pset in pdict.items())) - for name, pdict in self.providers.items()) - return clone - - - def __eq__(self, other): - return self.providers == other.providers -- cgit v1.2.3-70-g09d2 From 4de45c268417e518c7ee616d7454c1c91a5b8b35 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 5 Jun 2016 01:27:35 -0700 Subject: fix scoping issue. 
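The scoping issue: in _update_provider_index() above, `tmp` was first bound inside the `try` block, after statements that can raise, so a failure before the assignment made the `except` handler's cleanup die with a NameError instead of propagating the real error. The one-line move below hoists the binding above the `try`. A self-contained illustration with made-up names:

    import shutil

    def save_index(index_file, write_fn):
        tmp = index_file + '.tmp'   # bound before anything can fail
        try:
            write_fn(tmp)           # may raise at any point
        except:
            # Safe: `tmp` is always defined by the time we get here.
            shutil.rmtree(tmp, ignore_errors=True)
            raise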
--- lib/spack/spack/repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 3b0d3167b3..df21810a12 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -652,6 +652,7 @@ class Repo(object): self._lock.release_read() else: + tmp = self.index_file + '.tmp' self._lock.acquire_write() try: if index_existed: @@ -666,7 +667,6 @@ class Repo(object): self._provider_index.update(namespaced_name) - tmp = self.index_file + '.tmp' with open(tmp, 'w') as f: self._provider_index.to_yaml(f) os.rename(tmp, self.index_file) -- cgit v1.2.3-70-g09d2 From 5e5024342f95fe3bea86b25ae488c8e738566a2e Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 5 Jun 2016 01:27:54 -0700 Subject: Fix iterator invalidation issues. --- lib/spack/spack/provider_index.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py index 785ab98918..6cd2134e96 100644 --- a/lib/spack/spack/provider_index.py +++ b/lib/spack/spack/provider_index.py @@ -50,9 +50,10 @@ class ProviderIndex(object): matching implementation of MPI. """ def __init__(self, specs=None, **kwargs): - # TODO: come up with another name for this. This "restricts" values to - # the verbatim impu specs (i.e., it doesn't pre-apply package's constraints, and - # keeps things as broad as possible, so it's really the wrong name) + # TODO: come up with another name for this. This "restricts" + # values to the verbatim impu specs (i.e., it doesn't + # pre-apply package's constraints, and keeps things as broad + # as possible, so it's really the wrong name) if specs is None: specs = [] self.restrict = kwargs.setdefault('restrict', False) @@ -229,16 +230,24 @@ class ProviderIndex(object): def remove_provider(self, pkg_name): """Remove a provider from the ProviderIndex.""" - for pkg in self.providers: - pkg_dict = self.providers[pkg] + empty_pkg_dict = [] + for pkg, pkg_dict in self.providers.items(): + empty_pset = [] for provided, pset in pkg_dict.items(): - for provider in pset: - if provider.fullname == pkg_name: - pset.remove(provider) + same_name = set(p for p in pset if p.fullname == pkg_name) + pset.difference_update(same_name) + if not pset: - del pkg_dict[provided] + empty_pset.append(provided) + + for provided in empty_pset: + del pkg_dict[provided] + if not pkg_dict: - del self.providers[pkg] + empty_pkg_dict.append(pkg) + + for pkg in empty_pkg_dict: + del self.providers[pkg] def copy(self): -- cgit v1.2.3-70-g09d2 From d195576fba37672a6a26ebb6208acd5f00e4871f Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 12 Jun 2016 23:50:59 -0700 Subject: WIP --- lib/spack/spack/provider_index.py | 82 +++++++++++++++++++++++++--------- lib/spack/spack/test/concretize.py | 3 -- lib/spack/spack/test/provider_index.py | 59 ++++++++++++++++++++++-- 3 files changed, 117 insertions(+), 27 deletions(-) diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py index 6cd2134e96..ecdc25c4d2 100644 --- a/lib/spack/spack/provider_index.py +++ b/lib/spack/spack/provider_index.py @@ -26,6 +26,8 @@ The ``virtual`` module contains utility classes for virtual dependencies. """ import itertools +from pprint import pformat + import yaml from yaml.error import MarkedYAMLError @@ -48,15 +50,30 @@ class ProviderIndex(object): Calling providers_for(spec) will find specs that provide a matching implementation of MPI. 
+ """ - def __init__(self, specs=None, **kwargs): - # TODO: come up with another name for this. This "restricts" - # values to the verbatim impu specs (i.e., it doesn't - # pre-apply package's constraints, and keeps things as broad - # as possible, so it's really the wrong name) + + + def __init__(self, specs=None, restrict=False): + """Create a new ProviderIndex. + + Optional arguments: + + specs + List (or sequence) of specs. If provided, will call + `update` on this ProviderIndex with each spec in the list. + + restrict + "restricts" values to the verbatim input specs; do not + pre-apply package's constraints. + + TODO: rename this. It is intended to keep things as broad + as possible without overly restricting results, so it is + not the best name. + """ if specs is None: specs = [] - self.restrict = kwargs.setdefault('restrict', False) + self.restrict = restrict self.providers = {} for spec in specs: @@ -174,10 +191,9 @@ class ProviderIndex(object): def to_yaml(self, stream=None): - provider_list = dict( - (name, [[vpkg.to_node_dict(), [p.to_node_dict() for p in pset]] - for vpkg, pset in pdict.items()]) - for name, pdict in self.providers.items()) + provider_list = self._transform( + lambda vpkg, pset: [ + vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list) yaml.dump({'provider_index': {'providers': provider_list}}, stream=stream) @@ -201,12 +217,11 @@ class ProviderIndex(object): index = ProviderIndex() providers = yfile['provider_index']['providers'] - index.providers = dict( - (name, dict((spack.spec.Spec.from_node_dict(vpkg), - set(spack.spec.Spec.from_node_dict(p) for p in plist)) - for vpkg, plist in pdict_list)) - for name, pdict_list in providers.items()) - + index.providers = _transform( + providers, + lambda vpkg, plist: ( + spack.spec.Spec.from_node_dict(vpkg), + set(spack.spec.Spec.from_node_dict(p) for p in plist))) return index @@ -253,12 +268,39 @@ class ProviderIndex(object): def copy(self): """Deep copy of this ProviderIndex.""" clone = ProviderIndex() - clone.providers = dict( - (name, dict((vpkg, set((p.copy() for p in pset))) - for vpkg, pset in pdict.items())) - for name, pdict in self.providers.items()) + clone.providers = self._transform( + lambda vpkg, pset: (vpkg, set((p.copy() for p in pset)))) return clone def __eq__(self, other): return self.providers == other.providers + + + def _transform(self, transform_fun, out_mapping_type=dict): + return _transform(self.providers, transform_fun, out_mapping_type) + + + def __str__(self): + return pformat( + _transform(self.providers, + lambda k, v: (k, list(v)))) + + +def _transform(providers, transform_fun, out_mapping_type=dict): + """Syntactic sugar for transforming a providers dict. + + transform_fun takes a (vpkg, pset) mapping and runs it on each + pair in nested dicts. 
+ + """ + def mapiter(mappings): + if isinstance(mappings, dict): + return mappings.iteritems() + else: + return iter(mappings) + + return dict( + (name, out_mapping_type([ + transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)])) + for name, mappings in providers.items()) diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py index ae3ceecfc8..ec0a2ec244 100644 --- a/lib/spack/spack/test/concretize.py +++ b/lib/spack/spack/test/concretize.py @@ -153,9 +153,6 @@ class ConcretizeTest(MockPackagesTest): self.assertTrue(not any(spec.satisfies('mpich2@:1.1') for spec in spack.repo.providers_for('mpi@2.2'))) - self.assertTrue(not any(spec.satisfies('mpich2@:1.1') - for spec in spack.repo.providers_for('mpi@2.2'))) - self.assertTrue(not any(spec.satisfies('mpich@:1') for spec in spack.repo.providers_for('mpi@2'))) diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py index 15fb9acff2..7d5f997b0a 100644 --- a/lib/spack/spack/test/provider_index.py +++ b/lib/spack/spack/test/provider_index.py @@ -26,12 +26,27 @@ from StringIO import StringIO import unittest import spack +from spack.spec import Spec from spack.provider_index import ProviderIndex +from spack.test.mock_packages_test import * +# Test assume that mock packages provide this: +# +# {'blas': { +# blas: set([netlib-blas, openblas, openblas-with-lapack])}, +# 'lapack': {lapack: set([netlib-lapack, openblas-with-lapack])}, +# 'mpi': {mpi@:1: set([mpich@:1]), +# mpi@:2.0: set([mpich2]), +# mpi@:2.1: set([mpich2@1.1:]), +# mpi@:2.2: set([mpich2@1.2:]), +# mpi@:3: set([mpich@3:]), +# mpi@:10.0: set([zmpi])}, +# 'stuff': {stuff: set([externalvirtual])}} +# -class ProviderIndexTest(unittest.TestCase): +class ProviderIndexTest(MockPackagesTest): - def test_write_and_read(self): + def test_yaml_round_trip(self): p = ProviderIndex(spack.repo.all_package_names()) ostream = StringIO() @@ -40,10 +55,46 @@ class ProviderIndexTest(unittest.TestCase): istream = StringIO(ostream.getvalue()) q = ProviderIndex.from_yaml(istream) - self.assertTrue(p == q) + self.assertEqual(p, q) + + + def test_providers_for_simple(self): + p = ProviderIndex(spack.repo.all_package_names()) + + blas_providers = p.providers_for('blas') + self.assertTrue(Spec('netlib-blas') in blas_providers) + self.assertTrue(Spec('openblas') in blas_providers) + self.assertTrue(Spec('openblas-with-lapack') in blas_providers) + + lapack_providers = p.providers_for('lapack') + self.assertTrue(Spec('netlib-lapack') in lapack_providers) + self.assertTrue(Spec('openblas-with-lapack') in lapack_providers) + + + def test_mpi_providers(self): + p = ProviderIndex(spack.repo.all_package_names()) + + mpi_2_providers = p.providers_for('mpi@2') + self.assertTrue(Spec('mpich2') in mpi_2_providers) + self.assertTrue(Spec('mpich@3:') in mpi_2_providers) + + mpi_3_providers = p.providers_for('mpi@3') + self.assertTrue(Spec('mpich2') not in mpi_3_providers) + self.assertTrue(Spec('mpich@3:') in mpi_3_providers) + self.assertTrue(Spec('zmpi') in mpi_3_providers) + + + def test_equal(self): + p = ProviderIndex(spack.repo.all_package_names()) + q = ProviderIndex(spack.repo.all_package_names()) + self.assertEqual(p, q) def test_copy(self): p = ProviderIndex(spack.repo.all_package_names()) q = p.copy() - self.assertTrue(p == q) + self.assertEqual(p, q) + + + def test_copy(self): + pass -- cgit v1.2.3-70-g09d2 From e5743db9b996b5f0aa1934471a4d603bc24f3725 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 7 Aug 2016 17:39:18 -0700 
Subject: Fix issues with import order in tests. - modules weren't set properly as attributes in parent modules --- lib/spack/spack/repository.py | 45 ++++++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index df21810a12..58747ba25d 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -33,6 +33,8 @@ import imp import re import traceback from bisect import bisect_left +from types import ModuleType + import yaml import llnl.util.tty as tty @@ -73,12 +75,21 @@ def _autospec(function): return converter -def _make_namespace_module(ns): - module = imp.new_module(ns) - module.__file__ = "(spack namespace)" - module.__path__ = [] - module.__package__ = ns - return module +class SpackNamespace(ModuleType): + """ Allow lazy loading of modules.""" + def __init__(self, namespace): + super(ModuleType, self).__init__(self, namespace) + self.__file__ = "(spack namespace)" + self.__path__ = [] + self.__name__ = namespace + self.__package__ = namespace + self.__modules = {} + + def __getattr__(self, name): + """Getattr lazily loads modules if they're not already loaded.""" + submodule = self.__package__ + '.' + name + setattr(self, name, __import__(submodule)) + return getattr(self, name) def substitute_spack_prefix(path): @@ -287,13 +298,10 @@ class RepoPath(object): if fullname in sys.modules: return sys.modules[fullname] - # partition fullname into prefix and module name. - namespace, dot, module_name = fullname.rpartition('.') - if not self.by_namespace.is_prefix(fullname): raise ImportError("No such Spack repo: %s" % fullname) - module = _make_namespace_module(namespace) + module = SpackNamespace(fullname) module.__loader__ = self sys.modules[fullname] = module return module @@ -464,8 +472,9 @@ class Repo(object): parent = None for l in range(1, len(self._names)+1): ns = '.'.join(self._names[:l]) + if not ns in sys.modules: - module = _make_namespace_module(ns) + module = SpackNamespace(ns) module.__loader__ = self sys.modules[ns] = module @@ -476,11 +485,12 @@ class Repo(object): # import spack.pkg.builtin.mpich as mpich if parent: modname = self._names[l-1] - if not hasattr(parent, modname): - setattr(parent, modname, module) + setattr(parent, modname, module) else: - # no need to set up a module, but keep track of the parent. + # no need to set up a module module = sys.modules[ns] + + # but keep track of the parent in this loop parent = module @@ -543,7 +553,7 @@ class Repo(object): namespace, dot, module_name = fullname.rpartition('.') if self.is_prefix(fullname): - module = _make_namespace_module(fullname) + module = SpackNamespace(fullname) elif namespace == self.full_namespace: real_name = self.real_name(module_name) @@ -556,6 +566,11 @@ class Repo(object): module.__loader__ = self sys.modules[fullname] = module + if namespace != fullname: + parent = sys.modules[namespace] + if not hasattr(parent, module_name): + setattr(parent, module_name, module) + return module -- cgit v1.2.3-70-g09d2 From 5d690c9270bd20366923da4bc1b8498621c2ff69 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 7 Aug 2016 17:53:59 -0700 Subject: Make compiler command test last until caching is fixed. - global compiler cache breaks tests that come after this one. 
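Back to the SpackNamespace class above: the lazy loading works because __getattr__ is only consulted for attributes that are *missing*, so each submodule is imported at most once and then cached with setattr. One oddity worth noting is the super(ModuleType, self).__init__(self, namespace) call, which passes the parent class and an extra self. A standalone sketch of the same idea, with a hypothetical class name, and fetching the submodule from sys.modules to sidestep __import__ returning the top-level package:

    import sys
    from types import ModuleType

    class LazyNamespace(ModuleType):
        def __init__(self, namespace):
            super(LazyNamespace, self).__init__(namespace)
            self.__file__ = "(lazy namespace)"
            self.__path__ = []              # mark this module as a package
            self.__package__ = namespace

        def __getattr__(self, name):
            # Only called when `name` is not yet an attribute: import the
            # submodule once, cache it on self, and return it.
            submodule = self.__package__ + '.' + name
            __import__(submodule)           # side effect: loads the module
            module = sys.modules[submodule]
            setattr(self, name, module)
            return module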
--- lib/spack/spack/test/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index e092a50913..3cc7ed512b 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -23,6 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import sys +import os import llnl.util.tty as tty import nose @@ -40,13 +41,11 @@ test_names = [ 'cc', 'cmd.find', 'cmd.module', - 'cmd.test_compiler_cmd', 'cmd.test_install', 'cmd.uninstall', 'concretize', 'concretize_preferences', 'config', - 'configure_guess', 'database', 'directory_layout', 'environment', @@ -78,6 +77,8 @@ test_names = [ 'versions', 'provider_index', 'yaml', + # This test needs to be last until global compiler cache is fixed. + 'cmd.test_compiler_cmd', ] def list_tests(): -- cgit v1.2.3-70-g09d2 From 1339714eecc927e46a1336241512483dd8d11eab Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 7 Aug 2016 17:54:37 -0700 Subject: Restore text output in verbose mode. --- lib/spack/spack/test/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 3cc7ed512b..7795cb59c7 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -89,6 +89,10 @@ def list_tests(): def run(names, outputDir, verbose=False): """Run tests with the supplied names. Names should be a list. If it's empty, run ALL of Spack's tests.""" + # Print output to stdout if verbose is 1. + if verbose: + os.environ['NOSE_NOCAPTURE'] = '1' + if not names: names = test_names else: -- cgit v1.2.3-70-g09d2 From 2042e9a6d85d02adc9424ce6f973e17341ebb292 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sun, 7 Aug 2016 18:36:11 -0700 Subject: Fix bugs with sparse spec printing. - Make namespace, arch, and dependencies show up in spec yaml only if they're set. - Lost some of this functionality with deptypes --- lib/spack/spack/architecture.py | 7 +++++++ lib/spack/spack/spec.py | 14 ++++---------- lib/spack/spack/test/architecture.py | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/lib/spack/spack/architecture.py b/lib/spack/spack/architecture.py index 974505ee3a..886e170b1a 100644 --- a/lib/spack/spack/architecture.py +++ b/lib/spack/spack/architecture.py @@ -383,6 +383,13 @@ class Arch(object): def __contains__(self, string): return string in str(self) + # TODO: make this unnecessary: don't include an empty arch on *every* spec. + def __nonzero__(self): + return (self.platform is not None or + self.platform_os is not None or + self.target is not None) + __bool__ = __nonzero__ + def _cmp_key(self): if isinstance(self.platform, Platform): platform = self.platform.name diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 8a47ec95ad..a37b39be67 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -915,10 +915,7 @@ class Spec(object): if params: d['parameters'] = params - if self.architecture is not None: - d['arch'] = self.architecture - - if self.dependencies: + if self.dependencies(): deps = self.dependencies_dict(deptype=('link', 'run')) d['dependencies'] = dict( (name, { 'hash': dspec.spec.dag_hash(), 'type': [str(s) for s in dspec.deptypes]}) for name, dspec in deps.items()) - # Older concrete specs do not have a namespace. Omit for - # consistent hashing.
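The __nonzero__ hook added to Arch above is what lets the sparse-YAML code in spec.py say `if self.architecture:`: an Arch with nothing set now tests false and is simply omitted from the dict. A toy illustration of the truth protocol (a standalone class, not the real Arch):

    class Arch(object):
        def __init__(self, platform=None, platform_os=None, target=None):
            self.platform = platform
            self.platform_os = platform_os
            self.target = target

        def __nonzero__(self):              # Python 2 truth hook
            return (self.platform is not None or
                    self.platform_os is not None or
                    self.target is not None)
        __bool__ = __nonzero__              # same behavior on Python 3

    assert not Arch()                       # empty arch: left out of the YAML
    assert bool(Arch(platform='linux'))     # any field set: serialized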
- if not self.concrete or self.namespace: + if self.namespace: d['namespace'] = self.namespace if self.architecture: # TODO: Fix the target.to_dict to account for the tuple # Want it to be a dict of dicts d['arch'] = self.architecture.to_dict() - else: - d['arch'] = None if self.compiler: d.update(self.compiler.to_dict()) @@ -967,7 +960,8 @@ class Spec(object): if 'version' in node or 'versions' in node: spec.versions = VersionList.from_dict(node) - spec.architecture = spack.architecture.arch_from_dict(node['arch']) + if 'arch' in node: + spec.architecture = spack.architecture.arch_from_dict(node['arch']) if 'compiler' in node: spec.compiler = CompilerSpec.from_dict(node) diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py index 42dd9f4c04..b8441bdac4 100644 --- a/lib/spack/spack/test/architecture.py +++ b/lib/spack/spack/test/architecture.py @@ -86,6 +86,29 @@ class ArchitectureTest(MockPackagesTest): self.assertEqual(str(output_platform_class), str(my_platform_class)) + def test_boolness(self): + # Make sure architecture reports that it's False when nothing's set. + arch = spack.architecture.Arch() + self.assertFalse(arch) + + # Dummy architecture parts + plat = spack.architecture.platform() + plat_os = plat.operating_system('default_os') + plat_target = plat.target('default_target') + + # Make sure architecture reports that it's True when anything is set. + arch = spack.architecture.Arch() + arch.platform = plat + self.assertTrue(arch) + + arch = spack.architecture.Arch() + arch.platform_os = plat_os + self.assertTrue(arch) + + arch = spack.architecture.Arch() + arch.target = plat_target + self.assertTrue(arch) + def test_user_front_end_input(self): """Test when user inputs just frontend that both the frontend target and frontend operating system match -- cgit v1.2.3-70-g09d2 From 102ac7bcf1bc7fd134b10a9c54e40302d4f1345b Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 9 Aug 2016 00:24:54 -0700 Subject: Move provider cache to home directory and refactor Transactions Major stuff: - Created a FileCache for managing user cache files in Spack. Currently just handles virtuals. - Moved virtual cache from the repository to the home directory so that users do not need write access to Spack repositories to use them. - Refactored `Transaction` class in `database.py` -- moved it to `LockTransaction` in `lock.py` and made it reusable by other classes. Other additions: - Added tests for file cache and transactions. - Added a few more tests for database - Fixed bug in DB where writes could happen even if exceptions were raised during a transaction. - `spack uninstall` now attempts to repair the database when it discovers that a prefix doesn't exist but a DB record does. 
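Before diving into the diff below, it helps to recall the primitive everything rests on: advisory POSIX locks via fcntl.lockf. A minimal self-contained sketch of one acquire/release cycle (the path is made up and error handling is elided; Lock._lock wraps this in a retry loop with a timeout):

    import fcntl
    import os

    fd = os.open('/tmp/demo.lock', os.O_RDWR | os.O_CREAT)
    try:
        # LOCK_NB turns contention into an immediate IOError with
        # EAGAIN/EACCES, so a caller can poll until a timeout instead
        # of blocking forever.
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        os.write(fd, "pid=%d,host=example" % os.getpid())
    finally:
        fcntl.lockf(fd, fcntl.LOCK_UN)
        os.close(fd)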
--- lib/spack/llnl/util/lock.py | 69 ++++++++++++- lib/spack/spack/__init__.py | 9 +- lib/spack/spack/cmd/purge.py | 10 +- lib/spack/spack/cmd/test.py | 10 +- lib/spack/spack/cmd/uninstall.py | 3 +- lib/spack/spack/config.py | 2 +- lib/spack/spack/database.py | 61 +++-------- lib/spack/spack/file_cache.py | 181 +++++++++++++++++++++++++++++++++ lib/spack/spack/modules.py | 3 +- lib/spack/spack/package.py | 10 +- lib/spack/spack/repository.py | 58 +++-------- lib/spack/spack/stage.py | 5 +- lib/spack/spack/test/__init__.py | 1 + lib/spack/spack/test/database.py | 34 +++++++ lib/spack/spack/test/file_cache.py | 84 +++++++++++++++ lib/spack/spack/test/lock.py | 163 ++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/spack/spack/test/mock_database.py | 6 +- lib/spack/spack/test/provider_index.py | 4 - 18 files changed, 600 insertions(+), 113 deletions(-) create mode 100644 lib/spack/spack/file_cache.py create mode 100644 lib/spack/spack/test/file_cache.py diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index 479a1b0167..e1f5b4878a 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -28,6 +28,9 @@ import errno import time import socket +__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction', + 'LockError'] + # Default timeout in seconds, after which locks will raise exceptions. _default_timeout = 60 @@ -63,7 +66,9 @@ class Lock(object): fcntl.lockf(self._fd, op | fcntl.LOCK_NB) if op == fcntl.LOCK_EX: - os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn())) + os.write( + self._fd, + "pid=%s,host=%s" % (os.getpid(), socket.getfqdn())) return except IOError as error: @@ -170,6 +175,66 @@ class Lock(object): return False +class LockTransaction(object): + """Simple nested transaction context manager that uses a file lock. + + This class can trigger actions when the lock is acquired for the + first time and released for the last. + + If the acquire_fn returns a value, it is used as the return value for + __enter__, allowing it to be passed as the `as` argument of a `with` + statement. + + If acquire_fn returns a context manager, *its* `__enter__` function will be + called in `__enter__` after acquire_fn, and its `__exit__` function will be + called before `release_fn` in `__exit__`, allowing you to nest a context + manager to be used along with the lock. + + Timeout for lock is customizable.
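Concretely, because an open file is itself a context manager, an acquire_fn that opens a file hands that file straight through as the `with` target; this is exactly how FileCache.read_transaction is built later in this patch. A sketch against hypothetical paths:

    lock = Lock('/tmp/demo.lock')

    def acquire():
        return open('/tmp/index.yaml')   # hypothetical cache file

    # The file's __enter__/__exit__ nest inside the lock acquire/release,
    # so the file is closed before the read lock is dropped.
    with ReadTransaction(lock, acquire_fn=acquire) as f:
        data = f.read()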
+ + """ + + def __init__(self, lock, acquire_fn=None, release_fn=None, + timeout=_default_timeout): + self._lock = lock + self._timeout = timeout + self._acquire_fn = acquire_fn + self._release_fn = release_fn + self._as = None + + def __enter__(self): + if self._enter() and self._acquire_fn: + self._as = self._acquire_fn() + if hasattr(self._as, '__enter__'): + return self._as.__enter__() + else: + return self._as + + def __exit__(self, type, value, traceback): + if self._exit(): + if self._as and hasattr(self._as, '__exit__'): + self._as.__exit__(type, value, traceback) + if self._release_fn: + self._release_fn(type, value, traceback) + if value: + raise value + + +class ReadTransaction(LockTransaction): + def _enter(self): + return self._lock.acquire_read(self._timeout) + + def _exit(self): + return self._lock.release_read() + + +class WriteTransaction(LockTransaction): + def _enter(self): + return self._lock.acquire_write(self._timeout) + + def _exit(self): + return self._lock.release_write() + + class LockError(Exception): """Raised when an attempt to acquire a lock times out.""" - pass diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index d67585aac4..a6e21987c8 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -50,8 +50,15 @@ repos_path = join_path(var_path, "repos") share_path = join_path(spack_root, "share", "spack") cache_path = join_path(var_path, "cache") +# User configuration location +user_config_path = os.path.expanduser('~/.spack') + import spack.fetch_strategy -cache = spack.fetch_strategy.FsCache(cache_path) +fetch_cache = spack.fetch_strategy.FsCache(cache_path) + +from spack.file_cache import FileCache +user_cache_path = join_path(user_config_path, 'cache') +user_cache = FileCache(user_cache_path) prefix = spack_root opt_path = join_path(prefix, "opt") diff --git a/lib/spack/spack/cmd/purge.py b/lib/spack/spack/cmd/purge.py index f4e27a3969..26745810a8 100644 --- a/lib/spack/spack/cmd/purge.py +++ b/lib/spack/spack/cmd/purge.py @@ -33,7 +33,11 @@ def setup_parser(subparser): '-s', '--stage', action='store_true', default=True, help="Remove all temporary build stages (default).") subparser.add_argument( - '-c', '--cache', action='store_true', help="Remove cached downloads.") + '-d', '--downloads', action='store_true', + help="Remove cached downloads.") + subparser.add_argument( + '-u', '--user-cache', action='store_true', + help="Remove caches in user home directory. 
Includes virtual indices.") subparser.add_argument( '-a', '--all', action='store_true', help="Remove all of the above.") @@ -49,4 +53,6 @@ def purge(parser, args): if args.stage or args.all: stage.purge() if args.cache or args.all: - spack.cache.destroy() + spack.fetch_cache.destroy() + if args.user_cache or args.all: + spack.user_cache.destroy() diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py index 36810321ef..2667b42820 100644 --- a/lib/spack/spack/cmd/test.py +++ b/lib/spack/spack/cmd/test.py @@ -41,10 +41,10 @@ def setup_parser(subparser): subparser.add_argument( '-l', '--list', action='store_true', dest='list', help="Show available tests") subparser.add_argument( - '--createXmlOutput', action='store_true', dest='createXmlOutput', + '--createXmlOutput', action='store_true', dest='createXmlOutput', help="Create JUnit XML from test results") subparser.add_argument( - '--xmlOutputDir', dest='xmlOutputDir', + '--xmlOutputDir', dest='xmlOutputDir', help="Nose creates XML files in this directory") subparser.add_argument( '-v', '--verbose', action='store_true', dest='verbose', @@ -62,7 +62,7 @@ class MockCache(object): class MockCacheFetcher(object): def set_stage(self, stage): pass - + def fetch(self): raise FetchError("Mock cache always fails for tests") @@ -82,8 +82,8 @@ def test(parser, args): outputDir = join_path(os.getcwd(), "test-output") else: outputDir = os.path.abspath(args.xmlOutputDir) - + if not os.path.exists(outputDir): mkdirp(outputDir) - spack.cache = MockCache() + spack.fetch_cache = MockCache() spack.test.run(args.names, outputDir, args.verbose) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index a17b7c685c..dbe6cd6584 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -184,7 +184,8 @@ def uninstall(parser, args): uninstall_list = list(set(uninstall_list)) if has_error: - tty.die('You can use spack uninstall --dependents to uninstall these dependencies as well') # NOQA: ignore=E501 + tty.die('You can use spack uninstall --dependents ' + 'to uninstall these dependencies as well') if not args.yes_to_all: tty.msg("The following packages will be uninstalled : ") diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py index 31f0eb3a56..a4e274893c 100644 --- a/lib/spack/spack/config.py +++ b/lib/spack/spack/config.py @@ -525,7 +525,7 @@ ConfigScope('defaults', os.path.join(spack.etc_path, 'spack', 'defaults')) ConfigScope('site', os.path.join(spack.etc_path, 'spack')) """User configuration can override both spack defaults and site config.""" -ConfigScope('user', os.path.expanduser('~/.spack')) +ConfigScope('user', spack.user_config_path) def highest_precedence_scope(): diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 317b0d5784..5ce42b2e67 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -165,11 +165,11 @@ class Database(object): def write_transaction(self, timeout=_db_lock_timeout): """Get a write lock context manager for use in a `with` block.""" - return WriteTransaction(self, self._read, self._write, timeout) + return WriteTransaction(self.lock, self._read, self._write, timeout) def read_transaction(self, timeout=_db_lock_timeout): """Get a read lock context manager for use in a `with` block.""" - return ReadTransaction(self, self._read, None, timeout) + return ReadTransaction(self.lock, self._read, timeout=timeout) def _write_to_yaml(self, stream): """Write out the databsae to a YAML file. 
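The hunk just below delivers the fix promised in the commit message: _write now receives the exception triple from WriteTransaction, so a transaction that blows up releases the lock without persisting anything. A toy demonstration of that contract (hypothetical lock path; assumes the lock file is creatable):

    state = {'writes': 0}

    def write_fn(type, value, traceback):
        if type is not None:    # an exception is in flight: write nothing
            return
        state['writes'] += 1

    lock = Lock('/tmp/demo.lock')
    try:
        with WriteTransaction(lock, release_fn=write_fn):
            raise ValueError("failed mid-transaction")
    except ValueError:
        pass

    assert state['writes'] == 0   # nothing was persisted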
@@ -352,12 +352,22 @@ class Database(object): "Invalid ref_count: %s: %d (expected %d), in DB %s" % (key, found, expected, self._index_path)) - def _write(self): + def _write(self, type, value, traceback): """Write the in-memory database index to its file path. - Does no locking. + This is a helper function called by the WriteTransaction context + manager. If there is an exception while the write lock is active, + nothing will be written to the database file, but the in-memory database + *may* be left in an inconsistent state. It will be consistent after the + start of the next transaction, when it read from disk again. + + This routine does no locking. """ + # Do not write if exceptions were raised + if type is not None: + return + temp_file = self._index_path + ( '.%s.%s.temp' % (socket.getfqdn(), os.getpid())) @@ -589,49 +599,6 @@ class Database(object): return key in self._data and not self._data[key].installed -class _Transaction(object): - """Simple nested transaction context manager that uses a file lock. - - This class can trigger actions when the lock is acquired for the - first time and released for the last. - - Timeout for lock is customizable. - """ - - def __init__(self, db, - acquire_fn=None, - release_fn=None, - timeout=_db_lock_timeout): - self._db = db - self._timeout = timeout - self._acquire_fn = acquire_fn - self._release_fn = release_fn - - def __enter__(self): - if self._enter() and self._acquire_fn: - self._acquire_fn() - - def __exit__(self, type, value, traceback): - if self._exit() and self._release_fn: - self._release_fn() - - -class ReadTransaction(_Transaction): - def _enter(self): - return self._db.lock.acquire_read(self._timeout) - - def _exit(self): - return self._db.lock.release_read() - - -class WriteTransaction(_Transaction): - def _enter(self): - return self._db.lock.acquire_write(self._timeout) - - def _exit(self): - return self._db.lock.release_write() - - class CorruptDatabaseError(SpackError): def __init__(self, path, msg=''): super(CorruptDatabaseError, self).__init__( diff --git a/lib/spack/spack/file_cache.py b/lib/spack/spack/file_cache.py new file mode 100644 index 0000000000..2124df9c9c --- /dev/null +++ b/lib/spack/spack/file_cache.py @@ -0,0 +1,181 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import os +import shutil + +from llnl.util.filesystem import * +from llnl.util.lock import * + +import spack +from spack.error import SpackError + + +class FileCache(object): + """This class manages cached data in the filesystem. + + - Cache files are fetched and stored by unique keys. Keys can be relative + paths, so that thre can be some hierarchy in the cache. + + - The FileCache handles locking cache files for reading and writing, so + client code need not manage locks for cache entries. + + """ + def __init__(self, root): + """Create a file cache object. + + This will create the cache directory if it does not exist yet. + + """ + self.root = root.rstrip(os.path.sep) + if not os.path.exists(self.root): + mkdirp(self.root) + + self._locks = {} + + def purge(self): + """Remove all files under the cache root.""" + for f in os.listdir(self.root): + path = join_path(self.root, f) + shutil.rmtree(f) + + def cache_path(self, key): + """Path to the file in the cache for a particular key.""" + return join_path(self.root, key) + + def _lock_path(self, key): + """Path to the file in the cache for a particular key.""" + keyfile = os.path.basename(key) + keydir = os.path.dirname(key) + + return join_path(self.root, keydir, '.' + keyfile + '.lock') + + def _get_lock(self, key): + """Create a lock for a key, if necessary, and return a lock object.""" + if key not in self._locks: + lock_file = self._lock_path(key) + if not os.path.exists(lock_file): + touch(lock_file) + self._locks[key] = Lock(lock_file) + return self._locks[key] + + def init_entry(self, key): + """Ensure we can access a cache file. Create a lock for it if needed. + + Return whether the cache file exists yet or not. + """ + cache_path = self.cache_path(key) + + exists = os.path.exists(cache_path) + if exists: + if not os.path.isfile(cache_path): + raise CacheError("Cache file is not a file: %s" % cache_path) + + if not os.access(cache_path, os.R_OK|os.W_OK): + raise CacheError("Cannot access cache file: %s" % cache_path) + else: + # if the file is hierarchical, make parent directories + parent = os.path.dirname(cache_path) + if parent.rstrip(os.path.sep) != self.root: + mkdirp(parent) + + if not os.access(parent, os.R_OK|os.W_OK): + raise CacheError("Cannot access cache directory: %s" % parent) + + # ensure lock is created for this key + self._get_lock(key) + return exists + + def read_transaction(self, key): + """Get a read transaction on a file cache item. + + Returns a ReadTransaction context manager and opens the cache file for + reading. You can use it like this: + + with spack.user_cache.read_transaction(key) as cache_file: + cache_file.read() + + """ + return ReadTransaction( + self._get_lock(key), lambda: open(self.cache_path(key))) + + def write_transaction(self, key): + """Get a write transaction on a file cache item. + + Returns a WriteTransaction context manager that opens a temporary file + for writing. Once the context manager finishes, if nothing went wrong, + moves the file into place on top of the old file atomically. 
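In use, the two kinds of transaction pair up like this (hypothetical cache root and key; a flat key is used so no parent directory is needed):

    cache = FileCache('/tmp/demo-cache')

    with cache.write_transaction('demo.yaml') as (old, new):
        # old is None on the first write; new is a '.tmp' file that is
        # os.rename()d over the real file only if no exception occurred.
        new.write('demo: true\n')

    with cache.read_transaction('demo.yaml') as f:
        assert f.read() == 'demo: true\n'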
+ + """ + class WriteContextManager(object): + def __enter__(cm): + cm.orig_filename = self.cache_path(key) + cm.orig_file = None + if os.path.exists(cm.orig_filename): + cm.orig_file = open(cm.orig_filename, 'r') + + cm.tmp_filename = self.cache_path(key) + '.tmp' + cm.tmp_file = open(cm.tmp_filename, 'w') + + return cm.orig_file, cm.tmp_file + + def __exit__(cm, type, value, traceback): + if cm.orig_file: + cm.orig_file.close() + cm.tmp_file.close() + + if value: + # remove tmp on exception & raise it + shutil.rmtree(cm.tmp_filename, True) + raise value + else: + os.rename(cm.tmp_filename, cm.orig_filename) + + return WriteTransaction(self._get_lock(key), WriteContextManager) + + + def mtime(self, key): + """Return modification time of cache file, or 0 if it does not exist. + + Time is in units returned by os.stat in the mtime field, which is + platform-dependent. + + """ + if not self.init_entry(key): + return 0 + else: + sinfo = os.stat(self.cache_path(key)) + return sinfo.st_mtime + + + def remove(self, key): + lock = self._get_lock(key) + try: + lock.acquire_write() + os.unlink(self.cache_path(key)) + finally: + lock.release_write() + os.unlink(self._lock_path(key)) + +class CacheError(SpackError): pass diff --git a/lib/spack/spack/modules.py b/lib/spack/spack/modules.py index 8701a31c49..8ac6a77d13 100644 --- a/lib/spack/spack/modules.py +++ b/lib/spack/spack/modules.py @@ -520,7 +520,8 @@ class Dotkit(EnvModule): def prerequisite(self, spec): tty.warn('prerequisites: not supported by dotkit module files') - tty.warn('\tYou may want to check ~/.spack/modules.yaml') + tty.warn('\tYou may want to check %s/modules.yaml' + % spack.user_config_path) return '' diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 43aefbf65e..475155937c 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -1198,7 +1198,15 @@ class Package(object): def do_uninstall(self, force=False): if not self.installed: - raise InstallError(str(self.spec) + " is not installed.") + # prefix may not exist, but DB may be inconsistent. Try to fix by + # removing, but omit hooks. + specs = spack.installed_db.query(self.spec, installed=True) + if specs: + spack.installed_db.remove(specs[0]) + tty.msg("Removed stale DB entry for %s" % self.spec.short_spec) + return + else: + raise InstallError(str(self.spec) + " is not installed.") if not force: dependents = self.installed_dependents diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index 58747ba25d..a0904a2cde 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -41,6 +41,7 @@ import llnl.util.tty as tty from llnl.util.lock import Lock from llnl.util.filesystem import * +import spack import spack.error import spack.config import spack.spec @@ -414,17 +415,6 @@ class Repo(object): check(os.path.isdir(self.packages_path), "No directory '%s' found in '%s'" % (repo_config_name, root)) - self.index_file = join_path(self.root, repo_index_name) - check(not os.path.exists(self.index_file) or - (os.path.isfile(self.index_file) and os.access(self.index_file, os.R_OK|os.W_OK)), - "Cannot access repository index file in %s" % root) - - # lock file for reading/writing the index - self._lock_path = join_path(self.root, 'lock') - if not os.path.exists(self._lock_path): - touch(self._lock_path) - self._lock = Lock(self._lock_path) - # Read configuration and validate namespace config = self._read_config() check('namespace' in config, '%s must define a namespace.' 
@@ -461,6 +451,8 @@ class Repo(object): # make sure the namespace for packages in this repo exists. self._create_namespace() + # Unique filename for cache of virtual dependency providers + self._cache_file = 'providers/%s-index.yaml' % self.namespace def _create_namespace(self): """Create this repo's namespace module and insert it into sys.modules. @@ -658,21 +650,15 @@ class Repo(object): self._provider_index = ProviderIndex.from_yaml(f) # Read the old ProviderIndex, or make a new one. - index_existed = os.path.isfile(self.index_file) + key = self._cache_file + index_existed = spack.user_cache.init_entry(key) if index_existed and not self._needs_update: - self._lock.acquire_read() - try: - read() - finally: - self._lock.release_read() - + with spack.user_cache.read_transaction(key) as f: + self._provider_index = ProviderIndex.from_yaml(f) else: - tmp = self.index_file + '.tmp' - self._lock.acquire_write() - try: - if index_existed: - with open(self.index_file) as f: - self._provider_index = ProviderIndex.from_yaml(f) + with spack.user_cache.write_transaction(key) as (old, new): + if old: + self._provider_index = ProviderIndex.from_yaml(old) else: self._provider_index = ProviderIndex() @@ -681,17 +667,7 @@ class Repo(object): self._provider_index.remove_provider(namespaced_name) self._provider_index.update(namespaced_name) - - with open(tmp, 'w') as f: - self._provider_index.to_yaml(f) - os.rename(tmp, self.index_file) - - except: - shutil.rmtree(tmp, ignore_errors=True) - raise - - finally: - self._lock.release_write() + self._provider_index.to_yaml(new) @property @@ -745,7 +721,7 @@ class Repo(object): def _fast_package_check(self): - """List packages in the repo and cehck whether index is up to date. + """List packages in the repo and check whether index is up to date. Both of these opreations require checking all `package.py` files so we do them at the same time. We list the repo @@ -763,10 +739,7 @@ class Repo(object): self._all_package_names = [] # Get index modification time. - index_mtime = 0 - if os.path.exists(self.index_file): - sinfo = os.stat(self.index_file) - index_mtime = sinfo.st_mtime + index_mtime = spack.user_cache.mtime(self._cache_file) for pkg_name in os.listdir(self.packages_path): # Skip non-directories in the package root. @@ -774,8 +747,9 @@ class Repo(object): # Warn about invalid names that look like packages. if not valid_module_name(pkg_name): - tty.warn("Skipping package at %s. '%s' is not a valid Spack module name." - % (pkg_dir, pkg_name)) + msg = ("Skipping package at %s. 
" + "'%s' is not a valid Spack module name.") + tty.warn(msg % (pkg_dir, pkg_name)) continue # construct the file name from the directory diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py index 8fcc331482..7676cb9ab6 100644 --- a/lib/spack/spack/stage.py +++ b/lib/spack/spack/stage.py @@ -315,7 +315,8 @@ class Stage(object): # Add URL strategies for all the mirrors with the digest for url in urls: fetchers.insert(0, fs.URLFetchStrategy(url, digest)) - fetchers.insert(0, spack.cache.fetcher(self.mirror_path, digest)) + fetchers.insert(0, spack.fetch_cache.fetcher(self.mirror_path, + digest)) # Look for the archive in list_url package_name = os.path.dirname(self.mirror_path) @@ -365,7 +366,7 @@ class Stage(object): self.fetcher.check() def cache_local(self): - spack.cache.store(self.fetcher, self.mirror_path) + spack.fetch_cache.store(self.fetcher, self.mirror_path) def expand_archive(self): """Changes to the stage directory and attempt to expand the downloaded diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 7795cb59c7..4969081e63 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -49,6 +49,7 @@ test_names = [ 'database', 'directory_layout', 'environment', + 'file_cache', 'git_fetch', 'hg_fetch', 'install', diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index e1322f2081..a2f09450bc 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -273,3 +273,37 @@ class DatabaseTest(MockDatabase): # mpich ref count updated properly. mpich_rec = self.installed_db.get_record('mpich') self.assertEqual(mpich_rec.ref_count, 0) + + def test_100_no_write_with_exception_on_remove(self): + def fail_while_writing(): + with self.installed_db.write_transaction(): + self._mock_remove('mpileaks ^zmpi') + raise Exception() + + with self.installed_db.read_transaction(): + self.assertEqual( + len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1) + + self.assertRaises(Exception, fail_while_writing) + + # reload DB and make sure zmpi is still there. + with self.installed_db.read_transaction(): + self.assertEqual( + len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1) + + def test_110_no_write_with_exception_on_install(self): + def fail_while_writing(): + with self.installed_db.write_transaction(): + self._mock_install('cmake') + raise Exception() + + with self.installed_db.read_transaction(): + self.assertEqual( + self.installed_db.query('cmake', installed=any), []) + + self.assertRaises(Exception, fail_while_writing) + + # reload DB and make sure cmake was not written. + with self.installed_db.read_transaction(): + self.assertEqual( + self.installed_db.query('cmake', installed=any), []) diff --git a/lib/spack/spack/test/file_cache.py b/lib/spack/spack/test/file_cache.py new file mode 100644 index 0000000000..6142b135eb --- /dev/null +++ b/lib/spack/spack/test/file_cache.py @@ -0,0 +1,84 @@ +############################################################################## +# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://github.com/llnl/spack +# Please also see the LICENSE file for our notice and the LGPL. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License (as +# published by the Free Software Foundation) version 2.1, February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +Test Spack's FileCache. +""" +import os +import shutil +import tempfile +import unittest + +import spack +from spack.file_cache import FileCache + + +class FileCacheTest(unittest.TestCase): + """Ensure that a file cache can properly write to a file and recover its + contents.""" + + def setUp(self): + self.scratch_dir = tempfile.mkdtemp() + self.cache = FileCache(self.scratch_dir) + + def tearDown(self): + shutil.rmtree(self.scratch_dir) + + def test_write_and_read_cache_file(self): + """Test writing then reading a cached file.""" + with self.cache.write_transaction('test.yaml') as (old, new): + self.assertTrue(old is None) + self.assertTrue(new is not None) + new.write("foobar\n") + + with self.cache.read_transaction('test.yaml') as stream: + text = stream.read() + self.assertEqual("foobar\n", text) + + def test_remove(self): + """Test removing an entry from the cache.""" + self.test_write_and_write_cache_file() + + self.cache.remove('test.yaml') + + self.assertFalse(os.path.exists(self.cache.cache_path('test.yaml'))) + self.assertFalse(os.path.exists(self.cache._lock_path('test.yaml'))) + + def test_write_and_write_cache_file(self): + """Test two write transactions on a cached file.""" + with self.cache.write_transaction('test.yaml') as (old, new): + self.assertTrue(old is None) + self.assertTrue(new is not None) + new.write("foobar\n") + + with self.cache.write_transaction('test.yaml') as (old, new): + self.assertTrue(old is not None) + text = old.read() + self.assertEqual("foobar\n", text) + self.assertTrue(new is not None) + new.write("barbaz\n") + + with self.cache.read_transaction('test.yaml') as stream: + text = stream.read() + self.assertEqual("barbaz\n", text) diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py index 0e9f6daf4d..aaf573241b 100644 --- a/lib/spack/spack/test/lock.py +++ b/lib/spack/spack/test/lock.py @@ -187,7 +187,6 @@ class LockTest(unittest.TestCase): barrier.wait() # ---------------------------------------- 13 lock.release_read() - def p2(barrier): lock = Lock(self.lock_path) @@ -224,7 +223,6 @@ class LockTest(unittest.TestCase): barrier.wait() # ---------------------------------------- 13 lock.release_read() - def p3(barrier): lock = Lock(self.lock_path) @@ -262,3 +260,164 @@ class LockTest(unittest.TestCase): lock.release_read() self.multiproc_test(p1, p2, p3) + + def test_transaction(self): + def enter_fn(): + vals['entered'] = True + + def exit_fn(t, v, tb): + vals['exited'] = True + vals['exception'] = (t or v or tb) + + lock = Lock(self.lock_path) + vals = {'entered': False, 'exited': False, 'exception': False } + with ReadTransaction(lock, enter_fn, exit_fn): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + 
self.assertFalse(vals['exception']) + + vals = {'entered': False, 'exited': False, 'exception': False } + with WriteTransaction(lock, enter_fn, exit_fn): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertFalse(vals['exception']) + + def test_transaction_with_exception(self): + def enter_fn(): + vals['entered'] = True + + def exit_fn(t, v, tb): + vals['exited'] = True + vals['exception'] = (t or v or tb) + + lock = Lock(self.lock_path) + + def do_read_with_exception(): + with ReadTransaction(lock, enter_fn, exit_fn): + raise Exception() + + def do_write_with_exception(): + with WriteTransaction(lock, enter_fn, exit_fn): + raise Exception() + + vals = {'entered': False, 'exited': False, 'exception': False } + self.assertRaises(Exception, do_read_with_exception) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + + vals = {'entered': False, 'exited': False, 'exception': False } + self.assertRaises(Exception, do_write_with_exception) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + + def test_transaction_with_context_manager(self): + class TestContextManager(object): + def __enter__(self): + vals['entered'] = True + + def __exit__(self, t, v, tb): + vals['exited'] = True + vals['exception'] = (t or v or tb) + + def exit_fn(t, v, tb): + vals['exited_fn'] = True + vals['exception_fn'] = (t or v or tb) + + lock = Lock(self.lock_path) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + with ReadTransaction(lock, TestContextManager, exit_fn): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertFalse(vals['exception']) + self.assertTrue(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + with ReadTransaction(lock, TestContextManager): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertFalse(vals['exception']) + self.assertFalse(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + with WriteTransaction(lock, TestContextManager, exit_fn): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertFalse(vals['exception']) + self.assertTrue(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + with WriteTransaction(lock, TestContextManager): pass + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertFalse(vals['exception']) + self.assertFalse(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) + + def test_transaction_with_context_manager_and_exception(self): + class TestContextManager(object): + def __enter__(self): + vals['entered'] = True + + def __exit__(self, t, v, tb): + vals['exited'] = True + vals['exception'] = (t or v or tb) + + def exit_fn(t, v, tb): + vals['exited_fn'] = True + vals['exception_fn'] = (t or v or tb) + + lock = Lock(self.lock_path) + + def do_read_with_exception(exit_fn): + with ReadTransaction(lock, TestContextManager, exit_fn): + raise Exception() + + def do_write_with_exception(exit_fn): + with WriteTransaction(lock, TestContextManager, exit_fn): + raise Exception() + + 
vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + self.assertRaises(Exception, do_read_with_exception, exit_fn) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + self.assertTrue(vals['exited_fn']) + self.assertTrue(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + self.assertRaises(Exception, do_read_with_exception, None) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + self.assertFalse(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + self.assertRaises(Exception, do_write_with_exception, exit_fn) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + self.assertTrue(vals['exited_fn']) + self.assertTrue(vals['exception_fn']) + + vals = {'entered': False, 'exited': False, 'exited_fn': False, + 'exception': False, 'exception_fn': False } + self.assertRaises(Exception, do_write_with_exception, None) + self.assertTrue(vals['entered']) + self.assertTrue(vals['exited']) + self.assertTrue(vals['exception']) + self.assertFalse(vals['exited_fn']) + self.assertFalse(vals['exception_fn']) diff --git a/lib/spack/spack/test/mock_database.py b/lib/spack/spack/test/mock_database.py index b1194f2451..da01e82bfa 100644 --- a/lib/spack/spack/test/mock_database.py +++ b/lib/spack/spack/test/mock_database.py @@ -95,8 +95,10 @@ class MockDatabase(MockPackagesTest): self._mock_install('mpileaks ^zmpi') def tearDown(self): - for spec in spack.installed_db.query(): - spec.package.do_uninstall(spec) + with spack.installed_db.write_transaction(): + for spec in spack.installed_db.query(): + spec.package.do_uninstall(spec) + super(MockDatabase, self).tearDown() shutil.rmtree(self.install_path) spack.install_path = self.spack_install_path diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py index 7d5f997b0a..861814e0ae 100644 --- a/lib/spack/spack/test/provider_index.py +++ b/lib/spack/spack/test/provider_index.py @@ -94,7 +94,3 @@ class ProviderIndexTest(MockPackagesTest): p = ProviderIndex(spack.repo.all_package_names()) q = p.copy() self.assertEqual(p, q) - - - def test_copy(self): - pass -- cgit v1.2.3-70-g09d2 From 0c75c13cc0b26e10dd4c06cca24d597a18230f8c Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 9 Aug 2016 01:37:19 -0700 Subject: Flake8 fixes --- lib/spack/llnl/util/lock.py | 18 ++-- lib/spack/spack/__init__.py | 22 ++-- lib/spack/spack/cmd/purge.py | 4 +- lib/spack/spack/cmd/test.py | 9 +- lib/spack/spack/database.py | 6 +- lib/spack/spack/file_cache.py | 18 ++-- lib/spack/spack/package.py | 1 + lib/spack/spack/provider_index.py | 32 ++---- lib/spack/spack/repository.py | 110 +++++++------------ lib/spack/spack/test/__init__.py | 1 + lib/spack/spack/test/concretize.py | 102 ++++++++---------- lib/spack/spack/test/database.py | 74 ++++++------- lib/spack/spack/test/file_cache.py | 1 - lib/spack/spack/test/lock.py | 187 ++++++++++++++++++--------------- lib/spack/spack/test/provider_index.py | 33 +++--- lib/spack/spack/test/spec_yaml.py | 6 +- 16 files changed, 294 insertions(+), 330 deletions(-) diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index e1f5b4878a..bef20025ba 100644 --- 
a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -39,13 +39,20 @@ _sleep_time = 1e-5 class Lock(object): - def __init__(self,file_path): + """This is an implementation of a filesystem lock using Python's lockf. + + In Python, `lockf` actually calls `fcntl`, so this should work with any + filesystem implementation that supports locking through the fcntl calls. + This includes distributed filesystems like Lustre (when flock is enabled) + and recent NFS versions. + + """ + def __init__(self, file_path): self._file_path = file_path self._fd = None self._reads = 0 self._writes = 0 - def _lock(self, op, timeout): """This takes a lock using POSIX locks (``fnctl.lockf``). @@ -80,7 +87,6 @@ class Lock(object): raise LockError("Timed out waiting for lock.") - def _unlock(self): """Releases a lock using POSIX locks (``fcntl.lockf``) @@ -88,11 +94,10 @@ class Lock(object): be masquerading as write locks, but this removes either. """ - fcntl.lockf(self._fd,fcntl.LOCK_UN) + fcntl.lockf(self._fd, fcntl.LOCK_UN) os.close(self._fd) self._fd = None - def acquire_read(self, timeout=_default_timeout): """Acquires a recursive, shared lock for reading. @@ -112,7 +117,6 @@ class Lock(object): self._reads += 1 return False - def acquire_write(self, timeout=_default_timeout): """Acquires a recursive, exclusive lock for writing. @@ -132,7 +136,6 @@ class Lock(object): self._writes += 1 return False - def release_read(self): """Releases a read lock. @@ -153,7 +156,6 @@ class Lock(object): self._reads -= 1 return False - def release_write(self): """Releases a write lock. diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index a6e21987c8..3d508d0fde 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -1,3 +1,4 @@ +# flake8: noqa ############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. @@ -147,7 +148,7 @@ _tmp_user = getpass.getuser() _tmp_candidates = (_default_tmp, '/nfs/tmp2', '/tmp', '/var/tmp') for path in _tmp_candidates: # don't add a second username if it's already unique by user. - if not _tmp_user in path: + if _tmp_user not in path: tmp_dirs.append(join_path(path, '%u', 'spack-stage')) else: tmp_dirs.append(join_path(path, 'spack-stage')) @@ -179,12 +180,13 @@ sys_type = None # Spack internal code should call 'import spack' and accesses other # variables (spack.repo, paths, etc.) directly. # -# TODO: maybe this should be separated out and should go in build_environment.py? -# TODO: it's not clear where all the stuff that needs to be included in packages -# should live. This file is overloaded for spack core vs. for packages. +# TODO: maybe this should be separated out to build_environment.py? +# TODO: it's not clear where all the stuff that needs to be included in +# packages should live. This file is overloaded for spack core vs. +# for packages. 
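One behavior of the Lock class above that the transaction machinery depends on: acquire and release are recursive by counting, and the boolean they return says whether this particular call touched the real POSIX lock. Roughly (hypothetical path):

    lock = Lock('/tmp/demo.lock')

    assert lock.acquire_read() is True    # outermost: lockf() is taken
    assert lock.acquire_read() is False   # nested: only a counter bump
    assert lock.release_read() is False   # still held by the outer level
    assert lock.release_read() is True    # last out: lockf() released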
# -__all__ = ['Package', 'StagedPackage', 'CMakePackage', \ - 'Version', 'when', 'ver', 'alldeps', 'nolink'] +__all__ = ['Package', 'StagedPackage', 'CMakePackage', + 'Version', 'when', 'ver', 'alldeps', 'nolink'] from spack.package import Package, ExtensionConflictError from spack.package import StagedPackage, CMakePackage from spack.version import Version, ver @@ -204,8 +206,8 @@ from spack.util.executable import * __all__ += spack.util.executable.__all__ from spack.package import \ - install_dependency_symlinks, flatten_dependencies, DependencyConflictError, \ - InstallError, ExternalPackageError + install_dependency_symlinks, flatten_dependencies, \ + DependencyConflictError, InstallError, ExternalPackageError __all__ += [ - 'install_dependency_symlinks', 'flatten_dependencies', 'DependencyConflictError', - 'InstallError', 'ExternalPackageError'] + 'install_dependency_symlinks', 'flatten_dependencies', + 'DependencyConflictError', 'InstallError', 'ExternalPackageError'] diff --git a/lib/spack/spack/cmd/purge.py b/lib/spack/spack/cmd/purge.py index 26745810a8..26d160635c 100644 --- a/lib/spack/spack/cmd/purge.py +++ b/lib/spack/spack/cmd/purge.py @@ -45,14 +45,14 @@ def setup_parser(subparser): def purge(parser, args): # Special case: no flags. - if not any((args.stage, args.cache, args.all)): + if not any((args.stage, args.downloads, args.user_cache, args.all)): stage.purge() return # handle other flags with fall through. if args.stage or args.all: stage.purge() - if args.cache or args.all: + if args.downloads or args.all: spack.fetch_cache.destroy() if args.user_cache or args.all: spack.user_cache.destroy() diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py index 2667b42820..b9f2a449ae 100644 --- a/lib/spack/spack/cmd/test.py +++ b/lib/spack/spack/cmd/test.py @@ -23,23 +23,23 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -from pprint import pprint from llnl.util.filesystem import join_path, mkdirp from llnl.util.tty.colify import colify -from llnl.util.lang import list_modules import spack import spack.test from spack.fetch_strategy import FetchError -description ="Run unit tests" +description = "Run unit tests" + def setup_parser(subparser): subparser.add_argument( 'names', nargs='*', help="Names of tests to run.") subparser.add_argument( - '-l', '--list', action='store_true', dest='list', help="Show available tests") + '-l', '--list', action='store_true', dest='list', + help="Show available tests") subparser.add_argument( '--createXmlOutput', action='store_true', dest='createXmlOutput', help="Create JUnit XML from test results") @@ -69,6 +69,7 @@ class MockCacheFetcher(object): def __str__(self): return "[mock fetcher]" + def test(parser, args): if args.list: print "Available tests:" diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 5ce42b2e67..16814429dc 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -357,9 +357,9 @@ class Database(object): This is a helper function called by the WriteTransaction context manager. If there is an exception while the write lock is active, - nothing will be written to the database file, but the in-memory database - *may* be left in an inconsistent state. It will be consistent after the - start of the next transaction, when it read from disk again. 
+ nothing will be written to the database file, but the in-memory + database *may* be left in an inconsistent state. It will be consistent + after the start of the next transaction, when it read from disk again. This routine does no locking. diff --git a/lib/spack/spack/file_cache.py b/lib/spack/spack/file_cache.py index 2124df9c9c..fb9ccf46b8 100644 --- a/lib/spack/spack/file_cache.py +++ b/lib/spack/spack/file_cache.py @@ -28,7 +28,6 @@ import shutil from llnl.util.filesystem import * from llnl.util.lock import * -import spack from spack.error import SpackError @@ -54,11 +53,14 @@ class FileCache(object): self._locks = {} - def purge(self): + def destroy(self): """Remove all files under the cache root.""" for f in os.listdir(self.root): path = join_path(self.root, f) - shutil.rmtree(f) + if os.path.isdir(path): + shutil.rmtree(path, True) + else: + os.remove(path) def cache_path(self, key): """Path to the file in the cache for a particular key.""" @@ -92,7 +94,7 @@ class FileCache(object): if not os.path.isfile(cache_path): raise CacheError("Cache file is not a file: %s" % cache_path) - if not os.access(cache_path, os.R_OK|os.W_OK): + if not os.access(cache_path, os.R_OK | os.W_OK): raise CacheError("Cannot access cache file: %s" % cache_path) else: # if the file is hierarchical, make parent directories @@ -100,7 +102,7 @@ class FileCache(object): if parent.rstrip(os.path.sep) != self.root: mkdirp(parent) - if not os.access(parent, os.R_OK|os.W_OK): + if not os.access(parent, os.R_OK | os.W_OK): raise CacheError("Cannot access cache directory: %s" % parent) # ensure lock is created for this key @@ -154,7 +156,6 @@ class FileCache(object): return WriteTransaction(self._get_lock(key), WriteContextManager) - def mtime(self, key): """Return modification time of cache file, or 0 if it does not exist. @@ -168,7 +169,6 @@ class FileCache(object): sinfo = os.stat(self.cache_path(key)) return sinfo.st_mtime - def remove(self, key): lock = self._get_lock(key) try: @@ -178,4 +178,6 @@ class FileCache(object): lock.release_write() os.unlink(self._lock_path(key)) -class CacheError(SpackError): pass + +class CacheError(SpackError): + pass diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 475155937c..25e07541d0 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -1416,6 +1416,7 @@ def use_cray_compiler_names(): os.environ['FC'] = 'ftn' os.environ['F77'] = 'ftn' + def flatten_dependencies(spec, flat_dir): """Make each dependency of spec present in dir via symlink.""" for dep in spec.traverse(root=False): diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py index ecdc25c4d2..b5fbb67c6e 100644 --- a/lib/spack/spack/provider_index.py +++ b/lib/spack/spack/provider_index.py @@ -25,7 +25,7 @@ """ The ``virtual`` module contains utility classes for virtual dependencies. """ -import itertools +from itertools import product as iproduct from pprint import pformat import yaml @@ -52,8 +52,6 @@ class ProviderIndex(object): matching implementation of MPI. """ - - def __init__(self, specs=None, restrict=False): """Create a new ProviderIndex. @@ -71,7 +69,8 @@ class ProviderIndex(object): as possible without overly restricting results, so it is not the best name. 
""" - if specs is None: specs = [] + if specs is None: + specs = [] self.restrict = restrict self.providers = {} @@ -85,7 +84,6 @@ class ProviderIndex(object): self.update(spec) - def update(self, spec): if not isinstance(spec, spack.spec.Spec): spec = spack.spec.Spec(spec) @@ -104,7 +102,7 @@ class ProviderIndex(object): provided_name = provided_spec.name provider_map = self.providers.setdefault(provided_name, {}) - if not provided_spec in provider_map: + if provided_spec not in provider_map: provider_map[provided_spec] = set() if self.restrict: @@ -126,7 +124,6 @@ class ProviderIndex(object): constrained.constrain(provider_spec) provider_map[provided_spec].add(constrained) - def providers_for(self, *vpkg_specs): """Gives specs of all packages that provide virtual packages with the supplied specs.""" @@ -138,26 +135,25 @@ class ProviderIndex(object): # Add all the providers that satisfy the vpkg spec. if vspec.name in self.providers: - for provider_spec, spec_set in self.providers[vspec.name].items(): - if provider_spec.satisfies(vspec, deps=False): + for p_spec, spec_set in self.providers[vspec.name].items(): + if p_spec.satisfies(vspec, deps=False): providers.update(spec_set) # Return providers in order return sorted(providers) - # TODO: this is pretty darned nasty, and inefficient, but there # are not that many vdeps in most specs. def _cross_provider_maps(self, lmap, rmap): result = {} - for lspec, rspec in itertools.product(lmap, rmap): + for lspec, rspec in iproduct(lmap, rmap): try: constrained = lspec.constrained(rspec) except spack.spec.UnsatisfiableSpecError: continue # lp and rp are left and right provider specs. - for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]): + for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]): if lp_spec.name == rp_spec.name: try: const = lp_spec.constrained(rp_spec, deps=False) @@ -166,12 +162,10 @@ class ProviderIndex(object): continue return result - def __contains__(self, name): """Whether a particular vpkg name is in the index.""" return name in self.providers - def satisfies(self, other): """Check that providers of virtual specs are compatible.""" common = set(self.providers) & set(other.providers) @@ -189,7 +183,6 @@ class ProviderIndex(object): return all(c in result for c in common) - def to_yaml(self, stream=None): provider_list = self._transform( lambda vpkg, pset: [ @@ -198,7 +191,6 @@ class ProviderIndex(object): yaml.dump({'provider_index': {'providers': provider_list}}, stream=stream) - @staticmethod def from_yaml(stream): try: @@ -211,7 +203,7 @@ class ProviderIndex(object): raise spack.spec.SpackYAMLError( "YAML ProviderIndex was not a dict.") - if not 'provider_index' in yfile: + if 'provider_index' not in yfile: raise spack.spec.SpackYAMLError( "YAML ProviderIndex does not start with 'provider_index'") @@ -224,7 +216,6 @@ class ProviderIndex(object): set(spack.spec.Spec.from_node_dict(p) for p in plist))) return index - def merge(self, other): """Merge `other` ProviderIndex into this one.""" other = other.copy() # defensive copy. 
@@ -242,7 +233,6 @@ class ProviderIndex(object): spdict[provided_spec] += opdict[provided_spec] - def remove_provider(self, pkg_name): """Remove a provider from the ProviderIndex.""" empty_pkg_dict = [] @@ -264,7 +254,6 @@ class ProviderIndex(object): for pkg in empty_pkg_dict: del self.providers[pkg] - def copy(self): """Deep copy of this ProviderIndex.""" clone = ProviderIndex() @@ -272,15 +261,12 @@ class ProviderIndex(object): lambda vpkg, pset: (vpkg, set((p.copy() for p in pset)))) return clone - def __eq__(self, other): return self.providers == other.providers - def _transform(self, transform_fun, out_mapping_type=dict): return _transform(self.providers, transform_fun, out_mapping_type) - def __str__(self): return pformat( _transform(self.providers, diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py index a0904a2cde..eada10f7cb 100644 --- a/lib/spack/spack/repository.py +++ b/lib/spack/spack/repository.py @@ -38,7 +38,6 @@ from types import ModuleType import yaml import llnl.util.tty as tty -from llnl.util.lock import Lock from llnl.util.filesystem import * import spack @@ -142,7 +141,6 @@ class RepoPath(object): "To remove the bad repository, run this command:", " spack repo rm %s" % root) - def swap(self, other): """Convenience function to make swapping repostiories easier. @@ -160,7 +158,6 @@ class RepoPath(object): setattr(self, attr, getattr(other, attr)) setattr(other, attr, tmp) - def _add(self, repo): """Add a repository to the namespace and path indexes. @@ -174,31 +171,28 @@ class RepoPath(object): if repo.namespace in self.by_namespace: raise DuplicateRepoError( "Package repos '%s' and '%s' both provide namespace %s" - % (repo.root, self.by_namespace[repo.namespace].root, repo.namespace)) + % (repo.root, self.by_namespace[repo.namespace].root, + repo.namespace)) # Add repo to the pkg indexes self.by_namespace[repo.full_namespace] = repo self.by_path[repo.root] = repo - def put_first(self, repo): """Add repo first in the search path.""" self._add(repo) self.repos.insert(0, repo) - def put_last(self, repo): """Add repo last in the search path.""" self._add(repo) self.repos.append(repo) - def remove(self, repo): """Remove a repo from the search path.""" if repo in self.repos: self.repos.remove(repo) - def get_repo(self, namespace, default=NOT_PROVIDED): """Get a repository by namespace. 
Arguments @@ -218,12 +212,10 @@ class RepoPath(object): return default return self.by_namespace[fullspace] - def first_repo(self): """Get the first repo in precedence order.""" return self.repos[0] if self.repos else None - def all_package_names(self): """Return all unique package names in all repositories.""" if self._all_package_names is None: @@ -231,15 +223,13 @@ class RepoPath(object): for repo in self.repos: for name in repo.all_package_names(): all_pkgs.add(name) - self._all_package_names = sorted(all_pkgs, key=lambda n:n.lower()) + self._all_package_names = sorted(all_pkgs, key=lambda n: n.lower()) return self._all_package_names - def all_packages(self): for name in self.all_package_names(): yield self.get(name) - @property def provider_index(self): """Merged ProviderIndex from all Repos in the RepoPath.""" @@ -250,7 +240,6 @@ class RepoPath(object): return self._provider_index - @_autospec def providers_for(self, vpkg_spec): providers = self.provider_index.providers_for(vpkg_spec) @@ -258,12 +247,10 @@ class RepoPath(object): raise UnknownPackageError(vpkg_spec.name) return providers - @_autospec def extensions_for(self, extendee_spec): return [p for p in self.all_packages() if p.extends(extendee_spec)] - def find_module(self, fullname, path=None): """Implements precedence for overlaid namespaces. @@ -290,7 +277,6 @@ class RepoPath(object): return None - def load_module(self, fullname): """Handles loading container namespaces when necessary. @@ -307,7 +293,6 @@ class RepoPath(object): sys.modules[fullname] = module return module - @_autospec def repo_for_pkg(self, spec): """Given a spec, get the repository for its package.""" @@ -329,7 +314,6 @@ class RepoPath(object): # that can operate on packages that don't exist yet. return self.first_repo() - @_autospec def get(self, spec, new=False): """Find a repo that contains the supplied spec's package. @@ -338,12 +322,10 @@ class RepoPath(object): """ return self.repo_for_pkg(spec).get(spec) - def get_pkg_class(self, pkg_name): """Find a class for the spec's package and return the class object.""" return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name) - @_autospec def dump_provenance(self, spec, path): """Dump provenance information for a spec to a particular path. @@ -353,24 +335,19 @@ class RepoPath(object): """ return self.repo_for_pkg(spec).dump_provenance(spec, path) - def dirname_for_package_name(self, pkg_name): return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name) - def filename_for_package_name(self, pkg_name): return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name) - def exists(self, pkg_name): return any(repo.exists(pkg_name) for repo in self.repos) - def __contains__(self, pkg_name): return self.exists(pkg_name) - class Repo(object): """Class representing a package repository in the filesystem. @@ -404,7 +381,8 @@ class Repo(object): # check and raise BadRepoError on fail. def check(condition, msg): - if not condition: raise BadRepoError(msg) + if not condition: + raise BadRepoError(msg) # Validate repository layout. self.config_file = join_path(self.root, repo_config_name) @@ -422,12 +400,14 @@ class Repo(object): self.namespace = config['namespace'] check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace), - ("Invalid namespace '%s' in repo '%s'. " % (self.namespace, self.root)) + + ("Invalid namespace '%s' in repo '%s'. 
" + % (self.namespace, self.root)) + "Namespaces must be valid python identifiers separated by '.'") # Set up 'full_namespace' to include the super-namespace if self.super_namespace: - self.full_namespace = "%s.%s" % (self.super_namespace, self.namespace) + self.full_namespace = "%s.%s" % ( + self.super_namespace, self.namespace) else: self.full_namespace = self.namespace @@ -462,10 +442,10 @@ class Repo(object): """ parent = None - for l in range(1, len(self._names)+1): + for l in range(1, len(self._names) + 1): ns = '.'.join(self._names[:l]) - if not ns in sys.modules: + if ns not in sys.modules: module = SpackNamespace(ns) module.__loader__ = self sys.modules[ns] = module @@ -476,7 +456,7 @@ class Repo(object): # This ensures that we can do things like: # import spack.pkg.builtin.mpich as mpich if parent: - modname = self._names[l-1] + modname = self._names[l - 1] setattr(parent, modname, module) else: # no need to set up a module @@ -485,7 +465,6 @@ class Repo(object): # but keep track of the parent in this loop parent = module - def real_name(self, import_name): """Allow users to import Spack packages using Python identifiers. @@ -511,13 +490,11 @@ class Repo(object): return name return None - def is_prefix(self, fullname): """True if fullname is a prefix of this Repo's namespace.""" parts = fullname.split('.') return self._names[:len(parts)] == parts - def find_module(self, fullname, path=None): """Python find_module import hook. @@ -533,7 +510,6 @@ class Repo(object): return None - def load_module(self, fullname): """Python importer load hook. @@ -565,7 +541,6 @@ class Repo(object): return module - def _read_config(self): """Check for a YAML config file in this db's root directory.""" try: @@ -573,40 +548,39 @@ class Repo(object): yaml_data = yaml.load(reponame_file) if (not yaml_data or 'repo' not in yaml_data or - not isinstance(yaml_data['repo'], dict)): - tty.die("Invalid %s in repository %s" - % (repo_config_name, self.root)) + not isinstance(yaml_data['repo'], dict)): + tty.die("Invalid %s in repository %s" % ( + repo_config_name, self.root)) return yaml_data['repo'] - except exceptions.IOError, e: + except exceptions.IOError: tty.die("Error reading %s when opening %s" % (self.config_file, self.root)) - @_autospec def get(self, spec, new=False): if spec.virtual: raise UnknownPackageError(spec.name) if spec.namespace and spec.namespace != self.namespace: - raise UnknownPackageError("Repository %s does not contain package %s" - % (self.namespace, spec.fullname)) + raise UnknownPackageError( + "Repository %s does not contain package %s" + % (self.namespace, spec.fullname)) key = hash(spec) if new or key not in self._instances: package_class = self.get_pkg_class(spec.name) try: - copy = spec.copy() # defensive copy. Package owns its spec. + copy = spec.copy() # defensive copy. Package owns its spec. self._instances[key] = package_class(copy) - except Exception, e: + except Exception: if spack.debug: sys.excepthook(*sys.exc_info()) raise FailedConstructorError(spec.fullname, *sys.exc_info()) return self._instances[key] - @_autospec def dump_provenance(self, spec, path): """Dump provenance information for a spec to a particular path. @@ -619,8 +593,9 @@ class Repo(object): raise UnknownPackageError(spec.name) if spec.namespace and spec.namespace != self.namespace: - raise UnknownPackageError("Repository %s does not contain package %s." - % (self.namespace, spec.fullname)) + raise UnknownPackageError( + "Repository %s does not contain package %s." 
+ % (self.namespace, spec.fullname)) # Install any patch files needed by packages. mkdirp(path) @@ -635,12 +610,10 @@ class Repo(object): # Install the package.py file itself. install(self.filename_for_package_name(spec), path) - def purge(self): """Clear entire package instance cache.""" self._instances.clear() - def _update_provider_index(self): # Check modification dates of all packages self._fast_package_check() @@ -669,7 +642,6 @@ class Repo(object): self._provider_index.to_yaml(new) - @property def provider_index(self): """A provider index with names *specific* to this repo.""" @@ -677,7 +649,6 @@ class Repo(object): self._update_provider_index() return self._provider_index - @_autospec def providers_for(self, vpkg_spec): providers = self.provider_index.providers_for(vpkg_spec) @@ -685,18 +656,15 @@ class Repo(object): raise UnknownPackageError(vpkg_spec.name) return providers - @_autospec def extensions_for(self, extendee_spec): return [p for p in self.all_packages() if p.extends(extendee_spec)] - def _check_namespace(self, spec): """Check that the spec's namespace is the same as this repository's.""" if spec.namespace and spec.namespace != self.namespace: raise UnknownNamespaceError(spec.namespace) - @_autospec def dirname_for_package_name(self, spec): """Get the directory name for a particular package. This is the @@ -704,7 +672,6 @@ class Repo(object): self._check_namespace(spec) return join_path(self.packages_path, spec.name) - @_autospec def filename_for_package_name(self, spec): """Get the filename for the module we should load for a particular @@ -719,7 +686,6 @@ class Repo(object): pkg_dir = self.dirname_for_package_name(spec.name) return join_path(pkg_dir, package_file_name) - def _fast_package_check(self): """List packages in the repo and check whether index is up to date. @@ -783,13 +749,11 @@ class Repo(object): return self._all_package_names - def all_package_names(self): """Returns a sorted list of all package names in the Repo.""" self._fast_package_check() return self._all_package_names - def all_packages(self): """Iterator over all packages in the repository. @@ -799,7 +763,6 @@ class Repo(object): for name in self.all_package_names(): yield self.get(name) - def exists(self, pkg_name): """Whether a package with the supplied name exists.""" if self._all_package_names: @@ -813,7 +776,6 @@ class Repo(object): filename = self.filename_for_package_name(pkg_name) return os.path.exists(filename) - def _get_pkg_module(self, pkg_name): """Create a module for a particular package. @@ -845,7 +807,6 @@ class Repo(object): return self._modules[pkg_name] - def get_pkg_class(self, pkg_name): """Get the class for the package out of its module. @@ -853,7 +814,6 @@ class Repo(object): package. Then extracts the package class from the module according to Spack's naming convention. """ - fullname = pkg_name namespace, _, pkg_name = pkg_name.rpartition('.') if namespace and (namespace != self.namespace): raise InvalidNamespaceError('Invalid namespace for %s repo: %s' @@ -868,15 +828,12 @@ class Repo(object): return cls - def __str__(self): return "[Repo '%s' at '%s']" % (self.namespace, self.root) - def __repr__(self): return self.__str__() - def __contains__(self, pkg_name): return self.exists(pkg_name) @@ -885,30 +842,37 @@ def create_repo(root, namespace=None): """Create a new repository in root with the specified namespace. If the namespace is not provided, use basename of root. - Return the canonicalized path and the namespace of the created repository. 
+ Return the canonicalized path and namespace of the created repository. """ root = canonicalize_path(root) if not namespace: namespace = os.path.basename(root) if not re.match(r'\w[\.\w-]*', namespace): - raise InvalidNamespaceError("'%s' is not a valid namespace." % namespace) + raise InvalidNamespaceError( + "'%s' is not a valid namespace." % namespace) existed = False if os.path.exists(root): if os.path.isfile(root): - raise BadRepoError('File %s already exists and is not a directory' % root) + raise BadRepoError('File %s already exists and is not a directory' + % root) elif os.path.isdir(root): if not os.access(root, os.R_OK | os.W_OK): - raise BadRepoError('Cannot create new repo in %s: cannot access directory.' % root) + raise BadRepoError( + 'Cannot create new repo in %s: cannot access directory.' + % root) if os.listdir(root): - raise BadRepoError('Cannot create new repo in %s: directory is not empty.' % root) + raise BadRepoError( + 'Cannot create new repo in %s: directory is not empty.' + % root) existed = True full_path = os.path.realpath(root) parent = os.path.dirname(full_path) if not os.access(parent, os.R_OK | os.W_OK): - raise BadRepoError("Cannot create repository in %s: can't access parent!" % root) + raise BadRepoError( + "Cannot create repository in %s: can't access parent!" % root) try: config_path = os.path.join(root, repo_config_name) diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 4969081e63..db683917b5 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -82,6 +82,7 @@ test_names = [ 'cmd.test_compiler_cmd', ] + def list_tests(): """Return names of all tests that can be run for Spack.""" return test_names diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py index ec0a2ec244..8ecbddbda2 100644 --- a/lib/spack/spack/test/concretize.py +++ b/lib/spack/spack/test/concretize.py @@ -29,6 +29,7 @@ from spack.version import ver from spack.concretize import find_spec from spack.test.mock_packages_test import * + class ConcretizeTest(MockPackagesTest): def check_spec(self, abstract, concrete): @@ -59,7 +60,6 @@ class ConcretizeTest(MockPackagesTest): if abstract.architecture and abstract.architecture.concrete: self.assertEqual(abstract.architecture, concrete.architecture) - def check_concretize(self, abstract_spec): abstract = Spec(abstract_spec) concrete = abstract.concretized() @@ -70,29 +70,24 @@ class ConcretizeTest(MockPackagesTest): return concrete - def test_concretize_no_deps(self): self.check_concretize('libelf') self.check_concretize('libelf@0.8.13') - def test_concretize_dag(self): self.check_concretize('callpath') self.check_concretize('mpileaks') self.check_concretize('libelf') - def test_concretize_variant(self): self.check_concretize('mpich+debug') self.check_concretize('mpich~debug') self.check_concretize('mpich debug=2') self.check_concretize('mpich') - def test_conretize_compiler_flags(self): self.check_concretize('mpich cppflags="-O3"') - def test_concretize_preferred_version(self): spec = self.check_concretize('python') self.assertEqual(spec.versions, ver('2.7.11')) @@ -100,7 +95,6 @@ class ConcretizeTest(MockPackagesTest): spec = self.check_concretize('python@3.5.1') self.assertEqual(spec.versions, ver('3.5.1')) - def test_concretize_with_virtual(self): self.check_concretize('mpileaks ^mpi') self.check_concretize('mpileaks ^mpi@:1.1') @@ -111,7 +105,6 @@ class ConcretizeTest(MockPackagesTest): self.check_concretize('mpileaks ^mpi@:1') 
self.check_concretize('mpileaks ^mpi@1.2:2') - def test_concretize_with_restricted_virtual(self): self.check_concretize('mpileaks ^mpich2') @@ -142,55 +135,55 @@ class ConcretizeTest(MockPackagesTest): concrete = self.check_concretize('mpileaks ^mpich2@1.3.1:1.4') self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4')) - def test_concretize_with_provides_when(self): """Make sure insufficient versions of MPI are not in providers list when we ask for some advanced version. """ - self.assertTrue(not any(spec.satisfies('mpich2@:1.0') - for spec in spack.repo.providers_for('mpi@2.1'))) + self.assertTrue( + not any(spec.satisfies('mpich2@:1.0') + for spec in spack.repo.providers_for('mpi@2.1'))) - self.assertTrue(not any(spec.satisfies('mpich2@:1.1') - for spec in spack.repo.providers_for('mpi@2.2'))) + self.assertTrue( + not any(spec.satisfies('mpich2@:1.1') + for spec in spack.repo.providers_for('mpi@2.2'))) - self.assertTrue(not any(spec.satisfies('mpich@:1') - for spec in spack.repo.providers_for('mpi@2'))) + self.assertTrue( + not any(spec.satisfies('mpich@:1') + for spec in spack.repo.providers_for('mpi@2'))) - self.assertTrue(not any(spec.satisfies('mpich@:1') - for spec in spack.repo.providers_for('mpi@3'))) - - self.assertTrue(not any(spec.satisfies('mpich2') - for spec in spack.repo.providers_for('mpi@3'))) + self.assertTrue( + not any(spec.satisfies('mpich@:1') + for spec in spack.repo.providers_for('mpi@3'))) + self.assertTrue( + not any(spec.satisfies('mpich2') + for spec in spack.repo.providers_for('mpi@3'))) def test_concretize_two_virtuals(self): """Test a package with multiple virtual dependencies.""" - s = Spec('hypre').concretize() - + Spec('hypre').concretize() def test_concretize_two_virtuals_with_one_bound(self): """Test a package with multiple virtual dependencies and one preset.""" - s = Spec('hypre ^openblas').concretize() - + Spec('hypre ^openblas').concretize() def test_concretize_two_virtuals_with_two_bound(self): - """Test a package with multiple virtual dependencies and two of them preset.""" - s = Spec('hypre ^openblas ^netlib-lapack').concretize() - + """Test a package with multiple virtual deps and two of them preset.""" + Spec('hypre ^openblas ^netlib-lapack').concretize() def test_concretize_two_virtuals_with_dual_provider(self): """Test a package with multiple virtual dependencies and force a provider that provides both.""" - s = Spec('hypre ^openblas-with-lapack').concretize() - + Spec('hypre ^openblas-with-lapack').concretize() def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self): - """Test a package with multiple virtual dependencies and force a provider - that provides both, and another conflicting package that provides one.""" + """Test a package with multiple virtual dependencies and force a + provider that provides both, and another conflicting package that + provides one. 
+ """ s = Spec('hypre ^openblas-with-lapack ^netlib-lapack') self.assertRaises(spack.spec.MultipleProviderError, s.concretize) - def test_virtual_is_fully_expanded_for_callpath(self): # force dependence on fake "zmpi" by asking for MPI 10.0 spec = Spec('callpath ^mpi@10.0') @@ -207,7 +200,6 @@ class ConcretizeTest(MockPackagesTest): self.assertTrue('fake' in spec._dependencies['zmpi'].spec) - def test_virtual_is_fully_expanded_for_mpileaks(self): spec = Spec('mpileaks ^mpi@10.0') self.assertTrue('mpi' in spec._dependencies) @@ -217,23 +209,24 @@ class ConcretizeTest(MockPackagesTest): self.assertTrue('zmpi' in spec._dependencies) self.assertTrue('callpath' in spec._dependencies) - self.assertTrue('zmpi' in spec._dependencies['callpath']. - spec._dependencies) - self.assertTrue('fake' in spec._dependencies['callpath']. - spec._dependencies['zmpi']. - spec._dependencies) - - self.assertTrue(all(not 'mpi' in d._dependencies for d in spec.traverse())) + self.assertTrue( + 'zmpi' in spec._dependencies['callpath'] + .spec._dependencies) + self.assertTrue( + 'fake' in spec._dependencies['callpath'] + .spec._dependencies['zmpi'] + .spec._dependencies) + + self.assertTrue( + all('mpi' not in d._dependencies for d in spec.traverse())) self.assertTrue('zmpi' in spec) self.assertTrue('mpi' in spec) - def test_my_dep_depends_on_provider_of_my_virtual_dep(self): spec = Spec('indirect_mpich') spec.normalize() spec.concretize() - def test_compiler_inheritance(self): spec = Spec('mpileaks') spec.normalize() @@ -245,26 +238,26 @@ class ConcretizeTest(MockPackagesTest): self.assertTrue(spec['libdwarf'].compiler.satisfies('clang')) self.assertTrue(spec['libelf'].compiler.satisfies('clang')) - def test_external_package(self): spec = Spec('externaltool%gcc') spec.concretize() - self.assertEqual(spec['externaltool'].external, '/path/to/external_tool') + self.assertEqual( + spec['externaltool'].external, '/path/to/external_tool') self.assertFalse('externalprereq' in spec) self.assertTrue(spec['externaltool'].compiler.satisfies('gcc')) - def test_external_package_module(self): # No tcl modules on darwin/linux machines # TODO: improved way to check for this. - if (spack.architecture.platform().name == 'darwin' or - spack.architecture.platform().name == 'linux'): + platform = spack.architecture.platform().name + if (platform == 'darwin' or platform == 'linux'): return spec = Spec('externalmodule') spec.concretize() - self.assertEqual(spec['externalmodule'].external_module, 'external-module') + self.assertEqual( + spec['externalmodule'].external_module, 'external-module') self.assertFalse('externalprereq' in spec) self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc')) @@ -277,16 +270,16 @@ class ConcretizeTest(MockPackagesTest): got_error = True self.assertTrue(got_error) - def test_external_and_virtual(self): spec = Spec('externaltest') spec.concretize() - self.assertEqual(spec['externaltool'].external, '/path/to/external_tool') - self.assertEqual(spec['stuff'].external, '/path/to/external_virtual_gcc') + self.assertEqual( + spec['externaltool'].external, '/path/to/external_tool') + self.assertEqual( + spec['stuff'].external, '/path/to/external_virtual_gcc') self.assertTrue(spec['externaltool'].compiler.satisfies('gcc')) self.assertTrue(spec['stuff'].compiler.satisfies('gcc')) - def test_find_spec_parents(self): """Tests the spec finding logic used by concretization. 
""" s = Spec('a +foo', @@ -297,7 +290,6 @@ class ConcretizeTest(MockPackagesTest): self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name) - def test_find_spec_children(self): s = Spec('a', Spec('b +foo', @@ -312,7 +304,6 @@ class ConcretizeTest(MockPackagesTest): Spec('e +foo')) self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name) - def test_find_spec_sibling(self): s = Spec('a', Spec('b +foo', @@ -330,7 +321,6 @@ class ConcretizeTest(MockPackagesTest): Spec('f +foo'))) self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name) - def test_find_spec_self(self): s = Spec('a', Spec('b +foo', @@ -339,7 +329,6 @@ class ConcretizeTest(MockPackagesTest): Spec('e')) self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name) - def test_find_spec_none(self): s = Spec('a', Spec('b', @@ -348,7 +337,6 @@ class ConcretizeTest(MockPackagesTest): Spec('e')) self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s)) - def test_compiler_child(self): s = Spec('mpileaks%clang ^dyninst%gcc') s.concretize() diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index a2f09450bc..0d44a27b7e 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -31,7 +31,6 @@ import multiprocessing import spack from llnl.util.filesystem import join_path -from llnl.util.lock import * from llnl.util.tty.colify import colify from spack.test.mock_database import MockDatabase @@ -88,26 +87,28 @@ class DatabaseTest(MockDatabase): # query specs with multiple configurations mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')] callpath_specs = [s for s in all_specs if s.satisfies('callpath')] - mpi_specs = [s for s in all_specs if s.satisfies('mpi')] + mpi_specs = [s for s in all_specs if s.satisfies('mpi')] self.assertEqual(len(mpileaks_specs), 3) self.assertEqual(len(callpath_specs), 3) self.assertEqual(len(mpi_specs), 3) # query specs with single configurations - dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')] + dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')] libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')] - libelf_specs = [s for s in all_specs if s.satisfies('libelf')] + libelf_specs = [s for s in all_specs if s.satisfies('libelf')] self.assertEqual(len(dyninst_specs), 1) self.assertEqual(len(libdwarf_specs), 1) self.assertEqual(len(libelf_specs), 1) # Query by dependency - self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1) - self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1) - self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1) - + self.assertEqual( + len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1) + self.assertEqual( + len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1) + self.assertEqual( + len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1) def test_015_write_and_read(self): # write and read DB @@ -122,7 +123,6 @@ class DatabaseTest(MockDatabase): self.assertEqual(new_rec.path, rec.path) self.assertEqual(new_rec.installed, rec.installed) - def _check_db_sanity(self): """Utiilty function to check db against install layout.""" expected = sorted(spack.install_layout.all_specs()) @@ -132,12 +132,10 @@ class DatabaseTest(MockDatabase): for e, a in zip(expected, actual): self.assertEqual(e, a) - def test_020_db_sanity(self): """Make sure query() returns what's actually in the db.""" self._check_db_sanity() - def 
test_030_db_sanity_from_another_process(self): def read_and_modify(): self._check_db_sanity() # check that other process can read DB @@ -152,30 +150,28 @@ class DatabaseTest(MockDatabase): with self.installed_db.read_transaction(): self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0) - def test_040_ref_counts(self): """Ensure that we got ref counts right when we read the DB.""" self.installed_db._check_ref_counts() - def test_050_basic_query(self): - """Ensure that querying the database is consistent with what is installed.""" + """Ensure querying database is consistent with what is installed.""" # query everything self.assertEqual(len(spack.installed_db.query()), 13) # query specs with multiple configurations mpileaks_specs = self.installed_db.query('mpileaks') callpath_specs = self.installed_db.query('callpath') - mpi_specs = self.installed_db.query('mpi') + mpi_specs = self.installed_db.query('mpi') self.assertEqual(len(mpileaks_specs), 3) self.assertEqual(len(callpath_specs), 3) self.assertEqual(len(mpi_specs), 3) # query specs with single configurations - dyninst_specs = self.installed_db.query('dyninst') + dyninst_specs = self.installed_db.query('dyninst') libdwarf_specs = self.installed_db.query('libdwarf') - libelf_specs = self.installed_db.query('libelf') + libelf_specs = self.installed_db.query('libelf') self.assertEqual(len(dyninst_specs), 1) self.assertEqual(len(libdwarf_specs), 1) @@ -186,7 +182,6 @@ class DatabaseTest(MockDatabase): self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1) self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1) - def _check_remove_and_add_package(self, spec): """Remove a spec from the DB, then add it and make sure everything's still ok once it is added. This checks that it was @@ -215,15 +210,12 @@ class DatabaseTest(MockDatabase): self._check_db_sanity() self.installed_db._check_ref_counts() - def test_060_remove_and_add_root_package(self): self._check_remove_and_add_package('mpileaks ^mpich') - def test_070_remove_and_add_dependency_package(self): self._check_remove_and_add_package('dyninst') - def test_080_root_ref_counts(self): rec = self.installed_db.get_record('mpileaks ^mpich') @@ -231,44 +223,52 @@ class DatabaseTest(MockDatabase): self.installed_db.remove('mpileaks ^mpich') # record no longer in DB - self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) + self.assertEqual( + self.installed_db.query('mpileaks ^mpich', installed=any), []) # record's deps have updated ref_counts - self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0) + self.assertEqual( + self.installed_db.get_record('callpath ^mpich').ref_count, 0) self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1) - # put the spec back + # Put the spec back self.installed_db.add(rec.spec, rec.path) # record is present again - self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1) + self.assertEqual( + len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1) # dependencies have ref counts updated - self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1) + self.assertEqual( + self.installed_db.get_record('callpath ^mpich').ref_count, 1) self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) - def test_090_non_root_ref_counts(self): - mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich') - callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich') + 
self.installed_db.get_record('mpileaks ^mpich') + self.installed_db.get_record('callpath ^mpich') # "force remove" a non-root spec from the DB self.installed_db.remove('callpath ^mpich') # record still in DB but marked uninstalled - self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), []) - self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1) + self.assertEqual( + self.installed_db.query('callpath ^mpich', installed=True), []) + self.assertEqual( + len(self.installed_db.query('callpath ^mpich', installed=any)), 1) # record and its deps have same ref_counts - self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1) + self.assertEqual(self.installed_db.get_record( + 'callpath ^mpich', installed=any).ref_count, 1) self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) # remove only dependent of uninstalled callpath record self.installed_db.remove('mpileaks ^mpich') # record and parent are completely gone. - self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) - self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), []) + self.assertEqual( + self.installed_db.query('mpileaks ^mpich', installed=any), []) + self.assertEqual( + self.installed_db.query('callpath ^mpich', installed=any), []) # mpich ref count updated properly. mpich_rec = self.installed_db.get_record('mpich') @@ -282,14 +282,16 @@ class DatabaseTest(MockDatabase): with self.installed_db.read_transaction(): self.assertEqual( - len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1) + len(self.installed_db.query('mpileaks ^zmpi', installed=any)), + 1) self.assertRaises(Exception, fail_while_writing) # reload DB and make sure zmpi is still there. with self.installed_db.read_transaction(): self.assertEqual( - len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1) + len(self.installed_db.query('mpileaks ^zmpi', installed=any)), + 1) def test_110_no_write_with_exception_on_install(self): def fail_while_writing(): diff --git a/lib/spack/spack/test/file_cache.py b/lib/spack/spack/test/file_cache.py index 6142b135eb..cc66beda2e 100644 --- a/lib/spack/spack/test/file_cache.py +++ b/lib/spack/spack/test/file_cache.py @@ -30,7 +30,6 @@ import shutil import tempfile import unittest -import spack from spack.file_cache import FileCache diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py index aaf573241b..b24050aa74 100644 --- a/lib/spack/spack/test/lock.py +++ b/lib/spack/spack/test/lock.py @@ -46,21 +46,21 @@ class LockTest(unittest.TestCase): self.lock_path = join_path(self.tempdir, 'lockfile') touch(self.lock_path) - def tearDown(self): - shutil.rmtree(self.tempdir, ignore_errors=True) - + shutil.rmtree(self.tempdir, ignore_errors=True) def multiproc_test(self, *functions): """Order some processes using simple barrier synchronization.""" b = Barrier(len(functions), timeout=barrier_timeout) procs = [Process(target=f, args=(b,)) for f in functions] - for p in procs: p.start() + + for p in procs: + p.start() + for p in procs: p.join() self.assertEqual(p.exitcode, 0) - # # Process snippets below can be composed into tests. # @@ -68,27 +68,26 @@ class LockTest(unittest.TestCase): lock = Lock(self.lock_path) lock.acquire_write() # grab exclusive lock barrier.wait() - barrier.wait() # hold the lock until exception raises in other procs. + barrier.wait() # hold the lock until exception raises in other procs. 
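Each of these process snippets runs in its own process, and the `barrier.wait()` calls line the processes up so lock operations happen in a known order. A self-contained sketch of the same coordination pattern (this one uses Python 3's `multiprocessing.Barrier` and `Lock` rather than the suite's own `Barrier` helper and the llnl file lock):

    import multiprocessing as mp

    def holder(barrier, lock):
        lock.acquire()                  # grab the resource first
        barrier.wait()                  # signal: the lock is now held
        barrier.wait()                  # keep holding while peers check
        lock.release()

    def contender(barrier, lock):
        barrier.wait()                  # wait until holder has the lock
        # acquiring must fail while the holder still has it
        assert not lock.acquire(timeout=0.1)
        barrier.wait()                  # let the holder release

    if __name__ == '__main__':
        barrier, lock = mp.Barrier(2), mp.Lock()
        procs = [mp.Process(target=f, args=(barrier, lock))
                 for f in (holder, contender)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
            assert p.exitcode == 0      # both processes ran cleanly
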
def acquire_read(self, barrier): lock = Lock(self.lock_path) lock.acquire_read() # grab shared lock barrier.wait() - barrier.wait() # hold the lock until exception raises in other procs. + barrier.wait() # hold the lock until exception raises in other procs. def timeout_write(self, barrier): lock = Lock(self.lock_path) - barrier.wait() # wait for lock acquire in first process + barrier.wait() # wait for lock acquire in first process self.assertRaises(LockError, lock.acquire_write, 0.1) barrier.wait() def timeout_read(self, barrier): lock = Lock(self.lock_path) - barrier.wait() # wait for lock acquire in first process + barrier.wait() # wait for lock acquire in first process self.assertRaises(LockError, lock.acquire_read, 0.1) barrier.wait() - # # Test that exclusive locks on other processes time out when an # exclusive lock is held. @@ -97,11 +96,13 @@ class LockTest(unittest.TestCase): self.multiproc_test(self.acquire_write, self.timeout_write) def test_write_lock_timeout_on_write_2(self): - self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write) + self.multiproc_test( + self.acquire_write, self.timeout_write, self.timeout_write) def test_write_lock_timeout_on_write_3(self): - self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write) - + self.multiproc_test( + self.acquire_write, self.timeout_write, self.timeout_write, + self.timeout_write) # # Test that shared locks on other processes time out when an @@ -111,11 +112,13 @@ class LockTest(unittest.TestCase): self.multiproc_test(self.acquire_write, self.timeout_read) def test_read_lock_timeout_on_write_2(self): - self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read) + self.multiproc_test( + self.acquire_write, self.timeout_read, self.timeout_read) def test_read_lock_timeout_on_write_3(self): - self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read) - + self.multiproc_test( + self.acquire_write, self.timeout_read, self.timeout_read, + self.timeout_read) # # Test that exclusive locks time out when shared locks are held. @@ -124,27 +127,35 @@ class LockTest(unittest.TestCase): self.multiproc_test(self.acquire_read, self.timeout_write) def test_write_lock_timeout_on_read_2(self): - self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write) + self.multiproc_test( + self.acquire_read, self.timeout_write, self.timeout_write) def test_write_lock_timeout_on_read_3(self): - self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write) - + self.multiproc_test( + self.acquire_read, self.timeout_write, self.timeout_write, + self.timeout_write) # # Test that exclusive locks time while lots of shared locks are held. 
# def test_write_lock_timeout_with_multiple_readers_2_1(self): - self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write) + self.multiproc_test( + self.acquire_read, self.acquire_read, self.timeout_write) def test_write_lock_timeout_with_multiple_readers_2_2(self): - self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) + self.multiproc_test( + self.acquire_read, self.acquire_read, self.timeout_write, + self.timeout_write) def test_write_lock_timeout_with_multiple_readers_3_1(self): - self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write) + self.multiproc_test( + self.acquire_read, self.acquire_read, self.acquire_read, + self.timeout_write) def test_write_lock_timeout_with_multiple_readers_3_2(self): - self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) - + self.multiproc_test( + self.acquire_read, self.acquire_read, self.acquire_read, + self.timeout_write, self.timeout_write) # # Longer test case that ensures locks are reusable. Ordering is @@ -155,108 +166,108 @@ class LockTest(unittest.TestCase): lock = Lock(self.lock_path) lock.acquire_write() - barrier.wait() # ---------------------------------------- 1 + barrier.wait() # ---------------------------------------- 1 # others test timeout - barrier.wait() # ---------------------------------------- 2 + barrier.wait() # ---------------------------------------- 2 lock.release_write() # release and others acquire read - barrier.wait() # ---------------------------------------- 3 + barrier.wait() # ---------------------------------------- 3 self.assertRaises(LockError, lock.acquire_write, 0.1) lock.acquire_read() - barrier.wait() # ---------------------------------------- 4 + barrier.wait() # ---------------------------------------- 4 lock.release_read() - barrier.wait() # ---------------------------------------- 5 + barrier.wait() # ---------------------------------------- 5 # p2 upgrades read to write - barrier.wait() # ---------------------------------------- 6 + barrier.wait() # ---------------------------------------- 6 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 7 + barrier.wait() # ---------------------------------------- 7 # p2 releases write and read - barrier.wait() # ---------------------------------------- 8 + barrier.wait() # ---------------------------------------- 8 # p3 acquires read - barrier.wait() # ---------------------------------------- 9 + barrier.wait() # ---------------------------------------- 9 # p3 upgrades read to write - barrier.wait() # ---------------------------------------- 10 + barrier.wait() # ---------------------------------------- 10 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 11 + barrier.wait() # ---------------------------------------- 11 # p3 releases locks - barrier.wait() # ---------------------------------------- 12 + barrier.wait() # ---------------------------------------- 12 lock.acquire_read() - barrier.wait() # ---------------------------------------- 13 + barrier.wait() # ---------------------------------------- 13 lock.release_read() def p2(barrier): lock = Lock(self.lock_path) # p1 acquires write - barrier.wait() # ---------------------------------------- 1 + barrier.wait() # 
---------------------------------------- 1 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 2 + barrier.wait() # ---------------------------------------- 2 lock.acquire_read() - barrier.wait() # ---------------------------------------- 3 + barrier.wait() # ---------------------------------------- 3 # p1 tests shared read - barrier.wait() # ---------------------------------------- 4 + barrier.wait() # ---------------------------------------- 4 # others release reads - barrier.wait() # ---------------------------------------- 5 + barrier.wait() # ---------------------------------------- 5 - lock.acquire_write() # upgrade read to write - barrier.wait() # ---------------------------------------- 6 + lock.acquire_write() # upgrade read to write + barrier.wait() # ---------------------------------------- 6 # others test timeout - barrier.wait() # ---------------------------------------- 7 + barrier.wait() # ---------------------------------------- 7 lock.release_write() # release read AND write (need both) lock.release_read() - barrier.wait() # ---------------------------------------- 8 + barrier.wait() # ---------------------------------------- 8 # p3 acquires read - barrier.wait() # ---------------------------------------- 9 + barrier.wait() # ---------------------------------------- 9 # p3 upgrades read to write - barrier.wait() # ---------------------------------------- 10 + barrier.wait() # ---------------------------------------- 10 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 11 + barrier.wait() # ---------------------------------------- 11 # p3 releases locks - barrier.wait() # ---------------------------------------- 12 + barrier.wait() # ---------------------------------------- 12 lock.acquire_read() - barrier.wait() # ---------------------------------------- 13 + barrier.wait() # ---------------------------------------- 13 lock.release_read() def p3(barrier): lock = Lock(self.lock_path) # p1 acquires write - barrier.wait() # ---------------------------------------- 1 + barrier.wait() # ---------------------------------------- 1 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 2 + barrier.wait() # ---------------------------------------- 2 lock.acquire_read() - barrier.wait() # ---------------------------------------- 3 + barrier.wait() # ---------------------------------------- 3 # p1 tests shared read - barrier.wait() # ---------------------------------------- 4 + barrier.wait() # ---------------------------------------- 4 lock.release_read() - barrier.wait() # ---------------------------------------- 5 + barrier.wait() # ---------------------------------------- 5 # p2 upgrades read to write - barrier.wait() # ---------------------------------------- 6 + barrier.wait() # ---------------------------------------- 6 self.assertRaises(LockError, lock.acquire_write, 0.1) self.assertRaises(LockError, lock.acquire_read, 0.1) - barrier.wait() # ---------------------------------------- 7 + barrier.wait() # ---------------------------------------- 7 # p2 releases write & read - barrier.wait() # ---------------------------------------- 8 + barrier.wait() # ---------------------------------------- 8 lock.acquire_read() - barrier.wait() # 
---------------------------------------- 9 + barrier.wait() # ---------------------------------------- 9 lock.acquire_write() - barrier.wait() # ---------------------------------------- 10 + barrier.wait() # ---------------------------------------- 10 # others test timeout - barrier.wait() # ---------------------------------------- 11 + barrier.wait() # ---------------------------------------- 11 lock.release_read() # release read AND write in opposite lock.release_write() # order from before on p2 - barrier.wait() # ---------------------------------------- 12 + barrier.wait() # ---------------------------------------- 12 lock.acquire_read() - barrier.wait() # ---------------------------------------- 13 + barrier.wait() # ---------------------------------------- 13 lock.release_read() self.multiproc_test(p1, p2, p3) @@ -270,14 +281,18 @@ class LockTest(unittest.TestCase): vals['exception'] = (t or v or tb) lock = Lock(self.lock_path) - vals = {'entered': False, 'exited': False, 'exception': False } - with ReadTransaction(lock, enter_fn, exit_fn): pass + vals = {'entered': False, 'exited': False, 'exception': False} + with ReadTransaction(lock, enter_fn, exit_fn): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) - vals = {'entered': False, 'exited': False, 'exception': False } - with WriteTransaction(lock, enter_fn, exit_fn): pass + vals = {'entered': False, 'exited': False, 'exception': False} + with WriteTransaction(lock, enter_fn, exit_fn): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) @@ -300,13 +315,13 @@ class LockTest(unittest.TestCase): with WriteTransaction(lock, enter_fn, exit_fn): raise Exception() - vals = {'entered': False, 'exited': False, 'exception': False } + vals = {'entered': False, 'exited': False, 'exception': False} self.assertRaises(Exception, do_read_with_exception) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertTrue(vals['exception']) - vals = {'entered': False, 'exited': False, 'exception': False } + vals = {'entered': False, 'exited': False, 'exception': False} self.assertRaises(Exception, do_write_with_exception) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) @@ -328,8 +343,10 @@ class LockTest(unittest.TestCase): lock = Lock(self.lock_path) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } - with ReadTransaction(lock, TestContextManager, exit_fn): pass + 'exception': False, 'exception_fn': False} + with ReadTransaction(lock, TestContextManager, exit_fn): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) @@ -337,8 +354,10 @@ class LockTest(unittest.TestCase): self.assertFalse(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } - with ReadTransaction(lock, TestContextManager): pass + 'exception': False, 'exception_fn': False} + with ReadTransaction(lock, TestContextManager): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) @@ -346,8 +365,10 @@ class LockTest(unittest.TestCase): self.assertFalse(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } - with WriteTransaction(lock, TestContextManager, exit_fn): pass + 'exception': False, 'exception_fn': False} + with 
WriteTransaction(lock, TestContextManager, exit_fn): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) @@ -355,8 +376,10 @@ class LockTest(unittest.TestCase): self.assertFalse(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } - with WriteTransaction(lock, TestContextManager): pass + 'exception': False, 'exception_fn': False} + with WriteTransaction(lock, TestContextManager): + pass + self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) self.assertFalse(vals['exception']) @@ -387,7 +410,7 @@ class LockTest(unittest.TestCase): raise Exception() vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } + 'exception': False, 'exception_fn': False} self.assertRaises(Exception, do_read_with_exception, exit_fn) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) @@ -396,7 +419,7 @@ class LockTest(unittest.TestCase): self.assertTrue(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } + 'exception': False, 'exception_fn': False} self.assertRaises(Exception, do_read_with_exception, None) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) @@ -405,7 +428,7 @@ class LockTest(unittest.TestCase): self.assertFalse(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } + 'exception': False, 'exception_fn': False} self.assertRaises(Exception, do_write_with_exception, exit_fn) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) @@ -414,7 +437,7 @@ class LockTest(unittest.TestCase): self.assertTrue(vals['exception_fn']) vals = {'entered': False, 'exited': False, 'exited_fn': False, - 'exception': False, 'exception_fn': False } + 'exception': False, 'exception_fn': False} self.assertRaises(Exception, do_write_with_exception, None) self.assertTrue(vals['entered']) self.assertTrue(vals['exited']) diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py index 861814e0ae..9847dd05a6 100644 --- a/lib/spack/spack/test/provider_index.py +++ b/lib/spack/spack/test/provider_index.py @@ -22,27 +22,28 @@ # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +"""Tests for provider index cache files. 
+ +Tests assume that mock packages provide this: + + {'blas': { + blas: set([netlib-blas, openblas, openblas-with-lapack])}, + 'lapack': {lapack: set([netlib-lapack, openblas-with-lapack])}, + 'mpi': {mpi@:1: set([mpich@:1]), + mpi@:2.0: set([mpich2]), + mpi@:2.1: set([mpich2@1.1:]), + mpi@:2.2: set([mpich2@1.2:]), + mpi@:3: set([mpich@3:]), + mpi@:10.0: set([zmpi])}, + 'stuff': {stuff: set([externalvirtual])}} +""" from StringIO import StringIO -import unittest import spack from spack.spec import Spec from spack.provider_index import ProviderIndex from spack.test.mock_packages_test import * -# Test assume that mock packages provide this: -# -# {'blas': { -# blas: set([netlib-blas, openblas, openblas-with-lapack])}, -# 'lapack': {lapack: set([netlib-lapack, openblas-with-lapack])}, -# 'mpi': {mpi@:1: set([mpich@:1]), -# mpi@:2.0: set([mpich2]), -# mpi@:2.1: set([mpich2@1.1:]), -# mpi@:2.2: set([mpich2@1.2:]), -# mpi@:3: set([mpich@3:]), -# mpi@:10.0: set([zmpi])}, -# 'stuff': {stuff: set([externalvirtual])}} -# class ProviderIndexTest(MockPackagesTest): @@ -57,7 +58,6 @@ class ProviderIndexTest(MockPackagesTest): self.assertEqual(p, q) - def test_providers_for_simple(self): p = ProviderIndex(spack.repo.all_package_names()) @@ -70,7 +70,6 @@ class ProviderIndexTest(MockPackagesTest): self.assertTrue(Spec('netlib-lapack') in lapack_providers) self.assertTrue(Spec('openblas-with-lapack') in lapack_providers) - def test_mpi_providers(self): p = ProviderIndex(spack.repo.all_package_names()) @@ -83,13 +82,11 @@ class ProviderIndexTest(MockPackagesTest): self.assertTrue(Spec('mpich@3:') in mpi_3_providers) self.assertTrue(Spec('zmpi') in mpi_3_providers) - def test_equal(self): p = ProviderIndex(spack.repo.all_package_names()) q = ProviderIndex(spack.repo.all_package_names()) self.assertEqual(p, q) - def test_copy(self): p = ProviderIndex(spack.repo.all_package_names()) q = p.copy() diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py index fc0ce0b2f3..964aea9422 100644 --- a/lib/spack/spack/test/spec_yaml.py +++ b/lib/spack/spack/test/spec_yaml.py @@ -30,6 +30,7 @@ YAML format preserves DAG informatoin in the spec. from spack.spec import Spec from spack.test.mock_packages_test import * + class SpecYamlTest(MockPackagesTest): def check_yaml_round_trip(self, spec): @@ -37,30 +38,25 @@ class SpecYamlTest(MockPackagesTest): spec_from_yaml = Spec.from_yaml(yaml_text) self.assertTrue(spec.eq_dag(spec_from_yaml)) - def test_simple_spec(self): spec = Spec('mpileaks') self.check_yaml_round_trip(spec) - def test_normal_spec(self): spec = Spec('mpileaks+debug~opt') spec.normalize() self.check_yaml_round_trip(spec) - def test_ambiguous_version_spec(self): spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt') spec.normalize() self.check_yaml_round_trip(spec) - def test_concrete_spec(self): spec = Spec('mpileaks+debug~opt') spec.concretize() self.check_yaml_round_trip(spec) - def test_yaml_subdag(self): spec = Spec('mpileaks^mpich+debug') spec.concretize() -- cgit v1.2.3-70-g09d2 From 9d4a36a62f0ccd8cffc6fb96d6487f0805928e0b Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 9 Aug 2016 02:06:30 -0700 Subject: Properly re-raise exceptions from lock context handler. 
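The contract this relies on: Python suppresses an in-flight exception only when
__exit__ returns a true value; returning None or False lets the exception
propagate with its original traceback, so there is no need to re-raise by
hand (the old `raise value` also rebuilt the traceback at the re-raise site).
A minimal sketch of that contract, independent of the classes below:

    class SuppressValueError(object):
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # True -> swallow the exception; False/None -> let it propagate.
            return exc_type is ValueError

    with SuppressValueError():
        raise ValueError('never escapes the with block')
    print('execution continues here')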
--- lib/spack/llnl/util/lock.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index bef20025ba..4a4aec2385 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -213,13 +213,15 @@ class LockTransaction(object): return self._as def __exit__(self, type, value, traceback): + suppress = False if self._exit(): if self._as and hasattr(self._as, '__exit__'): - self._as.__exit__(type, value, traceback) + if self._as.__exit__(type, value, traceback): + suppress = True if self._release_fn: - self._release_fn(type, value, traceback) - if value: - raise value + if self._release_fn(type, value, traceback): + suppress = True + return suppress class ReadTransaction(LockTransaction): -- cgit v1.2.3-70-g09d2
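With the return value threaded through as above, a transaction can defer the suppress-or-propagate decision to whatever it wraps. A compact illustration of that chaining, using stand-in classes rather than LockTransaction itself:

    class Swallow(object):
        """Inner manager that asks for suppression."""
        def __enter__(self):
            return self

        def __exit__(self, t, v, tb):
            return True                     # request suppression

    class Txn(object):
        """Stand-in for a transaction wrapping another context manager."""
        def __init__(self, inner):
            self.inner = inner

        def __enter__(self):
            self.inner.__enter__()
            return self

        def __exit__(self, t, v, tb):
            suppress = False
            if self.inner.__exit__(t, v, tb):   # honor the inner decision
                suppress = True
            return suppress

    with Txn(Swallow()):
        raise RuntimeError('suppressed by the inner manager')
    print('still running: the wrapper propagated the suppression')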