From 1da56e5290ba7272ea4bd0f18cdf5cae2b83f093 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Fri, 21 Aug 2015 11:32:12 -0700 Subject: Added a database of installed packages. No methods use the database so far. Also, a bug fix: Previous version did not remove the staging directory on a failed install This led to spack refusing to uninstall dependencies of the failed install Added to cleanup() to blow away the staging directory on failed install. --- lib/spack/spack/database.py | 150 ++++++++++++++++++++++++++++++++++++ lib/spack/spack/directory_layout.py | 8 ++ lib/spack/spack/package.py | 12 +++ lib/spack/spack/spec.py | 8 +- 4 files changed, 175 insertions(+), 3 deletions(-) create mode 100644 lib/spack/spack/database.py (limited to 'lib') diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py new file mode 100644 index 0000000000..8ae71a4385 --- /dev/null +++ b/lib/spack/spack/database.py @@ -0,0 +1,150 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import os +import sys +import inspect +import glob +import imp + +from external import yaml +from external.yaml.error import MarkedYAMLError + +import llnl.util.tty as tty +from llnl.util.filesystem import join_path +from llnl.util.lang import * + +import spack.error +import spack.spec +from spack.spec import Spec +from spack.error import SpackError +from spack.virtual import ProviderIndex +from spack.util.naming import mod_to_class, validate_module_name + + +class Database(object): + def __init__(self,file_name="specDB.yaml"): + """ + Create an empty Database + Location defaults to root/specDB.yaml + """ + self.file_name = file_name + self.data = [] + + + def from_yaml(self,stream): + """ + Fill database from YAML + Translate the spec portions from node-dict form to spec from + """ + try: + file = yaml.load(stream) + except MarkedYAMLError, e: + raise SpackYAMLError("error parsing YAML database:", str(e)) + + if file==None: + return + + for sp in file['database']: + spec = Spec.from_node_dict(sp['spec']) + path = sp['path'] + db_entry = {'spec': spec, 'path': path} + self.data.append(db_entry) + + + @staticmethod + def read_database(root): + """Create a Database from the data in the standard location""" + database = Database() + full_path = join_path(root,database.file_name) + if os.path.isfile(full_path): + with open(full_path,'r') as f: + database.from_yaml(f) + else: + with open(full_path,'w+') as f: + database.from_yaml(f) + + return database + + + def write_database_to_yaml(self,stream): + """ + Replace each spec with its dict-node form + Then stream all data to YAML + """ + node_list = [] + for sp in self.data: + node = {} + 
node['spec']=Spec.to_node_dict(sp['spec']) + node['spec'][sp['spec'].name]['hash']=sp['spec'].dag_hash() + node['path']=sp['path'] + node_list.append(node) + return yaml.dump({ 'database' : node_list}, + stream=stream, default_flow_style=False) + + + def write(self,root): + """Write the database to the standard location""" + full_path = join_path(root,self.file_name) + #test for file existence + with open(full_path,'w') as f: + self.write_database_to_yaml(f) + + + @staticmethod + def add(root, spec, path): + """Read the database from the standard location + Add the specified entry as a dict + Write the database back to memory + + TODO: Caching databases + """ + database = Database.read_database(root) + + spec_and_path = {} + spec_and_path['spec']=spec + spec_and_path['path']=path + + database.data.append(spec_and_path) + + database.write(root) + + + @staticmethod + def remove(root, spec): + """ + Reads the database from the standard location + Searches for and removes the specified spec + Writes the database back to memory + + TODO: Caching databases + """ + database = Database.read_database(root) + + for sp in database.data: + #This requires specs w/o dependencies, is that sustainable? 
+ if sp['spec'] == spec: + database.data.remove(sp) + + database.write(root) diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index e61929d8fd..0bbe6c9d85 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -37,6 +37,7 @@ from llnl.util.filesystem import join_path, mkdirp from spack.spec import Spec from spack.error import SpackError +from spack.database import Database def _check_concrete(spec): @@ -152,6 +153,8 @@ class DirectoryLayout(object): os.rmdir(path) path = os.path.dirname(path) + Database.remove(self.root,spec) + class YamlDirectoryLayout(DirectoryLayout): """Lays out installation directories like this:: @@ -263,6 +266,11 @@ class YamlDirectoryLayout(DirectoryLayout): self.write_spec(spec, spec_file_path) + def add_to_database(self, spec): + """Simply adds a spec to the database""" + Database.add(self.root, spec, self.path_for_spec(spec)) + + @memoized def all_specs(self): if not os.path.isdir(self.root): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 3507807373..a425c555a2 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -779,6 +779,15 @@ class Package(object): "Manually remove this directory to fix:", self.prefix) + if not (keep_prefix and keep_stage): + self.do_clean() + else: + tty.warn("Keeping stage in place despite error.", + "Spack will refuse to uninstall dependencies of this package." + + "Manually remove this directory to fix:", + self.stage.path) + + def real_work(): try: tty.msg("Building %s." % self.name) @@ -808,6 +817,9 @@ class Package(object): log_install_path = spack.install_layout.build_log_path(self.spec) install(log_path, log_install_path) + #Update the database once we know install successful + spack.install_layout.add_to_database(self.spec) + # On successful install, remove the stage. 
if not keep_stage: self.stage.destroy() diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index e1fbb84423..df74b6064e 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -427,7 +427,6 @@ class Spec(object): spec = dep if isinstance(dep, Spec) else Spec(dep) self._add_dependency(spec) - # # Private routines here are called by the parser when building a spec. # @@ -640,7 +639,10 @@ class Spec(object): def dag_hash(self, length=None): - """Return a hash of the entire spec DAG, including connectivity.""" + """ + Return a hash of the entire spec DAG, including connectivity. + Stores the hash iff the spec is concrete. + """ yaml_text = yaml.dump( self.to_node_dict(), default_flow_style=True, width=sys.maxint) sha = hashlib.sha1(yaml_text) @@ -710,7 +712,7 @@ class Spec(object): try: yfile = yaml.load(stream) except MarkedYAMLError, e: - raise SpackYAMLError("error parsing YMAL spec:", str(e)) + raise SpackYAMLError("error parsing YAML spec:", str(e)) for node in yfile['spec']: name = next(iter(node)) -- cgit v1.2.3-70-g09d2 From 55f68bb2b05322297c3fc4d9fe265870b9385d4c Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Fri, 21 Aug 2015 13:04:27 -0700 Subject: Added hashes to the database --- lib/spack/spack/database.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 8ae71a4385..4646530d52 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -48,11 +48,15 @@ class Database(object): """ Create an empty Database Location defaults to root/specDB.yaml + The individual data are dicts containing + spec: the top level spec of a package + path: the path to the install of that package + dep_hash: a hash of the dependence DAG for that package """ self.file_name = file_name self.data = [] - + def from_yaml(self,stream): """ Fill database from YAML @@ -69,10 +73,11 @@ class Database(object): 
for sp in file['database']: spec = Spec.from_node_dict(sp['spec']) path = sp['path'] - db_entry = {'spec': spec, 'path': path} + dep_hash = sp['hash'] + db_entry = {'spec': spec, 'path': path, 'hash':dep_hash} self.data.append(db_entry) - + @staticmethod def read_database(root): """Create a Database from the data in the standard location""" @@ -87,7 +92,7 @@ class Database(object): return database - + def write_database_to_yaml(self,stream): """ Replace each spec with its dict-node form @@ -97,7 +102,8 @@ class Database(object): for sp in self.data: node = {} node['spec']=Spec.to_node_dict(sp['spec']) - node['spec'][sp['spec'].name]['hash']=sp['spec'].dag_hash() +# node['spec'][sp['spec'].name]['hash']=sp['spec'].dag_hash() + node['hash']=sp['hash'] node['path']=sp['path'] node_list.append(node) return yaml.dump({ 'database' : node_list}, @@ -121,13 +127,14 @@ class Database(object): TODO: Caching databases """ database = Database.read_database(root) - - spec_and_path = {} - spec_and_path['spec']=spec - spec_and_path['path']=path - - database.data.append(spec_and_path) - + + sph = {} + sph['spec']=spec + sph['path']=path + sph['hash']=spec.dag_hash() + + database.data.append(sph) + database.write(root) -- cgit v1.2.3-70-g09d2 From fb1874165b420c5a8866bec7ac87a98e97f2b670 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Fri, 21 Aug 2015 16:42:12 -0700 Subject: Eliminated all calls that relied on finding all packages in the opt directory Replaced them all with references to the database Implemented caching in the database. The database now only re-reads data if the database file exists and was changed since this file last wrote to it. Added the installed_db field to the spack instance Left the call to all_specs from testdirectory_layout.py for now. 
--- lib/spack/spack/__init__.py | 6 ++ lib/spack/spack/cmd/__init__.py | 2 +- lib/spack/spack/cmd/deactivate.py | 2 +- lib/spack/spack/cmd/extensions.py | 2 +- lib/spack/spack/cmd/find.py | 4 +- lib/spack/spack/cmd/module.py | 4 +- lib/spack/spack/cmd/uninstall.py | 2 +- lib/spack/spack/database.py | 140 ++++++++++++++++++++++++++---------- lib/spack/spack/directory_layout.py | 6 -- lib/spack/spack/package.py | 6 +- lib/spack/spack/packages.py | 42 ----------- 11 files changed, 121 insertions(+), 95 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py index caa09eb6e0..4d10bc2da6 100644 --- a/lib/spack/spack/__init__.py +++ b/lib/spack/spack/__init__.py @@ -53,6 +53,12 @@ from spack.packages import PackageDB packages_path = join_path(var_path, "packages") db = PackageDB(packages_path) +# +# Set up the installed packages database +# +from spack.database import Database +installed_db = Database(install_path) + # # Paths to mock files for testing. # diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index b96ac5af51..fd3ef3ed27 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -124,7 +124,7 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): - matching_specs = spack.db.get_installed(spec) + matching_specs = spack.installed_db.get_installed(spec) if not matching_specs: tty.die("Spec '%s' matches no installed packages." 
% spec) diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index e44be41029..1f0e303cdf 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -54,7 +54,7 @@ def deactivate(parser, args): if args.all: if pkg.extendable: tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec) - ext_pkgs = spack.db.installed_extensions_for(spec) + ext_pkgs = spack.installed_db.installed_extensions_for(spec) for ext_pkg in ext_pkgs: ext_pkg.spec.normalize() diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index fc8e6842c3..e919b1c4fb 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -80,7 +80,7 @@ def extensions(parser, args): colify(ext.name for ext in extensions) # List specs of installed extensions. - installed = [s.spec for s in spack.db.installed_extensions_for(spec)] + installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: tty.msg("None installed.") diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index 3c993990b1..6f9072e311 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -138,9 +138,9 @@ def find(parser, args): # Get all the specs the user asked for if not query_specs: - specs = set(spack.db.installed_package_specs()) + specs = set(spack.installed_db.installed_package_specs()) else: - results = [set(spack.db.get_installed(qs)) for qs in query_specs] + results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] specs = set.union(*results) if not args.mode: diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py index 34f0855a50..215d877bd0 100644 --- a/lib/spack/spack/cmd/module.py +++ b/lib/spack/spack/cmd/module.py @@ -65,7 +65,7 @@ def module_find(mtype, spec_array): tty.die("You can only pass one spec.") spec = specs[0] - specs = [s for s in spack.db.installed_package_specs() if 
s.satisfies(spec)] + specs = [s for s in spack.installed_db.installed_package_specs() if s.satisfies(spec)] if len(specs) == 0: tty.die("No installed packages match spec %s" % spec) @@ -86,7 +86,7 @@ def module_find(mtype, spec_array): def module_refresh(): """Regenerate all module files for installed packages known to spack (some packages may no longer exist).""" - specs = [s for s in spack.db.installed_known_package_specs()] + specs = [s for s in spack.installed_db.installed_known_package_specs()] for name, cls in module_types.items(): tty.msg("Regenerating %s module files." % name) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index aa62510fed..4870712eb6 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -59,7 +59,7 @@ def uninstall(parser, args): # Fail and ask user to be unambiguous if it doesn't pkgs = [] for spec in specs: - matching_specs = spack.db.get_installed(spec) + matching_specs = spack.installed_db.get_installed(spec) if not args.all and len(matching_specs) > 1: tty.error("%s matches multiple packages:" % spec) print diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 4646530d52..d027db312f 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -28,6 +28,9 @@ import inspect import glob import imp +import time +import copy + from external import yaml from external.yaml.error import MarkedYAMLError @@ -43,8 +46,18 @@ from spack.virtual import ProviderIndex from spack.util.naming import mod_to_class, validate_module_name +def _autospec(function): + """Decorator that automatically converts the argument of a single-arg + function to a Spec.""" + def converter(self, spec_like, **kwargs): + if not isinstance(spec_like, spack.spec.Spec): + spec_like = spack.spec.Spec(spec_like) + return function(self, spec_like, **kwargs) + return converter + + class Database(object): - def __init__(self,file_name="specDB.yaml"): + def 
__init__(self,root,file_name="specDB.yaml"): """ Create an empty Database Location defaults to root/specDB.yaml @@ -53,13 +66,16 @@ class Database(object): path: the path to the install of that package dep_hash: a hash of the dependence DAG for that package """ + self.root = root self.file_name = file_name + self.file_path = join_path(self.root,self.file_name) self.data = [] + self.last_write_time = 0 def from_yaml(self,stream): """ - Fill database from YAML + Fill database from YAML, do not maintain old data Translate the spec portions from node-dict form to spec from """ try: @@ -70,6 +86,7 @@ class Database(object): if file==None: return + self.data = [] for sp in file['database']: spec = Spec.from_node_dict(sp['spec']) path = sp['path'] @@ -78,19 +95,14 @@ class Database(object): self.data.append(db_entry) - @staticmethod - def read_database(root): - """Create a Database from the data in the standard location""" - database = Database() - full_path = join_path(root,database.file_name) - if os.path.isfile(full_path): - with open(full_path,'r') as f: - database.from_yaml(f) + def read_database(self): + """Reread Database from the data in the set location""" + if os.path.isfile(self.file_path): + with open(self.file_path,'r') as f: + self.from_yaml(f) else: - with open(full_path,'w+') as f: - database.from_yaml(f) - - return database + #The file doesn't exist, construct empty data. 
+ self.data = [] def write_database_to_yaml(self,stream): @@ -110,48 +122,104 @@ class Database(object): stream=stream, default_flow_style=False) - def write(self,root): + def write(self): """Write the database to the standard location""" - full_path = join_path(root,self.file_name) - #test for file existence - with open(full_path,'w') as f: + #creates file if necessary + with open(self.file_path,'w') as f: + self.last_write_time = int(time.time()) self.write_database_to_yaml(f) - @staticmethod - def add(root, spec, path): - """Read the database from the standard location + def is_dirty(self): + """ + Returns true iff the database file exists + and was most recently written to by another spack instance. + """ + return (os.path.isfile(self.file_path) and (os.path.getmtime(self.file_path) > self.last_write_time)) + + +# @_autospec + def add(self, spec, path): + """Re-read the database from the set location if data is dirty Add the specified entry as a dict Write the database back to memory - - TODO: Caching databases """ - database = Database.read_database(root) + if self.is_dirty(): + self.read_database() sph = {} sph['spec']=spec sph['path']=path sph['hash']=spec.dag_hash() - database.data.append(sph) + self.data.append(sph) - database.write(root) + self.write() - @staticmethod - def remove(root, spec): + @_autospec + def remove(self, spec): """ - Reads the database from the standard location + Re-reads the database from the set location if data is dirty Searches for and removes the specified spec Writes the database back to memory + """ + if self.is_dirty(): + self.read_database() + + for sp in self.data: + + if sp['hash'] == spec.dag_hash() and sp['spec'] == Spec.from_node_dict(spec.to_node_dict()): + self.data.remove(sp) + + self.write() - TODO: Caching databases + + @_autospec + def get_installed(self, spec): + """ + Get all the installed specs that satisfy the provided spec constraint + """ + return [s for s in self.installed_package_specs() if 
s.satisfies(spec)] + + + @_autospec + def installed_extensions_for(self, extendee_spec): + """ + Return the specs of all packages that extend + the given spec + """ + for s in self.installed_package_specs(): + try: + if s.package.extends(extendee_spec): + yield s.package + except UnknownPackageError, e: + continue + #skips unknown packages + #TODO: conditional way to do this instead of catching exceptions + + + def installed_package_specs(self): """ - database = Database.read_database(root) + Read installed package names from the database + and return their specs + """ + if self.is_dirty(): + self.read_database() + + installed = [] + for sph in self.data: + sph['spec'].normalize() + sph['spec'].concretize() + installed.append(sph['spec']) + return installed - for sp in database.data: - #This requires specs w/o dependencies, is that sustainable? - if sp['spec'] == spec: - database.data.remove(sp) - database.write(root) + def installed_known_package_specs(self): + """ + Read installed package names from the database. 
+ Return only the specs for which the package is known + to this version of spack + """ + return [s for s in self.installed_package_specs() if spack.db.exists(s.name)] + diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index 0bbe6c9d85..6dc1b0e550 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -153,7 +153,6 @@ class DirectoryLayout(object): os.rmdir(path) path = os.path.dirname(path) - Database.remove(self.root,spec) class YamlDirectoryLayout(DirectoryLayout): @@ -266,11 +265,6 @@ class YamlDirectoryLayout(DirectoryLayout): self.write_spec(spec, spec_file_path) - def add_to_database(self, spec): - """Simply adds a spec to the database""" - Database.add(self.root, spec, self.path_for_spec(spec)) - - @memoized def all_specs(self): if not os.path.isdir(self.root): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index a425c555a2..acf558d639 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -565,7 +565,7 @@ class Package(object): """Return a list of the specs of all installed packages that depend on this one.""" dependents = [] - for spec in spack.db.installed_package_specs(): + for spec in spack.installed_db.installed_package_specs(): if self.name == spec.name: continue for dep in spec.traverse(): @@ -601,7 +601,7 @@ class Package(object): def remove_prefix(self): """Removes the prefix for a package along with any empty parent directories.""" spack.install_layout.remove_install_directory(self.spec) - + spack.installed_db.remove(self.spec) def do_fetch(self): """Creates a stage directory and downloads the taball for this package. @@ -818,7 +818,7 @@ class Package(object): install(log_path, log_install_path) #Update the database once we know install successful - spack.install_layout.add_to_database(self.spec) + spack.installed_db.add(self.spec, spack.install_layout.path_for_spec(self.spec)) # On successful install, remove the stage. 
if not keep_stage: diff --git a/lib/spack/spack/packages.py b/lib/spack/spack/packages.py index adfbc26c1d..2e3e95ca40 100644 --- a/lib/spack/spack/packages.py +++ b/lib/spack/spack/packages.py @@ -95,12 +95,6 @@ class PackageDB(object): self.instances.clear() - @_autospec - def get_installed(self, spec): - """Get all the installed specs that satisfy the provided spec constraint.""" - return [s for s in self.installed_package_specs() if s.satisfies(spec)] - - @_autospec def providers_for(self, vpkg_spec): if self.provider_index is None: @@ -117,19 +111,6 @@ class PackageDB(object): return [p for p in self.all_packages() if p.extends(extendee_spec)] - @_autospec - def installed_extensions_for(self, extendee_spec): - for s in self.installed_package_specs(): - try: - if s.package.extends(extendee_spec): - yield s.package - except UnknownPackageError, e: - # Skip packages we know nothing about - continue - # TODO: add some conditional way to do this instead of - # catching exceptions. - - def dirname_for_package_name(self, pkg_name): """Get the directory name for a particular package. This is the directory that contains its package.py file.""" @@ -150,29 +131,6 @@ class PackageDB(object): return join_path(pkg_dir, _package_file_name) - def installed_package_specs(self): - """Read installed package names straight from the install directory - layout. - """ - # Get specs from the directory layout but ensure that they're - # all normalized properly. - installed = [] - for spec in spack.install_layout.all_specs(): - spec.normalize() - installed.append(spec) - return installed - - - def installed_known_package_specs(self): - """Read installed package names straight from the install - directory layout, but return only specs for which the - package is known to this version of spack. - """ - for spec in spack.install_layout.all_specs(): - if self.exists(spec.name): - yield spec - - @memoized def all_package_names(self): """Generator function for all packages. 
This looks for -- cgit v1.2.3-70-g09d2 From 4a2bd1753a3c0b96c0b38ee9ec909cbf98308125 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Tue, 25 Aug 2015 15:11:18 -0700 Subject: Added dependency indices to database, ensuring correctly reconstructed specs from database Began work on file locking, currently commented out. --- lib/spack/spack/database.py | 102 +++++++++++++++++++++++++++++++++----------- lib/spack/spack/package.py | 1 + 2 files changed, 78 insertions(+), 25 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index d027db312f..7e2c3ac079 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -30,6 +30,7 @@ import imp import time import copy +import errno from external import yaml from external.yaml.error import MarkedYAMLError @@ -69,6 +70,10 @@ class Database(object): self.root = root self.file_name = file_name self.file_path = join_path(self.root,self.file_name) + + self.lock_name = "db_lock" + self.lock_path = join_path(self.root,self.lock_name) + self.data = [] self.last_write_time = 0 @@ -86,17 +91,38 @@ class Database(object): if file==None: return - self.data = [] - for sp in file['database']: + data = {} + for index, sp in file['database'].items(): spec = Spec.from_node_dict(sp['spec']) + deps = sp['dependency_indices'] path = sp['path'] dep_hash = sp['hash'] - db_entry = {'spec': spec, 'path': path, 'hash':dep_hash} - self.data.append(db_entry) + db_entry = {'deps':deps, 'spec': spec, 'path': path, 'hash':dep_hash} + data[index] = db_entry + + for sph in data.values(): + for idx in sph['deps']: + sph['spec'].dependencies[data[idx]['spec'].name] = data[idx]['spec'] + + self.data = data.values() def read_database(self): - """Reread Database from the data in the set location""" + """ + Re-read Database from the data in the set location + If the cache is fresh, return immediately. + Implemented with mkdir locking for the database file. 
+ """ + if not self.is_dirty(): + return + """ + while True: + try: + os.mkdir(self.lock_path) + break + except OSError as err: + pass + """ if os.path.isfile(self.file_path): with open(self.file_path,'r') as f: self.from_yaml(f) @@ -104,6 +130,8 @@ class Database(object): #The file doesn't exist, construct empty data. self.data = [] +# os.rmdir(self.lock_path) + def write_database_to_yaml(self,stream): """ @@ -111,24 +139,54 @@ class Database(object): Then stream all data to YAML """ node_list = [] - for sp in self.data: + spec_list = [sph['spec'] for sph in self.data] + + for sph in self.data: node = {} - node['spec']=Spec.to_node_dict(sp['spec']) -# node['spec'][sp['spec'].name]['hash']=sp['spec'].dag_hash() - node['hash']=sp['hash'] - node['path']=sp['path'] + deps = [] + for name,spec in sph['spec'].dependencies.items(): + deps.append(spec_list.index(spec)) + node['spec']=Spec.to_node_dict(sph['spec']) + node['hash']=sph['hash'] + node['path']=sph['path'] + node['dependency_indices']=deps node_list.append(node) - return yaml.dump({ 'database' : node_list}, + + node_dict = dict(enumerate(node_list)) + return yaml.dump({ 'database' : node_dict}, stream=stream, default_flow_style=False) def write(self): - """Write the database to the standard location""" - #creates file if necessary + """ + Write the database to the standard location + Implements mkdir locking for the database file + """ + """ + while True: + try: + os.mkdir(self.lock_path) + break + except OSError as err: + pass + """ with open(self.file_path,'w') as f: self.last_write_time = int(time.time()) self.write_database_to_yaml(f) + # os.rmdir(self.lock_path) + + + def get_index_of(self, spec): + """ + Returns the index of a spec in the database + If unable to find the spec it returns -1 + """ + for index, sph in enumerate(self.data): + if sph['spec'] == spec: + return index + return -1 + def is_dirty(self): """ @@ -140,12 +198,11 @@ class Database(object): # @_autospec def add(self, spec, path): - 
"""Re-read the database from the set location if data is dirty + """Read the database from the set location Add the specified entry as a dict Write the database back to memory """ - if self.is_dirty(): - self.read_database() + self.read_database() sph = {} sph['spec']=spec @@ -160,16 +217,14 @@ class Database(object): @_autospec def remove(self, spec): """ - Re-reads the database from the set location if data is dirty + Reads the database from the set location Searches for and removes the specified spec Writes the database back to memory """ - if self.is_dirty(): - self.read_database() + self.read_database() for sp in self.data: - - if sp['hash'] == spec.dag_hash() and sp['spec'] == Spec.from_node_dict(spec.to_node_dict()): + if sp['hash'] == spec.dag_hash() and sp['spec'] == spec: self.data.remove(sp) self.write() @@ -204,13 +259,10 @@ class Database(object): Read installed package names from the database and return their specs """ - if self.is_dirty(): - self.read_database() + self.read_database() installed = [] for sph in self.data: - sph['spec'].normalize() - sph['spec'].concretize() installed.append(sph['spec']) return installed diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index acf558d639..929e0c086c 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -557,6 +557,7 @@ class Package(object): @property def installed(self): + print self.prefix return os.path.isdir(self.prefix) -- cgit v1.2.3-70-g09d2 From e32c59f805c4e8d9cb23ce9fcf2edcf571ce3949 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Tue, 25 Aug 2015 15:32:45 -0700 Subject: Fixed file locking. Fix is slightly ugly (lock integer added) but it gets the job done It avoids having to spin simply on the OSError. 
--- lib/spack/spack/database.py | 49 ++++++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 18 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 7e2c3ac079..5e8bb172b8 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -115,22 +115,29 @@ class Database(object): """ if not self.is_dirty(): return - """ - while True: + + lock=0 + while lock==0: try: os.mkdir(self.lock_path) - break + lock=1 except OSError as err: pass - """ - if os.path.isfile(self.file_path): - with open(self.file_path,'r') as f: - self.from_yaml(f) - else: + + #The try statement ensures that a failure won't leave the + #database locked to other processes. + try: + if os.path.isfile(self.file_path): + with open(self.file_path,'r') as f: + self.from_yaml(f) + else: #The file doesn't exist, construct empty data. - self.data = [] + self.data = [] + except: + os.rmdir(self.lock_path) + raise -# os.rmdir(self.lock_path) + os.rmdir(self.lock_path) def write_database_to_yaml(self,stream): @@ -162,19 +169,25 @@ class Database(object): Write the database to the standard location Implements mkdir locking for the database file """ - """ - while True: + lock=0 + while lock==0: try: os.mkdir(self.lock_path) - break + lock=1 except OSError as err: pass - """ - with open(self.file_path,'w') as f: - self.last_write_time = int(time.time()) - self.write_database_to_yaml(f) - # os.rmdir(self.lock_path) + #The try statement ensures that a failure won't leave the + #database locked to other processes. 
+ try: + with open(self.file_path,'w') as f: + self.last_write_time = int(time.time()) + self.write_database_to_yaml(f) + except: + os.rmdir(self.lock_path) + raise + + os.rmdir(self.lock_path) def get_index_of(self, spec): -- cgit v1.2.3-70-g09d2 From ce8df65d7b9162b71be88e6268ee5172deab4cd9 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Tue, 25 Aug 2015 16:28:55 -0700 Subject: Eliminated unnecessary differences in pull request --- lib/spack/spack/directory_layout.py | 2 -- lib/spack/spack/package.py | 1 - lib/spack/spack/spec.py | 1 + 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index 6dc1b0e550..e61929d8fd 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -37,7 +37,6 @@ from llnl.util.filesystem import join_path, mkdirp from spack.spec import Spec from spack.error import SpackError -from spack.database import Database def _check_concrete(spec): @@ -154,7 +153,6 @@ class DirectoryLayout(object): path = os.path.dirname(path) - class YamlDirectoryLayout(DirectoryLayout): """Lays out installation directories like this:: / diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 929e0c086c..acf558d639 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -557,7 +557,6 @@ class Package(object): @property def installed(self): - print self.prefix return os.path.isdir(self.prefix) diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index df74b6064e..8050b73b9e 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -427,6 +427,7 @@ class Spec(object): spec = dep if isinstance(dep, Spec) else Spec(dep) self._add_dependency(spec) + # # Private routines here are called by the parser when building a spec. 
# -- cgit v1.2.3-70-g09d2 From 9345e7877983d1bde8a4aefbecdc4a8cab181186 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Tue, 25 Aug 2015 16:31:09 -0700 Subject: Fixed inaccurate comment in spec.py --- lib/spack/spack/spec.py | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 8050b73b9e..cde2e168a0 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -642,7 +642,6 @@ class Spec(object): def dag_hash(self, length=None): """ Return a hash of the entire spec DAG, including connectivity. - Stores the hash iff the spec is concrete. """ yaml_text = yaml.dump( self.to_node_dict(), default_flow_style=True, width=sys.maxint) -- cgit v1.2.3-70-g09d2 From f406fcb843edcea359e3fb6103eed560dadc1355 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Mon, 31 Aug 2015 09:38:38 -0700 Subject: Fixed several issues from code review Most importantly wrote the Lock, Read_Lock_Instance, and Write_Lock_Instance classes in lock.py Updated the locking in database.py TODO: Lock on larger areas --- lib/spack/llnl/util/lock.py | 136 ++++++++++++++++++++++++++++++++++++++++++++ lib/spack/spack/database.py | 130 ++++++++++++++++-------------------------- 2 files changed, 184 insertions(+), 82 deletions(-) create mode 100644 lib/spack/llnl/util/lock.py (limited to 'lib') diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py new file mode 100644 index 0000000000..05641475ed --- /dev/null +++ b/lib/spack/llnl/util/lock.py @@ -0,0 +1,136 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import os +import fcntl +import errno +import time +import socket + + +class Read_Lock_Instance(object): + """ + A context manager for getting shared access to the object lock + Arguments are lock and timeout (default 5 minutes) + """ + def __init__(self,lock,timeout = 300): + self._lock = lock + self._timeout = timeout + def __enter__(self): + self._lock.acquire_read(self._timeout) + def __exit__(self,type,value,traceback): + self._lock.release_read() + + +class Write_Lock_Instance(object): + """ + A context manager for getting exclusive access to the object lock + Arguments are lock and timeout (default 5 minutes) + """ + def __init__(self,lock,timeout = 300): + self._lock = lock + self._timeout = timeout + def __enter__(self): + self._lock.acquire_write(self._timeout) + def __exit__(self,type,value,traceback): + self._lock.release_write() + + +class Lock(object): + def __init__(self,file_path): + self._file_path = file_path + self._fd = os.open(file_path,os.O_RDWR) + self._reads = 0 + self._writes = 0 + + + def acquire_read(self,timeout): + """ + Implements recursive lock. 
If held in both read and write mode, + the write lock will be maintained until all locks are released + """ + if self._reads == 0 and self._writes == 0: + self._lock(fcntl.LOCK_SH,timeout) + self._reads += 1 + + + def acquire_write(self,timeout): + """ + Implements recursive lock + """ + if self._writes == 0: + self._lock(fcntl.LOCK_EX,timeout) + self._writes += 1 + + + def _lock(self,op,timeout): + """ + The timeout is implemented using nonblocking flock() + to avoid using signals for timing + Write locks store pid and host information to the lock file + Read locks do not store data + """ + total_time = 0 + while total_time < timeout: + try: + fcntl.flock(self._fd, op | fcntl.LOCK_NB) + if op == fcntl.LOCK_EX: + with open(self._file_path,'w') as f: + f.write("pid = "+str(os.getpid())+", host = "+socket.getfqdn()) + return + except IOError as error: + if error.errno == errno.EAGAIN or error.errno == EACCES: + pass + else: + raise + time.sleep(0.1) + total_time += 0.1 + + + def release_read(self): + """ + Assert there is a lock of the right type to release, recursive lock + """ + assert self._reads > 0 + if self._reads == 1 and self._writes == 0: + self._unlock() + self._reads -= 1 + + + def release_write(self): + """ + Assert there is a lock of the right type to release, recursive lock + """ + assert self._writes > 0 + if self._writes == 1 and self._reads == 0: + self._unlock() + self._writes -= 1 + + + def _unlock(self): + """ + Releases the lock regardless of mode. Note that read locks may be + masquerading as write locks at times, but this removes either. 
+ """ + fcntl.flock(self._fd,fcntl.LOCK_UN) diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 5e8bb172b8..6a429b68a9 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -38,6 +38,7 @@ from external.yaml.error import MarkedYAMLError import llnl.util.tty as tty from llnl.util.filesystem import join_path from llnl.util.lang import * +from llnl.util.lock import * import spack.error import spack.spec @@ -58,7 +59,7 @@ def _autospec(function): class Database(object): - def __init__(self,root,file_name="specDB.yaml"): + def __init__(self,root,file_name="_index.yaml"): """ Create an empty Database Location defaults to root/specDB.yaml @@ -67,28 +68,31 @@ class Database(object): path: the path to the install of that package dep_hash: a hash of the dependence DAG for that package """ - self.root = root - self.file_name = file_name - self.file_path = join_path(self.root,self.file_name) + self._root = root + self._file_name = file_name + self._file_path = join_path(self._root,self._file_name) - self.lock_name = "db_lock" - self.lock_path = join_path(self.root,self.lock_name) + self._lock_name = "_db_lock" + self._lock_path = join_path(self._root,self._lock_name) + if not os.path.exists(self._lock_path): + open(self._lock_path,'w').close() + self.lock = Lock(self._lock_path) - self.data = [] - self.last_write_time = 0 + self._data = [] + self._last_write_time = 0 - def from_yaml(self,stream): + def _read_from_yaml(self,stream): """ Fill database from YAML, do not maintain old data - Translate the spec portions from node-dict form to spec from + Translate the spec portions from node-dict form to spec form """ try: file = yaml.load(stream) except MarkedYAMLError, e: raise SpackYAMLError("error parsing YAML database:", str(e)) - if file==None: + if file is None: return data = {} @@ -104,51 +108,34 @@ class Database(object): for idx in sph['deps']: sph['spec'].dependencies[data[idx]['spec'].name] = data[idx]['spec'] - self.data = 
data.values() + self._data = data.values() def read_database(self): """ Re-read Database from the data in the set location If the cache is fresh, return immediately. - Implemented with mkdir locking for the database file. """ if not self.is_dirty(): return - lock=0 - while lock==0: - try: - os.mkdir(self.lock_path) - lock=1 - except OSError as err: - pass - - #The try statement ensures that a failure won't leave the - #database locked to other processes. - try: - if os.path.isfile(self.file_path): - with open(self.file_path,'r') as f: - self.from_yaml(f) - else: + if os.path.isfile(self._file_path): + with open(self._file_path,'r') as f: + self._read_from_yaml(f) + else: #The file doesn't exist, construct empty data. - self.data = [] - except: - os.rmdir(self.lock_path) - raise - - os.rmdir(self.lock_path) + self._data = [] - def write_database_to_yaml(self,stream): + def _write_database_to_yaml(self,stream): """ Replace each spec with its dict-node form Then stream all data to YAML """ node_list = [] - spec_list = [sph['spec'] for sph in self.data] + spec_list = [sph['spec'] for sph in self._data] - for sph in self.data: + for sph in self._data: node = {} deps = [] for name,spec in sph['spec'].dependencies.items(): @@ -167,46 +154,23 @@ class Database(object): def write(self): """ Write the database to the standard location - Implements mkdir locking for the database file + Everywhere that the database is written it is read + within the same lock, so there is no need to refresh + the database within write() """ - lock=0 - while lock==0: - try: - os.mkdir(self.lock_path) - lock=1 - except OSError as err: - pass - - #The try statement ensures that a failure won't leave the - #database locked to other processes. 
- try: - with open(self.file_path,'w') as f: - self.last_write_time = int(time.time()) - self.write_database_to_yaml(f) - except: - os.rmdir(self.lock_path) - raise - - os.rmdir(self.lock_path) - - - def get_index_of(self, spec): - """ - Returns the index of a spec in the database - If unable to find the spec it returns -1 - """ - for index, sph in enumerate(self.data): - if sph['spec'] == spec: - return index - return -1 - + temp_name = os.getpid() + socket.getfqdn() + ".temp" + temp_file = path.join(self._root,temp_name) + with open(self.temp_path,'w') as f: + self._last_write_time = int(time.time()) + self._write_database_to_yaml(f) + os.rename(temp_name,self._file_path) def is_dirty(self): """ Returns true iff the database file exists and was most recently written to by another spack instance. """ - return (os.path.isfile(self.file_path) and (os.path.getmtime(self.file_path) > self.last_write_time)) + return (os.path.isfile(self._file_path) and (os.path.getmtime(self._file_path) > self._last_write_time)) # @_autospec @@ -215,16 +179,15 @@ class Database(object): Add the specified entry as a dict Write the database back to memory """ - self.read_database() - sph = {} sph['spec']=spec sph['path']=path sph['hash']=spec.dag_hash() - self.data.append(sph) - - self.write() + with Write_Lock_Instance(self.lock,60): + self.read_database() + self._data.append(sph) + self.write() @_autospec @@ -234,13 +197,15 @@ class Database(object): Searches for and removes the specified spec Writes the database back to memory """ - self.read_database() + with Write_Lock_Instance(self.lock,60): + self.read_database() - for sp in self.data: - if sp['hash'] == spec.dag_hash() and sp['spec'] == spec: - self.data.remove(sp) + for sp in self._data: + #Not sure the hash comparison is necessary + if sp['hash'] == spec.dag_hash() and sp['spec'] == spec: + self._data.remove(sp) - self.write() + self.write() @_autospec @@ -272,10 +237,11 @@ class Database(object): Read installed package names 
from the database and return their specs """ - self.read_database() + with Read_Lock_Instance(self.lock,60): + self.read_database() installed = [] - for sph in self.data: + for sph in self._data: installed.append(sph['spec']) return installed -- cgit v1.2.3-70-g09d2 From c3246ee8ba26909ba73c3587ead9e61342692957 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Mon, 31 Aug 2015 09:46:55 -0700 Subject: Removed incorrect stage removal code from cleanup() in do_install() --- lib/spack/spack/package.py | 8 -------- 1 file changed, 8 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index acf558d639..e64b427852 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -779,14 +779,6 @@ class Package(object): "Manually remove this directory to fix:", self.prefix) - if not (keep_prefix and keep_stage): - self.do_clean() - else: - tty.warn("Keeping stage in place despite error.", - "Spack will refuse to uninstall dependencies of this package." 
+ - "Manually remove this directory to fix:", - self.stage.path) - def real_work(): try: -- cgit v1.2.3-70-g09d2 From 9c8e46dc228e096a83a892e5f53424bb3446d8f6 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Thu, 3 Sep 2015 09:21:19 -0700 Subject: Added conservative locking to the spack commands that access the database at _index --- lib/spack/spack/cmd/__init__.py | 22 +++++++------- lib/spack/spack/cmd/deactivate.py | 12 ++++---- lib/spack/spack/cmd/diy.py | 60 ++++++++++++++++++++------------------- lib/spack/spack/cmd/extensions.py | 4 ++- lib/spack/spack/cmd/find.py | 7 +++-- lib/spack/spack/cmd/install.py | 22 +++++++------- lib/spack/spack/cmd/uninstall.py | 52 +++++++++++++++++---------------- lib/spack/spack/database.py | 11 ++++--- 8 files changed, 104 insertions(+), 86 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index fd3ef3ed27..c62d22979a 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -28,6 +28,7 @@ import sys import llnl.util.tty as tty from llnl.util.lang import attr_setdefault +from llnl.util.lock import * import spack import spack.spec @@ -124,15 +125,16 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): - matching_specs = spack.installed_db.get_installed(spec) - if not matching_specs: - tty.die("Spec '%s' matches no installed packages." % spec) - - elif len(matching_specs) > 1: - args = ["%s matches multiple packages." % spec, - "Matching packages:"] - args += [" " + str(s) for s in matching_specs] - args += ["Use a more specific spec."] - tty.die(*args) + with Read_Lock_Instance(spack.installed_db.lock,1800): + matching_specs = spack.installed_db.get_installed(spec) + if not matching_specs: + tty.die("Spec '%s' matches no installed packages." % spec) + + elif len(matching_specs) > 1: + args = ["%s matches multiple packages." 
% spec, + "Matching packages:"] + args += [" " + str(s) for s in matching_specs] + args += ["Use a more specific spec."] + tty.die(*args) return matching_specs[0] diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index 1f0e303cdf..015809345a 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -24,6 +24,7 @@ ############################################################################## from external import argparse import llnl.util.tty as tty +from llnl.util.lock import * import spack import spack.cmd @@ -54,12 +55,13 @@ def deactivate(parser, args): if args.all: if pkg.extendable: tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec) - ext_pkgs = spack.installed_db.installed_extensions_for(spec) + with Read_Lock_Instance(spack.installed_db.lock,1800): + ext_pkgs = spack.installed_db.installed_extensions_for(spec) - for ext_pkg in ext_pkgs: - ext_pkg.spec.normalize() - if ext_pkg.activated: - ext_pkg.do_deactivate(force=True) + for ext_pkg in ext_pkgs: + ext_pkg.spec.normalize() + if ext_pkg.activated: + ext_pkg.do_deactivate(force=True) elif pkg.is_extension: if not args.force and not spec.package.activated: diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py index 6e7f10fba6..7403b9e3e8 100644 --- a/lib/spack/spack/cmd/diy.py +++ b/lib/spack/spack/cmd/diy.py @@ -27,6 +27,7 @@ import os from external import argparse import llnl.util.tty as tty +from llnl.util.lock import * import spack import spack.cmd @@ -54,40 +55,41 @@ def diy(self, args): if not args.spec: tty.die("spack diy requires a package spec argument.") - specs = spack.cmd.parse_specs(args.spec) - if len(specs) > 1: - tty.die("spack diy only takes one spec.") + with Write_Lock_Instance(spack.installed_db.lock,1800): + specs = spack.cmd.parse_specs(args.spec) + if len(specs) > 1: + tty.die("spack diy only takes one spec.") - spec = specs[0] - if not spack.db.exists(spec.name): - tty.warn("No such package: 
%s" % spec.name) - create = tty.get_yes_or_no("Create this package?", default=False) - if not create: - tty.msg("Exiting without creating.") - sys.exit(1) - else: - tty.msg("Running 'spack edit -f %s'" % spec.name) - edit_package(spec.name, True) - return + spec = specs[0] + if not spack.db.exists(spec.name): + tty.warn("No such package: %s" % spec.name) + create = tty.get_yes_or_no("Create this package?", default=False) + if not create: + tty.msg("Exiting without creating.") + sys.exit(1) + else: + tty.msg("Running 'spack edit -f %s'" % spec.name) + edit_package(spec.name, True) + return - if not spec.version.concrete: - tty.die("spack diy spec must have a single, concrete version.") + if not spec.version.concrete: + tty.die("spack diy spec must have a single, concrete version.") - spec.concretize() - package = spack.db.get(spec) + spec.concretize() + package = spack.db.get(spec) - if package.installed: - tty.error("Already installed in %s" % package.prefix) - tty.msg("Uninstall or try adding a version suffix for this DIY build.") - sys.exit(1) + if package.installed: + tty.error("Already installed in %s" % package.prefix) + tty.msg("Uninstall or try adding a version suffix for this DIY build.") + sys.exit(1) - # Forces the build to run out of the current directory. - package.stage = DIYStage(os.getcwd()) + # Forces the build to run out of the current directory. + package.stage = DIYStage(os.getcwd()) # TODO: make this an argument, not a global. - spack.do_checksum = False + spack.do_checksum = False - package.do_install( - keep_prefix=args.keep_prefix, - ignore_deps=args.ignore_deps, - keep_stage=True) # don't remove source dir for DIY. + package.do_install( + keep_prefix=args.keep_prefix, + ignore_deps=args.ignore_deps, + keep_stage=True) # don't remove source dir for DIY. 
diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index e919b1c4fb..66211e29a9 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -27,6 +27,7 @@ from external import argparse import llnl.util.tty as tty from llnl.util.tty.colify import colify +from llnl.util.lock import * import spack import spack.cmd @@ -80,7 +81,8 @@ def extensions(parser, args): colify(ext.name for ext in extensions) # List specs of installed extensions. - installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] + with Read_Lock_Instance(spack.installed_db.lock,1800): + installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: tty.msg("None installed.") diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index 6f9072e311..f7fa409ebb 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -32,6 +32,7 @@ import llnl.util.tty as tty from llnl.util.tty.colify import * from llnl.util.tty.color import * from llnl.util.lang import * +from llnl.util.lock import * import spack import spack.spec @@ -138,9 +139,11 @@ def find(parser, args): # Get all the specs the user asked for if not query_specs: - specs = set(spack.installed_db.installed_package_specs()) + with Read_Lock_Instance(spack.installed_db.lock,1800): + specs = set(spack.installed_db.installed_package_specs()) else: - results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] + with Read_Lock_Instance(spack.installed_db.lock,1800): + results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] specs = set.union(*results) if not args.mode: diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index acb688a092..330774b8d9 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -25,6 +25,7 @@ from external import argparse import llnl.util.tty as tty +from llnl.util.lock import * import 
spack import spack.cmd @@ -68,13 +69,14 @@ def install(parser, args): if args.no_checksum: spack.do_checksum = False # TODO: remove this global. - specs = spack.cmd.parse_specs(args.packages, concretize=True) - for spec in specs: - package = spack.db.get(spec) - package.do_install( - keep_prefix=args.keep_prefix, - keep_stage=args.keep_stage, - ignore_deps=args.ignore_deps, - make_jobs=args.jobs, - verbose=args.verbose, - fake=args.fake) + with Write_Lock_Instance(spack.installed_db.lock,1800): + specs = spack.cmd.parse_specs(args.packages, concretize=True) + for spec in specs: + package = spack.db.get(spec) + package.do_install( + keep_prefix=args.keep_prefix, + keep_stage=args.keep_stage, + ignore_deps=args.ignore_deps, + make_jobs=args.jobs, + verbose=args.verbose, + fake=args.fake) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index 4870712eb6..4b0267dac2 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -27,6 +27,7 @@ from external import argparse import llnl.util.tty as tty from llnl.util.tty.colify import colify +from llnl.util.lock import * import spack import spack.cmd @@ -53,35 +54,36 @@ def uninstall(parser, args): if not args.packages: tty.die("uninstall requires at least one package argument.") - specs = spack.cmd.parse_specs(args.packages) + with Write_Lock_Instance(spack.installed_db.lock,1800): + specs = spack.cmd.parse_specs(args.packages) - # For each spec provided, make sure it refers to only one package. - # Fail and ask user to be unambiguous if it doesn't - pkgs = [] - for spec in specs: - matching_specs = spack.installed_db.get_installed(spec) - if not args.all and len(matching_specs) > 1: - tty.error("%s matches multiple packages:" % spec) - print - display_specs(matching_specs, long=True) - print - print "You can either:" - print " a) Use a more specific spec, or" - print " b) use spack uninstall -a to uninstall ALL matching specs." 
- sys.exit(1) + # For each spec provided, make sure it refers to only one package. + # Fail and ask user to be unambiguous if it doesn't + pkgs = [] + for spec in specs: + matching_specs = spack.installed_db.get_installed(spec) + if not args.all and len(matching_specs) > 1: + tty.error("%s matches multiple packages:" % spec) + print + display_specs(matching_specs, long=True) + print + print "You can either:" + print " a) Use a more specific spec, or" + print " b) use spack uninstall -a to uninstall ALL matching specs." + sys.exit(1) - if len(matching_specs) == 0: - if args.force: continue - tty.die("%s does not match any installed packages." % spec) + if len(matching_specs) == 0: + if args.force: continue + tty.die("%s does not match any installed packages." % spec) - for s in matching_specs: - try: - # should work if package is known to spack - pkgs.append(s.package) + for s in matching_specs: + try: + # should work if package is known to spack + pkgs.append(s.package) - except spack.packages.UnknownPackageError, e: - # The package.py file has gone away -- but still want to uninstall. - spack.Package(s).do_uninstall(force=True) + except spack.packages.UnknownPackageError, e: + # The package.py file has gone away -- but still want to uninstall. 
+ spack.Package(s).do_uninstall(force=True) # Sort packages to be uninstalled by the number of installed dependents # This ensures we do things in the right order diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 6a429b68a9..9b759827d3 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -158,12 +158,12 @@ class Database(object): within the same lock, so there is no need to refresh the database within write() """ - temp_name = os.getpid() + socket.getfqdn() + ".temp" - temp_file = path.join(self._root,temp_name) - with open(self.temp_path,'w') as f: + temp_name = str(os.getpid()) + socket.getfqdn() + ".temp" + temp_file = join_path(self._root,temp_name) + with open(temp_file,'w') as f: self._last_write_time = int(time.time()) self._write_database_to_yaml(f) - os.rename(temp_name,self._file_path) + os.rename(temp_file,self._file_path) def is_dirty(self): """ @@ -184,6 +184,7 @@ class Database(object): sph['path']=path sph['hash']=spec.dag_hash() + #Should always already be locked with Write_Lock_Instance(self.lock,60): self.read_database() self._data.append(sph) @@ -197,6 +198,7 @@ class Database(object): Searches for and removes the specified spec Writes the database back to memory """ + #Should always already be locked with Write_Lock_Instance(self.lock,60): self.read_database() @@ -237,6 +239,7 @@ class Database(object): Read installed package names from the database and return their specs """ + #Should always already be locked with Read_Lock_Instance(self.lock,60): self.read_database() -- cgit v1.2.3-70-g09d2 From cd23d2eaa2d06e4a407a4ae88c28d13cbbd7fd1e Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Tue, 15 Sep 2015 14:20:19 -0700 Subject: Added spack fsck and re-read from glob if the database file does not exist. Allows older versions to smoothly upgrade to the database. 
--- lib/spack/spack/cmd/fsck.py | 43 +++++++++++++++++++++++++++++++++++++++++++ lib/spack/spack/database.py | 18 ++++++++++++++---- 2 files changed, 57 insertions(+), 4 deletions(-) create mode 100644 lib/spack/spack/cmd/fsck.py (limited to 'lib') diff --git a/lib/spack/spack/cmd/fsck.py b/lib/spack/spack/cmd/fsck.py new file mode 100644 index 0000000000..3141a7031d --- /dev/null +++ b/lib/spack/spack/cmd/fsck.py @@ -0,0 +1,43 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +from external import argparse + +from llnl.util.lock import * + +import spack +import os + +description = "Correct database irregularities" + +#Very basic version of spack fsck +def fsck(parser, args): + with Write_Lock_Instance(spack.installed_db.lock,1800): + #remove database file + if os.path.exists(spack.installed_db._file_path): + os.remove(spack.installed_db._file_path) + #read database + spack.installed_db.read_database() + #write database + spack.installed_db.write() diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 9b759827d3..680d184f1f 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -123,8 +123,15 @@ class Database(object): with open(self._file_path,'r') as f: self._read_from_yaml(f) else: - #The file doesn't exist, construct empty data. + #The file doesn't exist, construct from file paths self._data = [] + specs = spack.install_layout.all_specs() + for spec in specs: + sph = {} + sph['spec']=spec + sph['hash']=spec.dag_hash() + sph['path']=spack.install_layout.path_for_spec(spec) + self._data.append(sph) def _write_database_to_yaml(self,stream): @@ -167,10 +174,13 @@ class Database(object): def is_dirty(self): """ - Returns true iff the database file exists - and was most recently written to by another spack instance. + Returns true iff the database file does not exist + or was most recently written to by another spack instance. 
""" - return (os.path.isfile(self._file_path) and (os.path.getmtime(self._file_path) > self._last_write_time)) + if not os.path.isfile(self._file_path): + return True + else: + return (os.path.getmtime(self._file_path) > self._last_write_time) # @_autospec -- cgit v1.2.3-70-g09d2 From ccf311c9c6eb958c16e9621d8a2ef7665a4fa4d7 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Thu, 17 Sep 2015 00:16:12 -0700 Subject: Several changes to DB implementation. 1. Database stores a file version, so we can add to it in the future. 2. Database indexed by hashes and not numerical indexes. 3. Specs built by database have consistent hashes and it's checked. 4. minor naming and whitespace changes. --- lib/spack/llnl/util/filesystem.py | 2 +- lib/spack/llnl/util/lock.py | 27 ++-- lib/spack/spack/cmd/find.py | 5 +- lib/spack/spack/database.py | 321 +++++++++++++++++++++++--------------- lib/spack/spack/error.py | 4 +- lib/spack/spack/spec.py | 2 +- 6 files changed, 221 insertions(+), 140 deletions(-) (limited to 'lib') diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py index 029a7536df..03f25d3dff 100644 --- a/lib/spack/llnl/util/filesystem.py +++ b/lib/spack/llnl/util/filesystem.py @@ -222,7 +222,7 @@ def working_dir(dirname, **kwargs): def touch(path): """Creates an empty file at the specified path.""" - with closing(open(path, 'a')) as file: + with open(path, 'a') as file: os.utime(path, None) diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index 05641475ed..bb3b15c9cf 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -22,6 +22,7 @@ # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +"""Lock implementation for shared filesystems.""" import os import fcntl import errno @@ -34,11 +35,13 @@ class Read_Lock_Instance(object): A context 
manager for getting shared access to the object lock Arguments are lock and timeout (default 5 minutes) """ - def __init__(self,lock,timeout = 300): + def __init__(self, lock, timeout=300): self._lock = lock self._timeout = timeout + def __enter__(self): self._lock.acquire_read(self._timeout) + def __exit__(self,type,value,traceback): self._lock.release_read() @@ -48,17 +51,21 @@ class Write_Lock_Instance(object): A context manager for getting exclusive access to the object lock Arguments are lock and timeout (default 5 minutes) """ - def __init__(self,lock,timeout = 300): + def __init__(self, lock, timeout=300): self._lock = lock self._timeout = timeout + def __enter__(self): self._lock.acquire_write(self._timeout) + def __exit__(self,type,value,traceback): self._lock.release_write() class Lock(object): - def __init__(self,file_path): + """Distributed file-based lock using ``flock``.""" + + def __init__(self, file_path): self._file_path = file_path self._fd = os.open(file_path,os.O_RDWR) self._reads = 0 @@ -71,20 +78,20 @@ class Lock(object): the write lock will be maintained until all locks are released """ if self._reads == 0 and self._writes == 0: - self._lock(fcntl.LOCK_SH,timeout) + self._lock(fcntl.LOCK_SH, timeout) self._reads += 1 - def acquire_write(self,timeout): + def acquire_write(self, timeout): """ Implements recursive lock """ if self._writes == 0: - self._lock(fcntl.LOCK_EX,timeout) + self._lock(fcntl.LOCK_EX, timeout) self._writes += 1 - def _lock(self,op,timeout): + def _lock(self, op, timeout): """ The timeout is implemented using nonblocking flock() to avoid using signals for timing @@ -96,8 +103,8 @@ class Lock(object): try: fcntl.flock(self._fd, op | fcntl.LOCK_NB) if op == fcntl.LOCK_EX: - with open(self._file_path,'w') as f: - f.write("pid = "+str(os.getpid())+", host = "+socket.getfqdn()) + with open(self._file_path, 'w') as f: + f.write("pid = " + str(os.getpid()) + ", host = " + socket.getfqdn()) return except IOError as error: if 
error.errno == errno.EAGAIN or error.errno == EACCES: @@ -133,4 +140,4 @@ class Lock(object): Releases the lock regardless of mode. Note that read locks may be masquerading as write locks at times, but this removes either. """ - fcntl.flock(self._fd,fcntl.LOCK_UN) + fcntl.flock(self._fd, fcntl.LOCK_UN) diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index f7fa409ebb..2d2b884368 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -139,10 +139,11 @@ def find(parser, args): # Get all the specs the user asked for if not query_specs: - with Read_Lock_Instance(spack.installed_db.lock,1800): + with Read_Lock_Instance(spack.installed_db.lock, 1800): specs = set(spack.installed_db.installed_package_specs()) + else: - with Read_Lock_Instance(spack.installed_db.lock,1800): + with Read_Lock_Instance(spack.installed_db.lock, 1800): results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] specs = set.union(*results) diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 680d184f1f..43ba178fed 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -1,5 +1,5 @@ ############################################################################## -# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. 
@@ -23,95 +23,192 @@ # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os -import sys -import inspect -import glob -import imp - import time -import copy -import errno from external import yaml -from external.yaml.error import MarkedYAMLError +from external.yaml.error import MarkedYAMLError, YAMLError import llnl.util.tty as tty -from llnl.util.filesystem import join_path -from llnl.util.lang import * +from llnl.util.filesystem import * from llnl.util.lock import * -import spack.error import spack.spec +from spack.version import Version from spack.spec import Spec from spack.error import SpackError -from spack.virtual import ProviderIndex -from spack.util.naming import mod_to_class, validate_module_name + +# DB goes in this directory underneath the root +_db_dirname = '.spack-db' + +# DB version. This is stuck in the DB file to track changes in format. +_db_version = Version('0.9') def _autospec(function): """Decorator that automatically converts the argument of a single-arg function to a Spec.""" - def converter(self, spec_like, **kwargs): + def converter(self, spec_like, *args, **kwargs): if not isinstance(spec_like, spack.spec.Spec): spec_like = spack.spec.Spec(spec_like) - return function(self, spec_like, **kwargs) + return function(self, spec_like, *args, **kwargs) return converter +class InstallRecord(object): + """A record represents one installation in the DB.""" + def __init__(self, spec, path): + self.spec = spec + self.path = path + + def to_dict(self): + return { 'spec' : self.spec.to_node_dict(), + 'path' : self.path } + + @classmethod + def from_dict(cls, d): + return InstallRecord(d['spec'], d['path']) + + class Database(object): - def __init__(self,root,file_name="_index.yaml"): - """ - Create an empty Database - Location defaults to root/specDB.yaml + def __init__(self, root): + """Create an empty Database. 
+ + Location defaults to root/_index.yaml The individual data are dicts containing spec: the top level spec of a package path: the path to the install of that package dep_hash: a hash of the dependence DAG for that package """ self._root = root - self._file_name = file_name - self._file_path = join_path(self._root,self._file_name) - self._lock_name = "_db_lock" - self._lock_path = join_path(self._root,self._lock_name) + # Set up layout of database files. + self._db_dir = join_path(self._root, _db_dirname) + self._index_path = join_path(self._db_dir, 'index.yaml') + self._lock_path = join_path(self._db_dir, 'lock') + + # Create needed directories and files + if not os.path.exists(self._db_dir): + mkdirp(self._db_dir) + if not os.path.exists(self._lock_path): - open(self._lock_path,'w').close() - self.lock = Lock(self._lock_path) + touch(self._lock_path) - self._data = [] + # initialize rest of state. + self.lock = Lock(self._lock_path) + self._data = {} self._last_write_time = 0 - def _read_from_yaml(self,stream): + def _write_to_yaml(self, stream): + """Write out the databsae to a YAML file.""" + # map from per-spec hash code to installation record. + installs = dict((k, v.to_dict()) for k, v in self._data.items()) + + # databaes includes installation list and version. + + # NOTE: this DB version does not handle multiple installs of + # the same spec well. If there are 2 identical specs with + # different paths, it can't differentiate. + # TODO: fix this before we support multiple install locations. + database = { + 'database' : { + 'installs' : installs, + 'version' : str(_db_version) + } + } + + try: + return yaml.dump(database, stream=stream, default_flow_style=False) + except YAMLError as e: + raise SpackYAMLError("error writing YAML database:", str(e)) + + + def _read_spec_from_yaml(self, hash_key, installs): + """Recursively construct a spec from a hash in a YAML database.""" + # TODO: check validity of hash_key records here. 
+ spec_dict = installs[hash_key]['spec'] + + # Build spec from dict first. + spec = Spec.from_node_dict(spec_dict) + + # Add dependencies from other records in the install DB to + # form a full spec. + for dep_hash in spec_dict[spec.name]['dependencies'].values(): + spec._add_dependency(self._read_spec_from_yaml(dep_hash, installs)) + + return spec + + + def _read_from_yaml(self, stream): """ Fill database from YAML, do not maintain old data Translate the spec portions from node-dict form to spec form """ try: - file = yaml.load(stream) - except MarkedYAMLError, e: + if isinstance(stream, basestring): + with open(stream, 'r') as f: + yfile = yaml.load(f) + else: + yfile = yaml.load(stream) + + except MarkedYAMLError as e: raise SpackYAMLError("error parsing YAML database:", str(e)) - if file is None: + if yfile is None: return - data = {} - for index, sp in file['database'].items(): - spec = Spec.from_node_dict(sp['spec']) - deps = sp['dependency_indices'] - path = sp['path'] - dep_hash = sp['hash'] - db_entry = {'deps':deps, 'spec': spec, 'path': path, 'hash':dep_hash} - data[index] = db_entry + def check(cond, msg): + if not cond: raise CorruptDatabaseError(self._index_path, msg) + + check('database' in yfile, "No 'database' attribute in YAML.") + + # High-level file checks. + db = yfile['database'] + check('installs' in db, "No 'installs' in YAML DB.") + check('version' in db, "No 'version' in YAML DB.") - for sph in data.values(): - for idx in sph['deps']: - sph['spec'].dependencies[data[idx]['spec'].name] = data[idx]['spec'] + # TODO: better version check. + version = Version(db['version']) + if version != _db_version: + raise InvalidDatabaseVersionError(_db_version, version) - self._data = data.values() + # Iterate through database and check each record. 
+ installs = db['installs'] + data = {} + for hash_key, rec in installs.items(): + try: + spec = self._read_spec_from_yaml(hash_key, installs) + spec_hash = spec.dag_hash() + if not spec_hash == hash_key: + tty.warn("Hash mismatch in database: %s -> spec with hash %s" + % (hash_key, spec_hash)) + continue + + data[hash_key] = InstallRecord(spec, rec['path']) + + except Exception as e: + tty.warn("Invalid database reecord:", + "file: %s" % self._index_path, + "hash: %s" % hash_key, + "cause: %s" % str(e)) + raise + + self._data = data + + + def reindex(self, directory_layout): + """Build database index from scratch based from a directory layout.""" + with Write_Lock_Instance(self.lock, 60): + data = {} + for spec in directory_layout.all_specs(): + path = directory_layout.path_for_spec(spec) + hash_key = spec.dag_hash() + data[hash_key] = InstallRecord(spec, path) + self._data = data + self.write() - def read_database(self): + def read(self): """ Re-read Database from the data in the set location If the cache is fresh, return immediately. 
@@ -119,43 +216,12 @@ class Database(object): if not self.is_dirty(): return - if os.path.isfile(self._file_path): - with open(self._file_path,'r') as f: - self._read_from_yaml(f) + if os.path.isfile(self._index_path): + # Read from YAML file if a database exists + self._read_from_yaml(self._index_path) else: - #The file doesn't exist, construct from file paths - self._data = [] - specs = spack.install_layout.all_specs() - for spec in specs: - sph = {} - sph['spec']=spec - sph['hash']=spec.dag_hash() - sph['path']=spack.install_layout.path_for_spec(spec) - self._data.append(sph) - - - def _write_database_to_yaml(self,stream): - """ - Replace each spec with its dict-node form - Then stream all data to YAML - """ - node_list = [] - spec_list = [sph['spec'] for sph in self._data] - - for sph in self._data: - node = {} - deps = [] - for name,spec in sph['spec'].dependencies.items(): - deps.append(spec_list.index(spec)) - node['spec']=Spec.to_node_dict(sph['spec']) - node['hash']=sph['hash'] - node['path']=sph['path'] - node['dependency_indices']=deps - node_list.append(node) - - node_dict = dict(enumerate(node_list)) - return yaml.dump({ 'database' : node_dict}, - stream=stream, default_flow_style=False) + # The file doesn't exist, try to traverse the directory. 
+ self.reindex(spack.install_layout) def write(self): @@ -165,39 +231,42 @@ class Database(object): within the same lock, so there is no need to refresh the database within write() """ - temp_name = str(os.getpid()) + socket.getfqdn() + ".temp" - temp_file = join_path(self._root,temp_name) - with open(temp_file,'w') as f: - self._last_write_time = int(time.time()) - self._write_database_to_yaml(f) - os.rename(temp_file,self._file_path) + temp_name = '%s.%s.temp' % (socket.getfqdn(), os.getpid()) + temp_file = join_path(self._db_dir, temp_name) + + # Write a temporary database file them move it into place + try: + with open(temp_file, 'w') as f: + self._last_write_time = int(time.time()) + self._write_to_yaml(f) + os.rename(temp_file, self._index_path) + + except: + # Clean up temp file if something goes wrong. + if os.path.exists(temp_file): + os.remove(temp_file) + raise + def is_dirty(self): """ Returns true iff the database file does not exist or was most recently written to by another spack instance. 
""" - if not os.path.isfile(self._file_path): - return True - else: - return (os.path.getmtime(self._file_path) > self._last_write_time) + return (not os.path.isfile(self._index_path) or + (os.path.getmtime(self._index_path) > self._last_write_time)) -# @_autospec + @_autospec def add(self, spec, path): """Read the database from the set location Add the specified entry as a dict Write the database back to memory """ - sph = {} - sph['spec']=spec - sph['path']=path - sph['hash']=spec.dag_hash() - - #Should always already be locked - with Write_Lock_Instance(self.lock,60): - self.read_database() - self._data.append(sph) + # Should always already be locked + with Write_Lock_Instance(self.lock, 60): + self.read() + self._data[spec.dag_hash()] = InstallRecord(spec, path) self.write() @@ -208,23 +277,18 @@ class Database(object): Searches for and removes the specified spec Writes the database back to memory """ - #Should always already be locked - with Write_Lock_Instance(self.lock,60): - self.read_database() - - for sp in self._data: - #Not sure the hash comparison is necessary - if sp['hash'] == spec.dag_hash() and sp['spec'] == spec: - self._data.remove(sp) - + # Should always already be locked + with Write_Lock_Instance(self.lock, 60): + self.read() + hash_key = spec.dag_hash() + if hash_key in self._data: + del self._data[hash_key] self.write() @_autospec def get_installed(self, spec): - """ - Get all the installed specs that satisfy the provided spec constraint - """ + """Get installed specs that satisfy the provided spec constraint.""" return [s for s in self.installed_package_specs() if s.satisfies(spec)] @@ -238,10 +302,10 @@ class Database(object): try: if s.package.extends(extendee_spec): yield s.package - except UnknownPackageError, e: + except UnknownPackageError as e: continue - #skips unknown packages - #TODO: conditional way to do this instead of catching exceptions + # skips unknown packages + # TODO: conditional way to do this instead of catching 
exceptions def installed_package_specs(self): @@ -249,14 +313,10 @@ class Database(object): Read installed package names from the database and return their specs """ - #Should always already be locked - with Read_Lock_Instance(self.lock,60): - self.read_database() - - installed = [] - for sph in self._data: - installed.append(sph['spec']) - return installed + # Should always already be locked + with Read_Lock_Instance(self.lock, 60): + self.read() + return sorted(rec.spec for rec in self._data.values()) def installed_known_package_specs(self): @@ -265,5 +325,18 @@ class Database(object): Return only the specs for which the package is known to this version of spack """ - return [s for s in self.installed_package_specs() if spack.db.exists(s.name)] + return [s for s in self.installed_package_specs() + if spack.db.exists(s.name)] + + +class CorruptDatabaseError(SpackError): + def __init__(self, path, msg=''): + super(CorruptDatabaseError, self).__init__( + "Spack database is corrupt: %s. %s" %(path, msg)) + +class InvalidDatabaseVersionError(SpackError): + def __init__(self, expected, found): + super(InvalidDatabaseVersionError, self).__init__( + "Expected database version %s but found version %s" + % (expected, found)) diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py index bfa7951a47..b3b24e6105 100644 --- a/lib/spack/spack/error.py +++ b/lib/spack/spack/error.py @@ -55,8 +55,8 @@ class SpackError(Exception): def __str__(self): msg = self.message - if self.long_message: - msg += "\n %s" % self.long_message + if self._long_message: + msg += "\n %s" % self._long_message return msg class UnsupportedPlatformError(SpackError): diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index cde2e168a0..7b79feb311 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -2000,4 +2000,4 @@ class UnsatisfiableDependencySpecError(UnsatisfiableSpecError): class SpackYAMLError(spack.error.SpackError): def __init__(self, msg, yaml_error): - 
super(SpackError, self).__init__(msg, str(yaml_error)) + super(SpackYAMLError, self).__init__(msg, str(yaml_error)) -- cgit v1.2.3-70-g09d2 From e17ad6a684b94211e9b4267cca68cc7fdf6ad277 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Thu, 17 Sep 2015 01:05:19 -0700 Subject: Simplify lock context managers. --- lib/spack/llnl/util/lock.py | 30 +++++++++++++++++++++--------- lib/spack/spack/cmd/__init__.py | 3 +-- lib/spack/spack/cmd/deactivate.py | 3 +-- lib/spack/spack/cmd/diy.py | 3 +-- lib/spack/spack/cmd/extensions.py | 3 +-- lib/spack/spack/cmd/find.py | 5 ++--- lib/spack/spack/cmd/fsck.py | 17 +++-------------- lib/spack/spack/cmd/install.py | 3 +-- lib/spack/spack/cmd/uninstall.py | 3 +-- lib/spack/spack/database.py | 22 +++++++++++++++++----- 10 files changed, 49 insertions(+), 43 deletions(-) (limited to 'lib') diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index bb3b15c9cf..3cd02befe5 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -29,13 +29,15 @@ import errno import time import socket +# Default timeout for locks. +DEFAULT_TIMEOUT = 60 + +class _ReadLockContext(object): + """Context manager that takes and releases a read lock. -class Read_Lock_Instance(object): - """ - A context manager for getting shared access to the object lock Arguments are lock and timeout (default 5 minutes) """ - def __init__(self, lock, timeout=300): + def __init__(self, lock, timeout=DEFAULT_TIMEOUT): self._lock = lock self._timeout = timeout @@ -46,12 +48,12 @@ class Read_Lock_Instance(object): self._lock.release_read() -class Write_Lock_Instance(object): - """ - A context manager for getting exclusive access to the object lock +class _WriteLockContext(object): + """Context manager that takes and releases a write lock. 
+ Arguments are lock and timeout (default 5 minutes) """ - def __init__(self, lock, timeout=300): + def __init__(self, lock, timeout=DEFAULT_TIMEOUT): self._lock = lock self._timeout = timeout @@ -72,7 +74,17 @@ class Lock(object): self._writes = 0 - def acquire_read(self,timeout): + def write_lock(self, timeout=DEFAULT_TIMEOUT): + """Convenience method that returns a write lock context.""" + return _WriteLockContext(self, timeout) + + + def read_lock(self, timeout=DEFAULT_TIMEOUT): + """Convenience method that returns a read lock context.""" + return _ReadLockContext(self, timeout) + + + def acquire_read(self, timeout): """ Implements recursive lock. If held in both read and write mode, the write lock will be maintained until all locks are released diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index c62d22979a..a8e8b1a48b 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -28,7 +28,6 @@ import sys import llnl.util.tty as tty from llnl.util.lang import attr_setdefault -from llnl.util.lock import * import spack import spack.spec @@ -125,7 +124,7 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): - with Read_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.read_lock(): matching_specs = spack.installed_db.get_installed(spec) if not matching_specs: tty.die("Spec '%s' matches no installed packages." 
% spec) diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index 015809345a..5428e3d2de 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -24,7 +24,6 @@ ############################################################################## from external import argparse import llnl.util.tty as tty -from llnl.util.lock import * import spack import spack.cmd @@ -55,7 +54,7 @@ def deactivate(parser, args): if args.all: if pkg.extendable: tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec) - with Read_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.read_lock(): ext_pkgs = spack.installed_db.installed_extensions_for(spec) for ext_pkg in ext_pkgs: diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py index 7403b9e3e8..6178c9c3e3 100644 --- a/lib/spack/spack/cmd/diy.py +++ b/lib/spack/spack/cmd/diy.py @@ -27,7 +27,6 @@ import os from external import argparse import llnl.util.tty as tty -from llnl.util.lock import * import spack import spack.cmd @@ -55,7 +54,7 @@ def diy(self, args): if not args.spec: tty.die("spack diy requires a package spec argument.") - with Write_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.write_lock(): specs = spack.cmd.parse_specs(args.spec) if len(specs) > 1: tty.die("spack diy only takes one spec.") diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index 66211e29a9..f0f99a2691 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -27,7 +27,6 @@ from external import argparse import llnl.util.tty as tty from llnl.util.tty.colify import colify -from llnl.util.lock import * import spack import spack.cmd @@ -81,7 +80,7 @@ def extensions(parser, args): colify(ext.name for ext in extensions) # List specs of installed extensions. 
- with Read_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.read_lock(): installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index 2d2b884368..e2edd454f4 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -32,7 +32,6 @@ import llnl.util.tty as tty from llnl.util.tty.colify import * from llnl.util.tty.color import * from llnl.util.lang import * -from llnl.util.lock import * import spack import spack.spec @@ -139,11 +138,11 @@ def find(parser, args): # Get all the specs the user asked for if not query_specs: - with Read_Lock_Instance(spack.installed_db.lock, 1800): + with spack.installed_db.read_lock(): specs = set(spack.installed_db.installed_package_specs()) else: - with Read_Lock_Instance(spack.installed_db.lock, 1800): + with spack.installed_db.read_lock(): results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] specs = set.union(*results) diff --git a/lib/spack/spack/cmd/fsck.py b/lib/spack/spack/cmd/fsck.py index 3141a7031d..9a3c450dcf 100644 --- a/lib/spack/spack/cmd/fsck.py +++ b/lib/spack/spack/cmd/fsck.py @@ -1,5 +1,5 @@ ############################################################################## -# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. 
@@ -23,21 +23,10 @@ # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from external import argparse - -from llnl.util.lock import * - import spack -import os description = "Correct database irregularities" -#Very basic version of spack fsck +# Very basic version of spack fsck def fsck(parser, args): - with Write_Lock_Instance(spack.installed_db.lock,1800): - #remove database file - if os.path.exists(spack.installed_db._file_path): - os.remove(spack.installed_db._file_path) - #read database - spack.installed_db.read_database() - #write database - spack.installed_db.write() + spack.installed_db.reindex(spack.install_layout) diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index 330774b8d9..ada655b937 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -25,7 +25,6 @@ from external import argparse import llnl.util.tty as tty -from llnl.util.lock import * import spack import spack.cmd @@ -69,7 +68,7 @@ def install(parser, args): if args.no_checksum: spack.do_checksum = False # TODO: remove this global. 
- with Write_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.write_lock(): specs = spack.cmd.parse_specs(args.packages, concretize=True) for spec in specs: package = spack.db.get(spec) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index 4b0267dac2..7425db3ca3 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -27,7 +27,6 @@ from external import argparse import llnl.util.tty as tty from llnl.util.tty.colify import colify -from llnl.util.lock import * import spack import spack.cmd @@ -54,7 +53,7 @@ def uninstall(parser, args): if not args.packages: tty.die("uninstall requires at least one package argument.") - with Write_Lock_Instance(spack.installed_db.lock,1800): + with spack.installed_db.write_lock(): specs = spack.cmd.parse_specs(args.packages) # For each spec provided, make sure it refers to only one package. diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 43ba178fed..cea56eb1b9 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -24,13 +24,14 @@ ############################################################################## import os import time +import socket from external import yaml from external.yaml.error import MarkedYAMLError, YAMLError import llnl.util.tty as tty from llnl.util.filesystem import * -from llnl.util.lock import * +from llnl.util.lock import Lock import spack.spec from spack.version import Version @@ -99,6 +100,16 @@ class Database(object): self._last_write_time = 0 + def write_lock(self): + """Get a write lock context for use in a `with` block.""" + return self.lock.write_lock() + + + def read_lock(self): + """Get a read lock context for use in a `with` block.""" + return self.lock.read_lock() + + def _write_to_yaml(self, stream): """Write out the databsae to a YAML file.""" # map from per-spec hash code to installation record. 
@@ -198,13 +209,14 @@ class Database(object): def reindex(self, directory_layout): """Build database index from scratch based from a directory layout.""" - with Write_Lock_Instance(self.lock, 60): + with self.write_lock(): data = {} for spec in directory_layout.all_specs(): path = directory_layout.path_for_spec(spec) hash_key = spec.dag_hash() data[hash_key] = InstallRecord(spec, path) self._data = data + self.write() @@ -264,7 +276,7 @@ class Database(object): Write the database back to memory """ # Should always already be locked - with Write_Lock_Instance(self.lock, 60): + with self.write_lock(): self.read() self._data[spec.dag_hash()] = InstallRecord(spec, path) self.write() @@ -278,7 +290,7 @@ class Database(object): Writes the database back to memory """ # Should always already be locked - with Write_Lock_Instance(self.lock, 60): + with self.write_lock(): self.read() hash_key = spec.dag_hash() if hash_key in self._data: @@ -314,7 +326,7 @@ class Database(object): and return their specs """ # Should always already be locked - with Read_Lock_Instance(self.lock, 60): + with self.read_lock(): self.read() return sorted(rec.spec for rec in self._data.values()) -- cgit v1.2.3-70-g09d2 From fb73979345d16b4912ea1f6da148d35c676a6576 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Thu, 17 Sep 2015 16:09:59 -0700 Subject: Allow custom timeout for database locking. --- lib/spack/spack/database.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index cea56eb1b9..e74217a262 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -44,6 +44,8 @@ _db_dirname = '.spack-db' # DB version. This is stuck in the DB file to track changes in format. _db_version = Version('0.9') +# Default timeout for spack database locks is 5 min. 
+_db_lock_timeout = 300 def _autospec(function): """Decorator that automatically converts the argument of a single-arg @@ -100,14 +102,14 @@ class Database(object): self._last_write_time = 0 - def write_lock(self): + def write_lock(self, timeout=_db_lock_timeout): """Get a write lock context for use in a `with` block.""" - return self.lock.write_lock() + return self.lock.write_lock(timeout) - def read_lock(self): + def read_lock(self, timeout=_db_lock_timeout): """Get a read lock context for use in a `with` block.""" - return self.lock.read_lock() + return self.lock.read_lock(timeout) def _write_to_yaml(self, stream): -- cgit v1.2.3-70-g09d2 From d0e22b22406c8fb064031dbd4ac887b7a9abbc95 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Fri, 18 Sep 2015 11:40:05 -0700 Subject: Add ref counting to database. This does not handle removal properly yet. --- lib/spack/spack/cmd/__init__.py | 2 +- lib/spack/spack/cmd/find.py | 35 ++++++-- lib/spack/spack/cmd/module.py | 4 +- lib/spack/spack/cmd/uninstall.py | 2 +- lib/spack/spack/database.py | 188 ++++++++++++++++++++++++++++++--------- lib/spack/spack/package.py | 7 +- 6 files changed, 183 insertions(+), 55 deletions(-) (limited to 'lib') diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index a8e8b1a48b..d4778b1375 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -125,7 +125,7 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): with spack.installed_db.read_lock(): - matching_specs = spack.installed_db.get_installed(spec) + matching_specs = spack.installed_db.query(spec) if not matching_specs: tty.die("Spec '%s' matches no installed packages." 
% spec) diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index e2edd454f4..6a0c3d11ff 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -54,6 +54,16 @@ def setup_parser(subparser): '-L', '--very-long', action='store_true', dest='very_long', help='Show dependency hashes as well as versions.') + subparser.add_argument( + '-u', '--unknown', action='store_true', dest='unknown', + help='Show only specs Spack does not have a package for.') + subparser.add_argument( + '-m', '--missing', action='store_true', dest='missing', + help='Show missing dependencies as well as installed specs.') + subparser.add_argument( + '-M', '--only-missing', action='store_true', dest='only_missing', + help='Show only missing dependencies.') + subparser.add_argument( 'query_specs', nargs=argparse.REMAINDER, help='optional specs to filter results') @@ -113,6 +123,7 @@ def display_specs(specs, **kwargs): if hashes: string += gray_hash(s, hlen) + ' ' string += s.format('$-_$@$+', color=True) + return string colify(fmt(s) for s in specs) @@ -136,15 +147,23 @@ def find(parser, args): if not query_specs: return + # Set up query arguments. 
+ installed, known = True, any + if args.only_missing: + installed = False + elif args.missing: + installed = any + if args.unknown: + known = False + q_args = { 'installed' : installed, 'known' : known } + # Get all the specs the user asked for - if not query_specs: - with spack.installed_db.read_lock(): - specs = set(spack.installed_db.installed_package_specs()) - - else: - with spack.installed_db.read_lock(): - results = [set(spack.installed_db.get_installed(qs)) for qs in query_specs] - specs = set.union(*results) + with spack.installed_db.read_lock(): + if not query_specs: + specs = set(spack.installed_db.query(**q_args)) + else: + results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs] + specs = set.union(*results) if not args.mode: args.mode = 'short' diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py index 215d877bd0..654b0cb2fa 100644 --- a/lib/spack/spack/cmd/module.py +++ b/lib/spack/spack/cmd/module.py @@ -65,7 +65,7 @@ def module_find(mtype, spec_array): tty.die("You can only pass one spec.") spec = specs[0] - specs = [s for s in spack.installed_db.installed_package_specs() if s.satisfies(spec)] + specs = spack.installed_db.query(spec) if len(specs) == 0: tty.die("No installed packages match spec %s" % spec) @@ -86,7 +86,7 @@ def module_find(mtype, spec_array): def module_refresh(): """Regenerate all module files for installed packages known to spack (some packages may no longer exist).""" - specs = [s for s in spack.installed_db.installed_known_package_specs()] + specs = [s for s in spack.installed_db.query(installed=True, known=True)] for name, cls in module_types.items(): tty.msg("Regenerating %s module files." 
% name) diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index 7425db3ca3..7b7c32c065 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -60,7 +60,7 @@ def uninstall(parser, args): # Fail and ask user to be unambiguous if it doesn't pkgs = [] for spec in specs: - matching_specs = spack.installed_db.get_installed(spec) + matching_specs = spack.installed_db.query(spec) if not args.all and len(matching_specs) > 1: tty.error("%s matches multiple packages:" % spec) print diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index e74217a262..1d1c640d66 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -22,6 +22,23 @@ # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## +"""Spack's installation tracking database. + +The database serves two purposes: + + 1. It implements a cache on top of a potentially very large Spack + directory hierarchy, speeding up many operations that would + otherwise require filesystem access. + + 2. It will allow us to track external installations as well as lost + packages and their dependencies. + +Prior ot the implementation of this store, a direcotry layout served +as the authoritative database of packages in Spack. This module +provides a cache and a sanity checking mechanism for what is in the +filesystem. + +""" import os import time import socket @@ -58,18 +75,37 @@ def _autospec(function): class InstallRecord(object): - """A record represents one installation in the DB.""" - def __init__(self, spec, path): + """A record represents one installation in the DB. + + The record keeps track of the spec for the installation, its + install path, AND whether or not it is installed. 
We need the + installed flag in case a user either: + + a) blew away a directory, or + b) used spack uninstall -f to get rid of it + + If, in either case, the package was removed but others still + depend on it, we still need to track its spec, so we don't + actually remove from the database until a spec has no installed + dependents left. + + """ + def __init__(self, spec, path, installed): self.spec = spec self.path = path + self.installed = installed + self.ref_count = 0 def to_dict(self): - return { 'spec' : self.spec.to_node_dict(), - 'path' : self.path } + return { 'spec' : self.spec.to_node_dict(), + 'path' : self.path, + 'installed' : self.installed, + 'ref_count' : self.ref_count } @classmethod def from_dict(cls, d): - return InstallRecord(d['spec'], d['path']) + # TODO: check the dict more rigorously. + return InstallRecord(d['spec'], d['path'], d['installed'], d['ref_count']) class Database(object): @@ -136,9 +172,11 @@ class Database(object): raise SpackYAMLError("error writing YAML database:", str(e)) - def _read_spec_from_yaml(self, hash_key, installs): + def _read_spec_from_yaml(self, hash_key, installs, parent_key=None): """Recursively construct a spec from a hash in a YAML database.""" - # TODO: check validity of hash_key records here. + if hash_key not in installs: + parent = read_spec(installs[parent_key]['path']) + spec_dict = installs[hash_key]['spec'] # Build spec from dict first. @@ -147,7 +185,8 @@ class Database(object): # Add dependencies from other records in the install DB to # form a full spec. for dep_hash in spec_dict[spec.name]['dependencies'].values(): - spec._add_dependency(self._read_spec_from_yaml(dep_hash, installs)) + child = self._read_spec_from_yaml(dep_hash, installs, hash_key) + spec._add_dependency(child) return spec @@ -175,12 +214,12 @@ class Database(object): check('database' in yfile, "No 'database' attribute in YAML.") - # High-level file checks. 
+ # High-level file checks db = yfile['database'] check('installs' in db, "No 'installs' in YAML DB.") check('version' in db, "No 'version' in YAML DB.") - # TODO: better version check. + # TODO: better version checking semantics. version = Version(db['version']) if version != _db_version: raise InvalidDatabaseVersionError(_db_version, version) @@ -190,14 +229,21 @@ class Database(object): data = {} for hash_key, rec in installs.items(): try: + # This constructs a spec DAG from the list of all installs spec = self._read_spec_from_yaml(hash_key, installs) + + # Validate the spec by ensuring the stored and actual + # hashes are the same. spec_hash = spec.dag_hash() if not spec_hash == hash_key: tty.warn("Hash mismatch in database: %s -> spec with hash %s" % (hash_key, spec_hash)) - continue + continue # TODO: is skipping the right thing to do? - data[hash_key] = InstallRecord(spec, rec['path']) + # Insert the brand new spec in the database. Each + # spec has its own copies of its dependency specs. + # TODO: would a more immmutable spec implementation simplify this? + data[hash_key] = InstallRecord(spec, rec['path'], rec['installed']) except Exception as e: tty.warn("Invalid database reecord:", @@ -213,12 +259,29 @@ class Database(object): """Build database index from scratch based from a directory layout.""" with self.write_lock(): data = {} + + # Ask the directory layout to traverse the filesystem. for spec in directory_layout.all_specs(): + # Create a spec for each known package and add it. path = directory_layout.path_for_spec(spec) hash_key = spec.dag_hash() - data[hash_key] = InstallRecord(spec, path) + data[hash_key] = InstallRecord(spec, path, True) + + # Recursively examine dependencies and add them, even + # if they are NOT installed. This ensures we know + # about missing dependencies. 
+ for dep in spec.traverse(root=False): + dep_hash = dep.dag_hash() + if dep_hash not in data: + path = directory_layout.path_for_spec(dep) + installed = os.path.isdir(path) + data[dep_hash] = InstallRecord(dep.copy(), path, installed) + data[dep_hash].ref_count += 1 + + # Assuming everything went ok, replace this object's data. self._data = data + # write out, blowing away the old version if necessary self.write() @@ -274,22 +337,37 @@ class Database(object): @_autospec def add(self, spec, path): """Read the database from the set location - Add the specified entry as a dict - Write the database back to memory + + Add the specified entry as a dict, then write the database + back to memory. This assumes that ALL dependencies are already in + the database. Should not be called otherwise. + """ # Should always already be locked with self.write_lock(): self.read() - self._data[spec.dag_hash()] = InstallRecord(spec, path) + self._data[spec.dag_hash()] = InstallRecord(spec, path, True) + + # sanity check the dependencies in case something went + # wrong during install() + # TODO: ensure no races during distributed install. + for dep in spec.traverse(root=False): + assert dep.dag_hash() in self._data + self.write() @_autospec def remove(self, spec): - """ - Reads the database from the set location - Searches for and removes the specified spec - Writes the database back to memory + """Removes a spec from the database. To be called on uninstall. + + Reads the database, then: + + 1. Marks the spec as not installed. + 2. Removes the spec if it has no more dependents. + 3. If removed, recursively updates dependencies' ref counts + and remvoes them if they are no longer needed. 
+ """ # Should always already be locked with self.write_lock(): @@ -300,19 +378,13 @@ class Database(object): self.write() - @_autospec - def get_installed(self, spec): - """Get installed specs that satisfy the provided spec constraint.""" - return [s for s in self.installed_package_specs() if s.satisfies(spec)] - - @_autospec def installed_extensions_for(self, extendee_spec): """ Return the specs of all packages that extend the given spec """ - for s in self.installed_package_specs(): + for s in self.query(): try: if s.package.extends(extendee_spec): yield s.package @@ -322,25 +394,59 @@ class Database(object): # TODO: conditional way to do this instead of catching exceptions - def installed_package_specs(self): - """ - Read installed package names from the database - and return their specs + def query(self, query_spec=any, known=any, installed=True): + """Run a query on the database. + + ``query_spec`` + Queries iterate through specs in the database and return + those that satisfy the supplied ``query_spec``. If + query_spec is `any`, This will match all specs in the + database. If it is a spec, we'll evaluate + ``spec.satisfies(query_spec)``. + + The query can be constrained by two additional attributes: + + ``known`` + Possible values: True, False, any + + Specs that are "known" are those for which Spack can + locate a ``package.py`` file -- i.e., Spack "knows" how to + install them. Specs that are unknown may represent + packages that existed in a previous version of Spack, but + have since either changed their name or been removed. + + ``installed`` + Possible values: True, False, any + + Specs for which a prefix exists are "installed". A spec + that is NOT installed will be in the database if some + other spec depends on it but its installation has gone + away since Spack installed it. + + TODO: Specs are a lot like queries. Should there be a + wildcard spec object, and should specs have attributes + like installed and known that can be queried? 
Or are + these really special cases that only belong here? + """ - # Should always already be locked with self.read_lock(): self.read() - return sorted(rec.spec for rec in self._data.values()) + results = [] + for key, rec in self._data.items(): + if installed is not any and rec.installed != installed: + continue + if known is not any and spack.db.exists(rec.spec.name) != known: + continue + if query_spec is any or rec.spec.satisfies(query_spec): + results.append(rec.spec) - def installed_known_package_specs(self): - """ - Read installed package names from the database. - Return only the specs for which the package is known - to this version of spack - """ - return [s for s in self.installed_package_specs() - if spack.db.exists(s.name)] + return sorted(results) + + + def missing(self, spec): + key = spec.dag_hash() + return key in self._data and not self._data[key].installed class CorruptDatabaseError(SpackError): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index e64b427852..e6944ce40c 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -563,9 +563,12 @@ class Package(object): @property def installed_dependents(self): """Return a list of the specs of all installed packages that depend - on this one.""" + on this one. + + TODO: move this method to database.py? 
+ """ dependents = [] - for spec in spack.installed_db.installed_package_specs(): + for spec in spack.installed_db.query(): if self.name == spec.name: continue for dep in spec.traverse(): -- cgit v1.2.3-70-g09d2 From 5fda7daf57e2362c803d4f2152da93ed270818d9 Mon Sep 17 00:00:00 2001 From: Gregory Becker Date: Thu, 17 Sep 2015 11:29:27 -0700 Subject: an ordered database test --- lib/spack/spack/test/__init__.py | 3 +- lib/spack/spack/test/database.py | 104 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 lib/spack/spack/test/database.py (limited to 'lib') diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index 6b3715be6f..c3b39b76f8 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -56,7 +56,8 @@ test_names = ['versions', 'spec_yaml', 'optional_deps', 'make_executable', - 'configure_guess'] + 'configure_guess', + 'database'] def list_tests(): diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py new file mode 100644 index 0000000000..a3386bad99 --- /dev/null +++ b/lib/spack/spack/test/database.py @@ -0,0 +1,104 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +These tests check the database is functioning properly, +both in memory and in its file +""" +import unittest + +from llnl.util.lock import * +from llnl.util.filesystem import join_path + +import spack +from spack.database import Database + + +class DatabaseTest(unittest.TestCase): + def setUp(self): + self.original_db = spack.installed_db + spack.installed_db = Database(self.original_db._root,"_test_index.yaml") + self.file_path = join_path(self.original_db._root,"_test_index.yaml") + if os.path.exists(self.file_path): + os.remove(self.file_path) + + def tearDown(self): + spack.installed_db = self.original_db + os.remove(self.file_path) + + def _test_read_from_install_tree(self): + specs = spack.install_layout.all_specs() + spack.installed_db.read_database() + spack.installed_db.write() + for sph in spack.installed_db._data: + self.assertTrue(sph['spec'] in specs) + self.assertEqual(len(specs),len(spack.installed_db._data)) + + def _test_remove_and_add(self): + specs = spack.install_layout.all_specs() + spack.installed_db.remove(specs[len(specs)-1]) + for sph in spack.installed_db._data: + self.assertTrue(sph['spec'] in specs[:len(specs)-1]) + self.assertEqual(len(specs)-1,len(spack.installed_db._data)) + + spack.installed_db.add(specs[len(specs)-1],"") + for sph in spack.installed_db._data: + self.assertTrue(sph['spec'] in specs) + self.assertEqual(len(specs),len(spack.installed_db._data)) + + 
def _test_read_from_file(self): + spack.installed_db.read_database() + size = len(spack.installed_db._data) + spack.installed_db._data = spack.installed_db._data[1:] + os.utime(spack.installed_db._file_path,None) + spack.installed_db.read_database() + self.assertEqual(size,len(spack.installed_db._data)) + + specs = spack.install_layout.all_specs() + self.assertEqual(size,len(specs)) + for sph in spack.installed_db._data: + self.assertTrue(sph['spec'] in specs) + + + def _test_write_to_file(self): + spack.installed_db.read_database() + size = len(spack.installed_db._data) + real_data = spack.installed_db._data + spack.installed_db._data = real_data[:size-1] + spack.installed_db.write() + spack.installed_db._data = real_data + os.utime(spack.installed_db._file_path,None) + spack.installed_db.read_database() + self.assertEqual(size-1,len(spack.installed_db._data)) + + specs = spack.install_layout.all_specs() + self.assertEqual(size,len(specs)) + for sph in spack.installed_db._data: + self.assertTrue(sph['spec'] in specs[:size-1]) + + def test_ordered_test(self): + self._test_read_from_install_tree() + self._test_remove_and_add() + self._test_read_from_file() + self._test_write_to_file() -- cgit v1.2.3-70-g09d2 From 908a93a470cafcb3fad8f7412e438e7f5b939d04 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sat, 24 Oct 2015 19:54:52 -0700 Subject: Add a multiprocess Barrier class to use for testing parallel code. --- lib/spack/spack/util/multiproc.py | 50 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py index 9e045a090f..21cd6f543d 100644 --- a/lib/spack/spack/util/multiproc.py +++ b/lib/spack/spack/util/multiproc.py @@ -27,9 +27,11 @@ This implements a parallel map operation but it can accept more values than multiprocessing.Pool.apply() can. 
For example, apply() will fail to pickle functions if they're passed indirectly as parameters. """ -from multiprocessing import Process, Pipe +from multiprocessing import Process, Pipe, Semaphore, Value from itertools import izip +__all__ = ['spawn', 'parmap', 'Barrier'] + def spawn(f): def fun(pipe,x): pipe.send(f(x)) @@ -43,3 +45,49 @@ def parmap(f,X): [p.join() for p in proc] return [p.recv() for (p,c) in pipe] + +class Barrier: + """Simple reusable semaphore barrier. + + Python 2.6 doesn't have multiprocessing barriers so we implement this. + + See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41. + """ + def __init__(self, n, timeout=None): + self.n = n + self.to = timeout + self.count = Value('i', 0) + self.mutex = Semaphore(1) + self.turnstile1 = Semaphore(0) + self.turnstile2 = Semaphore(1) + + + def wait(self): + if not self.mutex.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.count.value += 1 + if self.count.value == self.n: + if not self.turnstile2.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile1.release() + self.mutex.release() + + if not self.turnstile1.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile1.release() + + if not self.mutex.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.count.value -= 1 + if self.count.value == 0: + if not self.turnstile1.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile2.release() + self.mutex.release() + + if not self.turnstile2.acquire(timeout=self.to): + raise BarrierTimeoutError() + self.turnstile2.release() + + +class BarrierTimeoutError: pass -- cgit v1.2.3-70-g09d2 From ead8ac58c6ecde1b8bd32e9f651483b57c7e3bd5 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Sat, 24 Oct 2015 19:55:22 -0700 Subject: Working Lock class, now uses POSIX fcntl locks, extensive unit test. - llnl.util.lock now uses fcntl.lockf instead of flock - purported to have more NFS compatibility. 
- Added an extensive test case for locks. - tests acquiring, releasing, upgrading, timeouts, shared, & exclusive cases. --- lib/spack/llnl/util/lock.py | 167 +++++++++++++------------ lib/spack/spack/test/__init__.py | 3 +- lib/spack/spack/test/lock.py | 264 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 350 insertions(+), 84 deletions(-) create mode 100644 lib/spack/spack/test/lock.py (limited to 'lib') diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index 3cd02befe5..6e49bf74e6 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -1,5 +1,5 @@ ############################################################################## -# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. @@ -22,134 +22,135 @@ # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## -"""Lock implementation for shared filesystems.""" import os import fcntl import errno import time import socket -# Default timeout for locks. -DEFAULT_TIMEOUT = 60 +# Default timeout in seconds, after which locks will raise exceptions. +_default_timeout = 60 -class _ReadLockContext(object): - """Context manager that takes and releases a read lock. 
+# Sleep time per iteration in spin loop (in seconds) +_sleep_time = 1e-5 - Arguments are lock and timeout (default 5 minutes) - """ - def __init__(self, lock, timeout=DEFAULT_TIMEOUT): - self._lock = lock - self._timeout = timeout - def __enter__(self): - self._lock.acquire_read(self._timeout) +class Lock(object): + def __init__(self,file_path): + self._file_path = file_path + self._fd = None + self._reads = 0 + self._writes = 0 - def __exit__(self,type,value,traceback): - self._lock.release_read() + def _lock(self, op, timeout): + """This takes a lock using POSIX locks (``fnctl.lockf``). -class _WriteLockContext(object): - """Context manager that takes and releases a write lock. + The lock is implemented as a spin lock using a nonblocking + call to lockf(). - Arguments are lock and timeout (default 5 minutes) - """ - def __init__(self, lock, timeout=DEFAULT_TIMEOUT): - self._lock = lock - self._timeout = timeout + On acquiring an exclusive lock, the lock writes this process's + pid and host to the lock file, in case the holding process + needs to be killed later. - def __enter__(self): - self._lock.acquire_write(self._timeout) + If the lock times out, it raises a ``LockError``. 
+ """ + start_time = time.time() + while (time.time() - start_time) < timeout: + try: + if self._fd is None: + self._fd = os.open(self._file_path, os.O_RDWR) - def __exit__(self,type,value,traceback): - self._lock.release_write() + fcntl.lockf(self._fd, op | fcntl.LOCK_NB) + if op == fcntl.LOCK_EX: + os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn())) + return + + except IOError as error: + if error.errno == errno.EAGAIN or error.errno == errno.EACCES: + pass + else: + raise + time.sleep(_sleep_time) + raise LockError("Timed out waiting for lock.") -class Lock(object): - """Distributed file-based lock using ``flock``.""" - def __init__(self, file_path): - self._file_path = file_path - self._fd = os.open(file_path,os.O_RDWR) - self._reads = 0 - self._writes = 0 + def _unlock(self): + """Releases a lock using POSIX locks (``fcntl.lockf``) + Releases the lock regardless of mode. Note that read locks may + be masquerading as write locks, but this removes either. - def write_lock(self, timeout=DEFAULT_TIMEOUT): - """Convenience method that returns a write lock context.""" - return _WriteLockContext(self, timeout) + """ + fcntl.lockf(self._fd,fcntl.LOCK_UN) + os.close(self._fd) + self._fd = None - def read_lock(self, timeout=DEFAULT_TIMEOUT): - """Convenience method that returns a read lock context.""" - return _ReadLockContext(self, timeout) + def acquire_read(self, timeout=_default_timeout): + """Acquires a recursive, shared lock for reading. + Read and write locks can be acquired and released in arbitrary + order, but the POSIX lock is held until all local read and + write locks are released. - def acquire_read(self, timeout): - """ - Implements recursive lock. 
If held in both read and write mode, - the write lock will be maintained until all locks are released """ if self._reads == 0 and self._writes == 0: self._lock(fcntl.LOCK_SH, timeout) self._reads += 1 - def acquire_write(self, timeout): - """ - Implements recursive lock + def acquire_write(self, timeout=_default_timeout): + """Acquires a recursive, exclusive lock for writing. + + Read and write locks can be acquired and released in arbitrary + order, but the POSIX lock is held until all local read and + write locks are released. """ if self._writes == 0: self._lock(fcntl.LOCK_EX, timeout) self._writes += 1 - def _lock(self, op, timeout): - """ - The timeout is implemented using nonblocking flock() - to avoid using signals for timing - Write locks store pid and host information to the lock file - Read locks do not store data - """ - total_time = 0 - while total_time < timeout: - try: - fcntl.flock(self._fd, op | fcntl.LOCK_NB) - if op == fcntl.LOCK_EX: - with open(self._file_path, 'w') as f: - f.write("pid = " + str(os.getpid()) + ", host = " + socket.getfqdn()) - return - except IOError as error: - if error.errno == errno.EAGAIN or error.errno == EACCES: - pass - else: - raise - time.sleep(0.1) - total_time += 0.1 + def release_read(self): + """Releases a read lock. + Returns True if the last recursive lock was released, False if + there are still outstanding locks. + + Does limited correctness checking: if a read lock is released + when none are held, this will raise an assertion error. - def release_read(self): - """ - Assert there is a lock of the right type to release, recursive lock """ assert self._reads > 0 - if self._reads == 1 and self._writes == 0: - self._unlock() + self._reads -= 1 + if self._reads == 0 and self._writes == 0: + self._unlock() + return True + return False def release_write(self): - """ - Assert there is a lock of the right type to release, recursive lock + """Releases a write lock. 
+ + Returns True if the last recursive lock was released, False if + there are still outstanding locks. + + Does limited correctness checking: if a read lock is released + when none are held, this will raise an assertion error. + """ assert self._writes > 0 - if self._writes == 1 and self._reads == 0: - self._unlock() + self._writes -= 1 + if self._writes == 0 and self._reads == 0: + self._unlock() + return True + return False - def _unlock(self): - """ - Releases the lock regardless of mode. Note that read locks may be - masquerading as write locks at times, but this removes either. - """ - fcntl.flock(self._fd, fcntl.LOCK_UN) +class LockError(Exception): + """Raised when an attempt to acquire a lock times out.""" + pass diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py index c3b39b76f8..84419781e2 100644 --- a/lib/spack/spack/test/__init__.py +++ b/lib/spack/spack/test/__init__.py @@ -57,6 +57,7 @@ test_names = ['versions', 'optional_deps', 'make_executable', 'configure_guess', + 'lock', 'database'] @@ -77,7 +78,7 @@ def run(names, verbose=False): if test not in test_names: tty.error("%s is not a valid spack test name." % test, "Valid names are:") - colify(test_names, indent=4) + colify(sorted(test_names), indent=4) sys.exit(1) runner = unittest.TextTestRunner(verbosity=verbosity) diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py new file mode 100644 index 0000000000..2e7440bbbc --- /dev/null +++ b/lib/spack/spack/test/lock.py @@ -0,0 +1,264 @@ +############################################################################## +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +""" +These tests ensure that our lock works correctly. +""" +import unittest +import os +import tempfile +import shutil +from multiprocessing import Process + +from llnl.util.lock import * +from llnl.util.filesystem import join_path, touch + +from spack.util.multiproc import Barrier + +# This is the longest a failed test will take, as the barriers will +# time out and raise an exception. +barrier_timeout = 5 + + +def order_processes(*functions): + """Order some processes using simple barrier synchronization.""" + b = Barrier(len(functions), timeout=barrier_timeout) + procs = [Process(target=f, args=(b,)) for f in functions] + for p in procs: p.start() + for p in procs: p.join() + + +class LockTest(unittest.TestCase): + + def setUp(self): + self.tempdir = tempfile.mkdtemp() + self.lock_path = join_path(self.tempdir, 'lockfile') + touch(self.lock_path) + + + def tearDown(self): + shutil.rmtree(self.tempdir, ignore_errors=True) + + + # + # Process snippets below can be composed into tests. + # + def acquire_write(self, barrier): + lock = Lock(self.lock_path) + lock.acquire_write() # grab exclusive lock + barrier.wait() + barrier.wait() # hold the lock until exception raises in other procs. 
+ + def acquire_read(self, barrier): + lock = Lock(self.lock_path) + lock.acquire_read() # grab shared lock + barrier.wait() + barrier.wait() # hold the lock until exception raises in other procs. + + def timeout_write(self, barrier): + lock = Lock(self.lock_path) + barrier.wait() # wait for lock acquire in first process + self.assertRaises(LockError, lock.acquire_write, 0.1) + barrier.wait() + + def timeout_read(self, barrier): + lock = Lock(self.lock_path) + barrier.wait() # wait for lock acquire in first process + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() + + + # + # Test that exclusive locks on other processes time out when an + # exclusive lock is held. + # + def test_write_lock_timeout_on_write(self): + order_processes(self.acquire_write, self.timeout_write) + + def test_write_lock_timeout_on_write_2(self): + order_processes(self.acquire_write, self.timeout_write, self.timeout_write) + + def test_write_lock_timeout_on_write_3(self): + order_processes(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write) + + + # + # Test that shared locks on other processes time out when an + # exclusive lock is held. + # + def test_read_lock_timeout_on_write(self): + order_processes(self.acquire_write, self.timeout_read) + + def test_read_lock_timeout_on_write_2(self): + order_processes(self.acquire_write, self.timeout_read, self.timeout_read) + + def test_read_lock_timeout_on_write_3(self): + order_processes(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read) + + + # + # Test that exclusive locks time out when shared locks are held. 
+ # + def test_write_lock_timeout_on_read(self): + order_processes(self.acquire_read, self.timeout_write) + + def test_write_lock_timeout_on_read_2(self): + order_processes(self.acquire_read, self.timeout_write, self.timeout_write) + + def test_write_lock_timeout_on_read_3(self): + order_processes(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write) + + + # + # Test that exclusive locks time while lots of shared locks are held. + # + def test_write_lock_timeout_with_multiple_readers_2_1(self): + order_processes(self.acquire_read, self.acquire_read, self.timeout_write) + + def test_write_lock_timeout_with_multiple_readers_2_2(self): + order_processes(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) + + def test_write_lock_timeout_with_multiple_readers_3_1(self): + order_processes(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write) + + def test_write_lock_timeout_with_multiple_readers_3_2(self): + order_processes(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) + + + # + # Longer test case that ensures locks are reusable. Ordering is + # enforced by barriers throughout -- steps are shown with numbers. 
+ # + def test_complex_acquire_and_release_chain(self): + def p1(barrier): + lock = Lock(self.lock_path) + + lock.acquire_write() + barrier.wait() # ---------------------------------------- 1 + # others test timeout + barrier.wait() # ---------------------------------------- 2 + lock.release_write() # release and others acquire read + barrier.wait() # ---------------------------------------- 3 + self.assertRaises(LockError, lock.acquire_write, 0.1) + lock.acquire_read() + barrier.wait() # ---------------------------------------- 4 + lock.release_read() + barrier.wait() # ---------------------------------------- 5 + + # p2 upgrades read to write + barrier.wait() # ---------------------------------------- 6 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 7 + # p2 releases write and read + barrier.wait() # ---------------------------------------- 8 + + # p3 acquires read + barrier.wait() # ---------------------------------------- 9 + # p3 upgrades read to write + barrier.wait() # ---------------------------------------- 10 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 11 + # p3 releases locks + barrier.wait() # ---------------------------------------- 12 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 13 + lock.release_read() + + + def p2(barrier): + lock = Lock(self.lock_path) + + # p1 acquires write + barrier.wait() # ---------------------------------------- 1 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 2 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 3 + # p1 tests shared read + barrier.wait() # 
---------------------------------------- 4 + # others release reads + barrier.wait() # ---------------------------------------- 5 + + lock.acquire_write() # upgrade read to write + barrier.wait() # ---------------------------------------- 6 + # others test timeout + barrier.wait() # ---------------------------------------- 7 + lock.release_write() # release read AND write (need both) + lock.release_read() + barrier.wait() # ---------------------------------------- 8 + + # p3 acquires read + barrier.wait() # ---------------------------------------- 9 + # p3 upgrades read to write + barrier.wait() # ---------------------------------------- 10 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 11 + # p3 releases locks + barrier.wait() # ---------------------------------------- 12 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 13 + lock.release_read() + + + def p3(barrier): + lock = Lock(self.lock_path) + + # p1 acquires write + barrier.wait() # ---------------------------------------- 1 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 2 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 3 + # p1 tests shared read + barrier.wait() # ---------------------------------------- 4 + lock.release_read() + barrier.wait() # ---------------------------------------- 5 + + # p2 upgrades read to write + barrier.wait() # ---------------------------------------- 6 + self.assertRaises(LockError, lock.acquire_write, 0.1) + self.assertRaises(LockError, lock.acquire_read, 0.1) + barrier.wait() # ---------------------------------------- 7 + # p2 releases write & read + barrier.wait() # ---------------------------------------- 8 + + lock.acquire_read() + barrier.wait() # 
---------------------------------------- 9 + lock.acquire_write() + barrier.wait() # ---------------------------------------- 10 + # others test timeout + barrier.wait() # ---------------------------------------- 11 + lock.release_read() # release read AND write in opposite + lock.release_write() # order from before on p2 + barrier.wait() # ---------------------------------------- 12 + lock.acquire_read() + barrier.wait() # ---------------------------------------- 13 + lock.release_read() + + order_processes(p1, p2, p3) -- cgit v1.2.3-70-g09d2 From af7b96c14a555805a50f13a1fa1343650db184ab Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 27 Oct 2015 00:35:06 -0700 Subject: Lock acquires return True/False depending on whether they got POSIX lock. --- lib/spack/llnl/util/lock.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index 6e49bf74e6..dcca37687e 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -95,10 +95,15 @@ class Lock(object): order, but the POSIX lock is held until all local read and write locks are released. + Returns True if it is the first acquire and actually acquires + the POSIX lock, False if it is a nested transaction. + """ - if self._reads == 0 and self._writes == 0: - self._lock(fcntl.LOCK_SH, timeout) self._reads += 1 + if self._reads == 1 and self._writes == 0: + self._lock(fcntl.LOCK_SH, timeout) + return True + return False def acquire_write(self, timeout=_default_timeout): @@ -107,10 +112,16 @@ class Lock(object): Read and write locks can be acquired and released in arbitrary order, but the POSIX lock is held until all local read and write locks are released. + + Returns True if it is the first acquire and actually acquires + the POSIX lock, False if it is a nested transaction. 
+ """ - if self._writes == 0: - self._lock(fcntl.LOCK_EX, timeout) self._writes += 1 + if self._writes == 1: + self._lock(fcntl.LOCK_EX, timeout) + return True + return False def release_read(self): -- cgit v1.2.3-70-g09d2 From bf8479bec6311f28cd9e18c580e23794001cbf23 Mon Sep 17 00:00:00 2001 From: Todd Gamblin Date: Tue, 27 Oct 2015 16:29:14 -0700 Subject: Fix stupid lock bug. - Code simplification ignored case where exception was raised. - If LockError was raised, read and write counts were incremented erroneously. - updated lock test. --- lib/spack/llnl/util/lock.py | 40 +++++++++++++++++++++++--------------- lib/spack/spack/package.py | 8 +++++--- lib/spack/spack/test/lock.py | 46 +++++++++++++++++++++++--------------------- 3 files changed, 53 insertions(+), 41 deletions(-) (limited to 'lib') diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py index dcca37687e..ac3684bd55 100644 --- a/lib/spack/llnl/util/lock.py +++ b/lib/spack/llnl/util/lock.py @@ -99,11 +99,13 @@ class Lock(object): the POSIX lock, False if it is a nested transaction. """ - self._reads += 1 - if self._reads == 1 and self._writes == 0: - self._lock(fcntl.LOCK_SH, timeout) + if self._reads == 0 and self._writes == 0: + self._lock(fcntl.LOCK_SH, timeout) # can raise LockError. + self._reads += 1 return True - return False + else: + self._reads += 1 + return False def acquire_write(self, timeout=_default_timeout): @@ -117,11 +119,13 @@ class Lock(object): the POSIX lock, False if it is a nested transaction. """ - self._writes += 1 - if self._writes == 1: - self._lock(fcntl.LOCK_EX, timeout) + if self._writes == 0: + self._lock(fcntl.LOCK_EX, timeout) # can raise LockError. 
+ self._writes += 1 return True - return False + else: + self._writes += 1 + return False def release_read(self): @@ -136,11 +140,13 @@ class Lock(object): """ assert self._reads > 0 - self._reads -= 1 - if self._reads == 0 and self._writes == 0: - self._unlock() + if self._reads == 1 and self._writes == 0: + self._unlock() # can raise LockError. + self._reads -= 1 return True - return False + else: + self._reads -= 1 + return False def release_write(self): @@ -155,11 +161,13 @@ class Lock(object): """ assert self._writes > 0 - self._writes -= 1 - if self._writes == 0 and self._reads == 0: - self._unlock() + if self._writes == 1 and self._reads == 0: + self._unlock() # can raise LockError. + self._writes -= 1 return True - return False + else: + self._writes -= 1 + return False class LockError(Exception): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index e6944ce40c..2957257b1a 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -606,6 +606,7 @@ class Package(object): spack.install_layout.remove_install_directory(self.spec) spack.installed_db.remove(self.spec) + def do_fetch(self): """Creates a stage directory and downloads the taball for this package. Working directory will be set to the stage directory. @@ -812,9 +813,6 @@ class Package(object): log_install_path = spack.install_layout.build_log_path(self.spec) install(log_path, log_install_path) - #Update the database once we know install successful - spack.installed_db.add(self.spec, spack.install_layout.path_for_spec(self.spec)) - # On successful install, remove the stage. if not keep_stage: self.stage.destroy() @@ -845,6 +843,10 @@ class Package(object): # Do the build. spack.build_environment.fork(self, real_work) + # note: PARENT of the build process adds the new package to + # the database, so that we don't need to re-read from file. 
+ spack.installed_db.add(self.spec, spack.install_layout.path_for_spec(self.spec)) + # Once everything else is done, run post install hooks spack.hooks.post_install(self) diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py index 2e7440bbbc..5664e71b03 100644 --- a/lib/spack/spack/test/lock.py +++ b/lib/spack/spack/test/lock.py @@ -41,14 +41,6 @@ from spack.util.multiproc import Barrier barrier_timeout = 5 -def order_processes(*functions): - """Order some processes using simple barrier synchronization.""" - b = Barrier(len(functions), timeout=barrier_timeout) - procs = [Process(target=f, args=(b,)) for f in functions] - for p in procs: p.start() - for p in procs: p.join() - - class LockTest(unittest.TestCase): def setUp(self): @@ -61,6 +53,16 @@ class LockTest(unittest.TestCase): shutil.rmtree(self.tempdir, ignore_errors=True) + def multiproc_test(self, *functions): + """Order some processes using simple barrier synchronization.""" + b = Barrier(len(functions), timeout=barrier_timeout) + procs = [Process(target=f, args=(b,)) for f in functions] + for p in procs: p.start() + for p in procs: + p.join() + self.assertEqual(p.exitcode, 0) + + # # Process snippets below can be composed into tests. # @@ -94,13 +96,13 @@ class LockTest(unittest.TestCase): # exclusive lock is held. 
# def test_write_lock_timeout_on_write(self): - order_processes(self.acquire_write, self.timeout_write) + self.multiproc_test(self.acquire_write, self.timeout_write) def test_write_lock_timeout_on_write_2(self): - order_processes(self.acquire_write, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write) def test_write_lock_timeout_on_write_3(self): - order_processes(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write) # @@ -108,42 +110,42 @@ class LockTest(unittest.TestCase): # exclusive lock is held. # def test_read_lock_timeout_on_write(self): - order_processes(self.acquire_write, self.timeout_read) + self.multiproc_test(self.acquire_write, self.timeout_read) def test_read_lock_timeout_on_write_2(self): - order_processes(self.acquire_write, self.timeout_read, self.timeout_read) + self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read) def test_read_lock_timeout_on_write_3(self): - order_processes(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read) + self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read) # # Test that exclusive locks time out when shared locks are held. 
# def test_write_lock_timeout_on_read(self): - order_processes(self.acquire_read, self.timeout_write) + self.multiproc_test(self.acquire_read, self.timeout_write) def test_write_lock_timeout_on_read_2(self): - order_processes(self.acquire_read, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write) def test_write_lock_timeout_on_read_3(self): - order_processes(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write) # # Test that exclusive locks time while lots of shared locks are held. # def test_write_lock_timeout_with_multiple_readers_2_1(self): - order_processes(self.acquire_read, self.acquire_read, self.timeout_write) + self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write) def test_write_lock_timeout_with_multiple_readers_2_2(self): - order_processes(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) def test_write_lock_timeout_with_multiple_readers_3_1(self): - order_processes(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write) + self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write) def test_write_lock_timeout_with_multiple_readers_3_2(self): - order_processes(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) + self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write) # @@ -261,4 +263,4 @@ class LockTest(unittest.TestCase): barrier.wait() # ---------------------------------------- 13 lock.release_read() - order_processes(p1, p2, p3) + self.multiproc_test(p1, p2, p3) -- cgit v1.2.3-70-g09d2 From a58ae0c5d0002a7c6cce606b3308dbf53fc29317 Mon Sep 17 00:00:00 
2001 From: Todd Gamblin Date: Tue, 27 Oct 2015 16:36:44 -0700 Subject: Build database working with simple transaction support; all tests passing. --- lib/spack/llnl/util/tty/color.py | 5 + lib/spack/spack/cmd/__init__.py | 21 +- lib/spack/spack/cmd/deactivate.py | 11 +- lib/spack/spack/cmd/diy.py | 11 +- lib/spack/spack/cmd/extensions.py | 3 +- lib/spack/spack/cmd/find.py | 11 +- lib/spack/spack/cmd/install.py | 8 +- lib/spack/spack/cmd/uninstall.py | 34 ++-- lib/spack/spack/database.py | 393 ++++++++++++++++++++++++++---------- lib/spack/spack/directory_layout.py | 3 - lib/spack/spack/package.py | 2 +- lib/spack/spack/test/database.py | 373 ++++++++++++++++++++++++++++------ 12 files changed, 645 insertions(+), 230 deletions(-) (limited to 'lib') diff --git a/lib/spack/llnl/util/tty/color.py b/lib/spack/llnl/util/tty/color.py index 22080a7b37..0d09303da0 100644 --- a/lib/spack/llnl/util/tty/color.py +++ b/lib/spack/llnl/util/tty/color.py @@ -158,6 +158,11 @@ def clen(string): return len(re.sub(r'\033[^m]*m', '', string)) +def cextra(string): + """"Length of extra color characters in a string""" + return len(''.join(re.findall(r'\033[^m]*m', string))) + + def cwrite(string, stream=sys.stdout, color=None): """Replace all color expressions in string with ANSI control codes and write the result to the stream. If color is diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py index d4778b1375..6ce6fa0960 100644 --- a/lib/spack/spack/cmd/__init__.py +++ b/lib/spack/spack/cmd/__init__.py @@ -124,16 +124,15 @@ def elide_list(line_list, max_num=10): def disambiguate_spec(spec): - with spack.installed_db.read_lock(): - matching_specs = spack.installed_db.query(spec) - if not matching_specs: - tty.die("Spec '%s' matches no installed packages." % spec) - - elif len(matching_specs) > 1: - args = ["%s matches multiple packages." 
% spec, - "Matching packages:"] - args += [" " + str(s) for s in matching_specs] - args += ["Use a more specific spec."] - tty.die(*args) + matching_specs = spack.installed_db.query(spec) + if not matching_specs: + tty.die("Spec '%s' matches no installed packages." % spec) + + elif len(matching_specs) > 1: + args = ["%s matches multiple packages." % spec, + "Matching packages:"] + args += [" " + str(s) for s in matching_specs] + args += ["Use a more specific spec."] + tty.die(*args) return matching_specs[0] diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py index 5428e3d2de..1f0e303cdf 100644 --- a/lib/spack/spack/cmd/deactivate.py +++ b/lib/spack/spack/cmd/deactivate.py @@ -54,13 +54,12 @@ def deactivate(parser, args): if args.all: if pkg.extendable: tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec) - with spack.installed_db.read_lock(): - ext_pkgs = spack.installed_db.installed_extensions_for(spec) + ext_pkgs = spack.installed_db.installed_extensions_for(spec) - for ext_pkg in ext_pkgs: - ext_pkg.spec.normalize() - if ext_pkg.activated: - ext_pkg.do_deactivate(force=True) + for ext_pkg in ext_pkgs: + ext_pkg.spec.normalize() + if ext_pkg.activated: + ext_pkg.do_deactivate(force=True) elif pkg.is_extension: if not args.force and not spec.package.activated: diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py index 6178c9c3e3..f7998720ac 100644 --- a/lib/spack/spack/cmd/diy.py +++ b/lib/spack/spack/cmd/diy.py @@ -54,11 +54,12 @@ def diy(self, args): if not args.spec: tty.die("spack diy requires a package spec argument.") - with spack.installed_db.write_lock(): - specs = spack.cmd.parse_specs(args.spec) - if len(specs) > 1: - tty.die("spack diy only takes one spec.") + specs = spack.cmd.parse_specs(args.spec) + if len(specs) > 1: + tty.die("spack diy only takes one spec.") + # Take a write lock before checking for existence. 
+ with spack.installed_db.write_lock(): spec = specs[0] if not spack.db.exists(spec.name): tty.warn("No such package: %s" % spec.name) @@ -85,7 +86,7 @@ def diy(self, args): # Forces the build to run out of the current directory. package.stage = DIYStage(os.getcwd()) - # TODO: make this an argument, not a global. + # TODO: make this an argument, not a global. spack.do_checksum = False package.do_install( diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py index f0f99a2691..7cadc424b0 100644 --- a/lib/spack/spack/cmd/extensions.py +++ b/lib/spack/spack/cmd/extensions.py @@ -80,8 +80,7 @@ def extensions(parser, args): colify(ext.name for ext in extensions) # List specs of installed extensions. - with spack.installed_db.read_lock(): - installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] + installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)] print if not installed: tty.msg("None installed.") diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py index 6a0c3d11ff..0b0dd6ef6f 100644 --- a/lib/spack/spack/cmd/find.py +++ b/lib/spack/spack/cmd/find.py @@ -158,12 +158,11 @@ def find(parser, args): q_args = { 'installed' : installed, 'known' : known } # Get all the specs the user asked for - with spack.installed_db.read_lock(): - if not query_specs: - specs = set(spack.installed_db.query(**q_args)) - else: - results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs] - specs = set.union(*results) + if not query_specs: + specs = set(spack.installed_db.query(**q_args)) + else: + results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs] + specs = set.union(*results) if not args.mode: args.mode = 'short' diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py index ada655b937..ba824bd658 100644 --- a/lib/spack/spack/cmd/install.py +++ b/lib/spack/spack/cmd/install.py @@ -68,10 +68,10 @@ def install(parser, args): if 
args.no_checksum: spack.do_checksum = False # TODO: remove this global. - with spack.installed_db.write_lock(): - specs = spack.cmd.parse_specs(args.packages, concretize=True) - for spec in specs: - package = spack.db.get(spec) + specs = spack.cmd.parse_specs(args.packages, concretize=True) + for spec in specs: + package = spack.db.get(spec) + with spack.installed_db.write_lock(): package.do_install( keep_prefix=args.keep_prefix, keep_stage=args.keep_stage, diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py index 7b7c32c065..1dae84444a 100644 --- a/lib/spack/spack/cmd/uninstall.py +++ b/lib/spack/spack/cmd/uninstall.py @@ -84,21 +84,21 @@ def uninstall(parser, args): # The package.py file has gone away -- but still want to uninstall. spack.Package(s).do_uninstall(force=True) - # Sort packages to be uninstalled by the number of installed dependents - # This ensures we do things in the right order - def num_installed_deps(pkg): - return len(pkg.installed_dependents) - pkgs.sort(key=num_installed_deps) + # Sort packages to be uninstalled by the number of installed dependents + # This ensures we do things in the right order + def num_installed_deps(pkg): + return len(pkg.installed_dependents) + pkgs.sort(key=num_installed_deps) - # Uninstall packages in order now. - for pkg in pkgs: - try: - pkg.do_uninstall(force=args.force) - except PackageStillNeededError, e: - tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True)) - print - print "The following packages depend on it:" - display_specs(e.dependents, long=True) - print - print "You can use spack uninstall -f to force this action." - sys.exit(1) + # Uninstall packages in order now. 
+ for pkg in pkgs: + try: + pkg.do_uninstall(force=args.force) + except PackageStillNeededError, e: + tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True)) + print + print "The following packages depend on it:" + display_specs(e.dependents, long=True) + print + print "You can use spack uninstall -f to force this action." + sys.exit(1) diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py index 1d1c640d66..9ce00a45e9 100644 --- a/lib/spack/spack/database.py +++ b/lib/spack/spack/database.py @@ -48,7 +48,7 @@ from external.yaml.error import MarkedYAMLError, YAMLError import llnl.util.tty as tty from llnl.util.filesystem import * -from llnl.util.lock import Lock +from llnl.util.lock import * import spack.spec from spack.version import Version @@ -62,7 +62,8 @@ _db_dirname = '.spack-db' _db_version = Version('0.9') # Default timeout for spack database locks is 5 min. -_db_lock_timeout = 300 +_db_lock_timeout = 60 + def _autospec(function): """Decorator that automatically converts the argument of a single-arg @@ -90,11 +91,11 @@ class InstallRecord(object): dependents left. """ - def __init__(self, spec, path, installed): + def __init__(self, spec, path, installed, ref_count=0): self.spec = spec self.path = path self.installed = installed - self.ref_count = 0 + self.ref_count = ref_count def to_dict(self): return { 'spec' : self.spec.to_node_dict(), @@ -103,25 +104,42 @@ class InstallRecord(object): 'ref_count' : self.ref_count } @classmethod - def from_dict(cls, d): - # TODO: check the dict more rigorously. - return InstallRecord(d['spec'], d['path'], d['installed'], d['ref_count']) + def from_dict(cls, spec, dictionary): + d = dictionary + return InstallRecord(spec, d['path'], d['installed'], d['ref_count']) class Database(object): - def __init__(self, root): - """Create an empty Database. 
- - Location defaults to root/_index.yaml - The individual data are dicts containing - spec: the top level spec of a package - path: the path to the install of that package - dep_hash: a hash of the dependence DAG for that package + def __init__(self, root, db_dir=None): + """Create a Database for Spack installations under ``root``. + + A Database is a cache of Specs data from ``$prefix/spec.yaml`` + files in Spack installation directories. + + By default, Database files (data and lock files) are stored + under ``root/.spack-db``, which is created if it does not + exist. This is the ``db_dir``. + + The Database will attempt to read an ``index.yaml`` file in + ``db_dir``. If it does not find one, it will be created when + needed by scanning the entire Database root for ``spec.yaml`` + files according to Spack's ``DirectoryLayout``. + + Caller may optionally provide a custom ``db_dir`` parameter + where data will be stored. This is intended to be used for + testing the Database class. + """ - self._root = root + self.root = root - # Set up layout of database files. - self._db_dir = join_path(self._root, _db_dirname) + if db_dir is None: + # If the db_dir is not provided, default to within the db root. + self._db_dir = join_path(self.root, _db_dirname) + else: + # Allow customizing the database directory location for testing. + self._db_dir = db_dir + + # Set up layout of database files within the db dir self._index_path = join_path(self._db_dir, 'index.yaml') self._lock_path = join_path(self._db_dir, 'lock') @@ -135,21 +153,23 @@ class Database(object): # initialize rest of state. 
self.lock = Lock(self._lock_path) self._data = {} - self._last_write_time = 0 - def write_lock(self, timeout=_db_lock_timeout): - """Get a write lock context for use in a `with` block.""" - return self.lock.write_lock(timeout) + def write_transaction(self, timeout=_db_lock_timeout): + """Get a write lock context manager for use in a `with` block.""" + return WriteTransaction(self, self._read, self._write, timeout) - def read_lock(self, timeout=_db_lock_timeout): - """Get a read lock context for use in a `with` block.""" - return self.lock.read_lock(timeout) + def read_transaction(self, timeout=_db_lock_timeout): + """Get a read lock context manager for use in a `with` block.""" + return ReadTransaction(self, self._read, None, timeout) def _write_to_yaml(self, stream): - """Write out the databsae to a YAML file.""" + """Write out the databsae to a YAML file. + + This function does not do any locking or transactions. + """ # map from per-spec hash code to installation record. installs = dict((k, v.to_dict()) for k, v in self._data.items()) @@ -173,7 +193,10 @@ class Database(object): def _read_spec_from_yaml(self, hash_key, installs, parent_key=None): - """Recursively construct a spec from a hash in a YAML database.""" + """Recursively construct a spec from a hash in a YAML database. + + Does not do any locking. + """ if hash_key not in installs: parent = read_spec(installs[parent_key]['path']) @@ -195,6 +218,8 @@ class Database(object): """ Fill database from YAML, do not maintain old data Translate the spec portions from node-dict form to spec form + + Does not do any locking. """ try: if isinstance(stream, basestring): @@ -243,7 +268,7 @@ class Database(object): # Insert the brand new spec in the database. Each # spec has its own copies of its dependency specs. # TODO: would a more immmutable spec implementation simplify this? 
- data[hash_key] = InstallRecord(spec, rec['path'], rec['installed']) + data[hash_key] = InstallRecord.from_dict(spec, rec) except Exception as e: tty.warn("Invalid database reecord:", @@ -256,57 +281,60 @@ class Database(object): def reindex(self, directory_layout): - """Build database index from scratch based from a directory layout.""" - with self.write_lock(): - data = {} + """Build database index from scratch based from a directory layout. - # Ask the directory layout to traverse the filesystem. - for spec in directory_layout.all_specs(): - # Create a spec for each known package and add it. - path = directory_layout.path_for_spec(spec) - hash_key = spec.dag_hash() - data[hash_key] = InstallRecord(spec, path, True) + Locks the DB if it isn't locked already. - # Recursively examine dependencies and add them, even - # if they are NOT installed. This ensures we know - # about missing dependencies. - for dep in spec.traverse(root=False): - dep_hash = dep.dag_hash() - if dep_hash not in data: - path = directory_layout.path_for_spec(dep) - installed = os.path.isdir(path) - data[dep_hash] = InstallRecord(dep.copy(), path, installed) - data[dep_hash].ref_count += 1 + """ + with self.write_transaction(): + old_data = self._data + try: + self._data = {} - # Assuming everything went ok, replace this object's data. - self._data = data + # Ask the directory layout to traverse the filesystem. + for spec in directory_layout.all_specs(): + # Create a spec for each known package and add it. + path = directory_layout.path_for_spec(spec) + self._add(spec, path, directory_layout) - # write out, blowing away the old version if necessary - self.write() + self._check_ref_counts() + except: + # If anything explodes, restore old data, skip write. + self._data = old_data + raise - def read(self): - """ - Re-read Database from the data in the set location - If the cache is fresh, return immediately. 
- """ - if not self.is_dirty(): - return - if os.path.isfile(self._index_path): - # Read from YAML file if a database exists - self._read_from_yaml(self._index_path) - else: - # The file doesn't exist, try to traverse the directory. - self.reindex(spack.install_layout) + def _check_ref_counts(self): + """Ensure consistency of reference counts in the DB. + Raise an AssertionError if something is amiss. - def write(self): + Does no locking. """ - Write the database to the standard location - Everywhere that the database is written it is read - within the same lock, so there is no need to refresh - the database within write() + counts = {} + for key, rec in self._data.items(): + counts.setdefault(key, 0) + for dep in rec.spec.dependencies.values(): + dep_key = dep.dag_hash() + counts.setdefault(dep_key, 0) + counts[dep_key] += 1 + + for rec in self._data.values(): + key = rec.spec.dag_hash() + expected = counts[key] + found = rec.ref_count + if not expected == found: + raise AssertionError( + "Invalid ref_count: %s: %d (expected %d), in DB %s." + % (key, found, expected, self._index_path)) + + + def _write(self): + """Write the in-memory database index to its file path. + + Does no locking. + """ temp_name = '%s.%s.temp' % (socket.getfqdn(), os.getpid()) temp_file = join_path(self._db_dir, temp_name) @@ -314,7 +342,6 @@ class Database(object): # Write a temporary database file them move it into place try: with open(temp_file, 'w') as f: - self._last_write_time = int(time.time()) self._write_to_yaml(f) os.rename(temp_file, self._index_path) @@ -325,36 +352,137 @@ class Database(object): raise - def is_dirty(self): + def _read(self): + """Re-read Database from the data in the set location. + + This does no locking. """ - Returns true iff the database file does not exist - or was most recently written to by another spack instance. 
+ if os.path.isfile(self._index_path): + # Read from YAML file if a database exists + self._read_from_yaml(self._index_path) + + else: + # The file doesn't exist, try to traverse the directory. + # reindex() takes its own write lock, so no lock here. + self.reindex(spack.install_layout) + + + def read(self): + with self.read_transaction(): pass + + + def write(self): + with self.write_transaction(): pass + + + def _add(self, spec, path, directory_layout=None): + """Add an install record for spec at path to the database. + + This assumes that the spec is not already installed. It + updates the ref counts on dependencies of the spec in the DB. + + This operation is in-memory, and does not lock the DB. + """ - return (not os.path.isfile(self._index_path) or - (os.path.getmtime(self._index_path) > self._last_write_time)) + key = spec.dag_hash() + if key in self._data: + rec = self._data[key] + rec.installed = True + + # TODO: this overwrites a previous install path (when path != + # self._data[key].path), and the old path still has a + # dependent in the DB. We could consider re-RPATH-ing the + # dependents. This case is probably infrequent and may not be + # worth fixing, but this is where we can discover it. 
+ rec.path = path + else: + self._data[key] = InstallRecord(spec, path, True) + for dep in spec.dependencies.values(): + self._increment_ref_count(dep, directory_layout) + + + def _increment_ref_count(self, spec, directory_layout=None): + """Recursively examine dependencies and update their DB entries.""" + key = spec.dag_hash() + if key not in self._data: + installed = False + path = None + if directory_layout: + path = directory_layout.path_for_spec(spec) + installed = os.path.isdir(path) + + self._data[key] = InstallRecord(spec.copy(), path, installed) + + for dep in spec.dependencies.values(): + self._increment_ref_count(dep) + + self._data[key].ref_count += 1 @_autospec def add(self, spec, path): - """Read the database from the set location + """Add spec at path to database, locking and reading DB to sync. + + ``add()`` will lock and read from the DB on disk. + + """ + # TODO: ensure that spec is concrete? + # Entire add is transactional. + with self.write_transaction(): + self._add(spec, path) + + + def _get_matching_spec_key(self, spec, **kwargs): + """Get the exact spec OR get a single spec that matches.""" + key = spec.dag_hash() + if not key in self._data: + match = self.query_one(spec, **kwargs) + if match: + return match.dag_hash() + raise KeyError("No such spec in database! %s" % spec) + return key + + + @_autospec + def get_record(self, spec, **kwargs): + key = self._get_matching_spec_key(spec, **kwargs) + return self._data[key] + + + def _decrement_ref_count(self, spec): + key = spec.dag_hash() + + if not key in self._data: + # TODO: print something here? DB is corrupt, but + # not much we can do. + return - Add the specified entry as a dict, then write the database - back to memory. This assumes that ALL dependencies are already in - the database. Should not be called otherwise. 
+ rec = self._data[key] + rec.ref_count -= 1 + if rec.ref_count == 0 and not rec.installed: + del self._data[key] + for dep in spec.dependencies.values(): + self._decrement_ref_count(dep) + + + def _remove(self, spec): + """Non-locking version of remove(); does real work. """ - # Should always already be locked - with self.write_lock(): - self.read() - self._data[spec.dag_hash()] = InstallRecord(spec, path, True) + key = self._get_matching_spec_key(spec) + rec = self._data[key] + + if rec.ref_count > 0: + rec.installed = False + return rec.spec - # sanity check the dependencies in case something went - # wrong during install() - # TODO: ensure no races during distributed install. - for dep in spec.traverse(root=False): - assert dep.dag_hash() in self._data + del self._data[key] + for dep in rec.spec.dependencies.values(): + self._decrement_ref_count(dep) - self.write() + # Returns the concrete spec so we know it in the case where a + # query spec was passed in. + return rec.spec @_autospec @@ -369,13 +497,9 @@ class Database(object): and remvoes them if they are no longer needed. """ - # Should always already be locked - with self.write_lock(): - self.read() - hash_key = spec.dag_hash() - if hash_key in self._data: - del self._data[hash_key] - self.write() + # Take a lock around the entire removal. + with self.write_transaction(): + return self._remove(spec) @_autospec @@ -429,24 +553,75 @@ class Database(object): these really special cases that only belong here? 
""" - with self.read_lock(): - self.read() + with self.read_transaction(): + results = [] + for key, rec in self._data.items(): + if installed is not any and rec.installed != installed: + continue + if known is not any and spack.db.exists(rec.spec.name) != known: + continue + if query_spec is any or rec.spec.satisfies(query_spec): + results.append(rec.spec) - results = [] - for key, rec in self._data.items(): - if installed is not any and rec.installed != installed: - continue - if known is not any and spack.db.exists(rec.spec.name) != known: - continue - if query_spec is any or rec.spec.satisfies(query_spec): - results.append(rec.spec) + return sorted(results) - return sorted(results) + + def query_one(self, query_spec, known=any, installed=True): + """Query for exactly one spec that matches the query spec. + + Raises an assertion error if more than one spec matches the + query. Returns None if no installed package matches. + + """ + concrete_specs = self.query(query_spec, known, installed) + assert len(concrete_specs) <= 1 + return concrete_specs[0] if concrete_specs else None def missing(self, spec): - key = spec.dag_hash() - return key in self._data and not self._data[key].installed + with self.read_transaction(): + key = spec.dag_hash() + return key in self._data and not self._data[key].installed + + +class _Transaction(object): + """Simple nested transaction context manager that uses a file lock. + + This class can trigger actions when the lock is acquired for the + first time and released for the last. + + Timeout for lock is customizable. 
+ """ + def __init__(self, db, acquire_fn=None, release_fn=None, + timeout=_db_lock_timeout): + self._db = db + self._timeout = timeout + self._acquire_fn = acquire_fn + self._release_fn = release_fn + + def __enter__(self): + if self._enter() and self._acquire_fn: + self._acquire_fn() + + def __exit__(self, type, value, traceback): + if self._exit() and self._release_fn: + self._release_fn() + + +class ReadTransaction(_Transaction): + def _enter(self): + return self._db.lock.acquire_read(self._timeout) + + def _exit(self): + return self._db.lock.release_read() + + +class WriteTransaction(_Transaction): + def _enter(self): + return self._db.lock.acquire_write(self._timeout) + + def _exit(self): + return self._db.lock.release_write() class CorruptDatabaseError(SpackError): diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py index e61929d8fd..758ec209db 100644 --- a/lib/spack/spack/directory_layout.py +++ b/lib/spack/spack/directory_layout.py @@ -32,7 +32,6 @@ import tempfile from external import yaml import llnl.util.tty as tty -from llnl.util.lang import memoized from llnl.util.filesystem import join_path, mkdirp from spack.spec import Spec @@ -263,7 +262,6 @@ class YamlDirectoryLayout(DirectoryLayout): self.write_spec(spec, spec_file_path) - @memoized def all_specs(self): if not os.path.isdir(self.root): return [] @@ -274,7 +272,6 @@ class YamlDirectoryLayout(DirectoryLayout): return [self.read_spec(s) for s in spec_files] - @memoized def specs_by_hash(self): by_hash = {} for spec in self.all_specs(): diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py index 2957257b1a..b87baf403e 100644 --- a/lib/spack/spack/package.py +++ b/lib/spack/spack/package.py @@ -845,7 +845,7 @@ class Package(object): # note: PARENT of the build process adds the new package to # the database, so that we don't need to re-read from file. 
- spack.installed_db.add(self.spec, spack.install_layout.path_for_spec(self.spec)) + spack.installed_db.add(self.spec, self.prefix) # Once everything else is done, run post install hooks spack.hooks.post_install(self) diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index a3386bad99..3c5926e840 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -1,5 +1,5 @@ ############################################################################## -# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. @@ -26,79 +26,320 @@ These tests check the database is functioning properly, both in memory and in its file """ -import unittest +import tempfile +import shutil +import multiprocessing from llnl.util.lock import * from llnl.util.filesystem import join_path import spack from spack.database import Database +from spack.directory_layout import YamlDirectoryLayout +from spack.test.mock_packages_test import * + +from llnl.util.tty.colify import colify + +def _print_ref_counts(): + """Print out all ref counts for the graph used here, for debugging""" + recs = [] + + def add_rec(spec): + cspecs = spack.installed_db.query(spec, installed=any) + + if not cspecs: + recs.append("[ %-7s ] %-20s-" % ('', spec)) + else: + key = cspecs[0].dag_hash() + rec = spack.installed_db.get_record(cspecs[0]) + recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count)) + + with spack.installed_db.read_transaction(): + add_rec('mpileaks ^mpich') + add_rec('callpath ^mpich') + add_rec('mpich') + + add_rec('mpileaks ^mpich2') + add_rec('callpath ^mpich2') + add_rec('mpich2') + + add_rec('mpileaks ^zmpi') + add_rec('callpath ^zmpi') + add_rec('zmpi') + add_rec('fake') + + add_rec('dyninst') + add_rec('libdwarf') + add_rec('libelf') + + colify(recs, cols=3) + + +class 
DatabaseTest(MockPackagesTest): + + def _mock_install(self, spec): + s = Spec(spec) + pkg = spack.db.get(s.concretized()) + pkg.do_install(fake=True) + + + def _mock_remove(self, spec): + specs = spack.installed_db.query(spec) + assert(len(specs) == 1) + spec = specs[0] + spec.package.do_uninstall(spec) -class DatabaseTest(unittest.TestCase): def setUp(self): - self.original_db = spack.installed_db - spack.installed_db = Database(self.original_db._root,"_test_index.yaml") - self.file_path = join_path(self.original_db._root,"_test_index.yaml") - if os.path.exists(self.file_path): - os.remove(self.file_path) + super(DatabaseTest, self).setUp() + # + # TODO: make the mockup below easier. + # + + # Make a fake install directory + self.install_path = tempfile.mkdtemp() + self.spack_install_path = spack.install_path + spack.install_path = self.install_path + + self.install_layout = YamlDirectoryLayout(self.install_path) + self.spack_install_layout = spack.install_layout + spack.install_layout = self.install_layout + + # Make fake database and fake install directory. + self.installed_db = Database(self.install_path) + self.spack_installed_db = spack.installed_db + spack.installed_db = self.installed_db + + # make a mock database with some packages installed note that + # the ref count for dyninst here will be 3, as it's recycled + # across each install. + # + # Here is what the mock DB looks like: + # + # o mpileaks o mpileaks' o mpileaks'' + # |\ |\ |\ + # | o callpath | o callpath' | o callpath'' + # |/| |/| |/| + # o | mpich o | mpich2 o | zmpi + # | | o | fake + # | | | + # | |______________/ + # | .____________/ + # |/ + # o dyninst + # |\ + # | o libdwarf + # |/ + # o libelf + # + + # Transaction used to avoid repeated writes. 
+ with spack.installed_db.write_transaction(): + self._mock_install('mpileaks ^mpich') + self._mock_install('mpileaks ^mpich2') + self._mock_install('mpileaks ^zmpi') + def tearDown(self): - spack.installed_db = self.original_db - os.remove(self.file_path) - - def _test_read_from_install_tree(self): - specs = spack.install_layout.all_specs() - spack.installed_db.read_database() - spack.installed_db.write() - for sph in spack.installed_db._data: - self.assertTrue(sph['spec'] in specs) - self.assertEqual(len(specs),len(spack.installed_db._data)) - - def _test_remove_and_add(self): - specs = spack.install_layout.all_specs() - spack.installed_db.remove(specs[len(specs)-1]) - for sph in spack.installed_db._data: - self.assertTrue(sph['spec'] in specs[:len(specs)-1]) - self.assertEqual(len(specs)-1,len(spack.installed_db._data)) - - spack.installed_db.add(specs[len(specs)-1],"") - for sph in spack.installed_db._data: - self.assertTrue(sph['spec'] in specs) - self.assertEqual(len(specs),len(spack.installed_db._data)) - - def _test_read_from_file(self): - spack.installed_db.read_database() - size = len(spack.installed_db._data) - spack.installed_db._data = spack.installed_db._data[1:] - os.utime(spack.installed_db._file_path,None) - spack.installed_db.read_database() - self.assertEqual(size,len(spack.installed_db._data)) - - specs = spack.install_layout.all_specs() - self.assertEqual(size,len(specs)) - for sph in spack.installed_db._data: - self.assertTrue(sph['spec'] in specs) - - - def _test_write_to_file(self): - spack.installed_db.read_database() - size = len(spack.installed_db._data) - real_data = spack.installed_db._data - spack.installed_db._data = real_data[:size-1] - spack.installed_db.write() - spack.installed_db._data = real_data - os.utime(spack.installed_db._file_path,None) - spack.installed_db.read_database() - self.assertEqual(size-1,len(spack.installed_db._data)) - - specs = spack.install_layout.all_specs() - self.assertEqual(size,len(specs)) - for sph in 
spack.installed_db._data: - self.assertTrue(sph['spec'] in specs[:size-1]) - - def test_ordered_test(self): - self._test_read_from_install_tree() - self._test_remove_and_add() - self._test_read_from_file() - self._test_write_to_file() + super(DatabaseTest, self).tearDown() + shutil.rmtree(self.install_path) + spack.install_path = self.spack_install_path + spack.install_layout = self.spack_install_layout + spack.installed_db = self.spack_installed_db + + + def test_010_all_install_sanity(self): + """Ensure that the install layout reflects what we think it does.""" + all_specs = spack.install_layout.all_specs() + self.assertEqual(len(all_specs), 13) + + # query specs with multiple configurations + mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')] + callpath_specs = [s for s in all_specs if s.satisfies('callpath')] + mpi_specs = [s for s in all_specs if s.satisfies('mpi')] + + self.assertEqual(len(mpileaks_specs), 3) + self.assertEqual(len(callpath_specs), 3) + self.assertEqual(len(mpi_specs), 3) + + # query specs with single configurations + dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')] + libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')] + libelf_specs = [s for s in all_specs if s.satisfies('libelf')] + + self.assertEqual(len(dyninst_specs), 1) + self.assertEqual(len(libdwarf_specs), 1) + self.assertEqual(len(libelf_specs), 1) + + # Query by dependency + self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1) + self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1) + self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1) + + + def test_015_write_and_read(self): + # write and read DB + with spack.installed_db.write_transaction(): + specs = spack.installed_db.query() + recs = [spack.installed_db.get_record(s) for s in specs] + spack.installed_db.write() + spack.installed_db.read() + + for spec, rec in zip(specs, recs): + new_rec = 
spack.installed_db.get_record(spec)
+            self.assertEqual(new_rec.ref_count, rec.ref_count)
+            self.assertEqual(new_rec.spec, rec.spec)
+            self.assertEqual(new_rec.path, rec.path)
+            self.assertEqual(new_rec.installed, rec.installed)
+
+
+    def _check_db_sanity(self):
+        """Utility function to check db against install layout."""
+        expected = sorted(spack.install_layout.all_specs())
+        actual = sorted(self.installed_db.query())
+
+        self.assertEqual(len(expected), len(actual))
+        for e, a in zip(expected, actual):
+            self.assertEqual(e, a)
+
+
+    def test_020_db_sanity(self):
+        """Make sure query() returns what's actually in the db."""
+        self._check_db_sanity()
+
+
+    def test_030_db_sanity_from_another_process(self):
+        def read_and_modify():
+            self._check_db_sanity() # check that other process can read DB
+            with self.installed_db.write_transaction():
+                self._mock_remove('mpileaks ^zmpi')
+
+        p = multiprocessing.Process(target=read_and_modify, args=())
+        p.start()
+        p.join()
+
+        # ensure child process change is visible in parent process
+        with self.installed_db.read_transaction():
+            self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)
+
+
+    def test_040_ref_counts(self):
+        """Ensure that we got ref counts right when we read the DB."""
+        self.installed_db._check_ref_counts()
+
+
+    def test_050_basic_query(self):
+        """Ensure that querying the database is consistent with what is installed."""
+        # query everything
+        self.assertEqual(len(spack.installed_db.query()), 13)
+
+        # query specs with multiple configurations
+        mpileaks_specs = self.installed_db.query('mpileaks')
+        callpath_specs = self.installed_db.query('callpath')
+        mpi_specs = self.installed_db.query('mpi')
+
+        self.assertEqual(len(mpileaks_specs), 3)
+        self.assertEqual(len(callpath_specs), 3)
+        self.assertEqual(len(mpi_specs), 3)
+
+        # query specs with single configurations
+        dyninst_specs = self.installed_db.query('dyninst')
+        libdwarf_specs = self.installed_db.query('libdwarf')
+        libelf_specs = 
self.installed_db.query('libelf')
+
+        self.assertEqual(len(dyninst_specs), 1)
+        self.assertEqual(len(libdwarf_specs), 1)
+        self.assertEqual(len(libelf_specs), 1)
+
+        # Query by dependency
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich')), 1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1)
+
+
+    def _check_remove_and_add_package(self, spec):
+        """Remove a spec from the DB, then add it and make sure everything's
+           still ok once it is added.  This checks that it was
+           removed, that it's back when added again, and that ref
+           counts are consistent.
+        """
+        original = self.installed_db.query()
+        self.installed_db._check_ref_counts()
+
+        # Remove spec
+        concrete_spec = self.installed_db.remove(spec)
+        self.installed_db._check_ref_counts()
+        remaining = self.installed_db.query()
+
+        # ensure spec we removed is gone
+        self.assertEqual(len(original) - 1, len(remaining))
+        self.assertTrue(all(s in original for s in remaining))
+        self.assertTrue(concrete_spec not in remaining)
+
+        # add it back and make sure everything is ok.
+        self.installed_db.add(concrete_spec, "")
+        installed = self.installed_db.query()
+        self.assertEqual(len(installed), len(original))
+
+        # sanity check against directory layout and check ref counts. 
+ self._check_db_sanity() + self.installed_db._check_ref_counts() + + + def test_060_remove_and_add_root_package(self): + self._check_remove_and_add_package('mpileaks ^mpich') + + + def test_070_remove_and_add_dependency_package(self): + self._check_remove_and_add_package('dyninst') + + + def test_080_root_ref_counts(self): + rec = self.installed_db.get_record('mpileaks ^mpich') + + # Remove a top-level spec from the DB + self.installed_db.remove('mpileaks ^mpich') + + # record no longer in DB + self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) + + # record's deps have updated ref_counts + self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1) + + # put the spec back + self.installed_db.add(rec.spec, rec.path) + + # record is present again + self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1) + + # dependencies have ref counts updated + self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) + + + def test_090_non_root_ref_counts(self): + mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich') + callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich') + + # "force remove" a non-root spec from the DB + self.installed_db.remove('callpath ^mpich') + + # record still in DB but marked uninstalled + self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), []) + self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1) + + # record and its deps have same ref_counts + self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1) + self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2) + + # remove only dependent of uninstalled callpath record + self.installed_db.remove('mpileaks ^mpich') + + # record and parent 
are completely gone. + self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), []) + self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), []) + + # mpich ref count updated properly. + mpich_rec = self.installed_db.get_record('mpich') + self.assertEqual(mpich_rec.ref_count, 0) -- cgit v1.2.3-70-g09d2