-rw-r--r--  lib/spack/docs/conf.py | 2
-rw-r--r--  lib/spack/llnl/util/filesystem.py | 55
-rw-r--r--  lib/spack/llnl/util/lock.py | 151
-rw-r--r--  lib/spack/spack/build_environment.py | 3
-rw-r--r--  lib/spack/spack/cmd/debug.py | 22
-rw-r--r--  lib/spack/spack/cmd/diy.py | 65
-rw-r--r--  lib/spack/spack/cmd/install.py | 23
-rw-r--r--  lib/spack/spack/cmd/list.py | 138
-rw-r--r--  lib/spack/spack/cmd/md5.py | 15
-rw-r--r--  lib/spack/spack/cmd/package_list.py | 104
-rw-r--r--  lib/spack/spack/cmd/uninstall.py | 86
-rw-r--r--  lib/spack/spack/database.py | 8
-rw-r--r--  lib/spack/spack/file_cache.py | 5
-rw-r--r--  lib/spack/spack/package.py | 96
-rw-r--r--  lib/spack/spack/provider_index.py | 3
-rw-r--r--  lib/spack/spack/spec.py | 22
-rw-r--r--  lib/spack/spack/stage.py | 34
-rw-r--r--  lib/spack/spack/test/lock.py | 196
-rw-r--r--  lib/spack/spack/test/spec_dag.py | 34
-rw-r--r--  lib/spack/spack/util/crypto.py | 21
-rw-r--r--  var/spack/repos/builtin/packages/boost/package.py | 33
-rw-r--r--  var/spack/repos/builtin/packages/cantera/package.py | 3
-rw-r--r--  var/spack/repos/builtin/packages/charm/package.py | 2
-rw-r--r--  var/spack/repos/builtin/packages/charm/strictpass.patch | 16
-rw-r--r--  var/spack/repos/builtin/packages/hdf/package.py | 21
-rw-r--r--  var/spack/repos/builtin/packages/hdf5/package.py | 6
-rw-r--r--  var/spack/repos/builtin/packages/hpx5/package.py | 5
-rw-r--r--  var/spack/repos/builtin/packages/libcerf/package.py | 8
-rw-r--r--  var/spack/repos/builtin/packages/libjpeg-turbo/package.py | 19
-rw-r--r--  var/spack/repos/builtin/packages/mfem/package.py | 70
-rw-r--r--  var/spack/repos/builtin/packages/mkl/package.py | 7
-rw-r--r--  var/spack/repos/builtin/packages/mpich/package.py | 64
-rw-r--r--  var/spack/repos/builtin/packages/netcdf/package.py | 10
-rw-r--r--  var/spack/repos/builtin/packages/pango/package.py | 1
-rw-r--r--  var/spack/repos/builtin/packages/tethex/package.py | 49
-rw-r--r--  var/spack/repos/builtin/packages/trilinos/package.py | 4
36 files changed, 969 insertions, 432 deletions
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index de136dc047..237a062c14 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -69,7 +69,7 @@ os.environ['COLIFY_SIZE'] = '25x120'
#
with open('package_list.rst', 'w') as plist_file:
subprocess.Popen(
- [spack_root + '/bin/spack', 'package-list'], stdout=plist_file)
+ [spack_root + '/bin/spack', 'list', '--format=rst'], stdout=plist_file)
#
# Find all the `cmd-spack-*` references and add them to a command index
diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py
index c3ecfde4f4..e522fdda6d 100644
--- a/lib/spack/llnl/util/filesystem.py
+++ b/lib/spack/llnl/util/filesystem.py
@@ -39,15 +39,34 @@ from contextlib import contextmanager
import llnl.util.tty as tty
from llnl.util.lang import dedupe
-__all__ = ['set_install_permissions', 'install', 'install_tree',
- 'traverse_tree',
- 'expand_user', 'working_dir', 'touch', 'touchp', 'mkdirp',
- 'force_remove', 'join_path', 'ancestor', 'can_access',
- 'filter_file',
- 'FileFilter', 'change_sed_delimiter', 'is_exe', 'force_symlink',
- 'set_executable', 'copy_mode', 'unset_executable_mode',
- 'remove_dead_links', 'remove_linked_tree',
- 'fix_darwin_install_name', 'find_libraries', 'LibraryList']
+__all__ = [
+ 'FileFilter',
+ 'LibraryList',
+ 'ancestor',
+ 'can_access',
+ 'change_sed_delimiter',
+ 'copy_mode',
+ 'expand_user',
+ 'filter_file',
+ 'find_libraries',
+ 'fix_darwin_install_name',
+ 'force_remove',
+ 'force_symlink',
+ 'install',
+ 'install_tree',
+ 'is_exe',
+ 'join_path',
+ 'mkdirp',
+ 'remove_dead_links',
+ 'remove_if_dead_link',
+ 'remove_linked_tree',
+ 'set_executable',
+ 'set_install_permissions',
+ 'touch',
+ 'touchp',
+ 'traverse_tree',
+ 'unset_executable_mode',
+ 'working_dir']
def filter_file(regex, repl, *filenames, **kwargs):
@@ -388,10 +407,20 @@ def remove_dead_links(root):
"""
for file in os.listdir(root):
path = join_path(root, file)
- if os.path.islink(path):
- real_path = os.path.realpath(path)
- if not os.path.exists(real_path):
- os.unlink(path)
+ remove_if_dead_link(path)
+
+
+def remove_if_dead_link(path):
+ """
+ Removes the argument if it is a dead link; does nothing otherwise.
+
+ Args:
+ path: the potential dead link
+ """
+ if os.path.islink(path):
+ real_path = os.path.realpath(path)
+ if not os.path.exists(real_path):
+ os.unlink(path)
def remove_linked_tree(path):
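The refactor above extracts the dead-link check so one path can be tested
without scanning a whole directory. A minimal standalone sketch of the idiom
(not part of the patch; the stage path is hypothetical):

    import os

    def remove_if_dead_link(path):
        # A dead symlink: islink() is true, but the resolved target
        # no longer exists on disk.
        if os.path.islink(path):
            if not os.path.exists(os.path.realpath(path)):
                os.unlink(path)

    # Check a single stage path instead of every entry under the
    # stage root, as stage.py now does in create().
    remove_if_dead_link('/tmp/spack-stage/some-old-stage')
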
diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py
index f5f53101ae..2e44a94798 100644
--- a/lib/spack/llnl/util/lock.py
+++ b/lib/spack/llnl/util/lock.py
@@ -28,9 +28,13 @@ import errno
import time
import socket
+import llnl.util.tty as tty
+
+
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
'LockError']
+
# Default timeout in seconds, after which locks will raise exceptions.
_default_timeout = 60
@@ -41,51 +45,86 @@ _sleep_time = 1e-5
class Lock(object):
"""This is an implementation of a filesystem lock using Python's lockf.
- In Python, `lockf` actually calls `fcntl`, so this should work with any
- filesystem implementation that supports locking through the fcntl calls.
- This includes distributed filesystems like Lustre (when flock is enabled)
- and recent NFS versions.
-
+ In Python, `lockf` actually calls `fcntl`, so this should work with
+ any filesystem implementation that supports locking through the fcntl
+ calls. This includes distributed filesystems like Lustre (when flock
+ is enabled) and recent NFS versions.
"""
- def __init__(self, file_path):
- self._file_path = file_path
- self._fd = None
+ def __init__(self, path, start=0, length=0):
+ """Construct a new lock on the file at ``path``.
+
+ By default, the lock applies to the whole file. Optionally,
+ caller can specify a byte range beginning ``start`` bytes from
+ the start of the file and extending ``length`` bytes from there.
+
+ This exposes a subset of fcntl locking functionality. It does
+ not currently expose the ``whence`` parameter -- ``whence`` is
+ always os.SEEK_SET and ``start`` is always evaluated from the
+ beginning of the file.
+ """
+ self.path = path
+ self._file = None
self._reads = 0
self._writes = 0
- def _lock(self, op, timeout):
+ # byte range parameters
+ self._start = start
+ self._length = length
+
+ # PID and host of lock holder
+ self.pid = self.old_pid = None
+ self.host = self.old_host = None
+
+ def _lock(self, op, timeout=_default_timeout):
"""This takes a lock using POSIX locks (``fnctl.lockf``).
- The lock is implemented as a spin lock using a nonblocking
- call to lockf().
+ The lock is implemented as a spin lock using a nonblocking call
+ to lockf().
On acquiring an exclusive lock, the lock writes this process's
- pid and host to the lock file, in case the holding process
- needs to be killed later.
+ pid and host to the lock file, in case the holding process needs
+ to be killed later.
If the lock times out, it raises a ``LockError``.
"""
start_time = time.time()
while (time.time() - start_time) < timeout:
try:
- # If this is already open read-only and we want to
- # upgrade to an exclusive write lock, close first.
- if self._fd is not None:
- flags = fcntl.fcntl(self._fd, fcntl.F_GETFL)
- if op == fcntl.LOCK_EX and flags | os.O_RDONLY:
- os.close(self._fd)
- self._fd = None
-
- if self._fd is None:
- mode = os.O_RDWR if op == fcntl.LOCK_EX else os.O_RDONLY
- self._fd = os.open(self._file_path, mode)
-
- fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
+ # If we could write the file, we'd have opened it 'r+'.
+ # Raise an error when we attempt to upgrade to a write lock.
+ if op == fcntl.LOCK_EX:
+ if self._file and self._file.mode == 'r':
+ raise LockError(
+ "Can't take exclusive lock on read-only file: %s"
+ % self.path)
+
+ # Create file and parent directories if they don't exist.
+ if self._file is None:
+ self._ensure_parent_directory()
+
+ # Prefer to open 'r+' to allow upgrading to write
+ # lock later if possible. Open read-only if we can't
+ # write the lock file at all.
+ os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+'
+ if os.path.exists(self.path) and not os.access(
+ self.path, os.W_OK):
+ os_mode, fd_mode = os.O_RDONLY, 'r'
+
+ fd = os.open(self.path, os_mode)
+ self._file = os.fdopen(fd, fd_mode)
+
+ # Try to get the lock (will raise if not available.)
+ fcntl.lockf(self._file, op | fcntl.LOCK_NB,
+ self._length, self._start, os.SEEK_SET)
+
+ # All locks read the owner PID and host
+ self._read_lock_data()
+
+ # Exclusive locks write their PID/host
if op == fcntl.LOCK_EX:
- os.write(
- self._fd,
- "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
+ self._write_lock_data()
+
return
except IOError as error:
@@ -97,6 +136,40 @@ class Lock(object):
raise LockError("Timed out waiting for lock.")
+ def _ensure_parent_directory(self):
+ parent = os.path.dirname(self.path)
+ try:
+ os.makedirs(parent)
+ return True
+ except OSError as e:
+ # makedirs can fail when the directory already exists.
+ if not (e.errno == errno.EEXIST and os.path.isdir(parent) or
+ e.errno == errno.EISDIR):
+ raise
+
+ def _read_lock_data(self):
+ """Read PID and host data out of the file if it is there."""
+ line = self._file.read()
+ if line:
+ pid, host = line.strip().split(',')
+ _, _, self.pid = pid.rpartition('=')
+ _, _, self.host = host.rpartition('=')
+
+ def _write_lock_data(self):
+ """Write PID and host data to the file, recording old values."""
+ self.old_pid = self.pid
+ self.old_host = self.host
+
+ self.pid = os.getpid()
+ self.host = socket.getfqdn()
+
+ # write pid, host to disk to sync over FS
+ self._file.seek(0)
+ self._file.write("pid=%s,host=%s" % (self.pid, self.host))
+ self._file.truncate()
+ self._file.flush()
+ os.fsync(self._file.fileno())
+
def _unlock(self):
"""Releases a lock using POSIX locks (``fcntl.lockf``)
@@ -104,9 +177,10 @@ class Lock(object):
be masquerading as write locks, but this removes either.
"""
- fcntl.lockf(self._fd, fcntl.LOCK_UN)
- os.close(self._fd)
- self._fd = None
+ fcntl.lockf(self._file, fcntl.LOCK_UN,
+ self._length, self._start, os.SEEK_SET)
+ self._file.close()
+ self._file = None
def acquire_read(self, timeout=_default_timeout):
"""Acquires a recursive, shared lock for reading.
@@ -120,7 +194,9 @@ class Lock(object):
"""
if self._reads == 0 and self._writes == 0:
- self._lock(fcntl.LOCK_SH, timeout) # can raise LockError.
+ tty.debug('READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
+ .format(self))
+ self._lock(fcntl.LOCK_SH, timeout=timeout) # can raise LockError.
self._reads += 1
return True
else:
@@ -139,7 +215,10 @@ class Lock(object):
"""
if self._writes == 0:
- self._lock(fcntl.LOCK_EX, timeout) # can raise LockError.
+ tty.debug(
+ 'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]'
+ .format(self))
+ self._lock(fcntl.LOCK_EX, timeout=timeout) # can raise LockError.
self._writes += 1
return True
else:
@@ -159,6 +238,8 @@ class Lock(object):
assert self._reads > 0
if self._reads == 1 and self._writes == 0:
+ tty.debug('READ LOCK: {0.path}[{0._start}:{0._length}] [Released]'
+ .format(self))
self._unlock() # can raise LockError.
self._reads -= 1
return True
@@ -179,6 +260,8 @@ class Lock(object):
assert self._writes > 0
if self._writes == 1 and self._reads == 0:
+ tty.debug('WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]'
+ .format(self))
self._unlock() # can raise LockError.
self._writes -= 1
return True
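A minimal standalone illustration of the byte-range mechanism the new Lock
uses (assumes a POSIX system; the file path is made up). Locking distinct
ranges of one file yields many independent readers-writer locks without any
per-lock file cleanup:

    import fcntl
    import os

    fd = os.open('/tmp/demo.lock', os.O_RDWR | os.O_CREAT)

    # Exclusive, non-blocking lock on byte range [0, 1) only. Another
    # process can still lock byte 1 of the same file concurrently.
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 1, 0, os.SEEK_SET)

    # ... critical section for the resource mapped to byte 0 ...

    fcntl.lockf(fd, fcntl.LOCK_UN, 1, 0, os.SEEK_SET)
    os.close(fd)
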
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 3f2939609d..15fb943ca4 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -408,7 +408,8 @@ def get_rpaths(pkg):
def get_std_cmake_args(cmake_pkg):
# standard CMake arguments
ret = ['-DCMAKE_INSTALL_PREFIX=%s' % cmake_pkg.prefix,
- '-DCMAKE_BUILD_TYPE=RelWithDebInfo']
+ '-DCMAKE_BUILD_TYPE=RelWithDebInfo',
+ '-DCMAKE_VERBOSE_MAKEFILE=ON']
if platform.mac_ver()[0]:
ret.append('-DCMAKE_FIND_FRAMEWORK=LAST')
diff --git a/lib/spack/spack/cmd/debug.py b/lib/spack/spack/cmd/debug.py
index 958eb829b4..757c5bca80 100644
--- a/lib/spack/spack/cmd/debug.py
+++ b/lib/spack/spack/cmd/debug.py
@@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
+import re
from datetime import datetime
from glob import glob
@@ -53,8 +54,12 @@ def _debug_tarball_suffix():
if not os.path.isdir('.git'):
return 'nobranch.nogit.%s' % suffix
+ # Get symbolic branch name and strip any special chars (mainly '/')
symbolic = git(
'rev-parse', '--abbrev-ref', '--short', 'HEAD', output=str).strip()
+ symbolic = re.sub(r'[^\w.-]', '-', symbolic)
+
+ # Get the commit hash too.
commit = git(
'rev-parse', '--short', 'HEAD', output=str).strip()
@@ -69,12 +74,23 @@ def create_db_tarball(args):
tarball_name = "spack-db.%s.tar.gz" % _debug_tarball_suffix()
tarball_path = os.path.abspath(tarball_name)
- with working_dir(spack.spack_root):
+ base = os.path.basename(spack.install_path)
+ transform_args = []
+ if 'GNU' in tar('--version', output=str):
+ transform_args = ['--transform', 's/^%s/%s/' % (base, tarball_name)]
+ else:
+ transform_args = ['-s', '/^%s/%s/' % (base, tarball_name)]
+
+ wd = os.path.dirname(spack.install_path)
+ with working_dir(wd):
files = [spack.installed_db._index_path]
- files += glob('%s/*/*/*/.spack/spec.yaml' % spack.install_path)
+ files += glob('%s/*/*/*/.spack/spec.yaml' % base)
files = [os.path.relpath(f) for f in files]
- tar('-czf', tarball_path, *files)
+ args = ['-czf', tarball_path]
+ args += transform_args
+ args += files
+ tar(*args)
tty.msg('Created %s' % tarball_name)
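The tar invocation above renames the archive's top-level directory as it is
written, and GNU tar (--transform) and BSD tar (-s) spell that option
differently. A hedged sketch of the same version sniffing outside Spack's
Executable wrapper:

    import subprocess

    def transform_args(base, new_name):
        # Rename paths matching `base` to `new_name` while archiving;
        # pick the option spelling based on the tar flavor.
        version = subprocess.check_output(['tar', '--version']).decode()
        if 'GNU' in version:
            return ['--transform', 's/^%s/%s/' % (base, new_name)]
        return ['-s', '/^%s/%s/' % (base, new_name)]

    # e.g. tar('-czf', out, *transform_args('opt', 'spack-db'), 'opt')
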
diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py
index d60fd6bc7a..08386cac07 100644
--- a/lib/spack/spack/cmd/diy.py
+++ b/lib/spack/spack/cmd/diy.py
@@ -65,43 +65,40 @@ def diy(self, args):
if len(specs) > 1:
tty.die("spack diy only takes one spec.")
- # Take a write lock before checking for existence.
- with spack.installed_db.write_transaction():
- spec = specs[0]
- if not spack.repo.exists(spec.name):
- tty.warn("No such package: %s" % spec.name)
- create = tty.get_yes_or_no("Create this package?", default=False)
- if not create:
- tty.msg("Exiting without creating.")
- sys.exit(1)
- else:
- tty.msg("Running 'spack edit -f %s'" % spec.name)
- edit_package(spec.name, spack.repo.first_repo(), None, True)
- return
+ spec = specs[0]
+ if not spack.repo.exists(spec.name):
+ tty.warn("No such package: %s" % spec.name)
+ create = tty.get_yes_or_no("Create this package?", default=False)
+ if not create:
+ tty.msg("Exiting without creating.")
+ sys.exit(1)
+ else:
+ tty.msg("Running 'spack edit -f %s'" % spec.name)
+ edit_package(spec.name, spack.repo.first_repo(), None, True)
+ return
- if not spec.versions.concrete:
- tty.die(
- "spack diy spec must have a single, concrete version. "
- "Did you forget a package version number?")
+ if not spec.versions.concrete:
+ tty.die(
+ "spack diy spec must have a single, concrete version. "
+ "Did you forget a package version number?")
- spec.concretize()
- package = spack.repo.get(spec)
+ spec.concretize()
+ package = spack.repo.get(spec)
- if package.installed:
- tty.error("Already installed in %s" % package.prefix)
- tty.msg("Uninstall or try adding a version suffix for this "
- "DIY build.")
- sys.exit(1)
+ if package.installed:
+ tty.error("Already installed in %s" % package.prefix)
+ tty.msg("Uninstall or try adding a version suffix for this DIY build.")
+ sys.exit(1)
- # Forces the build to run out of the current directory.
- package.stage = DIYStage(os.getcwd())
+ # Forces the build to run out of the current directory.
+ package.stage = DIYStage(os.getcwd())
- # TODO: make this an argument, not a global.
- spack.do_checksum = False
+ # TODO: make this an argument, not a global.
+ spack.do_checksum = False
- package.do_install(
- keep_prefix=args.keep_prefix,
- install_deps=not args.ignore_deps,
- verbose=not args.quiet,
- keep_stage=True, # don't remove source dir for DIY.
- dirty=args.dirty)
+ package.do_install(
+ keep_prefix=args.keep_prefix,
+ install_deps=not args.ignore_deps,
+ verbose=not args.quiet,
+ keep_stage=True, # don't remove source dir for DIY.
+ dirty=args.dirty)
diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index e51024b05f..70abe1dd00 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -84,15 +84,14 @@ def install(parser, args):
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
package = spack.repo.get(spec)
- with spack.installed_db.write_transaction():
- package.do_install(
- keep_prefix=args.keep_prefix,
- keep_stage=args.keep_stage,
- install_deps=not args.ignore_deps,
- install_self=not args.deps_only,
- make_jobs=args.jobs,
- run_tests=args.run_tests,
- verbose=args.verbose,
- fake=args.fake,
- dirty=args.dirty,
- explicit=True)
+ package.do_install(
+ keep_prefix=args.keep_prefix,
+ keep_stage=args.keep_stage,
+ install_deps=not args.ignore_deps,
+ install_self=not args.deps_only,
+ make_jobs=args.jobs,
+ run_tests=args.run_tests,
+ verbose=args.verbose,
+ fake=args.fake,
+ dirty=args.dirty,
+ explicit=True)
diff --git a/lib/spack/spack/cmd/list.py b/lib/spack/spack/cmd/list.py
index c921efd1bd..e1389df69f 100644
--- a/lib/spack/spack/cmd/list.py
+++ b/lib/spack/spack/cmd/list.py
@@ -22,36 +22,51 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import argparse
+import cgi
+import fnmatch
+import re
import sys
+from StringIO import StringIO
+
import llnl.util.tty as tty
-import argparse
+import spack
from llnl.util.tty.colify import colify
-import spack
-import fnmatch
-import re
+description = "Print available spack packages to stdout in different formats"
+
+formatters = {}
-description = "List available spack packages"
+
+def formatter(func):
+ """Decorator used to register formatters"""
+ formatters[func.__name__] = func
+ return func
def setup_parser(subparser):
subparser.add_argument(
'filter', nargs=argparse.REMAINDER,
- help='Optional glob patterns to filter results.')
- subparser.add_argument(
- '-s', '--sensitive', action='store_true', default=False,
- help='Use case-sensitive filtering. Default is case sensitive, '
- 'unless the query contains a capital letter.')
+ help='Optional case-insensitive glob patterns to filter results.')
subparser.add_argument(
'-d', '--search-description', action='store_true', default=False,
help='Filtering will also search the description for a match.')
+ subparser.add_argument(
+ '--format', default='name_only', choices=formatters,
+ help='Format to be used to print the output [default: name_only]')
-def list(parser, args):
- # Start with all package names.
- pkgs = set(spack.repo.all_package_names())
+def filter_by_name(pkgs, args):
+ """
+ Filters the sequence of packages according to user prescriptions
- # filter if a filter arg was provided
+ Args:
+ pkgs: sequence of packages
+ args: parsed command line arguments
+
+ Returns:
+ filtered and sorted list of packages
+ """
if args.filter:
res = []
for f in args.filter:
@@ -60,10 +75,7 @@ def list(parser, args):
else:
r = fnmatch.translate(f)
- re_flags = re.I
- if any(l.isupper for l in f) or args.sensitive:
- re_flags = 0
- rc = re.compile(r, flags=re_flags)
+ rc = re.compile(r, flags=re.IGNORECASE)
res.append(rc)
if args.search_description:
@@ -80,11 +92,91 @@ def list(parser, args):
return f.match(p)
pkgs = [p for p in pkgs if any(match(p, f) for f in res)]
- # sort before displaying.
- sorted_packages = sorted(pkgs, key=lambda s: s.lower())
+ return sorted(pkgs, key=lambda s: s.lower())
+
- # Print all the package names in columns
+@formatter
+def name_only(pkgs):
indent = 0
if sys.stdout.isatty():
- tty.msg("%d packages." % len(sorted_packages))
- colify(sorted_packages, indent=indent)
+ tty.msg("%d packages." % len(pkgs))
+ colify(pkgs, indent=indent)
+
+
+@formatter
+def rst(pkgs):
+ """Print out information on all packages in restructured text."""
+
+ def github_url(pkg):
+ """Link to a package file on github."""
+ url = 'https://github.com/LLNL/spack/blob/develop/var/spack/repos/builtin/packages/{0}/package.py'
+ return url.format(pkg.name)
+
+ def rst_table(elts):
+ """Print out a RST-style table."""
+ cols = StringIO()
+ ncol, widths = colify(elts, output=cols, tty=True)
+ header = ' '.join('=' * (w - 1) for w in widths)
+ return '%s\n%s%s' % (header, cols.getvalue(), header)
+
+ pkg_names = pkgs
+ pkgs = [spack.repo.get(name) for name in pkg_names]
+
+ print('.. _package-list:')
+ print('')
+ print('============')
+ print('Package List')
+ print('============')
+ print('')
+ print('This is a list of things you can install using Spack. It is')
+ print('automatically generated based on the packages in the latest Spack')
+ print('release.')
+ print('')
+ print('Spack currently has %d mainline packages:' % len(pkgs))
+ print('')
+ print(rst_table('`%s`_' % p for p in pkg_names))
+ print('')
+
+ # Output some text for each package.
+ for pkg in pkgs:
+ print('-----')
+ print('')
+ print('.. _%s:' % pkg.name)
+ print('')
+ # Must be at least 2 long, breaks for single letter packages like R.
+ print('-' * max(len(pkg.name), 2))
+ print(pkg.name)
+ print('-' * max(len(pkg.name), 2))
+ print('')
+ print('Homepage:')
+ print(' * `%s <%s>`__' % (cgi.escape(pkg.homepage), pkg.homepage))
+ print('')
+ print('Spack package:')
+ print(' * `%s/package.py <%s>`__' % (pkg.name, github_url(pkg)))
+ print('')
+ if pkg.versions:
+ print('Versions:')
+ print(' ' + ', '.join(str(v) for v in
+ reversed(sorted(pkg.versions))))
+ print('')
+
+ for deptype in spack.alldeps:
+ deps = pkg.dependencies_of_type(deptype)
+ if deps:
+ print('%s Dependencies' % deptype.capitalize())
+ print(' ' + ', '.join('%s_' % d if d in pkg_names
+ else d for d in deps))
+ print('')
+
+ print('Description:')
+ print(pkg.format_doc(indent=2))
+ print('')
+
+
+def list(parser, args):
+ # Retrieve the names of all the packages
+ pkgs = set(spack.repo.all_package_names())
+ # Filter the set appropriately
+ sorted_packages = filter_by_name(pkgs, args)
+ # Print to stdout
+ formatters[args.format](sorted_packages)
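The @formatter decorator above is a plain registry: each decorated function
lands in the ``formatters`` dict under its own name, and that dict doubles as
the ``choices`` for the --format flag. A self-contained sketch of the pattern:

    formatters = {}

    def formatter(func):
        # Register under the function's own name and return it
        # unchanged, so the decorator is transparent to callers.
        formatters[func.__name__] = func
        return func

    @formatter
    def name_only(pkgs):
        for p in pkgs:
            print(p)

    # Dispatch by name, as list() does with args.format:
    formatters['name_only'](['boost', 'mpich'])
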
diff --git a/lib/spack/spack/cmd/md5.py b/lib/spack/spack/cmd/md5.py
index 506cf0913f..2ae279a41e 100644
--- a/lib/spack/spack/cmd/md5.py
+++ b/lib/spack/spack/cmd/md5.py
@@ -25,6 +25,7 @@
import argparse
import hashlib
import os
+from urlparse import urlparse
import llnl.util.tty as tty
import spack.util.crypto
@@ -49,13 +50,23 @@ def compute_md5_checksum(url):
return value
+def normalized(files):
+ for p in files:
+ result = urlparse(p)
+ value = p
+ if not result.scheme:
+ value = os.path.abspath(p)
+ yield value
+
+
def md5(parser, args):
if not args.files:
setup_parser.parser.print_help()
return 1
+ urls = [x for x in normalized(args.files)]
results = []
- for url in args.files:
+ for url in urls:
try:
checksum = compute_md5_checksum(url)
results.append((checksum, url))
@@ -70,4 +81,4 @@ def md5(parser, args):
checksum = 'checksum' if len(results) == 1 else 'checksums'
tty.msg("%d MD5 %s:" % (len(results), checksum))
for checksum, url in results:
- print "%s %s" % (checksum, url)
+ print("{0} {1}".format(checksum, url))
diff --git a/lib/spack/spack/cmd/package_list.py b/lib/spack/spack/cmd/package_list.py
deleted file mode 100644
index 42f408af96..0000000000
--- a/lib/spack/spack/cmd/package_list.py
+++ /dev/null
@@ -1,104 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import cgi
-from StringIO import StringIO
-from llnl.util.tty.colify import *
-import spack
-
-description = "Print a list of all packages in reStructuredText."
-
-
-def github_url(pkg):
- """Link to a package file on github."""
- url = "https://github.com/LLNL/spack/blob/develop/var/spack/repos/builtin/packages/{0}/package.py"
- return url.format(pkg.name)
-
-
-def rst_table(elts):
- """Print out a RST-style table."""
- cols = StringIO()
- ncol, widths = colify(elts, output=cols, tty=True)
- header = " ".join("=" * (w - 1) for w in widths)
- return "%s\n%s%s" % (header, cols.getvalue(), header)
-
-
-def print_rst_package_list():
- """Print out information on all packages in restructured text."""
- pkgs = sorted(spack.repo.all_packages(), key=lambda s: s.name.lower())
- pkg_names = [p.name for p in pkgs]
-
- print ".. _package-list:"
- print
- print "============"
- print "Package List"
- print "============"
- print
- print "This is a list of things you can install using Spack. It is"
- print "automatically generated based on the packages in the latest Spack"
- print "release."
- print
- print "Spack currently has %d mainline packages:" % len(pkgs)
- print
- print rst_table("`%s`_" % p for p in pkg_names)
- print
-
- # Output some text for each package.
- for pkg in pkgs:
- print "-----"
- print
- print ".. _%s:" % pkg.name
- print
- # Must be at least 2 long, breaks for single letter packages like R.
- print "-" * max(len(pkg.name), 2)
- print pkg.name
- print "-" * max(len(pkg.name), 2)
- print
- print "Homepage:"
- print " * `%s <%s>`__" % (cgi.escape(pkg.homepage), pkg.homepage)
- print
- print "Spack package:"
- print " * `%s/package.py <%s>`__" % (pkg.name, github_url(pkg))
- print
- if pkg.versions:
- print "Versions:"
- print " " + ", ".join(str(v) for v in
- reversed(sorted(pkg.versions)))
- print
-
- for deptype in spack.alldeps:
- deps = pkg.dependencies_of_type(deptype)
- if deps:
- print "%s Dependencies" % deptype.capitalize()
- print " " + ", ".join("%s_" % d if d in pkg_names
- else d for d in deps)
- print
-
- print "Description:"
- print pkg.format_doc(indent=2)
- print
-
-
-def package_list(parser, args):
- print_rst_package_list()
diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
index e42c5776b5..bbcd2e787c 100644
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -54,9 +54,10 @@ def setup_parser(subparser):
subparser.add_argument(
'-a', '--all', action='store_true', dest='all',
help="USE CAREFULLY. Remove ALL installed packages that match each "
- "supplied spec. i.e., if you say uninstall libelf, ALL versions "
- "of libelf are uninstalled. This is both useful and dangerous, "
- "like rm -r.")
+ "supplied spec. i.e., if you say uninstall `libelf`,"
+ " ALL versions of `libelf` are uninstalled. If no spec is "
+ "supplied all installed software will be uninstalled. This "
+ "is both useful and dangerous, like rm -r.")
subparser.add_argument(
'-d', '--dependents', action='store_true', dest='dependents',
@@ -157,44 +158,49 @@ def do_uninstall(specs, force):
item.do_uninstall(force=force)
-def uninstall(parser, args):
- if not args.packages:
- tty.die("uninstall requires at least one package argument.")
-
- with spack.installed_db.write_transaction():
+def get_uninstall_list(args):
+ specs = [any]
+ if args.packages:
specs = spack.cmd.parse_specs(args.packages)
- # Gets the list of installed specs that match the ones give via cli
- # takes care of '-a' is given in the cli
- uninstall_list = concretize_specs(specs, args.all, args.force)
- dependent_list = installed_dependents(
- uninstall_list) # takes care of '-d'
-
- # Process dependent_list and update uninstall_list
- has_error = False
- if dependent_list and not args.dependents and not args.force:
- for spec, lst in dependent_list.items():
- tty.error("Will not uninstall %s" %
- spec.format("$_$@$%@$#", color=True))
- print('')
- print("The following packages depend on it:")
- spack.cmd.display_specs(lst, **display_args)
- print('')
- has_error = True
- elif args.dependents:
- for key, lst in dependent_list.items():
- uninstall_list.extend(lst)
- uninstall_list = list(set(uninstall_list))
-
- if has_error:
- tty.die('You can use spack uninstall --dependents '
- 'to uninstall these dependencies as well')
-
- if not args.yes_to_all:
- tty.msg("The following packages will be uninstalled : ")
+ # Get the list of installed specs that match the ones given via CLI.
+ # Takes care of '-a' if it is given on the CLI.
+ uninstall_list = concretize_specs(specs, args.all, args.force)
+ # Takes care of '-d'
+ dependent_list = installed_dependents(uninstall_list)
+ # Process dependent_list and update uninstall_list
+ has_error = False
+ if dependent_list and not args.dependents and not args.force:
+ for spec, lst in dependent_list.items():
+ tty.error("Will not uninstall %s" %
+ spec.format("$_$@$%@$#", color=True))
print('')
- spack.cmd.display_specs(uninstall_list, **display_args)
+ print("The following packages depend on it:")
+ spack.cmd.display_specs(lst, **display_args)
print('')
- spack.cmd.ask_for_confirmation('Do you want to proceed ? ')
+ has_error = True
+ elif args.dependents:
+ for key, lst in dependent_list.items():
+ uninstall_list.extend(lst)
+ uninstall_list = list(set(uninstall_list))
+ if has_error:
+ tty.die('You can use spack uninstall --dependents '
+ 'to uninstall these dependencies as well')
+
+ return uninstall_list
+
+
+def uninstall(parser, args):
+ if not args.packages and not args.all:
+ tty.die("uninstall requires at least one package argument.")
+
+ uninstall_list = get_uninstall_list(args)
+
+ if not args.yes_to_all:
+ tty.msg("The following packages will be uninstalled : ")
+ print('')
+ spack.cmd.display_specs(uninstall_list, **display_args)
+ print('')
+ spack.cmd.ask_for_confirmation('Do you want to proceed ? ')
- # Uninstall everything on the list
- do_uninstall(uninstall_list, args.force)
+ # Uninstall everything on the list
+ do_uninstall(uninstall_list, args.force)
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
index f73d3765c8..e9bd07d92c 100644
--- a/lib/spack/spack/database.py
+++ b/lib/spack/spack/database.py
@@ -33,7 +33,7 @@ The database serves two purposes:
2. It will allow us to track external installations as well as lost
packages and their dependencies.
-Prior ot the implementation of this store, a direcotry layout served
+Prior to the implementation of this store, a directory layout served
as the authoritative database of packages in Spack. This module
provides a cache and a sanity checking mechanism for what is in the
filesystem.
@@ -156,13 +156,13 @@ class Database(object):
self._index_path = join_path(self._db_dir, 'index.yaml')
self._lock_path = join_path(self._db_dir, 'lock')
+ # This is for other classes to use to lock prefix directories.
+ self.prefix_lock_path = join_path(self._db_dir, 'prefix_lock')
+
# Create needed directories and files
if not os.path.exists(self._db_dir):
mkdirp(self._db_dir)
- if not os.path.exists(self._lock_path):
- touch(self._lock_path)
-
# initialize rest of state.
self.lock = Lock(self._lock_path)
self._data = {}
diff --git a/lib/spack/spack/file_cache.py b/lib/spack/spack/file_cache.py
index 0a66166fd8..31ae009836 100644
--- a/lib/spack/spack/file_cache.py
+++ b/lib/spack/spack/file_cache.py
@@ -77,10 +77,7 @@ class FileCache(object):
def _get_lock(self, key):
"""Create a lock for a key, if necessary, and return a lock object."""
if key not in self._locks:
- lock_file = self._lock_path(key)
- if not os.path.exists(lock_file):
- touch(lock_file)
- self._locks[key] = Lock(lock_file)
+ self._locks[key] = Lock(self._lock_path(key))
return self._locks[key]
def init_entry(self, key):
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index aa874bf508..768605294f 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -39,8 +39,16 @@ import re
import textwrap
import time
import string
+import contextlib
+from StringIO import StringIO
+import llnl.util.lock
import llnl.util.tty as tty
+from llnl.util.filesystem import *
+from llnl.util.lang import *
+from llnl.util.link_tree import LinkTree
+from llnl.util.tty.log import log_output
+
import spack
import spack.build_environment
import spack.compilers
@@ -53,12 +61,8 @@ import spack.repository
import spack.url
import spack.util.web
-from StringIO import StringIO
-from llnl.util.filesystem import *
-from llnl.util.lang import *
-from llnl.util.link_tree import LinkTree
-from llnl.util.tty.log import log_output
from spack.stage import Stage, ResourceStage, StageComposite
+from spack.util.crypto import bit_length
from spack.util.environment import dump_environment
from spack.util.executable import ProcessError, which
from spack.version import *
@@ -305,6 +309,7 @@ class Package(object):
Package creators override functions like install() (all of them do this),
clean() (some of them do this), and others to provide custom behavior.
"""
+
#
# These are default values for instance variables.
#
@@ -336,6 +341,9 @@ class Package(object):
"""
sanity_check_is_dir = []
+ """Per-process lock objects for each install prefix."""
+ prefix_locks = {}
+
class __metaclass__(type):
"""Ensure attributes required by Spack directives are present."""
def __init__(cls, name, bases, dict):
@@ -346,6 +354,9 @@ class Package(object):
# this determines how the package should be built.
self.spec = spec
+ # Lock on the prefix shared resource. Will be set in prefix property
+ self._prefix_lock = None
+
# Name of package is the name of its module, without the
# containing module names.
self.name = self.module.__name__
@@ -692,6 +703,29 @@ class Package(object):
return dependents
@property
+ def prefix_lock(self):
+ """Prefix lock is a byte range lock on the nth byte of a file.
+
+ The lock file is ``spack.installed_db.prefix_lock_path`` -- the DB
+ tells us what to call it and it lives alongside the install DB.
+
+ n is the sys.maxsize-bit prefix of the DAG hash. This makes the
+ likelihood of collision very low AND gives us readers-writer
+ lock semantics with just a single lockfile, so no cleanup is
+ required.
+ """
+ if self._prefix_lock is None:
+ prefix = self.spec.prefix
+ if prefix not in Package.prefix_locks:
+ Package.prefix_locks[prefix] = llnl.util.lock.Lock(
+ spack.installed_db.prefix_lock_path,
+ self.spec.dag_hash_bit_prefix(bit_length(sys.maxsize)), 1)
+
+ self._prefix_lock = Package.prefix_locks[prefix]
+
+ return self._prefix_lock
+
+ @property
def prefix(self):
"""Get the prefix into which this package should be installed."""
return self.spec.prefix
@@ -875,6 +909,22 @@ class Package(object):
resource_stage_folder = '-'.join(pieces)
return resource_stage_folder
+ @contextlib.contextmanager
+ def _prefix_read_lock(self):
+ try:
+ self.prefix_lock.acquire_read(60)
+ yield self
+ finally:
+ self.prefix_lock.release_read()
+
+ @contextlib.contextmanager
+ def _prefix_write_lock(self):
+ try:
+ self.prefix_lock.acquire_write(60)
+ yield self
+ finally:
+ self.prefix_lock.release_write()
+
install_phases = set(['configure', 'build', 'install', 'provenance'])
def do_install(self,
@@ -926,14 +976,18 @@ class Package(object):
# Ensure package is not already installed
layout = spack.install_layout
- if 'install' in install_phases and layout.check_installed(self.spec):
- tty.msg("%s is already installed in %s" % (self.name, self.prefix))
- rec = spack.installed_db.get_record(self.spec)
- if (not rec.explicit) and explicit:
- with spack.installed_db.write_transaction():
- rec = spack.installed_db.get_record(self.spec)
- rec.explicit = True
- return
+ with self._prefix_read_lock():
+ if ('install' in install_phases and
+ layout.check_installed(self.spec)):
+
+ tty.msg(
+ "%s is already installed in %s" % (self.name, self.prefix))
+ rec = spack.installed_db.get_record(self.spec)
+ if (not rec.explicit) and explicit:
+ with spack.installed_db.write_transaction():
+ rec = spack.installed_db.get_record(self.spec)
+ rec.explicit = True
+ return
tty.msg("Installing %s" % self.name)
@@ -983,7 +1037,7 @@ class Package(object):
self.build_directory = join_path(self.stage.path, 'spack-build')
self.source_directory = self.stage.source_path
- with self.stage:
+ with contextlib.nested(self.stage, self._prefix_write_lock()):
# Run the pre-install hook in the child process after
# the directory is created.
spack.hooks.pre_install(self)
@@ -1077,8 +1131,9 @@ class Package(object):
wrap=False)
raise
- # note: PARENT of the build process adds the new package to
+ # Parent of the build process adds the new package to
# the database, so that we don't need to re-read from file.
+ # NOTE: add() implicitly acquires a write-lock
spack.installed_db.add(
self.spec, spack.install_layout, explicit=explicit)
@@ -1259,11 +1314,12 @@ class Package(object):
raise PackageStillNeededError(self.spec, dependents)
# Pre-uninstall hook runs first.
- spack.hooks.pre_uninstall(self)
-
- # Uninstalling in Spack only requires removing the prefix.
- self.remove_prefix()
- spack.installed_db.remove(self.spec)
+ with self._prefix_write_lock():
+ spack.hooks.pre_uninstall(self)
+ # Uninstalling in Spack only requires removing the prefix.
+ self.remove_prefix()
+ #
+ spack.installed_db.remove(self.spec)
tty.msg("Successfully uninstalled %s" % self.spec.short_spec)
# Once everything else is done, run post install hooks
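_prefix_read_lock() and _prefix_write_lock() above wrap the prefix lock in
context managers so do_install() and do_uninstall() cannot leak a held lock
when an exception escapes. A standalone sketch of the pattern (the Lock API
matches llnl/util/lock.py above; the 60-second timeout mirrors the patch):

    import contextlib

    @contextlib.contextmanager
    def prefix_write_lock(lock, timeout=60):
        try:
            lock.acquire_write(timeout)
            yield
        finally:
            # Runs whether or not the body raised, so a failed install
            # cannot leave the prefix exclusively locked.
            lock.release_write()

    # with prefix_write_lock(pkg.prefix_lock):
    #     ...build and install into pkg.prefix...
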
diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py
index 2be48b43c1..d7f2379486 100644
--- a/lib/spack/spack/provider_index.py
+++ b/lib/spack/spack/provider_index.py
@@ -232,7 +232,8 @@ class ProviderIndex(object):
spdict[provided_spec] = opdict[provided_spec]
continue
- spdict[provided_spec] += opdict[provided_spec]
+ spdict[provided_spec] = \
+ spdict[provided_spec].union(opdict[provided_spec])
def remove_provider(self, pkg_name):
"""Remove a provider from the ProviderIndex."""
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index ba9cea876d..fc4bf41e34 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -120,6 +120,7 @@ from spack.util.prefix import Prefix
from spack.util.string import *
import spack.util.spack_yaml as syaml
from spack.util.spack_yaml import syaml_dict
+from spack.util.crypto import prefix_bits
from spack.version import *
from spack.provider_index import ProviderIndex
@@ -963,13 +964,10 @@ class Spec(object):
return Prefix(spack.install_layout.path_for_spec(self))
def dag_hash(self, length=None):
- """
- Return a hash of the entire spec DAG, including connectivity.
- """
+ """Return a hash of the entire spec DAG, including connectivity."""
if self._hash:
return self._hash[:length]
else:
- # XXX(deptype): ignore 'build' dependencies here
yaml_text = syaml.dump(
self.to_node_dict(), default_flow_style=True, width=sys.maxint)
sha = hashlib.sha1(yaml_text)
@@ -978,6 +976,10 @@ class Spec(object):
self._hash = b32_hash
return b32_hash
+ def dag_hash_bit_prefix(self, bits):
+ """Get the first <bits> bits of the DAG hash as an integer type."""
+ return base32_prefix_bits(self.dag_hash(), bits)
+
def to_node_dict(self):
d = syaml_dict()
@@ -999,6 +1001,8 @@ class Spec(object):
if self.architecture:
d['arch'] = self.architecture.to_dict()
+ # TODO: restore build dependencies here once we have less picky
+ # TODO: concretization.
deps = self.dependencies_dict(deptype=('link', 'run'))
if deps:
d['dependencies'] = syaml_dict([
@@ -2723,6 +2727,16 @@ def parse_anonymous_spec(spec_like, pkg_name):
return anon_spec
+def base32_prefix_bits(hash_string, bits):
+ """Return the first <bits> bits of a base32 string as an integer."""
+ if bits > len(hash_string) * 5:
+ raise ValueError("Too many bits! Requested %d bit prefix of '%s'."
+ % (bits, hash_string))
+
+ hash_bytes = base64.b32decode(hash_string, casefold=True)
+ return prefix_bits(hash_bytes, bits)
+
+
class SpecError(spack.error.SpackError):
"""Superclass for all errors that occur while constructing specs."""
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index 1b12966bc1..c0dfbba987 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -23,12 +23,15 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
+import sys
import errno
+import hashlib
import shutil
import tempfile
from urlparse import urljoin
import llnl.util.tty as tty
+import llnl.util.lock
from llnl.util.filesystem import *
import spack.util.pattern as pattern
@@ -38,6 +41,7 @@ import spack.config
import spack.fetch_strategy as fs
import spack.error
from spack.version import *
+from spack.util.crypto import prefix_bits, bit_length
STAGE_PREFIX = 'spack-stage-'
@@ -88,8 +92,12 @@ class Stage(object):
similar, and are intended to persist for only one run of spack.
"""
- def __init__(self, url_or_fetch_strategy,
- name=None, mirror_path=None, keep=False, path=None):
+ """Shared dict of all stage locks."""
+ stage_locks = {}
+
+ def __init__(
+ self, url_or_fetch_strategy,
+ name=None, mirror_path=None, keep=False, path=None, lock=True):
"""Create a stage object.
Parameters:
url_or_fetch_strategy
@@ -147,6 +155,20 @@ class Stage(object):
# Flag to decide whether to delete the stage folder on exit or not
self.keep = keep
+ # File lock for the stage directory. We use one file for all
+ # stage locks. See Spec.prefix_lock for details on this approach.
+ self._lock = None
+ if lock:
+ if self.name not in Stage.stage_locks:
+ sha1 = hashlib.sha1(self.name).digest()
+ lock_id = prefix_bits(sha1, bit_length(sys.maxsize))
+ stage_lock_path = join_path(spack.stage_path, '.lock')
+
+ Stage.stage_locks[self.name] = llnl.util.lock.Lock(
+ stage_lock_path, lock_id, 1)
+
+ self._lock = Stage.stage_locks[self.name]
+
def __enter__(self):
"""
Entering a stage context will create the stage directory
@@ -154,6 +176,8 @@ class Stage(object):
Returns:
self
"""
+ if self._lock is not None:
+ self._lock.acquire_write(timeout=60)
self.create()
return self
@@ -175,6 +199,9 @@ class Stage(object):
if exc_type is None and not self.keep:
self.destroy()
+ if self._lock is not None:
+ self._lock.release_write()
+
def _need_to_create_path(self):
"""Makes sure nothing weird has happened since the last time we
looked at path. Returns True if path already exists and is ok.
@@ -416,7 +443,8 @@ class Stage(object):
"""
# Create the top-level stage directory
mkdirp(spack.stage_path)
- remove_dead_links(spack.stage_path)
+ remove_if_dead_link(self.path)
+
# If a tmp_root exists then create a directory there and then link it
# in the stage area, otherwise create the stage directory in self.path
if self._need_to_create_path():
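Stage locks reuse the byte-range scheme: the stage name is hashed with SHA-1
and a sys.maxsize-sized bit prefix of the digest picks the byte to lock in
the shared ``.lock`` file. A sketch of the index computation (the stage name
is hypothetical; int.from_bytes is a Python 3 shortcut equivalent to the
prefix_bits helper in util/crypto.py below):

    import sys
    import hashlib

    def stage_lock_id(name):
        digest = hashlib.sha1(name.encode()).digest()  # 20 bytes = 160 bits
        bits = sys.maxsize.bit_length()                # 63 on 64-bit builds
        # Keep only the leading `bits` bits of the digest.
        return int.from_bytes(digest, 'big') >> (160 - bits)

    print(stage_lock_id('spack-stage-mpich-3.2'))
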
diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py
index 32cbe13ce1..4f62cd85e9 100644
--- a/lib/spack/spack/test/lock.py
+++ b/lib/spack/spack/test/lock.py
@@ -25,6 +25,7 @@
"""
These tests ensure that our lock works correctly.
"""
+import os
import shutil
import tempfile
import unittest
@@ -44,7 +45,6 @@ class LockTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.lock_path = join_path(self.tempdir, 'lockfile')
- touch(self.lock_path)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@@ -64,98 +64,185 @@ class LockTest(unittest.TestCase):
#
# Process snippets below can be composed into tests.
#
- def acquire_write(self, barrier):
- lock = Lock(self.lock_path)
- lock.acquire_write() # grab exclusive lock
- barrier.wait()
- barrier.wait() # hold the lock until exception raises in other procs.
-
- def acquire_read(self, barrier):
- lock = Lock(self.lock_path)
- lock.acquire_read() # grab shared lock
- barrier.wait()
- barrier.wait() # hold the lock until exception raises in other procs.
-
- def timeout_write(self, barrier):
- lock = Lock(self.lock_path)
- barrier.wait() # wait for lock acquire in first process
- self.assertRaises(LockError, lock.acquire_write, 0.1)
- barrier.wait()
+ def acquire_write(self, start=0, length=0):
+ def fn(barrier):
+ lock = Lock(self.lock_path, start, length)
+ lock.acquire_write() # grab exclusive lock
+ barrier.wait()
+ barrier.wait() # hold the lock until timeout in other procs.
+ return fn
+
+ def acquire_read(self, start=0, length=0):
+ def fn(barrier):
+ lock = Lock(self.lock_path, start, length)
+ lock.acquire_read() # grab shared lock
+ barrier.wait()
+ barrier.wait() # hold the lock until timeout in other procs.
+ return fn
+
+ def timeout_write(self, start=0, length=0):
+ def fn(barrier):
+ lock = Lock(self.lock_path, start, length)
+ barrier.wait() # wait for lock acquire in first process
+ self.assertRaises(LockError, lock.acquire_write, 0.1)
+ barrier.wait()
+ return fn
- def timeout_read(self, barrier):
- lock = Lock(self.lock_path)
- barrier.wait() # wait for lock acquire in first process
- self.assertRaises(LockError, lock.acquire_read, 0.1)
- barrier.wait()
+ def timeout_read(self, start=0, length=0):
+ def fn(barrier):
+ lock = Lock(self.lock_path, start, length)
+ barrier.wait() # wait for lock acquire in first process
+ self.assertRaises(LockError, lock.acquire_read, 0.1)
+ barrier.wait()
+ return fn
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
#
def test_write_lock_timeout_on_write(self):
- self.multiproc_test(self.acquire_write, self.timeout_write)
+ self.multiproc_test(self.acquire_write(), self.timeout_write())
def test_write_lock_timeout_on_write_2(self):
self.multiproc_test(
- self.acquire_write, self.timeout_write, self.timeout_write)
+ self.acquire_write(), self.timeout_write(), self.timeout_write())
def test_write_lock_timeout_on_write_3(self):
self.multiproc_test(
- self.acquire_write, self.timeout_write, self.timeout_write,
- self.timeout_write)
+ self.acquire_write(), self.timeout_write(), self.timeout_write(),
+ self.timeout_write())
+
+ def test_write_lock_timeout_on_write_ranges(self):
+ self.multiproc_test(
+ self.acquire_write(0, 1), self.timeout_write(0, 1))
+
+ def test_write_lock_timeout_on_write_ranges_2(self):
+ self.multiproc_test(
+ self.acquire_write(0, 64), self.acquire_write(65, 1),
+ self.timeout_write(0, 1), self.timeout_write(63, 1))
+
+ def test_write_lock_timeout_on_write_ranges_3(self):
+ self.multiproc_test(
+ self.acquire_write(0, 1), self.acquire_write(1, 1),
+ self.timeout_write(), self.timeout_write(), self.timeout_write())
+
+ def test_write_lock_timeout_on_write_ranges_4(self):
+ self.multiproc_test(
+ self.acquire_write(0, 1), self.acquire_write(1, 1),
+ self.acquire_write(2, 456), self.acquire_write(500, 64),
+ self.timeout_write(), self.timeout_write(), self.timeout_write())
#
# Test that shared locks on other processes time out when an
# exclusive lock is held.
#
def test_read_lock_timeout_on_write(self):
- self.multiproc_test(self.acquire_write, self.timeout_read)
+ self.multiproc_test(self.acquire_write(), self.timeout_read())
def test_read_lock_timeout_on_write_2(self):
self.multiproc_test(
- self.acquire_write, self.timeout_read, self.timeout_read)
+ self.acquire_write(), self.timeout_read(), self.timeout_read())
def test_read_lock_timeout_on_write_3(self):
self.multiproc_test(
- self.acquire_write, self.timeout_read, self.timeout_read,
- self.timeout_read)
+ self.acquire_write(), self.timeout_read(), self.timeout_read(),
+ self.timeout_read())
+
+ def test_read_lock_timeout_on_write_ranges(self):
+ """small write lock, read whole file."""
+ self.multiproc_test(self.acquire_write(0, 1), self.timeout_read())
+
+ def test_read_lock_timeout_on_write_ranges_2(self):
+ """small write lock, small read lock"""
+ self.multiproc_test(self.acquire_write(0, 1), self.timeout_read(0, 1))
+
+ def test_read_lock_timeout_on_write_ranges_3(self):
+ """two write locks, overlapping read locks"""
+ self.multiproc_test(
+ self.acquire_write(0, 1), self.acquire_write(64, 128),
+ self.timeout_read(0, 1), self.timeout_read(128, 256))
#
# Test that exclusive locks time out when shared locks are held.
#
def test_write_lock_timeout_on_read(self):
- self.multiproc_test(self.acquire_read, self.timeout_write)
+ self.multiproc_test(self.acquire_read(), self.timeout_write())
def test_write_lock_timeout_on_read_2(self):
self.multiproc_test(
- self.acquire_read, self.timeout_write, self.timeout_write)
+ self.acquire_read(), self.timeout_write(), self.timeout_write())
def test_write_lock_timeout_on_read_3(self):
self.multiproc_test(
- self.acquire_read, self.timeout_write, self.timeout_write,
- self.timeout_write)
+ self.acquire_read(), self.timeout_write(), self.timeout_write(),
+ self.timeout_write())
+
+ def test_write_lock_timeout_on_read_ranges(self):
+ self.multiproc_test(self.acquire_read(0, 1), self.timeout_write())
+
+ def test_write_lock_timeout_on_read_ranges_2(self):
+ self.multiproc_test(self.acquire_read(0, 1), self.timeout_write(0, 1))
+
+ def test_write_lock_timeout_on_read_ranges_3(self):
+ self.multiproc_test(
+ self.acquire_read(0, 1), self.acquire_read(10, 1),
+ self.timeout_write(0, 1), self.timeout_write(10, 1))
+
+ def test_write_lock_timeout_on_read_ranges_4(self):
+ self.multiproc_test(
+ self.acquire_read(0, 64),
+ self.timeout_write(10, 1), self.timeout_write(32, 1))
+
+ def test_write_lock_timeout_on_read_ranges_5(self):
+ self.multiproc_test(
+ self.acquire_read(64, 128),
+ self.timeout_write(65, 1), self.timeout_write(127, 1),
+ self.timeout_write(90, 10))
#
# Test that exclusive locks time out while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(self):
self.multiproc_test(
- self.acquire_read, self.acquire_read, self.timeout_write)
+ self.acquire_read(), self.acquire_read(), self.timeout_write())
def test_write_lock_timeout_with_multiple_readers_2_2(self):
self.multiproc_test(
- self.acquire_read, self.acquire_read, self.timeout_write,
- self.timeout_write)
+ self.acquire_read(), self.acquire_read(), self.timeout_write(),
+ self.timeout_write())
def test_write_lock_timeout_with_multiple_readers_3_1(self):
self.multiproc_test(
- self.acquire_read, self.acquire_read, self.acquire_read,
- self.timeout_write)
+ self.acquire_read(), self.acquire_read(), self.acquire_read(),
+ self.timeout_write())
def test_write_lock_timeout_with_multiple_readers_3_2(self):
self.multiproc_test(
- self.acquire_read, self.acquire_read, self.acquire_read,
- self.timeout_write, self.timeout_write)
+ self.acquire_read(), self.acquire_read(), self.acquire_read(),
+ self.timeout_write(), self.timeout_write())
+
+ def test_write_lock_timeout_with_multiple_readers_2_1_ranges(self):
+ self.multiproc_test(
+ self.acquire_read(0, 10), self.acquire_read(5, 10),
+ self.timeout_write(5, 5))
+
+ def test_write_lock_timeout_with_multiple_readers_2_3_ranges(self):
+ self.multiproc_test(
+ self.acquire_read(0, 10), self.acquire_read(5, 15),
+ self.timeout_write(0, 1), self.timeout_write(11, 3),
+ self.timeout_write(7, 1))
+
+ def test_write_lock_timeout_with_multiple_readers_3_1_ranges(self):
+ self.multiproc_test(
+ self.acquire_read(0, 5), self.acquire_read(5, 5),
+ self.acquire_read(10, 5),
+ self.timeout_write(0, 15))
+
+ def test_write_lock_timeout_with_multiple_readers_3_2_ranges(self):
+ self.multiproc_test(
+ self.acquire_read(0, 5), self.acquire_read(5, 5),
+ self.acquire_read(10, 5),
+ self.timeout_write(3, 10), self.timeout_write(5, 1))
#
# Test that read can be upgraded to write.
@@ -172,19 +259,42 @@ class LockTest(unittest.TestCase):
lock.acquire_read()
self.assertTrue(lock._reads == 1)
self.assertTrue(lock._writes == 0)
+ self.assertTrue(lock._file.mode == 'r+')
lock.acquire_write()
self.assertTrue(lock._reads == 1)
self.assertTrue(lock._writes == 1)
+ self.assertTrue(lock._file.mode == 'r+')
lock.release_write()
self.assertTrue(lock._reads == 1)
self.assertTrue(lock._writes == 0)
+ self.assertTrue(lock._file.mode == 'r+')
lock.release_read()
self.assertTrue(lock._reads == 0)
self.assertTrue(lock._writes == 0)
- self.assertTrue(lock._fd is None)
+ self.assertTrue(lock._file is None)
+
+ #
+ # Test that read-only file can be read-locked but not write-locked.
+ #
+ def test_upgrade_read_to_write_fails_with_readonly_file(self):
+ # ensure lock file exists the first time, so we open it read-only
+ # to begin with.
+ touch(self.lock_path)
+ os.chmod(self.lock_path, 0444)
+
+ lock = Lock(self.lock_path)
+ self.assertTrue(lock._reads == 0)
+ self.assertTrue(lock._writes == 0)
+
+ lock.acquire_read()
+ self.assertTrue(lock._reads == 1)
+ self.assertTrue(lock._writes == 0)
+ self.assertTrue(lock._file.mode == 'r')
+
+ self.assertRaises(LockError, lock.acquire_write)
#
# Longer test case that ensures locks are reusable. Ordering is
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index 40cdb02966..0bc63bcf0f 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -523,3 +523,37 @@ class SpecDagTest(MockPackagesTest):
level = descend_and_check(dag.to_node_dict())
# level just makes sure we are doing something here
self.assertTrue(level >= 5)
+
+ def test_hash_bits(self):
+ """Ensure getting first n bits of a base32-encoded DAG hash works."""
+
+ # RFC 4648 base32 decode table
+ b32 = dict((j, i) for i, j in enumerate('abcdefghijklmnopqrstuvwxyz'))
+ b32.update(dict((j, i) for i, j in enumerate('234567', 26)))
+
+ # some package hashes
+ tests = [
+ '35orsd4cenv743hg4i5vxha2lzayycby',
+ '6kfqtj7dap3773rxog6kkmoweix5gpwo',
+ 'e6h6ff3uvmjbq3azik2ckr6ckwm3depv',
+ 'snz2juf4ij7sv77cq3vs467q6acftmur',
+ '4eg47oedi5bbkhpoxw26v3oe6vamkfd7',
+ 'vrwabwj6umeb5vjw6flx2rnft3j457rw']
+
+ for test_hash in tests:
+ # string containing raw bits of hash ('1' and '0')
+ expected = ''.join([format(b32[c], '#07b').replace('0b', '')
+ for c in test_hash])
+
+ for bits in (1, 2, 3, 4, 7, 8, 9, 16, 64, 117, 128, 160):
+ actual_int = spack.spec.base32_prefix_bits(test_hash, bits)
+ fmt = "#0%sb" % (bits + 2)
+ actual = format(actual_int, fmt).replace('0b', '')
+
+ self.assertEqual(expected[:bits], actual)
+
+ self.assertRaises(
+ ValueError, spack.spec.base32_prefix_bits, test_hash, 161)
+
+ self.assertRaises(
+ ValueError, spack.spec.base32_prefix_bits, test_hash, 256)
diff --git a/lib/spack/spack/util/crypto.py b/lib/spack/spack/util/crypto.py
index 22777fdb68..d074716022 100644
--- a/lib/spack/spack/util/crypto.py
+++ b/lib/spack/spack/util/crypto.py
@@ -100,3 +100,24 @@ class Checker(object):
self.sum = checksum(
self.hash_fun, filename, block_size=self.block_size)
return self.sum == self.hexdigest
+
+
+def prefix_bits(byte_array, bits):
+ """Return the first <bits> bits of a byte array as an integer."""
+ result = 0
+ n = 0
+ for i, b in enumerate(byte_array):
+ n += 8
+ result = (result << 8) | ord(b)
+ if n >= bits:
+ break
+
+ result >>= (n - bits)
+ return result
+
+
+def bit_length(num):
+ """Number of bits required to represent an integer in binary."""
+ s = bin(num)
+ s = s.lstrip('-0b')
+ return len(s)
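bit_length() above counts binary digits by stripping bin()'s sign and '0b'
prefix; note that CPython ints have carried an equivalent int.bit_length()
method since 2.7. A quick check of the helper:

    import sys

    def bit_length(num):
        # bin(9) == '0b1001'; lstrip removes the '-', '0', and 'b'
        # characters, leaving one character per significant bit.
        return len(bin(num).lstrip('-0b'))

    print(bit_length(sys.maxsize))                    # 63 on 64-bit builds
    print(bit_length(sys.maxsize) == sys.maxsize.bit_length())  # True
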
diff --git a/var/spack/repos/builtin/packages/boost/package.py b/var/spack/repos/builtin/packages/boost/package.py
index 5b50b44038..90fe28fc2c 100644
--- a/var/spack/repos/builtin/packages/boost/package.py
+++ b/var/spack/repos/builtin/packages/boost/package.py
@@ -41,8 +41,8 @@ class Boost(Package):
list_url = "http://sourceforge.net/projects/boost/files/boost/"
list_depth = 2
- version('1.62.0', '5fb94629535c19e48703bdb2b2e9490f') # TODO: fix build
- version('1.61.0', '6095876341956f65f9d35939ccea1a9f', preferred=True)
+ version('1.62.0', '5fb94629535c19e48703bdb2b2e9490f')
+ version('1.61.0', '6095876341956f65f9d35939ccea1a9f')
version('1.60.0', '65a840e1a0b13a558ff19eeb2c4f0cbe')
version('1.59.0', '6aa9a5c6a4ca1016edd0ed1178e3cb87')
version('1.58.0', 'b8839650e61e9c1c0a89f371dd475546')
@@ -110,12 +110,14 @@ class Boost(Package):
description="Additionally build shared libraries")
variant('multithreaded', default=True,
description="Build multi-threaded versions of libraries")
- variant('singlethreaded', default=True,
+ variant('singlethreaded', default=False,
description="Build single-threaded versions of libraries")
variant('icu', default=False,
description="Build with Unicode and ICU suport")
variant('graph', default=False,
description="Build the Boost Graph library")
+ variant('taggedlayout', default=False,
+ description="Augment library names with build options")
depends_on('icu4c', when='+icu')
depends_on('python', when='+python')
@@ -208,12 +210,20 @@ class Boost(Package):
if '+singlethreaded' in spec:
threadingOpts.append('single')
if not threadingOpts:
- raise RuntimeError("""At least one of {singlethreaded,
- multithreaded} must be enabled""")
+ raise RuntimeError("At least one of {singlethreaded, " +
+ "multithreaded} must be enabled")
+
+ if '+taggedlayout' in spec:
+ layout = 'tagged'
+ else:
+ if len(threadingOpts) > 1:
+ raise RuntimeError("Cannot build both single and " +
+ "multi-threaded targets with system layout")
+ layout = 'system'
options.extend([
'link=%s' % ','.join(linkTypes),
- '--layout=tagged'
+ '--layout=%s' % layout
])
if not spec.satisfies('%intel'):
@@ -223,6 +233,12 @@ class Boost(Package):
return threadingOpts
+ def add_buildopt_symlinks(self, prefix):
+ with working_dir(prefix.lib):
+ for lib in os.listdir(os.curdir):
+                name, remainder = lib.split('.', 1)
+                symlink(lib, '%s-mt.%s' % (name, remainder))
+
def install(self, spec, prefix):
# On Darwin, Boost expects the Darwin libtool. However, one of the
# dependencies may have pulled in Spack's GNU libtool, and these two
@@ -281,11 +297,16 @@ class Boost(Package):
threadingOpts = self.determine_b2_options(spec, b2_options)
+ b2('--clean')
+
        # In theory it could be done in one call, but it fails on
        # Boost.MPI if the threading options are not separated.
for threadingOpt in threadingOpts:
b2('install', 'threading=%s' % threadingOpt, *b2_options)
+ if '+multithreaded' in spec and '~taggedlayout' in spec:
+ self.add_buildopt_symlinks(prefix)
+
# The shared libraries are not installed correctly
# on Darwin; correct this
if (sys.platform == 'darwin') and ('+shared' in spec):
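The Cantera hunk below is a consequence of this change: with the default system layout Boost stops emitting '-mt' suffixed library names, so add_buildopt_symlinks() recreates them for dependents that still link against the tagged names. A small illustration of the mapping (file names hypothetical):

    # for each library in <prefix>/lib after a +multithreaded ~taggedlayout
    # build, a tagged-style alias is symlinked next to the real file:
    for lib in ['libboost_thread.so', 'libboost_system.a']:
        name, remainder = lib.split('.', 1)
        print('%s-mt.%s -> %s' % (name, remainder, lib))
    # prints:
    #   libboost_thread-mt.so -> libboost_thread.so
    #   libboost_system-mt.a -> libboost_system.a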
diff --git a/var/spack/repos/builtin/packages/cantera/package.py b/var/spack/repos/builtin/packages/cantera/package.py
index 9a9bc5bdbd..66e1197669 100644
--- a/var/spack/repos/builtin/packages/cantera/package.py
+++ b/var/spack/repos/builtin/packages/cantera/package.py
@@ -96,8 +96,7 @@ class Cantera(Package):
options.extend([
'build_thread_safe=yes',
'boost_inc_dir={0}'.format(spec['boost'].prefix.include),
- 'boost_lib_dir={0}'.format(spec['boost'].prefix.lib),
- 'boost_thread_lib=boost_thread-mt,boost_system-mt'
+ 'boost_lib_dir={0}'.format(spec['boost'].prefix.lib)
])
else:
options.append('build_thread_safe=no')
diff --git a/var/spack/repos/builtin/packages/charm/package.py b/var/spack/repos/builtin/packages/charm/package.py
index aa247fcc02..1ec53c2138 100644
--- a/var/spack/repos/builtin/packages/charm/package.py
+++ b/var/spack/repos/builtin/packages/charm/package.py
@@ -48,6 +48,8 @@ class Charm(Package):
# Support OpenMPI; see
# <https://charm.cs.illinois.edu/redmine/issues/1206>
patch("mpi.patch")
+ # Ignore compiler warnings while configuring
+ patch("strictpass.patch")
# Communication mechanisms (choose exactly one)
# TODO: Support Blue Gene/Q PAMI, Cray GNI, Cray shmem, CUDA
diff --git a/var/spack/repos/builtin/packages/charm/strictpass.patch b/var/spack/repos/builtin/packages/charm/strictpass.patch
new file mode 100644
index 0000000000..44aa4fbd38
--- /dev/null
+++ b/var/spack/repos/builtin/packages/charm/strictpass.patch
@@ -0,0 +1,16 @@
+--- old/src/scripts/configure
++++ new/src/scripts/configure
+@@ -2146,13 +2146,6 @@
+ test_result $? "$1" "$2" "$3"
+ strictpass=$pass
+ strictfail=$fail
+- if test $pass -eq 1
+- then
+- if cat out | grep -i "warn" > /dev/null 2>&1
+- then
+- strictpass="0" && strictfail="1"
+- fi
+- fi
+ cat out >> $charmout
+ /bin/rm -f out
+ }
diff --git a/var/spack/repos/builtin/packages/hdf/package.py b/var/spack/repos/builtin/packages/hdf/package.py
index 37abb611d4..2554bd0f96 100644
--- a/var/spack/repos/builtin/packages/hdf/package.py
+++ b/var/spack/repos/builtin/packages/hdf/package.py
@@ -39,9 +39,9 @@ class Hdf(Package):
variant('szip', default=False, description="Enable szip support")
- depends_on('jpeg')
+ depends_on('jpeg@6b:')
depends_on('szip', when='+szip')
- depends_on('zlib')
+ depends_on('zlib@1.1.4:')
depends_on('bison', type='build')
depends_on('flex', type='build')
@@ -49,9 +49,9 @@ class Hdf(Package):
def install(self, spec, prefix):
config_args = [
'CFLAGS=-fPIC',
- '--prefix=%s' % prefix,
- '--with-jpeg=%s' % spec['jpeg'].prefix,
- '--with-zlib=%s' % spec['zlib'].prefix,
+ '--prefix={0}'.format(prefix),
+ '--with-jpeg={0}'.format(spec['jpeg'].prefix),
+ '--with-zlib={0}'.format(spec['zlib'].prefix),
'--disable-netcdf', # must be disabled to build NetCDF with HDF4
'--enable-fortran',
'--disable-shared', # fortran and shared libs are not compatible
@@ -59,12 +59,17 @@ class Hdf(Package):
'--enable-production'
]
- # SZip support
+ # Szip support
if '+szip' in spec:
- config_args.append('--with-szlib=%s' % spec['szip'].prefix)
+ config_args.append('--with-szlib={0}'.format(spec['szip'].prefix))
+ else:
+ config_args.append('--without-szlib')
configure(*config_args)
make()
- make('check')
+
+ if self.run_tests:
+ make('check')
+
make('install')
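The self.run_tests gate used here (and again in the hdf5 and netcdf hunks below) is the general idiom for making a package's test suite opt-in at install time rather than mandatory. A minimal sketch of the pattern for package authors (package details hypothetical):

    def install(self, spec, prefix):
        configure('--prefix={0}'.format(prefix))
        make()

        # run_tests is False by default and is only set when the user
        # explicitly asks for tests at install time
        if self.run_tests:
            make('check')

        make('install')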
diff --git a/var/spack/repos/builtin/packages/hdf5/package.py b/var/spack/repos/builtin/packages/hdf5/package.py
index c0635cfbfc..f2fe48feb3 100644
--- a/var/spack/repos/builtin/packages/hdf5/package.py
+++ b/var/spack/repos/builtin/packages/hdf5/package.py
@@ -60,7 +60,7 @@ class Hdf5(Package):
depends_on("mpi", when='+mpi')
depends_on("szip", when='+szip')
- depends_on("zlib")
+ depends_on("zlib@1.1.2:")
def validate(self, spec):
"""
@@ -144,6 +144,10 @@ class Hdf5(Package):
"--with-zlib=%s" % spec['zlib'].prefix,
*extra_args)
make()
+
+ if self.run_tests:
+ make("check")
+
make("install")
self.check_install(spec)
diff --git a/var/spack/repos/builtin/packages/hpx5/package.py b/var/spack/repos/builtin/packages/hpx5/package.py
index 686e959719..fe75c256f8 100644
--- a/var/spack/repos/builtin/packages/hpx5/package.py
+++ b/var/spack/repos/builtin/packages/hpx5/package.py
@@ -37,15 +37,16 @@ class Hpx5(Package):
applications enabling scientists to write code that performs and
scales better than contemporary runtimes."""
homepage = "http://hpx.crest.iu.edu"
- url = "http://hpx.crest.iu.edu/release/hpx-2.0.0.tar.gz"
+ url = "http://hpx.crest.iu.edu/release/hpx-3.1.0.tar.gz"
+ version('3.1.0', '9e90b8ac46788c009079632828c77628')
version('2.0.0', '3d2ff3aab6c46481f9ec65c5b2bfe7a6')
version('1.3.0', '2260ecc7f850e71a4d365a43017d8cee')
version('1.2.0', '4972005f85566af4afe8b71afbf1480f')
version('1.1.0', '646afb460ecb7e0eea713a634933ce4f')
version('1.0.0', '8020822adf6090bd59ed7fe465f6c6cb')
- variant('debug', default=False, description='Build a debug version of HPX-5')
+ variant('debug', default=False, description='Build debug version of HPX-5')
variant('photon', default=False, description='Enable Photon support')
variant('mpi', default=False, description='Enable MPI support')
diff --git a/var/spack/repos/builtin/packages/libcerf/package.py b/var/spack/repos/builtin/packages/libcerf/package.py
index 7fb47f8dcd..1964f03b95 100644
--- a/var/spack/repos/builtin/packages/libcerf/package.py
+++ b/var/spack/repos/builtin/packages/libcerf/package.py
@@ -38,6 +38,12 @@ class Libcerf(Package):
version('1.3', 'b3504c467204df71e62aeccf73a25612')
def install(self, spec, prefix):
- configure('--prefix=%s' % prefix)
+ options = []
+ # Clang reports unused functions as errors, see
+ # http://clang.debian.net/status.php?version=3.8.1&key=UNUSED_FUNCTION
+ if spec.satisfies('%clang'):
+ options.append('CFLAGS=-Wno-unused-function')
+
+ configure('--prefix=%s' % prefix, *options)
make()
make("install")
diff --git a/var/spack/repos/builtin/packages/libjpeg-turbo/package.py b/var/spack/repos/builtin/packages/libjpeg-turbo/package.py
index 6252a88542..3fe159d7b9 100644
--- a/var/spack/repos/builtin/packages/libjpeg-turbo/package.py
+++ b/var/spack/repos/builtin/packages/libjpeg-turbo/package.py
@@ -26,20 +26,25 @@ from spack import *
class LibjpegTurbo(Package):
- """libjpeg-turbo is a fork of the original IJG libjpeg which uses
- SIMD to accelerate baseline JPEG compression and
- decompression. libjpeg is a library that implements JPEG image
- encoding, decoding and transcoding."""
+ """libjpeg-turbo is a fork of the original IJG libjpeg which uses SIMD to
+ accelerate baseline JPEG compression and decompression. libjpeg is a
+ library that implements JPEG image encoding, decoding and
+ transcoding."""
+
homepage = "http://libjpeg-turbo.virtualgl.org"
url = "http://downloads.sourceforge.net/libjpeg-turbo/libjpeg-turbo-1.3.1.tar.gz"
+ version('1.5.0', '3fc5d9b6a8bce96161659ae7a9939257')
version('1.3.1', '2c3a68129dac443a72815ff5bb374b05')
- # Can use either of these.
- depends_on("yasm", type='build')
+    # Either assembler can be used, but the package currently only uses
+    # nasm; to use yasm instead, the NASM environment variable must be
+    # set to point at it.
+    # TODO: implement selection between the two supported assemblers.
+ # depends_on("yasm", type='build')
depends_on("nasm", type='build')
def install(self, spec, prefix):
- configure("--prefix=%s" % prefix)
+ configure("--prefix=" + prefix)
make()
make("install")
diff --git a/var/spack/repos/builtin/packages/mfem/package.py b/var/spack/repos/builtin/packages/mfem/package.py
index cbf9059f65..9aa4ab8cf0 100644
--- a/var/spack/repos/builtin/packages/mfem/package.py
+++ b/var/spack/repos/builtin/packages/mfem/package.py
@@ -23,8 +23,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
-import glob
-import string
class Mfem(Package):
@@ -35,11 +33,11 @@ class Mfem(Package):
version('3.2',
'2938c3deed4ec4f7fd5b5f5cfe656845282e86e2dcd477d292390058b7b94340',
- url='http://goo.gl/Y9T75B', expand=False, preferred=True)
+ url='http://goo.gl/Y9T75B', preferred=True, extension='.tar.gz')
version('3.1',
'841ea5cf58de6fae4de0f553b0e01ebaab9cd9c67fa821e8a715666ecf18fc57',
- url='http://goo.gl/xrScXn', expand=False)
+ url='http://goo.gl/xrScXn', extension='.tar.gz')
# version('3.1', git='https://github.com/mfem/mfem.git',
# commit='dbae60fe32e071989b52efaaf59d7d0eb2a3b574')
@@ -48,8 +46,11 @@ class Mfem(Package):
variant('suite-sparse', default=False,
description='Activate support for SuiteSparse')
variant('mpi', default=False, description='Activate support for MPI')
+ variant('superlu-dist', default=False,
+            description='Activate support for SuperLU_DIST')
variant('lapack', default=False, description='Activate support for LAPACK')
variant('debug', default=False, description='Build debug version')
+ variant('netcdf', default=False, description='Activate NetCDF support')
depends_on('blas', when='+lapack')
depends_on('lapack', when='+lapack')
@@ -68,6 +69,12 @@ class Mfem(Package):
depends_on('metis@5:', when='+suite-sparse ^suite-sparse@4.5:')
depends_on('cmake', when='^metis@5:', type='build')
+ depends_on('superlu-dist', when='@3.2: +superlu-dist')
+
+ depends_on('netcdf', when='@3.2: +netcdf')
+ depends_on('zlib', when='@3.2: +netcdf')
+ depends_on('hdf5', when='@3.2: +netcdf')
+
def check_variants(self, spec):
if '+mpi' in spec and ('+hypre' not in spec or '+metis' not in spec):
raise InstallError('mfem+mpi must be built with +hypre ' +
@@ -81,6 +88,12 @@ class Mfem(Package):
raise InstallError('To work around CMake bug with clang, must ' +
'build mfem with mfem[+variants] %clang ' +
'^cmake %gcc to force CMake to build with gcc')
+ if '@:3.1' in spec and '+superlu-dist' in spec:
+ raise InstallError('MFEM does not support SuperLU_DIST for ' +
+ 'versions 3.1 and earlier')
+ if '@:3.1' in spec and '+netcdf' in spec:
+            raise InstallError('MFEM does not support NetCDF for ' +
+                               'versions 3.1 and earlier')
return
def install(self, spec, prefix):
@@ -102,7 +115,14 @@ class Mfem(Package):
'HYPRE_LIB=-L%s' % spec['hypre'].prefix.lib +
' -lHYPRE'])
- if '+metis' in spec:
+ if 'parmetis' in spec:
+ metis_lib = '-L%s -lparmetis -lmetis' % spec['parmetis'].prefix.lib
+ metis_str = 'MFEM_USE_METIS_5=YES'
+ options.extend([metis_str,
+ 'METIS_DIR=%s' % spec['parmetis'].prefix,
+ 'METIS_OPT=-I%s' % spec['parmetis'].prefix.include,
+ 'METIS_LIB=%s' % metis_lib])
+ elif 'metis' in spec:
metis_lib = '-L%s -lmetis' % spec['metis'].prefix.lib
if spec['metis'].satisfies('@5:'):
metis_str = 'MFEM_USE_METIS_5=YES'
@@ -114,14 +134,27 @@ class Mfem(Package):
'METIS_OPT=-I%s' % spec['metis'].prefix.include,
'METIS_LIB=%s' % metis_lib])
- if '+mpi' in spec:
+ if 'mpi' in spec:
options.extend(['MFEM_USE_MPI=YES'])
+ if '+superlu-dist' in spec:
+ superlu_lib = '-L%s' % spec['superlu-dist'].prefix.lib
+ superlu_lib += ' -lsuperlu_dist'
+ sl_inc = 'SUPERLU_OPT=-I%s' % spec['superlu-dist'].prefix.include
+ options.extend(['MFEM_USE_SUPERLU=YES',
+ 'SUPERLU_DIR=%s' % spec['superlu-dist'].prefix,
+ sl_inc,
+ 'SUPERLU_LIB=%s' % superlu_lib])
+
if '+suite-sparse' in spec:
ssp = spec['suite-sparse'].prefix
ss_lib = '-L%s' % ssp.lib
- ss_lib += (' -lumfpack -lcholmod -lcolamd -lamd -lcamd' +
- ' -lccolamd -lsuitesparseconfig')
+
+ if '@3.2:' in spec:
+ ss_lib += ' -lklu -lbtf'
+
+ ss_lib += (' -lumfpack -lcholmod -lcolamd' +
+ ' -lamd -lcamd -lccolamd -lsuitesparseconfig')
no_librt_archs = ['darwin-i686', 'darwin-x86_64']
no_rt = any(map(lambda a: spec.satisfies('=' + a),
@@ -135,16 +168,23 @@ class Mfem(Package):
'SUITESPARSE_OPT=-I%s' % ssp.include,
'SUITESPARSE_LIB=%s' % ss_lib])
+ if '+netcdf' in spec:
+ np = spec['netcdf'].prefix
+ zp = spec['zlib'].prefix
+ h5p = spec['hdf5'].prefix
+ nlib = '-L%s -lnetcdf ' % np.lib
+ nlib += '-L%s -lhdf5_hl -lhdf5 ' % h5p.lib
+ nlib += '-L%s -lz' % zp.lib
+ options.extend(['MFEM_USE_NETCDF=YES',
+ 'NETCDF_DIR=%s' % np,
+ 'HDF5_DIR=%s' % h5p,
+ 'ZLIB_DIR=%s' % zp,
+ 'NETCDF_OPT=-I%s' % np.include,
+ 'NETCDF_LIB=%s' % nlib])
+
if '+debug' in spec:
options.extend(['MFEM_DEBUG=YES'])
- # Dirty hack to cope with URL redirect
- tgz_file = string.split(self.url, '/')[-1]
- tar = which('tar')
- tar('xzvf', tgz_file)
- cd(glob.glob('mfem*')[0])
- # End dirty hack to cope with URL redirect
-
make('config', *options)
make('all')
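For reference, the options list assembled above is passed to make as plain VAR=VALUE assignments. Roughly, for an mfem+mpi+superlu-dist spec with ParMETIS in the DAG, the config step reduces to something like this (paths hypothetical, list abridged):

    options = [
        'MFEM_USE_MPI=YES',
        'MFEM_USE_METIS_5=YES',
        'METIS_DIR=/spack/opt/parmetis',
        'MFEM_USE_SUPERLU=YES',
        'SUPERLU_DIR=/spack/opt/superlu-dist',
    ]
    make('config', *options)  # i.e. `make config MFEM_USE_MPI=YES ...`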
diff --git a/var/spack/repos/builtin/packages/mkl/package.py b/var/spack/repos/builtin/packages/mkl/package.py
index 8a31858bf5..4a9a8785f7 100644
--- a/var/spack/repos/builtin/packages/mkl/package.py
+++ b/var/spack/repos/builtin/packages/mkl/package.py
@@ -66,13 +66,6 @@ class Mkl(IntelInstaller):
for f in os.listdir(mkl_dir):
os.symlink(os.path.join(mkl_dir, f), os.path.join(self.prefix, f))
- # Unfortunately MKL libs are natively distrubted in prefix/lib/intel64.
- # To make MKL play nice with Spack, symlink all files to prefix/lib:
- mkl_lib_dir = os.path.join(prefix, "lib", "intel64")
- for f in os.listdir(mkl_lib_dir):
- os.symlink(os.path.join(mkl_lib_dir, f),
- os.path.join(self.prefix, "lib", f))
-
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
# set up MKLROOT for everyone using MKL package
spack_env.set('MKLROOT', self.prefix)
diff --git a/var/spack/repos/builtin/packages/mpich/package.py b/var/spack/repos/builtin/packages/mpich/package.py
index 8f300d4ec7..bb034f9fc7 100644
--- a/var/spack/repos/builtin/packages/mpich/package.py
+++ b/var/spack/repos/builtin/packages/mpich/package.py
@@ -27,9 +27,10 @@ from spack import *
class Mpich(Package):
"""MPICH is a high performance and widely portable implementation of
- the Message Passing Interface (MPI) standard."""
+ the Message Passing Interface (MPI) standard."""
+
homepage = "http://www.mpich.org"
- url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
+ url = "http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
list_url = "http://www.mpich.org/static/downloads/"
list_depth = 2
@@ -41,10 +42,10 @@ class Mpich(Package):
version('3.1', '5643dd176499bfb7d25079aaff25f2ec')
version('3.0.4', '9c5d5d4fe1e17dd12153f40bc5b6dbc0')
- variant('verbs', default=False,
- description='Build support for OpenFabrics verbs.')
- variant('pmi', default=True, description='Build with PMI support')
- variant('hydra', default=True, description='Build the hydra process manager')
+ variant('hydra', default=True, description='Build the hydra process manager')
+ variant('pmi', default=True, description='Build with PMI support')
+ variant('romio', default=True, description='Enable ROMIO MPI I/O implementation')
+ variant('verbs', default=False, description='Build support for OpenFabrics verbs.')
provides('mpi@:3.0', when='@3:')
provides('mpi@:1.3', when='@1:')
@@ -80,16 +81,14 @@ class Mpich(Package):
]
def install(self, spec, prefix):
- config_args = ["--prefix=" + prefix,
- "--with-pmi=" + ("yes" if '+pmi' in spec else 'no'),
- "--with-pm=" + ('hydra' if '+hydra' in spec else 'no'),
- "--enable-shared"]
-
- # Variants
- if '+verbs' in spec:
- config_args.append("--with-ibverbs")
- else:
- config_args.append("--without-ibverbs")
+ config_args = [
+ '--prefix={0}'.format(prefix),
+ '--enable-shared',
+ '--with-pm={0}'.format('hydra' if '+hydra' in spec else 'no'),
+ '--with-pmi={0}'.format('yes' if '+pmi' in spec else 'no'),
+ '--{0}-romio'.format('enable' if '+romio' in spec else 'disable'),
+ '--{0}-ibverbs'.format('with' if '+verbs' in spec else 'without')
+ ]
# TODO: Spack should make it so that you can't actually find
# these compilers if they're "disabled" for the current
@@ -104,32 +103,33 @@ class Mpich(Package):
config_args.append("--disable-fortran")
configure(*config_args)
+
make()
- make("install")
+ make('check')
+ make('install')
- self.filter_compilers()
+ self.filter_compilers(prefix)
- def filter_compilers(self):
+ def filter_compilers(self, prefix):
"""Run after install to make the MPI compilers use the
- compilers that Spack built the package with.
-
- If this isn't done, they'll have CC, CXX, F77, and FC set
- to Spack's generic cc, c++, f77, and f90. We want them to
- be bound to whatever compiler they were built with.
- """
- bin = self.prefix.bin
- mpicc = join_path(bin, 'mpicc')
- mpicxx = join_path(bin, 'mpicxx')
- mpif77 = join_path(bin, 'mpif77')
- mpif90 = join_path(bin, 'mpif90')
+ compilers that Spack built the package with.
+
+ If this isn't done, they'll have CC, CXX, F77, and FC set
+ to Spack's generic cc, c++, f77, and f90. We want them to
+ be bound to whatever compiler they were built with."""
+
+ mpicc = join_path(prefix.bin, 'mpicc')
+ mpicxx = join_path(prefix.bin, 'mpicxx')
+ mpif77 = join_path(prefix.bin, 'mpif77')
+ mpif90 = join_path(prefix.bin, 'mpif90')
# Substitute Spack compile wrappers for the real
# underlying compiler
kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
- filter_file(env['CC'], self.compiler.cc, mpicc, **kwargs)
+ filter_file(env['CC'], self.compiler.cc, mpicc, **kwargs)
filter_file(env['CXX'], self.compiler.cxx, mpicxx, **kwargs)
filter_file(env['F77'], self.compiler.f77, mpif77, **kwargs)
- filter_file(env['FC'], self.compiler.fc, mpif90, **kwargs)
+ filter_file(env['FC'], self.compiler.fc, mpif90, **kwargs)
# Remove this linking flag if present
# (it turns RPATH into RUNPATH)
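filter_file() with string=True performs a literal search-and-replace in the installed wrapper scripts, swapping the Spack compiler wrapper paths recorded at build time for the real underlying compilers. A hedged sketch of what one of the four calls above amounts to (paths hypothetical):

    from llnl.util.filesystem import filter_file

    kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
    filter_file('/spack/lib/spack/env/cc',   # env['CC'] during the build
                '/usr/bin/gcc',              # self.compiler.cc
                '/spack/opt/mpich/bin/mpicc', **kwargs)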
diff --git a/var/spack/repos/builtin/packages/netcdf/package.py b/var/spack/repos/builtin/packages/netcdf/package.py
index 5dc686c907..d4d7fd1691 100644
--- a/var/spack/repos/builtin/packages/netcdf/package.py
+++ b/var/spack/repos/builtin/packages/netcdf/package.py
@@ -46,10 +46,10 @@ class Netcdf(Package):
depends_on("hdf", when='+hdf4')
# Required for DAP support
- depends_on("curl")
+ depends_on("curl@7.18.0:")
# Required for NetCDF-4 support
- depends_on("zlib")
+ depends_on("zlib@1.2.5:")
depends_on('hdf5')
# NetCDF 4.4.0 and prior have compatibility issues with HDF5 1.10 and later
@@ -105,7 +105,7 @@ class Netcdf(Package):
LDFLAGS.append("-L%s/lib" % spec['hdf'].prefix)
LIBS.append("-l%s" % "jpeg")
- if 'szip' in spec:
+ if '+szip' in spec:
CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
LDFLAGS.append("-L%s/lib" % spec['szip'].prefix)
LIBS.append("-l%s" % "sz")
@@ -120,4 +120,8 @@ class Netcdf(Package):
configure(*config_args)
make()
+
+ if self.run_tests:
+ make("check")
+
make("install")
diff --git a/var/spack/repos/builtin/packages/pango/package.py b/var/spack/repos/builtin/packages/pango/package.py
index 75c4ac807d..5cf3edb8da 100644
--- a/var/spack/repos/builtin/packages/pango/package.py
+++ b/var/spack/repos/builtin/packages/pango/package.py
@@ -41,6 +41,7 @@ class Pango(Package):
depends_on("pkg-config", type="build")
depends_on("harfbuzz")
depends_on("cairo")
+ depends_on("glib")
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
diff --git a/var/spack/repos/builtin/packages/tethex/package.py b/var/spack/repos/builtin/packages/tethex/package.py
new file mode 100644
index 0000000000..624942498e
--- /dev/null
+++ b/var/spack/repos/builtin/packages/tethex/package.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class Tethex(Package):
+ """Tethex is designed to convert triangular (in 2D) or tetrahedral (in 3D)
+ Gmsh's mesh to quadrilateral or hexahedral one respectively. These meshes
+ can be used in software packages working with hexahedrals only - for
+ example, deal.II.
+ """
+
+ homepage = "https://github.com/martemyev/tethex"
+ url = "https://github.com/martemyev/tethex/archive/v0.0.7.tar.gz"
+
+ version('0.0.7', '6c9e4a18a6637deb4400c6d77ec03184')
+ version('develop', git='https://github.com/martemyev/tethex.git')
+
+ depends_on('cmake', type='build')
+
+ def install(self, spec, prefix):
+ cmake('.')
+ make()
+
+ # install by hand
+ mkdirp(prefix.bin)
+ install('tethex', prefix.bin)
diff --git a/var/spack/repos/builtin/packages/trilinos/package.py b/var/spack/repos/builtin/packages/trilinos/package.py
index 203e1502d6..4c7a94ce3f 100644
--- a/var/spack/repos/builtin/packages/trilinos/package.py
+++ b/var/spack/repos/builtin/packages/trilinos/package.py
@@ -149,10 +149,10 @@ class Trilinos(Package):
'-DMPI_BASE_DIR:PATH=%s' % spec['mpi'].prefix,
'-DTPL_ENABLE_BLAS=ON',
'-DBLAS_LIBRARY_NAMES=%s' % ';'.join(blas.names),
- '-DBLAS_LIBRARY_DIRS=%s' % spec['blas'].prefix.lib,
+ '-DBLAS_LIBRARY_DIRS=%s' % ';'.join(blas.directories),
'-DTPL_ENABLE_LAPACK=ON',
'-DLAPACK_LIBRARY_NAMES=%s' % ';'.join(lapack.names),
- '-DLAPACK_LIBRARY_DIRS=%s' % spec['lapack'].prefix.lib,
+ '-DLAPACK_LIBRARY_DIRS=%s' % ';'.join(lapack.directories),
'-DTrilinos_ENABLE_EXPLICIT_INSTANTIATION:BOOL=ON',
'-DTrilinos_ENABLE_CXX11:BOOL=ON',
'-DTPL_ENABLE_Netcdf:BOOL=ON',