commit    f813d823a14f25a8f06c2f0bb7ba88db33c0af68 (patch)
author    Todd Gamblin <tgamblin@llnl.gov>  2015-05-18 16:16:20 -0700
committer Todd Gamblin <tgamblin@llnl.gov>  2015-05-18 16:16:20 -0700
tree      cb7be41ae3142157bd18e4f7d00107cc624ee1d2 /lib
parent    09151785b3353c0169a6c41771cd471c5207d2cf (diff)
parent    f68e64d738054a01b38fd6840fc7932c0912a85c (diff)
Merge branch 'features/optional-deps' into develop
This includes:
- Much better variant support (+debug/-debug)
- Optional dependency support (depends_on(..., when='<condition>'))
- New config file format (YAML in ~/.spack)
- New Spec format (YAML in $prefix/.spack/spec.yaml)
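As a sketch of what the new directives allow, a package can now declare a
variant and a dependency that is conditional on it. This is a hypothetical
mpileaks package written against the features described above, not a file
from this merge:

    from spack import *

    class Mpileaks(Package):
        """Hypothetical package illustrating variants and optional deps."""
        homepage = "http://example.com/mpileaks"
        url      = "http://example.com/mpileaks-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')

        # Variant support: users write +debug or -debug in a spec.
        variant('debug', default=False, description='Build with debugging symbols')

        depends_on('mpi')
        # Optional dependency: callpath is required only when +debug is set.
        depends_on('callpath', when='+debug')

        def install(self, spec, prefix):
            configure('--prefix=%s' % prefix)
            make()
            make('install')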
Diffstat (limited to 'lib')
-rw-r--r--  lib/spack/docs/_themes/sphinx_rtd_theme/footer.html  6
-rw-r--r--  lib/spack/docs/conf.py  4
-rw-r--r--  lib/spack/external/__init__.py  9
-rw-r--r--  lib/spack/external/yaml/LICENSE  19
-rw-r--r--  lib/spack/external/yaml/README  35
-rw-r--r--  lib/spack/external/yaml/__init__.py  315
-rw-r--r--  lib/spack/external/yaml/composer.py  139
-rw-r--r--  lib/spack/external/yaml/constructor.py  678
-rw-r--r--  lib/spack/external/yaml/dumper.py  62
-rw-r--r--  lib/spack/external/yaml/emitter.py  1140
-rw-r--r--  lib/spack/external/yaml/error.py  75
-rw-r--r--  lib/spack/external/yaml/events.py  86
-rw-r--r--  lib/spack/external/yaml/loader.py  40
-rw-r--r--  lib/spack/external/yaml/nodes.py  49
-rw-r--r--  lib/spack/external/yaml/parser.py  589
-rw-r--r--  lib/spack/external/yaml/reader.py  189
-rw-r--r--  lib/spack/external/yaml/representer.py  484
-rw-r--r--  lib/spack/external/yaml/resolver.py  224
-rw-r--r--  lib/spack/external/yaml/scanner.py  1457
-rw-r--r--  lib/spack/external/yaml/serializer.py  111
-rw-r--r--  lib/spack/external/yaml/tokens.py  104
-rw-r--r--  lib/spack/llnl/util/lang.py  28
-rw-r--r--  lib/spack/spack/__init__.py  13
-rw-r--r--  lib/spack/spack/cmd/activate.py  8
-rw-r--r--  lib/spack/spack/cmd/compiler.py  2
-rw-r--r--  lib/spack/spack/cmd/config.py  31
-rw-r--r--  lib/spack/spack/cmd/deactivate.py  14
-rw-r--r--  lib/spack/spack/cmd/find.py  36
-rw-r--r--  lib/spack/spack/cmd/info.py  29
-rw-r--r--  lib/spack/spack/cmd/mirror.py  18
-rw-r--r--  lib/spack/spack/compilers/__init__.py  43
-rw-r--r--  lib/spack/spack/concretize.py  10
-rw-r--r--  lib/spack/spack/config.py  719
-rw-r--r--  lib/spack/spack/directives.py  277
-rw-r--r--  lib/spack/spack/directory_layout.py  229
-rw-r--r--  lib/spack/spack/multimethod.py  2
-rw-r--r--  lib/spack/spack/package.py  102
-rw-r--r--  lib/spack/spack/relations.py  215
-rw-r--r--  lib/spack/spack/spec.py  539
-rw-r--r--  lib/spack/spack/stage.py  8
-rw-r--r--  lib/spack/spack/test/__init__.py  4
-rw-r--r--  lib/spack/spack/test/concretize.py  14
-rw-r--r--  lib/spack/spack/test/config.py  63
-rw-r--r--  lib/spack/spack/test/directory_layout.py  53
-rw-r--r--  lib/spack/spack/test/install.py  4
-rw-r--r--  lib/spack/spack/test/mock_packages_test.py  29
-rw-r--r--  lib/spack/spack/test/optional_deps.py  94
-rw-r--r--  lib/spack/spack/test/spec_dag.py  18
-rw-r--r--  lib/spack/spack/test/spec_semantics.py  110
-rw-r--r--  lib/spack/spack/test/spec_yaml.py  71
-rw-r--r--  lib/spack/spack/util/debug.py  52
-rw-r--r--  lib/spack/spack/variant.py  36
-rw-r--r--  lib/spack/spack/version.py  49
53 files changed, 7547 insertions, 1188 deletions
diff --git a/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html b/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
index 6347a440d7..d000dcbc2c 100644
--- a/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
+++ b/lib/spack/docs/_themes/sphinx_rtd_theme/footer.html
@@ -22,7 +22,12 @@
{%- endif %}
{%- endif %}
+ <br/>
+ Written by Todd Gamblin (<a href="mailto:tgamblin@llnl.gov">tgamblin@llnl.gov</a>) and
+ many contributors. LLNL-CODE-647188.
+
{%- if last_updated %}
+ <br/>
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
{%- endif %}
</p>
@@ -33,4 +38,3 @@
{%- endif %}
</footer>
-
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index c2b2d0e37c..7303d7fef6 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -94,7 +94,7 @@ master_doc = 'index'
# General information about the project.
project = u'Spack'
-copyright = u'2013-2014, Lawrence Livermore National Laboratory'
+copyright = u'2013-2015, Lawrence Livermore National Laboratory.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -203,7 +203,7 @@ html_last_updated_fmt = '%b %d, %Y'
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+#html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index 1cc981930a..0578022210 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -28,6 +28,11 @@ packages that are included in spack.
So far:
argparse: We include our own version to be Python 2.6 compatible.
- pyqver2: External script to query required python version of python source code.
- Used for ensuring 2.6 compatibility.
+
+ pyqver2: External script to query required python version of
+ python source code. Used for ensuring 2.6 compatibility.
+
+ functools: Used for implementation of total_ordering.
+
+ yaml: Used for config files.
"""
diff --git a/lib/spack/external/yaml/LICENSE b/lib/spack/external/yaml/LICENSE
new file mode 100644
index 0000000000..050ced23f6
--- /dev/null
+++ b/lib/spack/external/yaml/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/spack/external/yaml/README b/lib/spack/external/yaml/README
new file mode 100644
index 0000000000..c1edf13870
--- /dev/null
+++ b/lib/spack/external/yaml/README
@@ -0,0 +1,35 @@
+PyYAML - The next generation YAML parser and emitter for Python.
+
+To install, type 'python setup.py install'.
+
+By default, the setup.py script checks whether LibYAML is installed
+and if so, builds and installs LibYAML bindings. To skip the check
+and force installation of LibYAML bindings, use the option '--with-libyaml':
+'python setup.py --with-libyaml install'. To disable the check and
+skip building and installing LibYAML bindings, use '--without-libyaml':
+'python setup.py --without-libyaml install'.
+
+When LibYAML bindings are installed, you may use fast LibYAML-based
+parser and emitter as follows:
+
+ >>> yaml.load(stream, Loader=yaml.CLoader)
+ >>> yaml.dump(data, Dumper=yaml.CDumper)
+
+PyYAML includes a comprehensive test suite. To run the tests,
+type 'python setup.py test'.
+
+For more information, check the PyYAML homepage:
+'http://pyyaml.org/wiki/PyYAML'.
+
+For PyYAML tutorial and reference, see:
+'http://pyyaml.org/wiki/PyYAMLDocumentation'.
+
+Post your questions and opinions to the YAML-Core mailing list:
+'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
+
+Submit bug reports and feature requests to the PyYAML bug tracker:
+'http://pyyaml.org/newticket?component=pyyaml'.
+
+PyYAML is written by Kirill Simonov <xi@resolvent.net>. It is released
+under the MIT license. See the file LICENSE for more details.
+
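The standard idiom for using the fast LibYAML classes the README describes,
with a graceful fallback when the C bindings are absent (as they are in this
vendored copy, which ships no cyaml module):

    import yaml

    try:
        # LibYAML-backed parser/emitter, if the C extension was built.
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        # Pure-Python fallback.
        from yaml import Loader, Dumper

    data = yaml.load("a: 1\nb: [2, 3]\n", Loader=Loader)
    print(yaml.dump(data, Dumper=Dumper))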
diff --git a/lib/spack/external/yaml/__init__.py b/lib/spack/external/yaml/__init__.py
new file mode 100644
index 0000000000..f977f46ba7
--- /dev/null
+++ b/lib/spack/external/yaml/__init__.py
@@ -0,0 +1,315 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.10'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
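A short sketch of the module-level API defined above, in ordinary use:

    import yaml

    # safe_load resolves only basic tags; plain load can construct arbitrary
    # Python objects and belongs only on trusted input.
    doc = yaml.safe_load("name: mpileaks\nvariants: {debug: true}\n")
    assert doc['variants']['debug'] is True

    # load_all/dump_all work on multi-document streams.
    text = yaml.dump_all([{'a': 1}, {'b': 2}], explicit_start=True)
    assert list(yaml.safe_load_all(text)) == [{'a': 1}, {'b': 2}]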
diff --git a/lib/spack/external/yaml/composer.py b/lib/spack/external/yaml/composer.py
new file mode 100644
index 0000000000..06e5ac782f
--- /dev/null
+++ b/lib/spack/external/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
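What the composer produces, concretely: compose() returns the representation
tree, and an alias resolves to the very node object stored for its anchor:

    import yaml

    # Nodes carry tags and marks, not native Python values; conversion to
    # Python objects happens later, in the constructor.
    node = yaml.compose("base: &x {a: 1}\ncopy: *x\n")
    print(node.tag)                   # tag:yaml.org,2002:map
    (k1, v1), (k2, v2) = node.value   # mapping nodes hold (key, value) pairs
    assert v1 is v2                   # *x is the same object as &x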
diff --git a/lib/spack/external/yaml/constructor.py b/lib/spack/external/yaml/constructor.py
new file mode 100644
index 0000000000..8c0ec181b2
--- /dev/null
+++ b/lib/spack/external/yaml/constructor.py
@@ -0,0 +1,678 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ if key in mapping:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found already in-use key (%s)" % key, key_node.start_mark)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
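To make construct_yaml_int's branches above concrete (all standard YAML 1.1
behavior under safe_load):

    import yaml

    assert yaml.safe_load("0x1A") == 26       # hexadecimal
    assert yaml.safe_load("0b101") == 5       # binary
    assert yaml.safe_load("010") == 8         # leading zero means octal
    assert yaml.safe_load("1:30:00") == 5400  # base 60: 1h30m, in seconds
    assert yaml.safe_load("yes") is True      # YAML 1.1 boolean

    # SafeConstructor registers no handlers for !!python/* tags; only the
    # full Constructor does, which is why untrusted input should go
    # through safe_load.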
diff --git a/lib/spack/external/yaml/dumper.py b/lib/spack/external/yaml/dumper.py
new file mode 100644
index 0000000000..f811d2c919
--- /dev/null
+++ b/lib/spack/external/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
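The three dumpers mirror the three constructor classes. A small sketch of the
practical difference (Thing is a made-up class):

    import yaml

    class Thing(object):
        def __init__(self, x):
            self.x = x

    # SafeDumper emits only standard YAML tags.
    print(yaml.dump({'n': 1}, Dumper=yaml.SafeDumper))  # "n: 1"

    # The full Dumper can also represent arbitrary Python objects,
    # e.g. !!python/object:__main__.Thing {x: 3}
    print(yaml.dump(Thing(3), Dumper=yaml.Dumper))

    # safe_dump refuses anything it cannot express with basic tags.
    try:
        yaml.safe_dump(Thing(3))
    except yaml.YAMLError as exc:
        print("safe_dump rejected it: %s" % exc)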
diff --git a/lib/spack/external/yaml/emitter.py b/lib/spack/external/yaml/emitter.py
new file mode 100644
index 0000000000..e5bcdcccbb
--- /dev/null
+++ b/lib/spack/external/yaml/emitter.py
@@ -0,0 +1,1140 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
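+    # e.g. prepare_version((1, 1)) -> u'1.1'; any major version other than
+    # 1 raises EmitterError above.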
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
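+    # Illustration, assuming the emitter's default tag prefixes
+    # ({u'!': u'!', u'tag:yaml.org,2002:': u'!!'}):
+    #
+    #     prepare_tag(u'tag:yaml.org,2002:str')  ->  u'!!str'
+    #     prepare_tag(u'!mytag')                 ->  u'!mytag'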
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+        # An empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+        preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear within a scalar either.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+                if ch == u'#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+            preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+        # Leading and trailing whitespace is bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
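+    # For illustration, two sketched results of the analysis above:
+    #
+    #     analyze_scalar(u'hello')    -> plain style allowed in both contexts
+    #     analyze_scalar(u'foo: bar') -> ': ' sets flow and block indicators,
+    #                                    so plain styles are disallowed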
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
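+    # Each entry maps a character to its one-letter escape, so inside a
+    # double-quoted scalar u'\x07' is emitted as \a and u'\u2028' as \L.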
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
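+    # Sketched behavior, assuming the default best_indent of 2:
+    #
+    #     determine_block_hints(u'foo')      ->  u'-'  (strip: no final break)
+    #     determine_block_hints(u'foo\n\n')  ->  u'+'  (keep: extra final breaks)
+    #     determine_block_hints(u' foo\n')   ->  u'2'  (leading space forces an
+    #                                                   explicit indent hint)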
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
diff --git a/lib/spack/external/yaml/error.py b/lib/spack/external/yaml/error.py
new file mode 100644
index 0000000000..577686db5f
--- /dev/null
+++ b/lib/spack/external/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
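+# Rough shape of a rendered MarkedYAMLError (illustrative values):
+#
+#     while parsing a block mapping
+#       in "config.yaml", line 2, column 1
+#     expected <block end>, but found '<block sequence start>'
+#       in "config.yaml", line 3, column 3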
diff --git a/lib/spack/external/yaml/events.py b/lib/spack/external/yaml/events.py
new file mode 100644
index 0000000000..f79ad389cb
--- /dev/null
+++ b/lib/spack/external/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
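+# For orientation, parsing u'a: 1' yields roughly this event sequence:
+#
+#     StreamStartEvent, DocumentStartEvent, MappingStartEvent,
+#     ScalarEvent(value=u'a'), ScalarEvent(value=u'1'),
+#     MappingEndEvent, DocumentEndEvent, StreamEndEvent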
diff --git a/lib/spack/external/yaml/loader.py b/lib/spack/external/yaml/loader.py
new file mode 100644
index 0000000000..293ff467b1
--- /dev/null
+++ b/lib/spack/external/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
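+# Typical wiring (sketch): yaml.load() builds one of these loaders and
+# drains it, roughly:
+#
+#     loader = SafeLoader(stream)
+#     try:
+#         data = loader.get_single_data()
+#     finally:
+#         loader.dispose()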
diff --git a/lib/spack/external/yaml/nodes.py b/lib/spack/external/yaml/nodes.py
new file mode 100644
index 0000000000..c4f070c41e
--- /dev/null
+++ b/lib/spack/external/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
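+# For illustration, composing u'hello' with the standard resolver yields
+#
+#     ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'hello')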
diff --git a/lib/spack/external/yaml/parser.py b/lib/spack/external/yaml/parser.py
new file mode 100644
index 0000000000..f9e3057f33
--- /dev/null
+++ b/lib/spack/external/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
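+# Pull-style usage sketch (via a full loader that mixes in Reader, Scanner,
+# and Parser):
+#
+#     while loader.check_event():
+#         event = loader.get_event()
+#         ...  # dispatch on type(event)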
diff --git a/lib/spack/external/yaml/reader.py b/lib/spack/external/yaml/reader.py
new file mode 100644
index 0000000000..a67af7c5da
--- /dev/null
+++ b/lib/spack/external/yaml/reader.py
@@ -0,0 +1,189 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+#    reader.forward(length=1) - move the current position `length` characters forward.
+#    reader.index - the index of the current character.
+#    reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream, name=None):
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>" if name is None else name
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>" if name is None else name
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>") if name is None else name
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
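+# Minimal usage sketch:
+#
+#     reader = Reader(u'hello')
+#     reader.peek()      # -> u'h'
+#     reader.forward(2)
+#     reader.peek()      # -> u'l'
+#     reader.get_mark()  # Mark at index 2, line 0, column 2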
diff --git a/lib/spack/external/yaml/representer.py b/lib/spack/external/yaml/representer.py
new file mode 100644
index 0000000000..5f4fc70dbc
--- /dev/null
+++ b/lib/spack/external/yaml/representer.py
@@ -0,0 +1,484 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
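+ # Compute positive infinity portably: 1e300 squared overflows to inf,
+ # and inf is a fixed point of squaring, so the loop stops once
+ # repr(inf_value) equals repr(inf_value*inf_value).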
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal part. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(*args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we calls function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
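Representers are looked up by exact type first (yaml_representers), then by walking the type's MRO through the multi-representers, as represent_data() above shows. A hedged usage sketch against the vendored package (the Point class and the !point tag are illustrative, not part of Spack):

    import yaml  # the vendored package

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, data):
        # A registered callback receives the active representer and the
        # object, and must return a node; here, a tagged mapping.
        return dumper.represent_mapping(u'!point',
                                        {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)
    print yaml.dump(Point(1, 2))   # roughly: !point {x: 1, y: 2}
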
diff --git a/lib/spack/external/yaml/resolver.py b/lib/spack/external/yaml/resolver.py
new file mode 100644
index 0000000000..6b5ab87596
--- /dev/null
+++ b/lib/spack/external/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could change.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. Its elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`; `None`
+ # matches any kind of node. `index_check` may be `None`, a boolean,
+ # a string, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value whose corresponding scalar key has content
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value whose index equals `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
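Implicit resolvers determine the tag of a plain, untagged scalar: candidate (tag, regexp) pairs are indexed by the scalar's first character, the first matching regexp wins, and unmatched scalars fall back to DEFAULT_SCALAR_TAG (!!str). A hedged sketch of registering one through the vendored package (the !version tag is hypothetical):

    import re
    import yaml  # the vendored package

    # Only consulted for plain scalars whose first character is a digit;
    # added resolvers are tried after the built-in ones for that prefix.
    yaml.add_implicit_resolver(
        u'!version',
        re.compile(r'^\d+(\.\d+)+$'),
        list(u'0123456789'))

After this, a plain scalar such as 1.2.3 (which no built-in resolver matches) resolves to !version instead of falling through to !!str.
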
diff --git a/lib/spack/external/yaml/scanner.py b/lib/spack/external/yaml/scanner.py
new file mode 100644
index 0000000000..5228fad65c
--- /dev/null
+++ b/lib/spack/external/yaml/scanner.py
@@ -0,0 +1,1457 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
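+# For example (illustrative), scanning the one-line document "a: 1"
+# with the package's top-level helper:
+#
+#     for token in yaml.scan("a: 1"):
+#         print token
+#
+# produces, in order: STREAM-START, BLOCK-MAPPING-START, KEY,
+# SCALAR(u'a'), VALUE, SCALAR(u'1'), BLOCK-END, STREAM-END.
+#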
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for a BOM and converting the
+ # input data to Unicode. It also adds a NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen the indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark their beginning and end. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
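+
+ # For example, the escape '\x41' consumes ESCAPE_CODES[u'x'] == 2
+ # hexadecimal digits and decodes to unichr(0x41) == u'A'.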
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/lib/spack/external/yaml/serializer.py b/lib/spack/external/yaml/serializer.py
new file mode 100644
index 0000000000..0bf1e96dc1
--- /dev/null
+++ b/lib/spack/external/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
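+ # Anchors are handed out in discovery order: u'id001', u'id002', ...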
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/lib/spack/external/yaml/tokens.py b/lib/spack/external/yaml/tokens.py
new file mode 100644
index 0000000000..4d0b48a394
--- /dev/null
+++ b/lib/spack/external/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
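+# Sketch: marks are omitted from the repr, so, for example,
+# repr(StreamStartToken(encoding='utf-8')) == "StreamStartToken(encoding='utf-8')".
+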
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 332367f537..9e1bef18ca 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -126,22 +126,20 @@ def caller_locals():
del stack
-def get_calling_package_name():
+def get_calling_module_name():
"""Make sure that the caller is a class definition, and return the
- module's name.
+ enclosing module's name.
"""
stack = inspect.stack()
try:
- # get calling function name (the relation)
- relation = stack[1][3]
-
# Make sure locals contain __module__
caller_locals = stack[2][0].f_locals
finally:
del stack
if not '__module__' in caller_locals:
- raise ScopeError(relation)
+ raise RuntimeError("Must invoke get_calling_module_name() "
+ "from inside a class definition!")
module_name = caller_locals['__module__']
base_name = module_name.split('.')[-1]
@@ -322,6 +320,24 @@ def match_predicate(*args):
return match
+
+def DictWrapper(dictionary):
+ """Returns an object that wraps a dictionary, so that its entries can
+ be read and written like attributes."""
+ class wrapper(object):
+ def __getattr__(self, name): return dictionary[name]
+ def __setattr__(self, name, value): dictionary[name] = value
+ def setdefault(self, *args): return dictionary.setdefault(*args)
+ def get(self, *args): return dictionary.get(*args)
+ def keys(self): return dictionary.keys()
+ def values(self): return dictionary.values()
+ def items(self): return dictionary.items()
+ def __iter__(self): return iter(dictionary)
+
+
+ return wrapper()
+
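+# Illustrative sketch (not part of the library): attribute reads and
+# writes on the wrapper pass through to the underlying dict.
+#
+#     settings = {'root': '/opt/spack'}
+#     w = DictWrapper(settings)
+#     w.debug = True                     # writes through to the dict
+#     assert settings['debug'] is True
+#     assert w.root == '/opt/spack'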
+
class RequiredAttributeError(ValueError):
def __init__(self, message):
super(RequiredAttributeError, self).__init__(message)
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index eb891e3d57..caa09eb6e0 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -42,7 +42,8 @@ test_path = join_path(module_path, "test")
hooks_path = join_path(module_path, "hooks")
var_path = join_path(prefix, "var", "spack")
stage_path = join_path(var_path, "stage")
-install_path = join_path(prefix, "opt")
+opt_path = join_path(prefix, "opt")
+install_path = join_path(opt_path, "spack")
share_path = join_path(prefix, "share", "spack")
#
@@ -65,8 +66,8 @@ mock_user_config = join_path(mock_config_path, "user_spackconfig")
# This controls how spack lays out install prefixes and
# stage directories.
#
-from spack.directory_layout import SpecHashDirectoryLayout
-install_layout = SpecHashDirectoryLayout(install_path)
+from spack.directory_layout import YamlDirectoryLayout
+install_layout = YamlDirectoryLayout(install_path)
#
# This controls how things are concretized in spack.
@@ -146,9 +147,9 @@ import llnl.util.filesystem
from llnl.util.filesystem import *
__all__ += llnl.util.filesystem.__all__
-import spack.relations
-from spack.relations import *
-__all__ += spack.relations.__all__
+import spack.directives
+from spack.directives import *
+__all__ += spack.directives.__all__
import spack.util.executable
from spack.util.executable import *
diff --git a/lib/spack/spack/cmd/activate.py b/lib/spack/spack/cmd/activate.py
index 4070baaa70..1004f1f8e6 100644
--- a/lib/spack/spack/cmd/activate.py
+++ b/lib/spack/spack/cmd/activate.py
@@ -38,17 +38,11 @@ def setup_parser(subparser):
def activate(parser, args):
- # TODO: shouldn't have to concretize here. Fix DAG issues.
- specs = spack.cmd.parse_specs(args.spec, concretize=True)
+ specs = spack.cmd.parse_specs(args.spec)
if len(specs) != 1:
tty.die("activate requires one spec. %d given." % len(specs))
- # TODO: remove this hack when DAG info is stored in dir layout.
- # This ensures the ext spec is always normalized properly.
- spack.db.get(specs[0])
-
spec = spack.cmd.disambiguate_spec(specs[0])
-
if not spec.package.is_extension:
tty.die("%s is not an extension." % spec.name)
diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py
index e37f44b3b7..2a64dc914e 100644
--- a/lib/spack/spack/cmd/compiler.py
+++ b/lib/spack/spack/cmd/compiler.py
@@ -68,7 +68,7 @@ def compiler_add(args):
spack.compilers.add_compilers_to_config('user', *compilers)
n = len(compilers)
tty.msg("Added %d new compiler%s to %s" % (
- n, 's' if n > 1 else '', spack.config.get_filename('user')))
+ n, 's' if n > 1 else '', spack.config.get_config_scope_filename('user', 'compilers')))
colify(reversed(sorted(c.spec for c in compilers)), indent=4)
else:
tty.msg("Found no new compilers")
diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py
index 283bfc19b9..8c18f88b64 100644
--- a/lib/spack/spack/cmd/config.py
+++ b/lib/spack/spack/cmd/config.py
@@ -43,42 +43,27 @@ def setup_parser(subparser):
sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='config_command')
- set_parser = sp.add_parser('set', help='Set configuration values.')
- set_parser.add_argument('key', help="Key to set value for.")
- set_parser.add_argument('value', nargs='?', default=None,
- help="Value to associate with key")
-
- get_parser = sp.add_parser('get', help='Get configuration values.')
- get_parser.add_argument('key', help="Key to get value for.")
+ get_parser = sp.add_parser('get', help='Print configuration values.')
+ get_parser.add_argument('category', help="Configuration category to print.")
edit_parser = sp.add_parser('edit', help='Edit configuration file.')
-
-
-def config_set(args):
- # default scope for writing is 'user'
- if not args.scope:
- args.scope = 'user'
-
- config = spack.config.get_config(args.scope)
- config.set_value(args.key, args.value)
- config.write()
+ edit_parser.add_argument('category', help="Configuration category to edit")
def config_get(args):
- config = spack.config.get_config(args.scope)
- print config.get_value(args.key)
+ spack.config.print_category(args.category)
def config_edit(args):
if not args.scope:
args.scope = 'user'
- config_file = spack.config.get_filename(args.scope)
+ if not args.category:
+ args.category = None
+ config_file = spack.config.get_config_scope_filename(args.scope, args.category)
spack.editor(config_file)
def config(parser, args):
- action = { 'set' : config_set,
- 'get' : config_get,
+ action = { 'get' : config_get,
'edit' : config_edit }
action[args.config_command](args)
-
diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py
index c9a4d4b2f6..e44be41029 100644
--- a/lib/spack/spack/cmd/deactivate.py
+++ b/lib/spack/spack/cmd/deactivate.py
@@ -44,15 +44,10 @@ def setup_parser(subparser):
def deactivate(parser, args):
- # TODO: shouldn't have to concretize here. Fix DAG issues.
- specs = spack.cmd.parse_specs(args.spec, concretize=True)
+ specs = spack.cmd.parse_specs(args.spec)
if len(specs) != 1:
tty.die("deactivate requires one spec. %d given." % len(specs))
- # TODO: remove this hack when DAG info is stored properly.
- # This ensures the ext spec is always normalized properly.
- spack.db.get(specs[0])
-
spec = spack.cmd.disambiguate_spec(specs[0])
pkg = spec.package
@@ -67,9 +62,6 @@ def deactivate(parser, args):
ext_pkg.do_deactivate(force=True)
elif pkg.is_extension:
- # TODO: store DAG info properly (see above)
- spec.normalize()
-
if not args.force and not spec.package.activated:
tty.die("%s is not activated." % pkg.spec.short_spec)
@@ -81,10 +73,6 @@ def deactivate(parser, args):
for name in topo_order:
espec = index[name]
epkg = espec.package
-
- # TODO: store DAG info properly (see above)
- epkg.spec.normalize()
-
if epkg.extends(pkg.extendee_spec):
if epkg.activated or args.force:
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index 70b10edb4e..15c1cc9196 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -41,9 +41,6 @@ description ="Find installed spack packages"
def setup_parser(subparser):
format_group = subparser.add_mutually_exclusive_group()
format_group.add_argument(
- '-l', '--long', action='store_const', dest='mode', const='long',
- help='Show dependency hashes as well as versions.')
- format_group.add_argument(
'-p', '--paths', action='store_const', dest='mode', const='paths',
help='Show paths to package install directories')
format_group.add_argument(
@@ -51,12 +48,21 @@ def setup_parser(subparser):
help='Show full dependency DAG of installed packages')
subparser.add_argument(
+ '-l', '--long', action='store_true', dest='long',
+ help='Show dependency hashes as well as versions.')
+
+ subparser.add_argument(
'query_specs', nargs=argparse.REMAINDER,
help='optional specs to filter results')
+def gray_hash(spec):
+ return colorize('@K{[%s]}' % spec.dag_hash(7))
+
+
def display_specs(specs, **kwargs):
mode = kwargs.get('mode', 'short')
+ hashes = kwargs.get('long', False)
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
@@ -85,13 +91,20 @@ def display_specs(specs, **kwargs):
elif mode == 'deps':
for spec in specs:
- print spec.tree(indent=4, format='$_$@$+$#', color=True),
-
- elif mode in ('short', 'long'):
- fmt = '$-_$@$+'
- if mode == 'long':
- fmt += '$#'
- colify(s.format(fmt, color=True) for s in specs)
+ print spec.tree(
+ format='$_$@$+',
+ color=True,
+ indent=4,
+ prefix=(lambda s: gray_hash(s)) if hashes else None)
+
+ elif mode == 'short':
+ def fmt(s):
+ string = ""
+ if hashes:
+ string += gray_hash(s) + ' '
+ string += s.format('$-_$@$+', color=True)
+ return string
+ colify(fmt(s) for s in specs)
else:
raise ValueError(
@@ -125,5 +138,4 @@ def find(parser, args):
if sys.stdout.isatty():
tty.msg("%d installed packages." % len(specs))
- display_specs(specs, mode=args.mode)
-
+ display_specs(specs, mode=args.mode, long=args.long)
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index eafafc601a..c6209523f0 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -22,12 +22,22 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
+import textwrap
from llnl.util.tty.colify import *
import spack
import spack.fetch_strategy as fs
description = "Get detailed information on a particular package"
+def padder(str_list, extra=0):
+ """Return a function to pad elements of a list."""
+ length = max(len(str(s)) for s in str_list) + extra
+ def pad(string):
+ string = str(string)
+ padding = max(0, length - len(string))
+ return string + (padding * ' ')
+ return pad
+
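+# Sketch: pad = padder(['1.0', '2.0.1'], 4) pads each string to
+# len('2.0.1') + 4 characters, so the columns printed below line up.
+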
def setup_parser(subparser):
subparser.add_argument('name', metavar="PACKAGE", help="Name of package to get info for.")
@@ -42,13 +52,24 @@ def print_text_info(pkg):
print "Safe versions: "
if not pkg.versions:
- print("None.")
+ print("None")
else:
- maxlen = max(len(str(v)) for v in pkg.versions)
- fmt = "%%-%ss" % maxlen
+ pad = padder(pkg.versions, 4)
for v in reversed(sorted(pkg.versions)):
f = fs.for_package_version(pkg, v)
- print " " + (fmt % v) + " " + str(f)
+ print " %s%s" % (pad(v), str(f))
+
+ print
+ print "Variants:"
+ if not pkg.variants:
+ print "None"
+ else:
+ pad = padder(pkg.variants, 4)
+ for name in sorted(pkg.variants):
+ v = pkg.variants[name]
+ print " %s%s" % (
+ pad(('+' if v.default else '-') + name + ':'),
+ "\n".join(textwrap.wrap(v.description)))
print
print "Dependencies:"
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 22838e1344..02a1467ee6 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -75,27 +75,22 @@ def mirror_add(args):
if url.startswith('/'):
url = 'file://' + url
- config = spack.config.get_config('user')
- config.set_value('mirror', args.name, 'url', url)
- config.write()
+ mirror_dict = { args.name : url }
+ spack.config.add_to_mirror_config(mirror_dict)
def mirror_remove(args):
"""Remove a mirror by name."""
- config = spack.config.get_config('user')
name = args.name
- if not config.has_named_section('mirror', name):
+ rmd_something = spack.config.remove_from_config('mirrors', name)
+ if not rmd_something:
tty.die("No such mirror: %s" % name)
- config.remove_named_section('mirror', name)
- config.write()
def mirror_list(args):
"""Print out available mirrors to the console."""
- config = spack.config.get_config()
- sec_names = config.get_section_names('mirror')
-
+ sec_names = spack.config.get_mirror_config()
if not sec_names:
tty.msg("No mirrors configured.")
return
@@ -103,8 +98,7 @@ def mirror_list(args):
max_len = max(len(s) for s in sec_names)
fmt = "%%-%ds%%s" % (max_len + 4)
- for name in sec_names:
- val = config.get_value('mirror', name, 'url')
+ for name, val in sec_names.iteritems():
print fmt % (name, val)
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 8cb11c3208..b7b021a1ac 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -60,24 +60,25 @@ def _get_config():
first."""
# If any configuration file has compilers, just stick with the
# ones already configured.
- config = spack.config.get_config()
+ config = spack.config.get_compilers_config()
existing = [spack.spec.CompilerSpec(s)
- for s in config.get_section_names('compiler')]
+ for s in config]
if existing:
return config
compilers = find_compilers(*get_path('PATH'))
- new_compilers = [
- c for c in compilers if c.spec not in existing]
- add_compilers_to_config('user', *new_compilers)
+ add_compilers_to_config('user', *compilers)
# After writing compilers to the user config, return a full config
# from all files.
- return spack.config.get_config(refresh=True)
+ return spack.config.get_compilers_config()
-@memoized
+_cached_default_compiler = None
def default_compiler():
+ global _cached_default_compiler
+ if _cached_default_compiler:
+ return _cached_default_compiler
versions = []
for name in _default_order: # TODO: customize order.
versions = find(name)
@@ -86,7 +87,8 @@ def default_compiler():
if not versions:
raise NoCompilersError()
- return sorted(versions)[-1]
+ _cached_default_compiler = sorted(versions)[-1]
+ return _cached_default_compiler
def find_compilers(*path):
@@ -122,19 +124,17 @@ def find_compilers(*path):
def add_compilers_to_config(scope, *compilers):
- config = spack.config.get_config(scope)
+ compiler_config_tree = {}
for compiler in compilers:
- add_compiler(config, compiler)
- config.write()
-
-
-def add_compiler(config, compiler):
- def setup_field(cspec, name, exe):
- path = exe if exe else "None"
- config.set_value('compiler', cspec, name, path)
+ compiler_entry = {}
+ for c in _required_instance_vars:
+ val = getattr(compiler, c)
+ if not val:
+ val = "None"
+ compiler_entry[c] = val
+ compiler_config_tree[str(compiler.spec)] = compiler_entry
+ spack.config.add_to_compiler_config(compiler_config_tree, scope)
- for c in _required_instance_vars:
- setup_field(compiler.spec, c, getattr(compiler, c))
def supported_compilers():
@@ -157,8 +157,7 @@ def all_compilers():
available to build with. These are instances of CompilerSpec.
"""
configuration = _get_config()
- return [spack.spec.CompilerSpec(s)
- for s in configuration.get_section_names('compiler')]
+ return [spack.spec.CompilerSpec(s) for s in configuration]
@_auto_compiler_spec
@@ -176,7 +175,7 @@ def compilers_for_spec(compiler_spec):
config = _get_config()
def get_compiler(cspec):
- items = dict((k,v) for k,v in config.items('compiler "%s"' % cspec))
+ items = config[str(cspec)]
if not all(n in items for n in _required_instance_vars):
raise InvalidCompilerConfigurationError(cspec)
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index 3f569f9dce..15e886ad3c 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -101,6 +101,16 @@ class DefaultConcretizer(object):
spec.architecture = spack.architecture.sys_type()
+ def concretize_variants(self, spec):
+ """If the spec already has variants filled in, return. Otherwise, add
+ the default variants from the package specification.
+ """
+ for name, variant in spec.package.variants.items():
+ if name not in spec.variants:
+ spec.variants[name] = spack.spec.VariantSpec(
+ name, variant.default)
+
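+ # Sketch: a package declaring variant('debug', default=False) would
+ # leave an unconstrained spec with variants['debug'] set to
+ # VariantSpec('debug', False) after this runs.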
+
def concretize_compiler(self, spec):
"""If the spec already has a compiler, we're done. If not, then take
the compiler used for the nearest ancestor with a compiler
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 85ee16a1c2..34dee86473 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -28,452 +28,315 @@ Configuration file scopes
===============================
When Spack runs, it pulls configuration data from several config
-files, much like bash shells. In Spack, there are two configuration
-scopes:
+directories, each of which contains configuration files. In Spack,
+there are two configuration scopes:
1. ``site``: Spack loads site-wide configuration options from
- ``$(prefix)/etc/spackconfig``.
+ ``$(prefix)/etc/spack/``.
2. ``user``: Spack next loads per-user configuration options from
- ~/.spackconfig.
-
-If user options have the same names as site options, the user options
-take precedence.
+ ~/.spack/.
+Spack may read configuration files from both of these locations. When
+configurations conflict, the user config options take precedence over
+the site configurations. Each configuration directory may contain
+several configuration files, such as compilers.yaml or mirrors.yaml.
Configuration file format
===============================
-Configuration files are formatted using .gitconfig syntax, which is
-much like Windows .INI format. This format is implemented by Python's
-ConfigParser class, and it's easy to read and versatile.
-
-The file is divided into sections, like this ``compiler`` section::
-
- [compiler]
- cc = /usr/bin/gcc
-
-In each section there are options (cc), and each option has a value
-(/usr/bin/gcc).
-
-Borrowing from git, we also allow named sections, e.g.:
-
- [compiler "gcc@4.7.3"]
- cc = /usr/bin/gcc
-
-This is a compiler section, but it's for the specific compiler,
-``gcc@4.7.3``. ``gcc@4.7.3`` is the name.
-
-
-Keys
-===============================
-
-Together, the section, name, and option, separated by periods, are
-called a ``key``. Keys can be used on the command line to set
-configuration options explicitly (this is also borrowed from git).
-
-For example, to change the C compiler used by gcc@4.7.3, you could do
-this:
-
- spack config compiler.gcc@4.7.3.cc /usr/local/bin/gcc
-
-That will create a named compiler section in the user's .spackconfig
-like the one shown above.
+Configuration files are formatted using YAML syntax, parsed with the
+bundled PyYAML library. The format is easy to read and versatile.
+
+The config files are structured as trees, like this ``compilers`` section::
+
+ compilers:
+ chaos_5_x86_64_ib:
+ gcc@4.4.7:
+ cc: /usr/bin/gcc
+ cxx: /usr/bin/g++
+ f77: /usr/bin/gfortran
+ fc: /usr/bin/gfortran
+ bgqos_0:
+ xlc@12.1:
+ cc: /usr/local/bin/mpixlc
+ ...
+
+In this example, entries like ``compilers`` and ``xlc@12.1`` are used to
+categorize entries beneath them in the tree. At the leaves of the tree,
+entries like ``cc`` and ``cxx`` are specified as name/value pairs.
+
+Spack returns these trees as nested dicts. The dict for the above example
+would look like::
+
+ { 'compilers' :
+ { 'chaos_5_x86_64_ib' :
+ { 'gcc@4.4.7' :
+ { 'cc' : '/usr/bin/gcc',
+ 'cxx' : '/usr/bin/g++',
+ 'f77' : '/usr/bin/gfortran',
+ 'fc' : '/usr/bin/gfortran' } },
+ 'bgqos_0' :
+ { 'xlc@12.1' :
+ { 'cc' : '/usr/local/bin/mpixlc' } } } }
+
+Some routines, like get_mirror_config and get_compilers_config, may strip
+off the top levels of the tree and return subtrees.
"""
import os
-import re
-import inspect
-import ConfigParser as cp
+import exceptions
+import sys
from external.ordereddict import OrderedDict
from llnl.util.lang import memoized
import spack.error
-__all__ = [
- 'SpackConfigParser', 'get_config', 'SpackConfigurationError',
- 'InvalidConfigurationScopeError', 'InvalidSectionNameError',
- 'ReadOnlySpackConfigError', 'ConfigParserError', 'NoOptionError',
- 'NoSectionError']
-
-_named_section_re = r'([^ ]+) "([^"]+)"'
+from contextlib import closing
+from external import yaml
+from external.yaml.error import MarkedYAMLError
+import llnl.util.tty as tty
+from llnl.util.filesystem import mkdirp
+
+_config_sections = {}
+class _ConfigCategory:
+ name = None
+ filename = None
+ merge = True
+ def __init__(self, n, f, m):
+ self.name = n
+ self.filename = f
+ self.merge = m
+ self.files_read_from = []
+ self.result_dict = {}
+ _config_sections[n] = self
+
+_ConfigCategory('compilers', 'compilers.yaml', True)
+_ConfigCategory('mirrors', 'mirrors.yaml', True)
+_ConfigCategory('view', 'views.yaml', True)
+_ConfigCategory('order', 'orders.yaml', True)
"""Names of scopes and their corresponding configuration files."""
-_scopes = OrderedDict({
- 'site' : os.path.join(spack.etc_path, 'spackconfig'),
- 'user' : os.path.expanduser('~/.spackconfig')
-})
-
-_field_regex = r'^([\w-]*)' \
- r'(?:\.(.*(?=.)))?' \
- r'(?:\.([\w-]+))?$'
-
-_section_regex = r'^([\w-]*)\s*' \
- r'\"([^"]*\)\"$'
-
-
-# Cache of configs -- we memoize this for performance.
-_config = {}
-
-def get_config(scope=None, **kwargs):
- """Get a Spack configuration object, which can be used to set options.
-
- With no arguments, this returns a SpackConfigParser with config
- options loaded from all config files. This is how client code
- should read Spack configuration options.
-
- Optionally, a scope parameter can be provided. Valid scopes
- are ``site`` and ``user``. If a scope is provided, only the
- options from that scope's configuration file are loaded. The
- caller can set or unset options, then call ``write()`` on the
- config object to write it back out to the original config file.
-
- By default, this will cache configurations and return the last
- read version of the config file. If the config file is
- modified and you need to refresh, call get_config with the
- refresh=True keyword argument. This will force all files to be
- re-read.
- """
- refresh = kwargs.get('refresh', False)
- if refresh:
- _config.clear()
-
- if scope not in _config:
- if scope is None:
- _config[scope] = SpackConfigParser([path for path in _scopes.values()])
- elif scope not in _scopes:
- raise UnknownConfigurationScopeError(scope)
+config_scopes = [('site', os.path.join(spack.etc_path, 'spack')),
+ ('user', os.path.expanduser('~/.spack'))]
+
+_compiler_by_arch = {}
+_read_config_file_result = {}
+def _read_config_file(filename):
+ """Read a given YAML configuration file"""
+ global _read_config_file_result
+ if filename in _read_config_file_result:
+ return _read_config_file_result[filename]
+
+ try:
+ with open(filename) as f:
+ ydict = yaml.load(f)
+ except MarkedYAMLError, e:
+ tty.die("Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
+ except exceptions.IOError, e:
+ _read_config_file_result[filename] = None
+ return None
+ _read_config_file_result[filename] = ydict
+ return ydict
+
+
+def clear_config_caches():
+ """Clears the caches for configuration files, which will cause them
+ to be re-read upon the next request"""
+ for key,s in _config_sections.iteritems():
+ s.files_read_from = []
+ s.result_dict = {}
+ spack.config._read_config_file_result = {}
+ spack.config._compiler_by_arch = {}
+ spack.compilers._cached_default_compiler = None
+
+
+def _merge_dicts(d1, d2):
+ """Recursively merges two configuration trees, with entries
+ in d2 taking precedence over d1"""
+ if not d1:
+ return d2.copy()
+ if not d2:
+ return d1
+
+ for key2, val2 in d2.iteritems():
+ if not key2 in d1:
+ d1[key2] = val2
+ continue
+ val1 = d1[key2]
+ if isinstance(val1, dict) and isinstance(val2, dict):
+ d1[key2] = _merge_dicts(val1, val2)
+ continue
+ if isinstance(val1, list) and isinstance(val2, list):
+ val1.extend(val2)
+ seen = set()
+ d1[key2] = [ x for x in val1 if not (x in seen or seen.add(x)) ]
+ continue
+ d1[key2] = val2
+ return d1
+
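+# A small sketch of the merge semantics (illustrative only):
+#
+#     _merge_dicts({'mirrors': {'a': 'url1'}}, {'mirrors': {'b': 'url2'}})
+#     # => {'mirrors': {'a': 'url1', 'b': 'url2'}}
+#
+# Scalars in d2 replace those in d1; lists are concatenated, dropping
+# duplicates while preserving order.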
+
+def get_config(category_name):
+ """Get the confguration tree for the names category. Strips off the
+ top-level category entry from the dict"""
+ global config_scopes
+ category = _config_sections[category_name]
+ if category.result_dict:
+ return category.result_dict
+
+ category.result_dict = {}
+ for scope, scope_path in config_scopes:
+ path = os.path.join(scope_path, category.filename)
+ result = _read_config_file(path)
+ if not result:
+ continue
+ if not category_name in result:
+ continue
+ category.files_read_from.insert(0, path)
+ result = result[category_name]
+ if category.merge:
+ category.result_dict = _merge_dicts(category.result_dict, result)
else:
- _config[scope] = SpackConfigParser(_scopes[scope])
-
- return _config[scope]
-
-
-def get_filename(scope):
- """Get the filename for a particular config scope."""
- if not scope in _scopes:
- raise UnknownConfigurationScopeError(scope)
- return _scopes[scope]
-
-
-def _parse_key(key):
- """Return the section, name, and option the field describes.
- Values are returned in a 3-tuple.
-
- e.g.:
- The field name ``compiler.gcc@4.7.3.cc`` refers to the 'cc' key
- in a section that looks like this:
-
- [compiler "gcc@4.7.3"]
- cc = /usr/local/bin/gcc
-
- * The section is ``compiler``
- * The name is ``gcc@4.7.3``
- * The key is ``cc``
- """
- match = re.search(_field_regex, key)
- if match:
- return match.groups()
+ category.result_dict = result
+ return category.result_dict
+
+
+def get_compilers_config(arch=None):
+ """Get the compiler configuration from config files for the given
+ architecture. Strips off the architecture component of the
+ configuration"""
+ global _compiler_by_arch
+ if not arch:
+ arch = spack.architecture.sys_type()
+ if arch in _compiler_by_arch:
+ return _compiler_by_arch[arch]
+
+ cc_config = get_config('compilers')
+ if arch in cc_config and 'all' in cc_config:
+ arch_compiler = dict(cc_config[arch])
+ _compiler_by_arch[arch] = _merge_dicts(arch_compiler, cc_config['all'])
+ elif arch in cc_config:
+ _compiler_by_arch[arch] = cc_config[arch]
+ elif 'all' in cc_config:
+ _compiler_by_arch[arch] = cc_config['all']
else:
- raise InvalidSectionNameError(key)
-
-
-def _make_section_name(section, name):
- if not name:
- return section
- return '%s "%s"' % (section, name)
-
-
-def _autokey(fun):
- """Allow a function to be called with a string key like
- 'compiler.gcc.cc', or with the section, name, and option
- separated. Function should take at least three args, e.g.:
-
- fun(self, section, name, option, [...])
-
- This will allow the function above to be called normally or
- with a string key, e.g.:
-
- fun(self, key, [...])
- """
- argspec = inspect.getargspec(fun)
- fun_nargs = len(argspec[0])
-
- def string_key_func(*args):
- nargs = len(args)
- if nargs == fun_nargs - 2:
- section, name, option = _parse_key(args[1])
- return fun(args[0], section, name, option, *args[2:])
-
- elif nargs == fun_nargs:
- return fun(*args)
-
- else:
- raise TypeError(
- "%s takes %d or %d args (found %d)."
- % (fun.__name__, fun_nargs - 2, fun_nargs, len(args)))
- return string_key_func
-
-
-
-class SpackConfigParser(cp.RawConfigParser):
- """Slightly modified from Python's raw config file parser to accept
- leading whitespace and preserve comments.
- """
- # Slightly modify Python option expressions to allow leading whitespace
- OPTCRE = re.compile(r'\s*' + cp.RawConfigParser.OPTCRE.pattern)
-
- def __init__(self, file_or_files):
- cp.RawConfigParser.__init__(self, dict_type=OrderedDict)
-
- if isinstance(file_or_files, basestring):
- self.read([file_or_files])
- self.filename = file_or_files
-
- else:
- self.read(file_or_files)
- self.filename = None
-
-
- @_autokey
- def set_value(self, section, name, option, value):
- """Set the value for a key. If the key is in a section or named
- section that does not yet exist, add that section.
- """
- sn = _make_section_name(section, name)
- if not self.has_section(sn):
- self.add_section(sn)
-
- # Allow valueless config options to be set like this:
- # spack config set mirror https://foo.bar.com
- #
- # Instead of this, which parses incorrectly:
- # spack config set mirror.https://foo.bar.com
- #
- if option is None:
- option = value
- value = None
-
- self.set(sn, option, value)
-
-
- @_autokey
- def get_value(self, section, name, option):
- """Get the value for a key. Raises NoOptionError or NoSectionError if
- the key is not present."""
- sn = _make_section_name(section, name)
-
+ _compiler_by_arch[arch] = {}
+ return _compiler_by_arch[arch]
+
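+# Sketch: with the compilers.yaml from the module docstring,
+# get_compilers_config('chaos_5_x86_64_ib') returns the subtree
+# {'gcc@4.4.7': {'cc': '/usr/bin/gcc', ...}}, with the arch level
+# stripped off and any 'all' entries merged in.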
+
+def get_mirror_config():
+ """Get the mirror configuration from config files"""
+ return get_config('mirrors')
+
+
+def get_config_scope_dirname(scope):
+ """For a scope return the config directory"""
+ global config_scopes
+ for s,p in config_scopes:
+ if s == scope:
+ return p
+ tty.die("Unknown scope %s. Valid options are %s" %
+ (scope, ", ".join([s for s,p in config_scopes])))
+
+
+def get_config_scope_filename(scope, category_name):
+ """For some scope and category, get the name of the configuration file"""
+ if not category_name in _config_sections:
+ tty.die("Unknown config category %s. Valid options are: %s" %
+ (category_name, ", ".join([s for s in _config_sections])))
+ return os.path.join(get_config_scope_dirname(scope), _config_sections[category_name].filename)
+
+
+def add_to_config(category_name, addition_dict, scope=None):
+ """Merge a new dict into a configuration tree and write the new
+ configuration to disk"""
+ global _read_config_file_result
+ get_config(category_name)
+ category = _config_sections[category_name]
+
+ # If scope is specified, use it. Otherwise use the last config scope that
+ # we successfully parsed data from.
+ file = None
+ path = None
+ if not scope and not category.files_read_from:
+ scope = 'user'
+ if scope:
try:
- if not option:
- # TODO: format this better
- return self.items(sn)
-
- return self.get(sn, option)
-
- # Wrap ConfigParser exceptions in SpackExceptions
- except cp.NoOptionError, e: raise NoOptionError(e)
- except cp.NoSectionError, e: raise NoSectionError(e)
- except cp.Error, e: raise ConfigParserError(e)
-
-
- @_autokey
- def has_value(self, section, name, option):
- """Return whether the configuration file has a value for a
- particular key."""
- sn = _make_section_name(section, name)
- return self.has_option(sn, option)
-
-
- def has_named_section(self, section, name):
- sn = _make_section_name(section, name)
- return self.has_section(sn)
-
-
- def remove_named_section(self, section, name):
- sn = _make_section_name(section, name)
- self.remove_section(sn)
-
-
- def get_section_names(self, sectype):
- """Get all named sections with the specified type.
- A named section looks like this:
-
- [compiler "gcc@4.7"]
-
- Names of sections are returned as a list, e.g.:
-
- ['gcc@4.7', 'intel@12.3', 'pgi@4.2']
-
- You can get items in the sections like this:
- """
- sections = []
- for secname in self.sections():
- match = re.match(_named_section_re, secname)
- if match:
- t, name = match.groups()
- if t == sectype:
- sections.append(name)
- return sections
-
-
- def write(self, path_or_fp=None):
- """Write this configuration out to a file.
-
- If called with no arguments, this will write the
- configuration out to the file from which it was read. If
- this config was read from multiple files, e.g. site
- configuration and then user configuration, write will
- simply raise an error.
-
- If called with a path or file object, this will write the
- configuration out to the supplied path or file object.
- """
- if path_or_fp is None:
- if not self.filename:
- raise ReadOnlySpackConfigError()
- path_or_fp = self.filename
-
- if isinstance(path_or_fp, basestring):
- path_or_fp = open(path_or_fp, 'w')
-
- self._write(path_or_fp)
-
-
- def _read(self, fp, fpname):
- """This is a copy of Python 2.6's _read() method, with support for
- continuation lines removed."""
- cursect = None # None, or a dictionary
- optname = None
- comment = 0
- lineno = 0
- e = None # None, or an exception
- while True:
- line = fp.readline()
- if not line:
- break
- lineno = lineno + 1
- # comment or blank line?
- if ((line.strip() == '' or line[0] in '#;') or
- (line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR")):
- self._sections["comment-%d" % comment] = line
- comment += 1
- # a section header or option header?
- else:
- # is it a section header?
- mo = self.SECTCRE.match(line)
- if mo:
- sectname = mo.group('header')
- if sectname in self._sections:
- cursect = self._sections[sectname]
- elif sectname == cp.DEFAULTSECT:
- cursect = self._defaults
- else:
- cursect = self._dict()
- cursect['__name__'] = sectname
- self._sections[sectname] = cursect
- # So sections can't start with a continuation line
- optname = None
- # no section header in the file?
- elif cursect is None:
- raise cp.MissingSectionHeaderError(fpname, lineno, line)
- # an option line?
- else:
- mo = self.OPTCRE.match(line)
- if mo:
- optname, vi, optval = mo.group('option', 'vi', 'value')
- if vi in ('=', ':') and ';' in optval:
- # ';' is a comment delimiter only if it follows
- # a spacing character
- pos = optval.find(';')
- if pos != -1 and optval[pos-1].isspace():
- optval = optval[:pos]
- optval = optval.strip()
- # allow empty values
- if optval == '""':
- optval = ''
- optname = self.optionxform(optname.rstrip())
- cursect[optname] = optval
- else:
- # a non-fatal parsing error occurred. set up the
- # exception but keep going. the exception will be
- # raised at the end of the file and will contain a
- # list of all bogus lines
- if not e:
- e = cp.ParsingError(fpname)
- e.append(lineno, repr(line))
- # if any parsing errors occurred, raise an exception
- if e:
- raise e
-
-
-
-
- def _write(self, fp):
- """Write an .ini-format representation of the configuration state.
-
- This is taken from the default Python 2.6 source. It writes 4
- spaces at the beginning of lines instead of no leading space.
- """
- if self._defaults:
- fp.write("[%s]\n" % cp.DEFAULTSECT)
- for (key, value) in self._defaults.items():
- fp.write(" %s = %s\n" % (key, str(value).replace('\n', '\n\t')))
- fp.write("\n")
-
- for section in self._sections:
- # Handles comments and blank lines.
- if isinstance(self._sections[section], basestring):
- fp.write(self._sections[section])
- continue
-
- else:
- # Allow leading whitespace
- fp.write("[%s]\n" % section)
- for (key, value) in self._sections[section].items():
- if key != "__name__":
- fp.write(" %s = %s\n" %
- (key, str(value).replace('\n', '\n\t')))
-
-
-class SpackConfigurationError(spack.error.SpackError):
- def __init__(self, *args):
- super(SpackConfigurationError, self).__init__(*args)
-
-
-class InvalidConfigurationScopeError(SpackConfigurationError):
- def __init__(self, scope):
- super(InvalidConfigurationScopeError, self).__init__(
- "Invalid configuration scope: '%s'" % scope,
- "Options are: %s" % ", ".join(*_scopes.values()))
-
-
-class InvalidSectionNameError(SpackConfigurationError):
- """Raised when the name for a section is invalid."""
- def __init__(self, name):
- super(InvalidSectionNameError, self).__init__(
- "Invalid section specifier: '%s'" % name)
-
-
-class ReadOnlySpackConfigError(SpackConfigurationError):
- """Raised when user attempts to write to a config read from multiple files."""
- def __init__(self):
- super(ReadOnlySpackConfigError, self).__init__(
- "Can only write to a single-file SpackConfigParser")
-
-
-class ConfigParserError(SpackConfigurationError):
- """Wrapper for the Python ConfigParser's errors"""
- def __init__(self, error):
- super(ConfigParserError, self).__init__(str(error))
- self.error = error
-
-
-class NoOptionError(ConfigParserError):
- """Wrapper for ConfigParser NoOptionError"""
- def __init__(self, error):
- super(NoOptionError, self).__init__(error)
-
-
-class NoSectionError(ConfigParserError):
- """Wrapper for ConfigParser NoOptionError"""
- def __init__(self, error):
- super(NoSectionError, self).__init__(error)
+ dir = get_config_scope_dirname(scope)
+ if not os.path.exists(dir):
+ mkdirp(dir)
+ path = os.path.join(dir, category.filename)
+ file = open(path, 'w')
+ except exceptions.IOError, e:
+ pass
+ else:
+ for p in category.files_read_from:
+ try:
+ file = open(p, 'w')
+ except exceptions.IOError, e:
+ pass
+ if file:
+ path = p
+ break
+ if not file:
+ tty.die('Unable to write to config file %s' % path)
+
+ # Merge the new information into the existing file info, then write to disk.
+ new_dict = _read_config_file_result[path]
+ if new_dict and category_name in new_dict:
+ new_dict = new_dict[category_name]
+ new_dict = _merge_dicts(new_dict, addition_dict)
+ new_dict = { category_name : new_dict }
+ _read_config_file_result[path] = new_dict
+ yaml.dump(new_dict, stream=file, default_flow_style=False)
+ file.close()
+
+ # Merge the new information into the cached results.
+ category.result_dict = _merge_dicts(category.result_dict, addition_dict)
+
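+# For instance (sketch), add_to_config('mirrors', {'local': 'file:///tmp/mirror'})
+# merges the entry into the highest-precedence mirrors.yaml already read,
+# or creates one under ~/.spack if no scope has a mirrors file yet.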
+
+def add_to_mirror_config(addition_dict, scope=None):
+ """Add mirrors to the configuration files"""
+ add_to_config('mirrors', addition_dict, scope)
+
+
+def add_to_compiler_config(addition_dict, scope=None, arch=None):
+ """Add compilerss to the configuration files"""
+ if not arch:
+ arch = spack.architecture.sys_type()
+ add_to_config('compilers', { arch : addition_dict }, scope)
+ clear_config_caches()
+
+
+def remove_from_config(category_name, key_to_rm, scope=None):
+ """Remove a configuration key and write a new configuration to disk"""
+ global config_scopes
+ get_config(category_name)
+ scopes_to_rm_from = [scope] if scope else [s for s,p in config_scopes]
+ category = _config_sections[category_name]
+
+ rmd_something = False
+ for s in scopes_to_rm_from:
+ path = get_config_scope_filename(s, category_name)
+ result = _read_config_file(path)
+ if not result:
+ continue
+ if not key_to_rm in result[category_name]:
+ continue
+ with closing(open(path, 'w')) as f:
+ result[category_name].pop(key_to_rm, None)
+ yaml.dump(result, stream=f, default_flow_style=False)
+ category.result_dict.pop(key_to_rm, None)
+ rmd_something = True
+ return rmd_something
+
+
+"""Print a configuration to stdout"""
+def print_category(category_name):
+ if not category_name in _config_sections:
+ tty.die("Unknown config category %s. Valid options are: %s" %
+ (category_name, ", ".join([s for s in _config_sections])))
+ yaml.dump(get_config(category_name), stream=sys.stdout, default_flow_style=False)
+
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
new file mode 100644
index 0000000000..9297d6dac3
--- /dev/null
+++ b/lib/spack/spack/directives.py
@@ -0,0 +1,277 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""This package contains directives that can be used within a package.
+
+Directives are functions that can be called inside a package
+definition to modify the package, for example:
+
+ class OpenMpi(Package):
+ depends_on("hwloc")
+ provides("mpi")
+ ...
+
+``provides`` and ``depends_on`` are spack directives.
+
+The available directives are:
+
+ * ``version``
+ * ``depends_on``
+ * ``provides``
+ * ``extends``
+ * ``patch``
+ * ``variant``
+
+"""
+__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version',
+ 'variant' ]
+
+import re
+import inspect
+
+from llnl.util.lang import *
+
+import spack
+import spack.spec
+import spack.error
+import spack.url
+from spack.version import Version
+from spack.patch import Patch
+from spack.variant import Variant
+from spack.spec import Spec, parse_anonymous_spec
+
+
+#
+# This is a list of all directives, built up as they are defined in
+# this file.
+#
+directives = {}
+
+
+def ensure_dicts(pkg):
+ """Ensure that a package has all the dicts required by directives."""
+ for name, d in directives.items():
+ d.ensure_dicts(pkg)
+
+
+class directive(object):
+ """Decorator for Spack directives.
+
+ Spack directives allow you to modify a package while it is being
+ defined, e.g. to add version or depenency information. Directives
+ are one of the key pieces of Spack's package "langauge", which is
+ embedded in python.
+
+ Here's an example directive:
+
+ @directive(dicts='versions')
+ def version(pkg, ...):
+ ...
+
+ This directive allows you to write:
+
+ class Foo(Package):
+ version(...)
+
+ The ``@directive`` decorator handles a couple things for you:
+
+ 1. Adds the class scope (pkg) as an initial parameter when
+ called, like a class method would. This allows you to modify
+ a package from within a directive, while the package is still
+ being defined.
+
+ 2. It automatically adds a dictionary called "versions" to the
+ package so that you can refer to pkg.versions.
+
+ The ``(dicts='versions')`` part ensures that ALL packages in Spack
+ will have a ``versions`` attribute after they're constructed, and
+ that if no directive actually modified it, it will just be an
+ empty dict.
+
+ This is just a modular way to add storage attributes to the
+ Package class, and it's how Spack gets information from the
+ packages to the core.
+
+ """
+
+ def __init__(self, dicts=None):
+ if isinstance(dicts, basestring):
+ dicts = (dicts,)
+ elif type(dicts) not in (list, tuple):
+ raise TypeError(
+ "dicts arg must be list, tuple, or string. Found %s."
+ % type(dicts))
+
+ self.dicts = dicts
+
+
+ def ensure_dicts(self, pkg):
+ """Ensure that a package has the dicts required by this directive."""
+ for d in self.dicts:
+ if not hasattr(pkg, d):
+ setattr(pkg, d, {})
+
+ attr = getattr(pkg, d)
+ if not isinstance(attr, dict):
+ raise spack.error.SpackError(
+ "Package %s has non-dict %s attribute!" % (pkg, d))
+
+
+ def __call__(self, directive_function):
+ directives[directive_function.__name__] = self
+
+ def wrapped(*args, **kwargs):
+ pkg = DictWrapper(caller_locals())
+ self.ensure_dicts(pkg)
+
+ pkg.name = get_calling_module_name()
+ return directive_function(pkg, *args, **kwargs)
+
+ return wrapped
+
+
+@directive('versions')
+def version(pkg, ver, checksum=None, **kwargs):
+ """Adds a version and metadata describing how to fetch it.
+ Metadata is just stored as a dict in the package's versions
+ dictionary. Package must turn it into a valid fetch strategy
+ later.
+ """
+ # TODO: checksum vs md5 distinction is confusing -- fix this.
+ # special case checksum for backward compatibility
+ if checksum:
+ kwargs['md5'] = checksum
+
+ # Store kwargs for the package to construct a fetch strategy with later.
+ pkg.versions[Version(ver)] = kwargs
+
+
+def _depends_on(pkg, spec, when=None):
+ if when is None:
+ when = pkg.name
+ when_spec = parse_anonymous_spec(when, pkg.name)
+
+ dep_spec = Spec(spec)
+ if pkg.name == dep_spec.name:
+ raise CircularReferenceError('depends_on', pkg.name)
+
+ conditions = pkg.dependencies.setdefault(dep_spec.name, {})
+ if when_spec in conditions:
+ conditions[when_spec].constrain(dep_spec, deps=False)
+ else:
+ conditions[when_spec] = dep_spec
+
+
+@directive('dependencies')
+def depends_on(pkg, spec, when=None):
+ """Creates a dict of deps with specs defining when they apply."""
+ _depends_on(pkg, spec, when=when)
+
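+# Example (sketch) of conditional dependencies in a package definition;
+# each dependency applies only when the package's spec satisfies its
+# 'when' condition:
+#
+#     depends_on('libdwarf', when='+dwarf')
+#     depends_on('mpi@2:', when='@1.2:')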
+
+@directive(('extendees', 'dependencies'))
+def extends(pkg, spec, **kwargs):
+ """Same as depends_on, but dependency is symlinked into parent prefix.
+
+ This is for Python and other language modules where the module
+ needs to be installed into the prefix of the Python installation.
+ Spack handles this by installing modules into their own prefix,
+ but allowing ONE module version to be symlinked into a parent
+ Python install at a time.
+
+ Keyword arguments can be passed to extends() so that extension
+ packages can pass parameters to the extendee's extension
+ mechanism.
+
+ """
+ if pkg.extendees:
+ raise DirectiveError("Packages can extend at most one other package.")
+
+ when = kwargs.pop('when', pkg.name)
+ _depends_on(pkg, spec, when=when)
+ pkg.extendees[spec] = (Spec(spec), kwargs)
+
+
+@directive('provided')
+def provides(pkg, *specs, **kwargs):
+ """Allows packages to provide a virtual dependency. If a package provides
+ 'mpi', other packages can declare that they depend on "mpi", and spack
+ can use the providing package to satisfy the dependency.
+ """
+ spec_string = kwargs.get('when', pkg.name)
+ provider_spec = parse_anonymous_spec(spec_string, pkg.name)
+
+ for string in specs:
+ for provided_spec in spack.spec.parse(string):
+ if pkg.name == provided_spec.name:
+                raise CircularReferenceError('provides', pkg.name)
+ pkg.provided[provided_spec] = provider_spec
+
+
+@directive('patches')
+def patch(pkg, url_or_filename, level=1, when=None):
+ """Packages can declare patches to apply to source. You can
+ optionally provide a when spec to indicate that a particular
+ patch should only be applied when the package's spec meets
+ certain conditions (e.g. a particular version).
+ """
+ if when is None:
+ when = pkg.name
+ when_spec = parse_anonymous_spec(when, pkg.name)
+
+ if when_spec not in pkg.patches:
+ pkg.patches[when_spec] = [Patch(pkg.name, url_or_filename, level)]
+ else:
+ # if this spec is identical to some other, then append this
+ # patch to the existing list.
+ pkg.patches[when_spec].append(Patch(pkg.name, url_or_filename, level))
+
+
+@directive('variants')
+def variant(pkg, name, default=False, description=""):
+ """Define a variant for the package. Packager can specify a default
+ value (on or off) as well as a text description."""
+
+ default = bool(default)
+ description = str(description).strip()
+
+ if not re.match(spack.spec.identifier_re, name):
+ raise DirectiveError("Invalid variant name in %s: '%s'" % (pkg.name, name))
+
+ pkg.variants[name] = Variant(default, description)
+
+
+class DirectiveError(spack.error.SpackError):
+ """This is raised when something is wrong with a package directive."""
+ def __init__(self, directive, message):
+ super(DirectiveError, self).__init__(message)
+ self.directive = directive
+
+
+class CircularReferenceError(DirectiveError):
+ """This is raised when something depends on itself."""
+ def __init__(self, directive, package):
+ super(CircularReferenceError, self).__init__(
+ directive,
+ "Package '%s' cannot pass itself to %s." % (package, directive))
+ self.package = package
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index b2cf5dc801..fe02fff3b0 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -27,8 +27,9 @@ import os
import exceptions
import hashlib
import shutil
+import glob
import tempfile
-from contextlib import closing
+from external import yaml
import llnl.util.tty as tty
from llnl.util.lang import memoized
@@ -81,7 +82,7 @@ class DirectoryLayout(object):
raise NotImplementedError()
- def make_path_for_spec(self, spec):
+ def create_install_directory(self, spec):
"""Creates the installation directory for a spec."""
raise NotImplementedError()
@@ -131,7 +132,7 @@ class DirectoryLayout(object):
return os.path.join(self.root, path)
- def remove_path_for_spec(self, spec):
+ def remove_install_directory(self, spec):
"""Removes a prefix and any empty parent directories from the root.
        Raises RemoveFailedError if something goes wrong.
"""
@@ -153,94 +154,70 @@ class DirectoryLayout(object):
path = os.path.dirname(path)
-def traverse_dirs_at_depth(root, depth, path_tuple=(), curdepth=0):
- """For each directory at <depth> within <root>, return a tuple representing
- the ancestors of that directory.
- """
- if curdepth == depth and curdepth != 0:
- yield path_tuple
- elif depth > curdepth:
- for filename in os.listdir(root):
- child = os.path.join(root, filename)
- if os.path.isdir(child):
- child_tuple = path_tuple + (filename,)
- for tup in traverse_dirs_at_depth(
- child, depth, child_tuple, curdepth+1):
- yield tup
-
-
-class SpecHashDirectoryLayout(DirectoryLayout):
+class YamlDirectoryLayout(DirectoryLayout):
"""Lays out installation directories like this::
- <install_root>/
+ <install root>/
<architecture>/
- <compiler>/
- name@version+variant-<dependency_hash>
-
- Where dependency_hash is a SHA-1 hash prefix for the full package spec.
- This accounts for dependencies.
+ <compiler>-<compiler version>/
+ <name>-<version>-<variants>-<hash>
- If there is ever a hash collision, you won't be able to install a new
- package unless you use a larger prefix. However, the full spec is stored
- in a file called .spec in each directory, so you can migrate an entire
- install directory to a new hash size pretty easily.
+ The hash here is a SHA-1 hash for the full DAG plus the build
+ spec. TODO: implement the build spec.
- TODO: make a tool to migrate install directories to different hash sizes.
+ To avoid special characters (like ~) in the directory name,
+ only enabled variants are included in the install path.
+ Disabled variants are omitted.
"""
def __init__(self, root, **kwargs):
- """Prefix size is number of characters in the SHA-1 prefix to use
- to make each hash unique.
- """
- spec_file_name = kwargs.get('spec_file_name', '.spec')
- extension_file_name = kwargs.get('extension_file_name', '.extensions')
- super(SpecHashDirectoryLayout, self).__init__(root)
- self.spec_file_name = spec_file_name
- self.extension_file_name = extension_file_name
+ super(YamlDirectoryLayout, self).__init__(root)
+ self.metadata_dir = kwargs.get('metadata_dir', '.spack')
+ self.hash_len = kwargs.get('hash_len', None)
+
+ self.spec_file_name = 'spec.yaml'
+ self.extension_file_name = 'extensions.yaml'
# Cache of already written/read extension maps.
self._extension_maps = {}
+
@property
def hidden_file_paths(self):
- return ('.spec', '.extensions')
+ return (self.metadata_dir,)
def relative_path_for_spec(self, spec):
_check_concrete(spec)
- dir_name = spec.format('$_$@$+$#')
- return join_path(spec.architecture, spec.compiler, dir_name)
+ enabled_variants = (
+ '-' + v.name for v in spec.variants.values()
+ if v.enabled)
+
+ dir_name = "%s-%s%s-%s" % (
+ spec.name,
+ spec.version,
+ ''.join(enabled_variants),
+ spec.dag_hash(self.hash_len))
+
+ path = join_path(
+ spec.architecture,
+ "%s-%s" % (spec.compiler.name, spec.compiler.version),
+ dir_name)
+
+ return path
def write_spec(self, spec, path):
"""Write a spec out to a file."""
- with closing(open(path, 'w')) as spec_file:
- spec_file.write(spec.tree(ids=False, cover='nodes'))
+ _check_concrete(spec)
+ with open(path, 'w') as f:
+ spec.to_yaml(f)
def read_spec(self, path):
"""Read the contents of a file and parse them as a spec"""
- with closing(open(path)) as spec_file:
- # Specs from files are assumed normal and concrete
- spec = Spec(spec_file.read().replace('\n', ''))
-
- if all(spack.db.exists(s.name) for s in spec.traverse()):
- copy = spec.copy()
-
- # TODO: It takes a lot of time to normalize every spec on read.
- # TODO: Storing graph info with spec files would fix this.
- copy.normalize()
- if copy.concrete:
- return copy # These are specs spack still understands.
-
- # If we get here, either the spec is no longer in spack, or
- # something about its dependencies has changed. So we need to
- # just assume the read spec is correct. We'll lose graph
- # information if we do this, but this is just for best effort
- # for commands like uninstall and find. Currently Spack
- # doesn't do anything that needs the graph info after install.
-
- # TODO: store specs with full connectivity information, so
- # that we don't have to normalize or reconstruct based on
- # changing dependencies in the Spack tree.
+ with open(path) as f:
+ spec = Spec.from_yaml(f)
+
+ # Specs read from actual installations are always concrete
spec._normal = True
spec._concrete = True
return spec
@@ -249,10 +226,14 @@ class SpecHashDirectoryLayout(DirectoryLayout):
def spec_file_path(self, spec):
"""Gets full path to spec file"""
_check_concrete(spec)
- return join_path(self.path_for_spec(spec), self.spec_file_name)
+ return join_path(self.metadata_path(spec), self.spec_file_name)
+
+ def metadata_path(self, spec):
+ return join_path(self.path_for_spec(spec), self.metadata_dir)
- def make_path_for_spec(self, spec):
+
+ def create_install_directory(self, spec):
_check_concrete(spec)
path = self.path_for_spec(spec)
@@ -267,16 +248,13 @@ class SpecHashDirectoryLayout(DirectoryLayout):
             if installed_spec == spec:
raise InstallDirectoryAlreadyExistsError(path)
- spec_hash = self.hash_spec(spec)
- installed_hash = self.hash_spec(installed_spec)
- if installed_spec == spec_hash:
+ if spec.dag_hash() == installed_spec.dag_hash():
                 raise SpecHashCollisionError(installed_spec, spec)
else:
raise InconsistentInstallDirectoryError(
- 'Spec file in %s does not match SHA-1 hash!'
- % spec_file_path)
+ 'Spec file in %s does not match hash!' % spec_file_path)
- mkdirp(path)
+ mkdirp(self.metadata_path(spec))
self.write_spec(spec, spec_file_path)
@@ -285,25 +263,50 @@ class SpecHashDirectoryLayout(DirectoryLayout):
if not os.path.isdir(self.root):
return []
- specs = []
- for path in traverse_dirs_at_depth(self.root, 3):
- arch, compiler, last_dir = path
- spec_file_path = join_path(
- self.root, arch, compiler, last_dir, self.spec_file_name)
- if os.path.exists(spec_file_path):
- spec = self.read_spec(spec_file_path)
- specs.append(spec)
- return specs
+ pattern = join_path(
+ self.root, '*', '*', '*', self.metadata_dir, self.spec_file_name)
+ spec_files = glob.glob(pattern)
+ return [self.read_spec(s) for s in spec_files]
+
+
+ @memoized
+ def specs_by_hash(self):
+ by_hash = {}
+ for spec in self.all_specs():
+ by_hash[spec.dag_hash()] = spec
+ return by_hash
def extension_file_path(self, spec):
"""Gets full path to an installed package's extension file"""
_check_concrete(spec)
- return join_path(self.path_for_spec(spec), self.extension_file_name)
+ return join_path(self.metadata_path(spec), self.extension_file_name)
+
+
+ def _write_extensions(self, spec, extensions):
+ path = self.extension_file_path(spec)
+
+ # Create a temp file in the same directory as the actual file.
+ dirname, basename = os.path.split(path)
+ tmp = tempfile.NamedTemporaryFile(
+ prefix=basename, dir=dirname, delete=False)
+
+ # write tmp file
+ with tmp:
+ yaml.dump({
+ 'extensions' : [
+ { ext.name : {
+ 'hash' : ext.dag_hash(),
+ 'path' : str(ext.prefix)
+ }} for ext in sorted(extensions.values())]
+ }, tmp, default_flow_style=False)
+
+ # Atomic update by moving tmpfile on top of old one.
+ os.rename(tmp.name, path)
def _extension_map(self, spec):
- """Get a dict<name -> spec> for all extensions currnetly
+ """Get a dict<name -> spec> for all extensions currently
installed for this package."""
_check_concrete(spec)
@@ -313,16 +316,26 @@ class SpecHashDirectoryLayout(DirectoryLayout):
self._extension_maps[spec] = {}
else:
+ by_hash = self.specs_by_hash()
exts = {}
- with closing(open(path)) as ext_file:
- for line in ext_file:
- try:
- spec = Spec(line.strip())
- exts[spec.name] = spec
- except spack.error.SpackError, e:
- # TODO: do something better here -- should be
- # resilient to corrupt files.
- raise InvalidExtensionSpecError(str(e))
+ with open(path) as ext_file:
+ yaml_file = yaml.load(ext_file)
+ for entry in yaml_file['extensions']:
+ name = next(iter(entry))
+ dag_hash = entry[name]['hash']
+ prefix = entry[name]['path']
+
+                if dag_hash not in by_hash:
+ raise InvalidExtensionSpecError(
+ "Spec %s not found in %s." % (dag_hash, prefix))
+
+ ext_spec = by_hash[dag_hash]
+                if prefix != ext_spec.prefix:
+ raise InvalidExtensionSpecError(
+ "Prefix %s does not match spec with hash %s: %s"
+ % (prefix, dag_hash, ext_spec))
+
+ exts[ext_spec.name] = ext_spec
self._extension_maps[spec] = exts
return self._extension_maps[spec]
@@ -330,6 +343,7 @@ class SpecHashDirectoryLayout(DirectoryLayout):
def extension_map(self, spec):
"""Defensive copying version of _extension_map() for external API."""
+ _check_concrete(spec)
return self._extension_map(spec).copy()
@@ -349,23 +363,6 @@ class SpecHashDirectoryLayout(DirectoryLayout):
raise NoSuchExtensionError(spec, ext_spec)
- def _write_extensions(self, spec, extensions):
- path = self.extension_file_path(spec)
-
- # Create a temp file in the same directory as the actual file.
- dirname, basename = os.path.split(path)
- tmp = tempfile.NamedTemporaryFile(
- prefix=basename, dir=dirname, delete=False)
-
- # Write temp file.
- with closing(tmp):
- for extension in sorted(extensions.values()):
- tmp.write("%s\n" % extension)
-
- # Atomic update by moving tmpfile on top of old one.
- os.rename(tmp.name, path)
-
-
def add_extension(self, spec, ext_spec):
_check_concrete(spec)
_check_concrete(ext_spec)
@@ -399,9 +396,9 @@ class DirectoryLayoutError(SpackError):
class SpecHashCollisionError(DirectoryLayoutError):
- """Raised when there is a hash collision in an SpecHashDirectoryLayout."""
+ """Raised when there is a hash collision in an install layout."""
def __init__(self, installed_spec, new_spec):
- super(SpecHashDirectoryLayout, self).__init__(
+ super(SpecHashCollisionError, self).__init__(
'Specs %s and %s have the same SHA-1 prefix!'
            % (installed_spec, new_spec))
@@ -422,7 +419,7 @@ class InconsistentInstallDirectoryError(DirectoryLayoutError):
class InstallDirectoryAlreadyExistsError(DirectoryLayoutError):
- """Raised when make_path_for_sec is called unnecessarily."""
+ """Raised when create_install_directory is called unnecessarily."""
def __init__(self, path):
super(InstallDirectoryAlreadyExistsError, self).__init__(
"Install path %s already exists!")
@@ -455,5 +452,3 @@ class NoSuchExtensionError(DirectoryLayoutError):
super(NoSuchExtensionError, self).__init__(
"%s cannot be removed from %s because it's not activated."% (
ext_spec.short_spec, spec.short_spec))
-
-
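A rough sketch of what the new layout yields on disk; the install root, spec, and hash below are illustrative, and assume a package that defines a debug variant:

    layout = YamlDirectoryLayout('/opt/spack/opt')    # hypothetical install root
    spec = Spec('mpich@3.0.4+debug %gcc@4.8.2 =linux-x86_64')
    spec.concretize()
    print layout.relative_path_for_spec(spec)
    # linux-x86_64/gcc-4.8.2/mpich-3.0.4-debug-<32-char base32 DAG hash>

Under that prefix, the spec itself is recorded in .spack/spec.yaml and activated extensions in .spack/extensions.yaml, so all metadata hides behind the single metadata_dir entry.
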
diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py
index 974401e1aa..892619c6ac 100644
--- a/lib/spack/spack/multimethod.py
+++ b/lib/spack/spack/multimethod.py
@@ -195,7 +195,7 @@ class when(object):
"""
class when(object):
def __init__(self, spec):
- pkg = get_calling_package_name()
+ pkg = get_calling_module_name()
self.spec = parse_anonymous_spec(spec, pkg)
def __call__(self, method):
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 7d9eca5077..452544be49 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -50,11 +50,11 @@ from llnl.util.filesystem import *
from llnl.util.lang import *
import spack
-import spack.spec
import spack.error
import spack.compilers
import spack.mirror
import spack.hooks
+import spack.directives
import spack.build_environment as build_env
import spack.url as url
import spack.fetch_strategy as fs
@@ -301,32 +301,6 @@ class Package(object):
clean() (some of them do this), and others to provide custom behavior.
"""
-
- #
- # These variables are defaults for the various "relations".
- #
- """Map of information about Versions of this package.
- Map goes: Version -> dict of attributes"""
- versions = {}
-
- """Specs of dependency packages, keyed by name."""
- dependencies = {}
-
- """Specs of virtual packages provided by this package, keyed by name."""
- provided = {}
-
- """Specs of conflicting packages, keyed by name. """
- conflicted = {}
-
- """Patches to apply to newly expanded source, if any."""
- patches = {}
-
- """Specs of package this one extends, or None.
-
- Currently, ppackages can extend at most one other package.
- """
- extendees = {}
-
#
# These are default values for instance variables.
#
@@ -350,20 +324,8 @@ class Package(object):
if '.' in self.name:
self.name = self.name[self.name.rindex('.') + 1:]
- # Sanity check some required variables that could be
- # overridden by package authors.
- def ensure_has_dict(attr_name):
- if not hasattr(self, attr_name):
- raise PackageError("Package %s must define %s" % attr_name)
-
- attr = getattr(self, attr_name)
- if not isinstance(attr, dict):
- raise PackageError("Package %s has non-dict %s attribute!"
- % (self.name, attr_name))
- ensure_has_dict('versions')
- ensure_has_dict('dependencies')
- ensure_has_dict('conflicted')
- ensure_has_dict('patches')
+ # Sanity check attributes required by Spack directives.
+ spack.directives.ensure_dicts(type(self))
# Check versions in the versions dict.
for v in self.versions:
@@ -577,41 +539,6 @@ class Package(object):
yield pkg
- def validate_dependencies(self):
- """Ensure that this package and its dependencies all have consistent
- constraints on them.
-
- NOTE that this will NOT find sanity problems through a virtual
- dependency. Virtual deps complicate the problem because we
- don't know in advance which ones conflict with others in the
- dependency DAG. If there's more than one virtual dependency,
- it's a full-on SAT problem, so hold off on this for now.
- The vdeps are actually skipped in preorder_traversal, so see
- that for details.
-
- TODO: investigate validating virtual dependencies.
- """
- # This algorithm just attempts to merge all the constraints on the same
- # package together, loses information about the source of the conflict.
- # What we'd really like to know is exactly which two constraints
- # conflict, but that algorithm is more expensive, so we'll do it
- # the simple, less informative way for now.
- merged = spack.spec.DependencyMap()
-
- try:
- for pkg in self.preorder_traversal():
- for name, spec in pkg.dependencies.iteritems():
- if name not in merged:
- merged[name] = spec.copy()
- else:
- merged[name].constrain(spec)
-
- except spack.spec.UnsatisfiableSpecError, e:
- raise InvalidPackageDependencyError(
- "Package %s has inconsistent dependency constraints: %s"
- % (self.name, e.message))
-
-
def provides(self, vpkg_name):
"""True if this package provides a virtual package with the specified name."""
return vpkg_name in self.provided
@@ -664,7 +591,7 @@ class Package(object):
def remove_prefix(self):
"""Removes the prefix for a package along with any empty parent directories."""
- spack.install_layout.remove_path_for_spec(self.spec)
+ spack.install_layout.remove_install_directory(self.spec)
def do_fetch(self):
@@ -820,7 +747,7 @@ class Package(object):
# create the install directory. The install layout
# handles this in case so that it can use whatever
# package naming scheme it likes.
- spack.install_layout.make_path_for_spec(self.spec)
+ spack.install_layout.create_install_directory(self.spec)
def cleanup():
if not keep_prefix:
@@ -841,11 +768,11 @@ class Package(object):
spack.hooks.pre_install(self)
# Set up process's build environment before running install.
- self.stage.chdir_to_source()
if fake_install:
self.do_fake_install()
else:
# Subclasses implement install() to do the real work.
+ self.stage.chdir_to_source()
self.install(self.spec, self.prefix)
# Ensure that something was actually installed.
@@ -994,16 +921,13 @@ class Package(object):
self._sanity_check_extension()
force = kwargs.get('force', False)
- # TODO: get rid of this normalize - DAG handling.
- self.spec.normalize()
-
- spack.install_layout.check_extension_conflict(self.extendee_spec, self.spec)
+ spack.install_layout.check_extension_conflict(
+ self.extendee_spec, self.spec)
+ # Activate any package dependencies that are also extensions.
if not force:
for spec in self.spec.traverse(root=False):
if spec.package.extends(self.extendee_spec):
- # TODO: fix this normalize() requirement -- revisit DAG handling.
- spec.package.spec.normalize()
if not spec.package.activated:
spec.package.do_activate(**kwargs)
@@ -1031,6 +955,7 @@ class Package(object):
conflict = tree.find_conflict(self.prefix, ignore=ignore)
if conflict:
raise ExtensionConflictError(conflict)
+
tree.merge(self.prefix, ignore=ignore)
@@ -1237,13 +1162,6 @@ class PackageError(spack.error.SpackError):
super(PackageError, self).__init__(message, long_msg)
-class InvalidPackageDependencyError(PackageError):
- """Raised when package specification is inconsistent with requirements of
- its dependencies."""
- def __init__(self, message):
- super(InvalidPackageDependencyError, self).__init__(message)
-
-
class PackageVersionError(PackageError):
"""Raised when a version URL cannot automatically be determined."""
def __init__(self, version):
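The net effect on package.py is that the storage dicts are now guaranteed by the directive machinery instead of being declared (and sanity-checked) by hand. A sketch of the guarantee, using a hypothetical package that uses no directives at all:

    class Minimal(Package):
        """Declares nothing; no directive ever fires."""
        pass

    # Package.__init__ now calls spack.directives.ensure_dicts(type(self)),
    # so the directive-backed attributes exist and are empty, not missing:
    spack.directives.ensure_dicts(Minimal)
    assert Minimal.versions == {}
    assert Minimal.dependencies == {}
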
diff --git a/lib/spack/spack/relations.py b/lib/spack/spack/relations.py
deleted file mode 100644
index a0c7723473..0000000000
--- a/lib/spack/spack/relations.py
+++ /dev/null
@@ -1,215 +0,0 @@
-##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://scalability-llnl.github.io/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (as published by
-# the Free Software Foundation) version 2.1 dated February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-"""
-This package contains relationships that can be defined among packages.
-Relations are functions that can be called inside a package definition,
-for example:
-
- class OpenMPI(Package):
- depends_on("hwloc")
- provides("mpi")
- ...
-
-The available relations are:
-
-depends_on
- Above, the OpenMPI package declares that it "depends on" hwloc. This means
- that the hwloc package needs to be installed before OpenMPI can be
- installed. When a user runs 'spack install openmpi', spack will fetch
- hwloc and install it first.
-
-provides
- This is useful when more than one package can satisfy a dependence. Above,
- OpenMPI declares that it "provides" mpi. Other implementations of the MPI
- interface, like mvapich and mpich, also provide mpi, e.g.:
-
- class Mvapich(Package):
- provides("mpi")
- ...
-
- class Mpich(Package):
- provides("mpi")
- ...
-
- Instead of depending on openmpi, mvapich, or mpich, another package can
- declare that it depends on "mpi":
-
- class Mpileaks(Package):
- depends_on("mpi")
- ...
-
- Now the user can pick which MPI they would like to build with when they
- install mpileaks. For example, the user could install 3 instances of
- mpileaks, one for each MPI version, by issuing these three commands:
-
- spack install mpileaks ^openmpi
- spack install mpileaks ^mvapich
- spack install mpileaks ^mpich
-"""
-__all__ = [ 'depends_on', 'extends', 'provides', 'patch', 'version' ]
-
-import re
-import inspect
-
-from llnl.util.lang import *
-
-import spack
-import spack.spec
-import spack.error
-import spack.url
-from spack.version import Version
-from spack.patch import Patch
-from spack.spec import Spec, parse_anonymous_spec
-
-
-
-def version(ver, checksum=None, **kwargs):
- """Adds a version and metadata describing how to fetch it.
- Metadata is just stored as a dict in the package's versions
- dictionary. Package must turn it into a valid fetch strategy
- later.
- """
- pkg = caller_locals()
- versions = pkg.setdefault('versions', {})
-
- # special case checksum for backward compatibility
- if checksum:
- kwargs['md5'] = checksum
-
- # Store the kwargs for the package to use later when constructing
- # a fetch strategy.
- versions[Version(ver)] = kwargs
-
-
-def depends_on(*specs):
- """Adds a dependencies local variable in the locals of
- the calling class, based on args. """
- pkg = get_calling_package_name()
- clocals = caller_locals()
- dependencies = clocals.setdefault('dependencies', {})
-
- for string in specs:
- for spec in spack.spec.parse(string):
- if pkg == spec.name:
- raise CircularReferenceError('depends_on', pkg)
- dependencies[spec.name] = spec
-
-
-def extends(spec, **kwargs):
- """Same as depends_on, but dependency is symlinked into parent prefix.
-
- This is for Python and other language modules where the module
- needs to be installed into the prefix of the Python installation.
- Spack handles this by installing modules into their own prefix,
- but allowing ONE module version to be symlinked into a parent
- Python install at a time.
-
- keyword arguments can be passed to extends() so that extension
- packages can pass parameters to the extendee's extension
- mechanism.
-
- """
- pkg = get_calling_package_name()
- clocals = caller_locals()
- dependencies = clocals.setdefault('dependencies', {})
- extendees = clocals.setdefault('extendees', {})
- if extendees:
- raise RelationError("Packages can extend at most one other package.")
-
- spec = Spec(spec)
- if pkg == spec.name:
- raise CircularReferenceError('extends', pkg)
- dependencies[spec.name] = spec
- extendees[spec.name] = (spec, kwargs)
-
-
-def provides(*specs, **kwargs):
- """Allows packages to provide a virtual dependency. If a package provides
- 'mpi', other packages can declare that they depend on "mpi", and spack
- can use the providing package to satisfy the dependency.
- """
- pkg = get_calling_package_name()
- spec_string = kwargs.get('when', pkg)
- provider_spec = parse_anonymous_spec(spec_string, pkg)
-
- provided = caller_locals().setdefault("provided", {})
- for string in specs:
- for provided_spec in spack.spec.parse(string):
- if pkg == provided_spec.name:
- raise CircularReferenceError('depends_on', pkg)
- provided[provided_spec] = provider_spec
-
-
-def patch(url_or_filename, **kwargs):
- """Packages can declare patches to apply to source. You can
- optionally provide a when spec to indicate that a particular
- patch should only be applied when the package's spec meets
- certain conditions (e.g. a particular version).
- """
- pkg = get_calling_package_name()
- level = kwargs.get('level', 1)
- when_spec = parse_anonymous_spec(kwargs.get('when', pkg), pkg)
-
- patches = caller_locals().setdefault('patches', {})
- if when_spec not in patches:
- patches[when_spec] = [Patch(pkg, url_or_filename, level)]
- else:
- # if this spec is identical to some other, then append this
- # patch to the existing list.
- patches[when_spec].append(Patch(pkg, url_or_filename, level))
-
-
-def conflicts(*specs):
- """Packages can declare conflicts with other packages.
- This can be as specific as you like: use regular spec syntax.
-
- NOT YET IMPLEMENTED.
- """
- # TODO: implement conflicts
- pass
-
-
-class RelationError(spack.error.SpackError):
- """This is raised when something is wrong with a package relation."""
- def __init__(self, relation, message):
- super(RelationError, self).__init__(message)
- self.relation = relation
-
-
-class ScopeError(RelationError):
- """This is raised when a relation is called from outside a spack package."""
- def __init__(self, relation):
- super(ScopeError, self).__init__(
- relation,
- "Must invoke '%s' from inside a class definition!" % relation)
-
-
-class CircularReferenceError(RelationError):
- """This is raised when something depends on itself."""
- def __init__(self, relation, package):
- super(CircularReferenceError, self).__init__(
- relation,
- "Package '%s' cannot pass itself to %s." % (package, relation))
- self.package = package
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index dffdccaddb..aa13f0422c 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
@@ -93,8 +93,11 @@ expansion when it is the first character in an id typed on the command line.
import sys
import itertools
import hashlib
+import base64
from StringIO import StringIO
from operator import attrgetter
+from external import yaml
+from external.yaml.error import MarkedYAMLError
import llnl.util.tty as tty
from llnl.util.lang import *
@@ -110,6 +113,9 @@ from spack.util.string import *
from spack.util.prefix import Prefix
from spack.virtual import ProviderIndex
+# Valid pattern for an identifier in Spack
+identifier_re = r'\w[\w-]*'
+
# Convenient names for color formats so that other things can use them
compiler_color = '@g'
version_color = '@c'
@@ -117,6 +123,7 @@ architecture_color = '@m'
enabled_variant_color = '@B'
disabled_variant_color = '@r'
dependency_color = '@.'
+hash_color = '@K'
"""This map determines the coloring of specs when using color output.
We make the fields different colors to enhance readability.
@@ -126,7 +133,8 @@ color_formats = {'%' : compiler_color,
'=' : architecture_color,
'+' : enabled_variant_color,
'~' : disabled_variant_color,
- '^' : dependency_color }
+ '^' : dependency_color,
+ '#' : hash_color }
"""Regex used for splitting by spec field separators."""
_separators = '[%s]' % ''.join(color_formats.keys())
@@ -214,20 +222,24 @@ class CompilerSpec(object):
return CompilerSpec(compiler_spec_like)
- def satisfies(self, other):
+ def satisfies(self, other, strict=False):
other = self._autospec(other)
return (self.name == other.name and
- self.versions.satisfies(other.versions))
+ self.versions.satisfies(other.versions, strict=strict))
def constrain(self, other):
+ """Intersect self's versions with other.
+
+ Return whether the CompilerSpec changed.
+ """
other = self._autospec(other)
# ensure that other will actually constrain this spec.
if not other.satisfies(self):
raise UnsatisfiableCompilerSpecError(other, self)
- self.versions.intersect(other.versions)
+ return self.versions.intersect(other.versions)
@property
@@ -255,6 +267,18 @@ class CompilerSpec(object):
return (self.name, self.versions)
+ def to_dict(self):
+ d = {'name' : self.name}
+ d.update(self.versions.to_dict())
+ return { 'compiler' : d }
+
+
+ @staticmethod
+ def from_dict(d):
+ d = d['compiler']
+ return CompilerSpec(d['name'], VersionList.from_dict(d))
+
+
def __str__(self):
out = self.name
if self.versions and self.versions != _any_version:
@@ -267,7 +291,7 @@ class CompilerSpec(object):
@key_ordering
-class Variant(object):
+class VariantSpec(object):
"""Variants are named, build-time options for a package. Names depend
on the particular package being built, and each named variant can
be enabled or disabled.
@@ -282,7 +306,7 @@ class Variant(object):
def copy(self):
- return Variant(self.name, self.enabled)
+ return VariantSpec(self.name, self.enabled)
def __str__(self):
@@ -291,9 +315,52 @@ class Variant(object):
class VariantMap(HashableMap):
- def satisfies(self, other):
- return all(self[key].enabled == other[key].enabled
- for key in other if key in self)
+ def __init__(self, spec):
+ super(VariantMap, self).__init__()
+ self.spec = spec
+
+
+ def satisfies(self, other, strict=False):
+ if strict or self.spec._concrete:
+ return all(k in self and self[k].enabled == other[k].enabled
+ for k in other)
+ else:
+ return all(self[k].enabled == other[k].enabled
+ for k in other if k in self)
+
+
+ def constrain(self, other):
+ """Add all variants in other that aren't in self to self.
+
+ Raises an error if any common variants don't match.
+ Return whether the spec changed.
+ """
+ if other.spec._concrete:
+ for k in self:
+ if k not in other:
+ raise UnsatisfiableVariantSpecError(self[k], '<absent>')
+
+ changed = False
+ for k in other:
+ if k in self:
+ if self[k].enabled != other[k].enabled:
+ raise UnsatisfiableVariantSpecError(self[k], other[k])
+ else:
+ self[k] = other[k].copy()
+                changed = True
+ return changed
+
+ @property
+ def concrete(self):
+ return self.spec._concrete or all(
+ v in self for v in self.spec.package.variants)
+
+
+ def copy(self):
+ clone = VariantMap(None)
+ for name, variant in self.items():
+ clone[name] = variant.copy()
+ return clone
def __str__(self):
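
In use, constrain() is a one-way merge that reports whether it changed anything; for instance (debug is a hypothetical variant):

    a = Spec('mpich+debug')
    b = Spec('mpich')

    b.variants.constrain(a.variants)    # copies +debug into b, returns True
    b.variants.constrain(a.variants)    # already consistent, returns False

    # A conflicting value raises instead of returning:
    Spec('mpich~debug').variants.constrain(a.variants)  # UnsatisfiableVariantSpecError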
@@ -340,10 +407,11 @@ class Spec(object):
self.name = other.name
self.dependents = other.dependents
self.versions = other.versions
- self.variants = other.variants
self.architecture = other.architecture
self.compiler = other.compiler
self.dependencies = other.dependencies
+ self.variants = other.variants
+ self.variants.spec = self
# Specs are by default not assumed to be normal, but in some
# cases we've read them from a file want to assume normal.
@@ -372,7 +440,7 @@ class Spec(object):
"""Called by the parser to add a variant."""
if name in self.variants: raise DuplicateVariantError(
"Cannot specify variant '%s' twice" % name)
- self.variants[name] = Variant(name, enabled)
+ self.variants[name] = VariantSpec(name, enabled)
def _set_compiler(self, compiler):
@@ -436,14 +504,15 @@ class Spec(object):
@property
def concrete(self):
"""A spec is concrete if it can describe only ONE build of a package.
- If any of the name, version, architecture, compiler, or depdenencies
- are ambiguous,then it is not concrete.
+ If any of the name, version, architecture, compiler,
+       variants, or dependencies are ambiguous, then it is not concrete.
"""
if self._concrete:
return True
self._concrete = bool(not self.virtual
and self.versions.concrete
+ and self.variants.concrete
and self.architecture
and self.compiler and self.compiler.concrete
and self.dependencies.concrete)
@@ -564,18 +633,91 @@ class Spec(object):
return Prefix(spack.install_layout.path_for_spec(self))
- def dep_hash(self, length=None):
- """Return a hash representing all dependencies of this spec
- (direct and indirect).
+ def dag_hash(self, length=None):
+ """Return a hash of the entire spec DAG, including connectivity."""
+ yaml_text = yaml.dump(
+ self.to_node_dict(), default_flow_style=True, width=sys.maxint)
+ sha = hashlib.sha1(yaml_text)
+ return base64.b32encode(sha.digest()).lower()[:length]
+
+
+ def to_node_dict(self):
+ d = {
+ 'variants' : dict(
+ (name,v.enabled) for name, v in self.variants.items()),
+ 'arch' : self.architecture,
+ 'dependencies' : dict((d, self.dependencies[d].dag_hash())
+ for d in sorted(self.dependencies))
+ }
+ if self.compiler:
+ d.update(self.compiler.to_dict())
+ else:
+ d['compiler'] = None
+ d.update(self.versions.to_dict())
+ return { self.name : d }
+
+
+ def to_yaml(self, stream=None):
+ node_list = []
+ for s in self.traverse(order='pre'):
+ node = s.to_node_dict()
+ node[s.name]['hash'] = s.dag_hash()
+ node_list.append(node)
+ return yaml.dump({ 'spec' : node_list },
+ stream=stream, default_flow_style=False)
+
+
+ @staticmethod
+ def from_node_dict(node):
+ name = next(iter(node))
+ node = node[name]
+
+ spec = Spec(name)
+ spec.versions = VersionList.from_dict(node)
+ spec.architecture = node['arch']
+
+ if node['compiler'] is None:
+ spec.compiler = None
+ else:
+ spec.compiler = CompilerSpec.from_dict(node)
+
+ for name, enabled in node['variants'].items():
+ spec.variants[name] = VariantSpec(name, enabled)
+
+ return spec
+
+
+ @staticmethod
+ def from_yaml(stream):
+ """Construct a spec from YAML.
+
+ Parameters:
+ stream -- string or file object to read from.
+
+ TODO: currently discards hashes. Include hashes when they
+ represent more than the DAG does.
- If you want this hash to be consistent, you should
- concretize the spec first so that it is not ambiguous.
"""
- sha = hashlib.sha1()
- sha.update(self.dep_string())
- full_hash = sha.hexdigest()
+ deps = {}
+ spec = None
- return full_hash[:length]
+ try:
+ yfile = yaml.load(stream)
+ except MarkedYAMLError, e:
+ raise SpackYAMLError("error parsing YMAL spec:", str(e))
+
+ for node in yfile['spec']:
+ name = next(iter(node))
+ dep = Spec.from_node_dict(node)
+ if not spec:
+ spec = dep
+ deps[dep.name] = dep
+
+ for node in yfile['spec']:
+ name = next(iter(node))
+ for dep_name in node[name]['dependencies']:
+ deps[name].dependencies[dep_name] = deps[dep_name]
+ return spec
def _concretize_helper(self, presets=None, visited=None):
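
These two methods are meant to round-trip: dumping a concrete spec and loading it back reproduces the same DAG and the same hash. A sketch, assuming an mpileaks package that concretizes cleanly:

    s = Spec('mpileaks ^mpich')
    s.concretize()

    text = s.to_yaml()           # with no stream, yaml.dump returns the text
    t = Spec.from_yaml(text)     # from_yaml accepts a string or file object

    assert t.dag_hash() == s.dag_hash()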
@@ -604,6 +746,7 @@ class Spec(object):
spack.concretizer.concretize_architecture(self)
spack.concretizer.concretize_compiler(self)
spack.concretizer.concretize_version(self)
+ spack.concretizer.concretize_variants(self)
presets[self.name] = self
visited.add(self.name)
@@ -736,80 +879,156 @@ class Spec(object):
self._add_dependency(dep)
+ def _evaluate_dependency_conditions(self, name):
+ """Evaluate all the conditions on a dependency with this name.
+
+ If the package depends on <name> in this configuration, return
+ the dependency. If no conditions are True (and we don't
+ depend on it), return None.
+ """
+ pkg = spack.db.get(self.name)
+ conditions = pkg.dependencies[name]
+
+ # evaluate when specs to figure out constraints on the dependency.
+ dep = None
+ for when_spec, dep_spec in conditions.items():
+ sat = self.satisfies(when_spec, strict=True)
+ if sat:
+ if dep is None:
+ dep = Spec(name)
+ try:
+ dep.constrain(dep_spec)
+ except UnsatisfiableSpecError, e:
+ e.message = ("Conflicting conditional dependencies on package "
+ "%s for spec %s" % (self.name, self))
+ raise e
+ return dep
+
+
+ def _find_provider(self, vdep, provider_index):
+ """Find provider for a virtual spec in the provider index.
+ Raise an exception if there is a conflicting virtual
+ dependency already in this spec.
+ """
+ assert(vdep.virtual)
+ providers = provider_index.providers_for(vdep)
+
+ # If there is a provider for the vpkg, then use that instead of
+ # the virtual package.
+ if providers:
+ # Can't have multiple providers for the same thing in one spec.
+ if len(providers) > 1:
+ raise MultipleProviderError(vdep, providers)
+ return providers[0]
+ else:
+ # The user might have required something insufficient for
+ # pkg_dep -- so we'll get a conflict. e.g., user asked for
+ # mpi@:1.1 but some package required mpi@2.1:.
+ required = provider_index.providers_for(vdep.name)
+ if len(required) > 1:
+ raise MultipleProviderError(vdep, required)
+ elif required:
+ raise UnsatisfiableProviderSpecError(required[0], vdep)
+
+
+ def _merge_dependency(self, dep, visited, spec_deps, provider_index):
+ """Merge the dependency into this spec.
+
+ This is the core of the normalize() method. There are a few basic steps:
+
+ * If dep is virtual, evaluate whether it corresponds to an
+ existing concrete dependency, and merge if so.
+
+        * If it's real, check whether it provides a virtual dep that the
+          spec already requires, and merge the provider in if so.
+
+ * Finally, if none of the above, merge dependency and its
+ constraints into this spec.
+
+ This method returns True if the spec was changed, False otherwise.
+ """
+ changed = False
+
+ # If it's a virtual dependency, try to find a provider and
+ # merge that.
+ if dep.virtual:
+ visited.add(dep.name)
+ provider = self._find_provider(dep, provider_index)
+ if provider:
+ dep = provider
+
+ else:
+ # if it's a real dependency, check whether it provides
+ # something already required in the spec.
+ index = ProviderIndex([dep], restrict=True)
+ for vspec in (v for v in spec_deps.values() if v.virtual):
+ if index.providers_for(vspec):
+ vspec._replace_with(dep)
+ del spec_deps[vspec.name]
+ changed = True
+ else:
+ required = index.providers_for(vspec.name)
+ if required:
+ raise UnsatisfiableProviderSpecError(required[0], dep)
+ provider_index.update(dep)
+
+ # If the spec isn't already in the set of dependencies, clone
+ # it from the package description.
+ if dep.name not in spec_deps:
+ spec_deps[dep.name] = dep.copy()
+
+ # Constrain package information with spec info
+ try:
+ changed |= spec_deps[dep.name].constrain(dep)
+
+ except UnsatisfiableSpecError, e:
+ e.message = "Invalid spec: '%s'. "
+ e.message += "Package %s requires %s %s, but spec asked for %s"
+ e.message %= (spec_deps[dep.name], dep.name, e.constraint_type,
+ e.required, e.provided)
+ raise e
+
+ # Add merged spec to my deps and recurse
+ dependency = spec_deps[dep.name]
+ if dep.name not in self.dependencies:
+ self._add_dependency(dependency)
+ changed = True
+
+ changed |= dependency._normalize_helper(visited, spec_deps, provider_index)
+ return changed
+
+
def _normalize_helper(self, visited, spec_deps, provider_index):
"""Recursive helper function for _normalize."""
if self.name in visited:
- return
+ return False
visited.add(self.name)
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual:
- return
+ return False
+
+ # Combine constraints from package deps with constraints from
+ # the spec, until nothing changes.
+ any_change = False
+ changed = True
- # Combine constraints from package dependencies with
- # constraints on the spec's dependencies.
pkg = spack.db.get(self.name)
- for name, pkg_dep in self.package.dependencies.items():
- # If it's a virtual dependency, try to find a provider
- if pkg_dep.virtual:
- providers = provider_index.providers_for(pkg_dep)
+ while changed:
+ changed = False
+ for dep_name in pkg.dependencies:
+ # Do we depend on dep_name? If so pkg_dep is not None.
+ pkg_dep = self._evaluate_dependency_conditions(dep_name)
- # If there is a provider for the vpkg, then use that instead of
- # the virtual package.
- if providers:
- # Can't have multiple providers for the same thing in one spec.
- if len(providers) > 1:
- raise MultipleProviderError(pkg_dep, providers)
+ # If pkg_dep is a dependency, merge it.
+ if pkg_dep:
+ changed |= self._merge_dependency(
+ pkg_dep, visited, spec_deps, provider_index)
- pkg_dep = providers[0]
- name = pkg_dep.name
+ any_change |= changed
- else:
- # The user might have required something insufficient for
- # pkg_dep -- so we'll get a conflict. e.g., user asked for
- # mpi@:1.1 but some package required mpi@2.1:.
- required = provider_index.providers_for(name)
- if len(required) > 1:
- raise MultipleProviderError(pkg_dep, required)
- elif required:
- raise UnsatisfiableProviderSpecError(
- required[0], pkg_dep)
- else:
- # if it's a real dependency, check whether it provides something
- # already required in the spec.
- index = ProviderIndex([pkg_dep], restrict=True)
- for vspec in (v for v in spec_deps.values() if v.virtual):
- if index.providers_for(vspec):
- vspec._replace_with(pkg_dep)
- del spec_deps[vspec.name]
- else:
- required = index.providers_for(vspec.name)
- if required:
- raise UnsatisfiableProviderSpecError(
- required[0], pkg_dep)
- provider_index.update(pkg_dep)
-
- if name not in spec_deps:
- # If the spec doesn't reference a dependency that this package
- # needs, then clone it from the package description.
- spec_deps[name] = pkg_dep.copy()
-
- try:
- # Constrain package information with spec info
- spec_deps[name].constrain(pkg_dep)
-
- except UnsatisfiableSpecError, e:
- e.message = "Invalid spec: '%s'. "
- e.message += "Package %s requires %s %s, but spec asked for %s"
- e.message %= (spec_deps[name], name, e.constraint_type,
- e.required, e.provided)
- raise e
-
- # Add merged spec to my deps and recurse
- dependency = spec_deps[name]
- self._add_dependency(dependency)
- dependency._normalize_helper(visited, spec_deps, provider_index)
+ return any_change
def normalize(self, **kwargs):
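
This rewrite is why constrain() and its helpers now return booleans: normalize() has become a fixed-point iteration that repeats passes over the dependencies until one completes with no change. The shape of that loop in isolation (a sketch, not Spack API):

    def to_fixed_point(step):
        """Run step() until a pass reports no change; say whether anything changed."""
        any_change = False
        changed = True
        while changed:
            changed = step()
            any_change |= changed
        return any_change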
@@ -836,19 +1055,14 @@ class Spec(object):
# Ensure first that all packages & compilers in the DAG exist.
self.validate_names()
- # Ensure that the package & dep descriptions are consistent & sane
- if not self.virtual:
- self.package.validate_dependencies()
-
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False)
- # Figure out which of the user-provided deps provide virtual deps.
- # Remove virtual deps that are already provided by something in the spec
- spec_packages = [d.package for d in spec_deps.values() if not d.virtual]
-
+ # Initialize index of virtual dependency providers
index = ProviderIndex(spec_deps.values(), restrict=True)
+ # traverse the package DAG and fill out dependencies according
+ # to package files & their 'when' specs
visited = set()
self._normalize_helper(visited, spec_deps, index)
@@ -856,12 +1070,6 @@ class Spec(object):
# actually deps of this package. Raise an error.
extra = set(spec_deps.keys()).difference(visited)
- # Also subtract out all the packags that provide a needed vpkg
- vdeps = [v for v in self.package.virtual_dependencies()]
-
- vpkg_providers = index.providers_for(*vdeps)
- extra.difference_update(p.name for p in vpkg_providers)
-
# Anything left over is not a valid part of the spec.
if extra:
raise InvalidDependencyException(
@@ -893,10 +1101,18 @@ class Spec(object):
if not compilers.supported(spec.compiler):
raise UnsupportedCompilerError(spec.compiler.name)
+ # Ensure that variants all exist.
+ for vname, variant in spec.variants.items():
+ if vname not in spec.package.variants:
+ raise UnknownVariantError(spec.name, vname)
- def constrain(self, other, **kwargs):
+
+ def constrain(self, other, deps=True):
+ """Merge the constraints of other with self.
+
+ Returns True if the spec changed as a result, False if not.
+ """
other = self._autospec(other)
- constrain_deps = kwargs.get('deps', True)
if not self.name == other.name:
raise UnsatisfiableSpecNameError(self.name, other.name)
@@ -915,23 +1131,32 @@ class Spec(object):
raise UnsatisfiableArchitectureSpecError(self.architecture,
other.architecture)
+ changed = False
if self.compiler is not None and other.compiler is not None:
- self.compiler.constrain(other.compiler)
+ changed |= self.compiler.constrain(other.compiler)
elif self.compiler is None:
+ changed |= (self.compiler != other.compiler)
self.compiler = other.compiler
- self.versions.intersect(other.versions)
- self.variants.update(other.variants)
+ changed |= self.versions.intersect(other.versions)
+ changed |= self.variants.constrain(other.variants)
+
+ old = self.architecture
self.architecture = self.architecture or other.architecture
+ changed |= (self.architecture != old)
- if constrain_deps:
- self._constrain_dependencies(other)
+ if deps:
+ changed |= self._constrain_dependencies(other)
+
+ return changed
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
+ other = self._autospec(other)
+
if not self.dependencies or not other.dependencies:
- return
+ return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
@@ -940,12 +1165,17 @@ class Spec(object):
raise UnsatisfiableDependencySpecError(other, self)
# Handle common first-order constraints directly
+ changed = False
for name in self.common_dependencies(other):
- self[name].constrain(other[name], deps=False)
+ changed |= self[name].constrain(other[name], deps=False)
+
# Update with additional constraints from other spec
for name in other.dep_difference(self):
self._add_dependency(other[name].copy())
+ changed = True
+
+ return changed
def common_dependencies(self, other):
@@ -979,44 +1209,74 @@ class Spec(object):
return parse_anonymous_spec(spec_like, self.name)
- def satisfies(self, other, **kwargs):
+ def satisfies(self, other, deps=True, strict=False):
+ """Determine if this spec satisfies all constraints of another.
+
+ There are two senses for satisfies:
+
+ * `loose` (default): the absence of a constraint in self
+ implies that it *could* be satisfied by other, so we only
+ check that there are no conflicts with other for
+ constraints that this spec actually has.
+
+ * `strict`: strict means that we *must* meet all the
+ constraints specified on other.
+ """
other = self._autospec(other)
- satisfy_deps = kwargs.get('deps', True)
# First thing we care about is whether the name matches
if self.name != other.name:
return False
- # All these attrs have satisfies criteria of their own,
- # but can be None to indicate no constraints.
- for s, o in ((self.versions, other.versions),
- (self.variants, other.variants),
- (self.compiler, other.compiler)):
- if s and o and not s.satisfies(o):
+ if self.versions and other.versions:
+ if not self.versions.satisfies(other.versions, strict=strict):
return False
+ elif strict and (self.versions or other.versions):
+ return False
+
+ # None indicates no constraints when not strict.
+ if self.compiler and other.compiler:
+ if not self.compiler.satisfies(other.compiler, strict=strict):
+ return False
+ elif strict and (other.compiler and not self.compiler):
+ return False
+
+ if not self.variants.satisfies(other.variants, strict=strict):
+ return False
# Architecture satisfaction is currently just string equality.
- # Can be None for unconstrained, though.
- if (self.architecture and other.architecture and
- self.architecture != other.architecture):
+ # If not strict, None means unconstrained.
+ if self.architecture and other.architecture:
+ if self.architecture != other.architecture:
+ return False
+ elif strict and (other.architecture and not self.architecture):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
- if satisfy_deps:
- return self.satisfies_dependencies(other)
+ if deps:
+ return self.satisfies_dependencies(other, strict=strict)
else:
return True
- def satisfies_dependencies(self, other):
+ def satisfies_dependencies(self, other, strict=False):
"""This checks constraints on common dependencies against each other."""
- # if either spec doesn't restrict dependencies then both are compatible.
- if not self.dependencies or not other.dependencies:
+ other = self._autospec(other)
+
+ if strict:
+ if other.dependencies and not self.dependencies:
+ return False
+
+ if not all(dep in self.dependencies for dep in other.dependencies):
+ return False
+
+ elif not self.dependencies or not other.dependencies:
+ # if either spec doesn't restrict dependencies then both are compatible.
return True
# Handle first-order constraints directly
for name in self.common_dependencies(other):
- if not self[name].satisfies(other[name]):
+ if not self[name].satisfies(other[name], deps=False):
return False
# For virtual dependencies, we need to dig a little deeper.
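
Concretely, under the loose sense an unconstrained spec can still satisfy a constrained one, while strict requires every constraint of the other spec to be met (version values are illustrative):

    Spec('mpich').satisfies('mpich@3.0.4')                  # True: no conflict (loose)
    Spec('mpich').satisfies('mpich@3.0.4', strict=True)     # False: constraint not met
    Spec('mpich@3.0.4').satisfies('mpich@3.0:3.5')          # True in either sense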
@@ -1061,11 +1321,12 @@ class Spec(object):
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
- self.variants = other.variants.copy()
self.architecture = other.architecture
self.compiler = other.compiler.copy() if other.compiler else None
self.dependents = DependencyMap()
self.dependencies = DependencyMap()
+ self.variants = other.variants.copy()
+ self.variants.spec = self
# If we copy dependencies, preserve DAG structure in the new spec
if kwargs.get('deps', True):
@@ -1117,7 +1378,7 @@ class Spec(object):
"""
spec = self._autospec(spec)
for s in self.traverse():
- if s.satisfies(spec):
+ if s.satisfies(spec, strict=True):
return True
return False
@@ -1208,7 +1469,7 @@ class Spec(object):
$%@ Compiler & compiler version
$+ Options
$= Architecture
- $# Dependencies' 8-char sha1 prefix
+ $# 7-char prefix of DAG hash
$$ $
Optionally you can provide a width, e.g. $20_ for a 20-wide name.
@@ -1264,8 +1525,7 @@ class Spec(object):
if self.architecture:
write(fmt % (c + str(self.architecture)), c)
elif c == '#':
- if self.dependencies:
- out.write(fmt % ('-' + self.dep_hash(8)))
+ out.write('-' + fmt % (self.dag_hash(7)))
elif c == '$':
if fmt != '':
raise ValueError("Can't use format width with $$.")
@@ -1274,7 +1534,8 @@ class Spec(object):
elif compiler:
if c == '@':
- if self.compiler and self.compiler.versions:
+ if (self.compiler and self.compiler.versions and
+ self.compiler.versions != _any_version):
write(c + str(self.compiler.versions), '%')
elif c == '$':
escape = True
@@ -1311,12 +1572,15 @@ class Spec(object):
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', '$_$@$%@$+$=')
+ prefix = kwargs.pop('prefix', None)
check_kwargs(kwargs, self.tree)
out = ""
cur_id = 0
ids = {}
for d, node in self.traverse(order='pre', cover=cover, depth=True):
+ if prefix is not None:
+ out += prefix(node)
out += " " * indent
if depth:
out += "%-4d" % d
@@ -1354,6 +1618,8 @@ class SpecLexer(spack.parse.Lexer):
(r'\~', lambda scanner, val: self.token(OFF, val)),
(r'\%', lambda scanner, val: self.token(PCT, val)),
(r'\=', lambda scanner, val: self.token(EQ, val)),
+ # This is more liberal than identifier_re (see above).
+ # Checked by check_identifier() for better error messages.
(r'\w[\w.-]*', lambda scanner, val: self.token(ID, val)),
(r'\s+', lambda scanner, val: None)])
@@ -1399,7 +1665,7 @@ class SpecParser(spack.parse.Parser):
spec = Spec.__new__(Spec)
spec.name = self.token.value
spec.versions = VersionList()
- spec.variants = VariantMap()
+ spec.variants = VariantMap(spec)
spec.architecture = None
spec.compiler = None
spec.dependents = DependencyMap()
@@ -1580,6 +1846,13 @@ class UnsupportedCompilerError(SpecError):
"The '%s' compiler is not yet supported." % compiler_name)
+class UnknownVariantError(SpecError):
+ """Raised when the same variant occurs in a spec twice."""
+ def __init__(self, pkg, variant):
+ super(UnknownVariantError, self).__init__(
+ "Package %s has no variant %s!" % (pkg, variant))
+
+
class DuplicateArchitectureError(SpecError):
"""Raised when the same architecture occurs in a spec twice."""
def __init__(self, message):
@@ -1683,3 +1956,7 @@ class UnsatisfiableDependencySpecError(UnsatisfiableSpecError):
def __init__(self, provided, required):
super(UnsatisfiableDependencySpecError, self).__init__(
provided, required, "dependency")
+
+class SpackYAMLError(spack.error.SpackError):
+ def __init__(self, msg, yaml_error):
+        super(SpackYAMLError, self).__init__(msg, str(yaml_error))
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index d451743508..008c5f0429 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -344,13 +344,9 @@ class DIYStage(object):
def _get_mirrors():
"""Get mirrors from spack configuration."""
- config = spack.config.get_config()
+ config = spack.config.get_mirror_config()
+ return [val for name, val in config.iteritems()]
- mirrors = []
- sec_names = config.get_section_names('mirror')
- for name in sec_names:
- mirrors.append(config.get_value('mirror', name, 'url'))
- return mirrors
def ensure_access(file=spack.stage_path):
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index c53e6774fc..7ff512c370 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -52,7 +52,9 @@ test_names = ['versions',
'mirror',
'url_extrapolate',
'cc',
- 'link_tree']
+ 'link_tree',
+ 'spec_yaml',
+ 'optional_deps']
def list_tests():
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index a7f4812c8c..cc839a2340 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -35,7 +35,13 @@ class ConcretizeTest(MockPackagesTest):
self.assertEqual(abstract.versions, concrete.versions)
if abstract.variants:
- self.assertEqual(abstract.versions, concrete.versions)
+ for name in abstract.variants:
+ avariant = abstract.variants[name]
+ cvariant = concrete.variants[name]
+ self.assertEqual(avariant.enabled, cvariant.enabled)
+
+ for name in abstract.package.variants:
+ self.assertTrue(name in concrete.variants)
if abstract.compiler and abstract.compiler.concrete:
self.assertEqual(abstract.compiler, concrete.compiler)
@@ -66,6 +72,12 @@ class ConcretizeTest(MockPackagesTest):
self.check_concretize('libelf')
+ def test_concretize_variant(self):
+ self.check_concretize('mpich+debug')
+ self.check_concretize('mpich~debug')
+ self.check_concretize('mpich')
+
+
def test_concretize_with_virtual(self):
self.check_concretize('mpileaks ^mpi')
self.check_concretize('mpileaks ^mpi@:1.1')
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index c676e9a35b..790b22f3b0 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -26,44 +26,49 @@ import unittest
import shutil
import os
from tempfile import mkdtemp
+import spack
+from spack.packages import PackageDB
+from spack.test.mock_packages_test import *
-from spack.config import *
+class ConfigTest(MockPackagesTest):
+ def setUp(self):
+ self.initmock()
+ self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
+ spack.config.config_scopes = [('test_low_priority', os.path.join(self.tmp_dir, 'low')),
+ ('test_high_priority', os.path.join(self.tmp_dir, 'high'))]
-class ConfigTest(unittest.TestCase):
+ def tearDown(self):
+ self.cleanmock()
+ shutil.rmtree(self.tmp_dir, True)
- @classmethod
- def setUp(cls):
- cls.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
+ def check_config(self, comps):
+ config = spack.config.get_compilers_config()
+ compiler_list = ['cc', 'cxx', 'f77', 'f90']
+ for key in comps:
+ for c in compiler_list:
+ if comps[key][c] == '/bad':
+ continue
+ self.assertEqual(comps[key][c], config[key][c])
- @classmethod
- def tearDown(cls):
- shutil.rmtree(cls.tmp_dir, True)
-
-
- def get_path(self):
- return os.path.join(ConfigTest.tmp_dir, "spackconfig")
+ def test_write_key(self):
+ a_comps = {"gcc@4.7.3" : { "cc" : "/gcc473", "cxx" : "/g++473", "f77" : None, "f90" : None },
+ "gcc@4.5.0" : { "cc" : "/gcc450", "cxx" : "/g++450", "f77" : "/gfortran", "f90" : "/gfortran" },
+ "clang@3.3" : { "cc" : "/bad", "cxx" : "/bad", "f77" : "/bad", "f90" : "/bad" }}
+ b_comps = {"icc@10.0" : { "cc" : "/icc100", "cxx" : "/icc100", "f77" : None, "f90" : None },
+ "icc@11.1" : { "cc" : "/icc111", "cxx" : "/icp111", "f77" : "/ifort", "f90" : "/ifort" },
+ "clang@3.3" : { "cc" : "/clang", "cxx" : "/clang++", "f77" : None, "f90" : None}}
- def test_write_key(self):
- config = SpackConfigParser(self.get_path())
- config.set_value('compiler.cc', 'a')
- config.set_value('compiler.cxx', 'b')
- config.set_value('compiler', 'gcc@4.7.3', 'cc', 'c')
- config.set_value('compiler', 'gcc@4.7.3', 'cxx', 'd')
- config.write()
+ spack.config.add_to_compiler_config(a_comps, 'test_low_priority')
+ spack.config.add_to_compiler_config(b_comps, 'test_high_priority')
- config = SpackConfigParser(self.get_path())
+ self.check_config(a_comps)
+ self.check_config(b_comps)
- self.assertEqual(config.get_value('compiler.cc'), 'a')
- self.assertEqual(config.get_value('compiler.cxx'), 'b')
- self.assertEqual(config.get_value('compiler', 'gcc@4.7.3', 'cc'), 'c')
- self.assertEqual(config.get_value('compiler', 'gcc@4.7.3', 'cxx'), 'd')
+ spack.config.clear_config_caches()
- self.assertEqual(config.get_value('compiler', None, 'cc'), 'a')
- self.assertEqual(config.get_value('compiler', None, 'cxx'), 'b')
- self.assertEqual(config.get_value('compiler.gcc@4.7.3.cc'), 'c')
- self.assertEqual(config.get_value('compiler.gcc@4.7.3.cxx'), 'd')
+ self.check_config(a_comps)
+ self.check_config(b_comps)
- self.assertRaises(NoOptionError, config.get_value, 'compiler', None, 'fc')
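The rewritten test pins down the new scoped-config behavior: compiler entries are written into named scopes and read back through a single merged view, with the higher-priority scope winning where entries collide (that is what the '/bad' clang entries in a_comps verify). A condensed sketch of that flow, using only the calls exercised above; the scope names and the exact merge order are assumptions taken from the test setup, not from the config module itself:

    import spack.config

    # Two scopes define the same compiler; the high-priority scope should win.
    low  = {"gcc@4.5.0": {"cc": "/gcc450", "cxx": "/g++450",
                          "f77": None, "f90": None}}
    high = {"gcc@4.5.0": {"cc": "/opt/gcc450/bin/gcc", "cxx": "/opt/gcc450/bin/g++",
                          "f77": None, "f90": None}}

    spack.config.add_to_compiler_config(low,  'test_low_priority')
    spack.config.add_to_compiler_config(high, 'test_high_priority')

    merged = spack.config.get_compilers_config()
    assert merged["gcc@4.5.0"]["cc"] == "/opt/gcc450/bin/gcc"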
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index 3e52954cfe..7ca84090f2 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -36,7 +36,11 @@ from llnl.util.filesystem import *
import spack
from spack.spec import Spec
from spack.packages import PackageDB
-from spack.directory_layout import SpecHashDirectoryLayout
+from spack.directory_layout import YamlDirectoryLayout
+
+# number of packages to test (to reduce test time)
+max_packages = 10
+
class DirectoryLayoutTest(unittest.TestCase):
"""Tests that a directory layout works correctly and produces a
@@ -44,11 +48,11 @@ class DirectoryLayoutTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
- self.layout = SpecHashDirectoryLayout(self.tmpdir)
+ self.layout = YamlDirectoryLayout(self.tmpdir)
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
self.layout = None
@@ -59,7 +63,9 @@ class DirectoryLayoutTest(unittest.TestCase):
finally that the directory can be removed by the directory
layout.
"""
- for pkg in spack.db.all_packages():
+ packages = list(spack.db.all_packages())[:max_packages]
+
+ for pkg in packages:
spec = pkg.spec
# If a spec fails to concretize, just skip it. If it is a
@@ -69,7 +75,7 @@ class DirectoryLayoutTest(unittest.TestCase):
except:
continue
- self.layout.make_path_for_spec(spec)
+ self.layout.create_install_directory(spec)
install_dir = self.layout.path_for_spec(spec)
spec_path = self.layout.spec_file_path(spec)
@@ -90,7 +96,7 @@ class DirectoryLayoutTest(unittest.TestCase):
# Ensure that specs that come out "normal" are really normal.
with closing(open(spec_path)) as spec_file:
- read_separately = Spec(spec_file.read())
+ read_separately = Spec.from_yaml(spec_file.read())
read_separately.normalize()
self.assertEqual(read_separately, spec_from_file)
@@ -98,11 +104,11 @@ class DirectoryLayoutTest(unittest.TestCase):
read_separately.concretize()
self.assertEqual(read_separately, spec_from_file)
- # Make sure the dep hash of the read-in spec is the same
- self.assertEqual(spec.dep_hash(), spec_from_file.dep_hash())
+ # Make sure the hash of the read-in spec is the same
+ self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
# Ensure directories are properly removed
- self.layout.remove_path_for_spec(spec)
+ self.layout.remove_install_directory(spec)
self.assertFalse(os.path.isdir(install_dir))
self.assertFalse(os.path.exists(install_dir))
@@ -120,12 +126,14 @@ class DirectoryLayoutTest(unittest.TestCase):
"""
mock_db = PackageDB(spack.mock_packages_path)
- not_in_mock = set(spack.db.all_package_names()).difference(
+ not_in_mock = set.difference(
+ set(spack.db.all_package_names()),
set(mock_db.all_package_names()))
+ packages = list(not_in_mock)[:max_packages]
# Create all the packages that are not in mock.
installed_specs = {}
- for pkg_name in not_in_mock:
+ for pkg_name in packages:
spec = spack.db.get(pkg_name).spec
# If a spec fails to concretize, just skip it. If it is a
@@ -135,7 +143,7 @@ class DirectoryLayoutTest(unittest.TestCase):
except:
continue
- self.layout.make_path_for_spec(spec)
+ self.layout.create_install_directory(spec)
installed_specs[spec] = self.layout.path_for_spec(spec)
tmp = spack.db
@@ -144,12 +152,29 @@ class DirectoryLayoutTest(unittest.TestCase):
# Now check that even without the package files, we know
# enough to read a spec from the spec file.
for spec, path in installed_specs.items():
- spec_from_file = self.layout.read_spec(join_path(path, '.spec'))
+ spec_from_file = self.layout.read_spec(
+ join_path(path, '.spack', 'spec.yaml'))
# To satisfy these conditions, directory layouts need to
# read in concrete specs from their install dirs somehow.
self.assertEqual(path, self.layout.path_for_spec(spec_from_file))
self.assertEqual(spec, spec_from_file)
- self.assertEqual(spec.dep_hash(), spec_from_file.dep_hash())
+ self.assertTrue(spec.eq_dag(spec_from_file))
+ self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
spack.db = tmp
+
+
+ def test_find(self):
+ """Test that finding specs within an install layout works."""
+ packages = list(spack.db.all_packages())[:max_packages]
+ installed_specs = {}
+ for pkg in packages:
+ spec = pkg.spec.concretized()
+ installed_specs[spec.name] = spec
+ self.layout.create_install_directory(spec)
+
+ found_specs = dict((s.name, s) for s in self.layout.all_specs())
+ for name, spec in installed_specs.items():
+ self.assertTrue(name in found_specs)
+ self.assertTrue(found_specs[name].eq_dag(spec))
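Taken together, these tests fix the public surface of YamlDirectoryLayout. A hedged usage sketch built only from the calls that appear above (the package choice and paths are illustrative):

    import tempfile

    import spack
    from spack.directory_layout import YamlDirectoryLayout

    layout = YamlDirectoryLayout(tempfile.mkdtemp())

    spec = spack.db.get('libelf').spec
    spec.concretize()

    layout.create_install_directory(spec)   # also writes .spack/spec.yaml
    prefix = layout.path_for_spec(spec)     # deterministic install prefix

    # all_specs() re-reads every spec.yaml under the layout root.
    found = [s for s in layout.all_specs() if s.dag_hash() == spec.dag_hash()]
    assert found and found[0].eq_dag(spec)

    layout.remove_install_directory(spec)   # removes the prefix again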
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index e052f53e77..d240a393a6 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -33,7 +33,7 @@ from llnl.util.filesystem import *
import spack
from spack.stage import Stage
from spack.fetch_strategy import URLFetchStrategy
-from spack.directory_layout import SpecHashDirectoryLayout
+from spack.directory_layout import YamlDirectoryLayout
from spack.util.executable import which
from spack.test.mock_packages_test import *
from spack.test.mock_repo import MockArchive
@@ -55,7 +55,7 @@ class InstallTest(MockPackagesTest):
# installed pkgs and mock packages.
self.tmpdir = tempfile.mkdtemp()
self.orig_layout = spack.install_layout
- spack.install_layout = SpecHashDirectoryLayout(self.tmpdir)
+ spack.install_layout = YamlDirectoryLayout(self.tmpdir)
def tearDown(self):
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
index e948376039..00f81114af 100644
--- a/lib/spack/spack/test/mock_packages_test.py
+++ b/lib/spack/spack/test/mock_packages_test.py
@@ -31,29 +31,40 @@ from spack.spec import Spec
def set_pkg_dep(pkg, spec):
- """Alters dependence information for a pacakge.
+ """Alters dependence information for a package.
Use this to mock up constraints.
"""
spec = Spec(spec)
- spack.db.get(pkg).dependencies[spec.name] = spec
+ spack.db.get(pkg).dependencies[spec.name] = { Spec(pkg) : spec }
class MockPackagesTest(unittest.TestCase):
- def setUp(self):
+ def initmock(self):
# Use the mock packages database for these tests. This allows
# us to set up contrived packages that don't interfere with
# real ones.
self.real_db = spack.db
spack.db = PackageDB(spack.mock_packages_path)
- self.real_scopes = spack.config._scopes
- spack.config._scopes = {
- 'site' : spack.mock_site_config,
- 'user' : spack.mock_user_config }
+ spack.config.clear_config_caches()
+ self.real_scopes = spack.config.config_scopes
+ spack.config.config_scopes = [
+ ('site', spack.mock_site_config),
+ ('user', spack.mock_user_config)]
- def tearDown(self):
+ def cleanmock(self):
"""Restore the real packages path after any test."""
spack.db = self.real_db
- spack.config._scopes = self.real_scopes
+ spack.config.config_scopes = self.real_scopes
+ spack.config.clear_config_caches()
+
+
+ def setUp(self):
+ self.initmock()
+
+
+ def tearDown(self):
+ self.cleanmock()
+
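The one-line change in set_pkg_dep reflects the new shape of a package's dependencies dict: instead of mapping a dependency name directly to a Spec, each name now maps condition specs to dependency specs, which is what makes conditional dependencies possible. A hedged illustration of the two shapes:

    from spack.spec import Spec

    # Old shape: name -> dependency spec (always applies).
    old_style = {'mpich': Spec('mpich@2.0')}

    # New shape: name -> {condition spec: dependency spec}. The dependency
    # applies only when the package satisfies the condition key; a bare
    # package name as the condition (as in set_pkg_dep above) means the
    # dependency is unconditional.
    new_style = {'mpich': {Spec('callpath'): Spec('mpich@2.0')}}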
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
new file mode 100644
index 0000000000..265a983f3f
--- /dev/null
+++ b/lib/spack/spack/test/optional_deps.py
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import unittest
+
+import spack
+from spack.spec import Spec, CompilerSpec
+from spack.test.mock_packages_test import *
+
+class OptionalDepsTest(MockPackagesTest):
+
+ def check_normalize(self, spec_string, expected):
+ spec = Spec(spec_string)
+ spec.normalize()
+ self.assertEqual(spec, expected)
+ self.assertTrue(spec.eq_dag(expected))
+
+
+ def test_normalize_simple_conditionals(self):
+ self.check_normalize('optional-dep-test', Spec('optional-dep-test'))
+ self.check_normalize('optional-dep-test~a', Spec('optional-dep-test~a'))
+
+ self.check_normalize('optional-dep-test+a',
+ Spec('optional-dep-test+a', Spec('a')))
+
+ self.check_normalize('optional-dep-test@1.1',
+ Spec('optional-dep-test@1.1', Spec('b')))
+
+ self.check_normalize('optional-dep-test%intel',
+ Spec('optional-dep-test%intel', Spec('c')))
+
+ self.check_normalize('optional-dep-test%intel@64.1',
+ Spec('optional-dep-test%intel@64.1', Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test%intel@64.1.2',
+ Spec('optional-dep-test%intel@64.1.2', Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test%clang@35',
+ Spec('optional-dep-test%clang@35', Spec('e')))
+
+
+ def test_multiple_conditionals(self):
+ self.check_normalize('optional-dep-test+a@1.1',
+ Spec('optional-dep-test+a@1.1', Spec('a'), Spec('b')))
+
+ self.check_normalize('optional-dep-test+a%intel',
+ Spec('optional-dep-test+a%intel', Spec('a'), Spec('c')))
+
+ self.check_normalize('optional-dep-test@1.1%intel',
+ Spec('optional-dep-test@1.1%intel', Spec('b'), Spec('c')))
+
+ self.check_normalize('optional-dep-test@1.1%intel@64.1.2+a',
+ Spec('optional-dep-test@1.1%intel@64.1.2+a',
+ Spec('b'), Spec('a'), Spec('c'), Spec('d')))
+
+ self.check_normalize('optional-dep-test@1.1%clang@36.5+a',
+ Spec('optional-dep-test@1.1%clang@36.5+a',
+ Spec('b'), Spec('a'), Spec('e')))
+
+
+ def test_chained_mpi(self):
+ self.check_normalize('optional-dep-test-2+mpi',
+ Spec('optional-dep-test-2+mpi',
+ Spec('optional-dep-test+mpi',
+ Spec('mpi'))))
+
+
+ def test_transitive_chain(self):
+ # Each of these dependencies comes from a conditional
+ # dependency on another. This requires iterating to evaluate
+ # the whole chain.
+ self.check_normalize('optional-dep-test+f',
+ Spec('optional-dep-test+f', Spec('f'), Spec('g'), Spec('mpi')))
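These expectations are driven by conditional depends_on directives in the mock optional-dep-test package. A hypothetical sketch of such a package (the names and conditions mirror the assertions above; the checksums, URL, and install body are placeholders, and the real file lives under spack.mock_packages_path):

    from spack import *

    class OptionalDepTest(Package):
        """Hypothetical mock package exercising conditional dependencies."""
        homepage = "http://www.example.com"
        url      = "http://www.example.com/odt-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')
        version('1.1', '0123456789abcdef0123456789abcdef')

        variant('a', default=False, description='Turn on the a dependency')

        depends_on('a', when='+a')        # variant condition
        depends_on('b', when='@1.1')      # version condition
        depends_on('c', when='%intel')    # compiler condition

        def install(self, spec, prefix):
            pass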
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index fb67aa8a8d..549f829d3e 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -44,8 +44,11 @@ class SpecDagTest(MockPackagesTest):
set_pkg_dep('callpath', 'mpich@2.0')
spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
- self.assertRaises(spack.package.InvalidPackageDependencyError,
- spec.package.validate_dependencies)
+
+ # TODO: try to do something to show that the issue was with
+ # TODO: the user's input or with package inconsistencies.
+ self.assertRaises(spack.spec.UnsatisfiableVersionSpecError,
+ spec.normalize)
def test_preorder_node_traversal(self):
@@ -140,11 +143,6 @@ class SpecDagTest(MockPackagesTest):
def test_conflicting_spec_constraints(self):
mpileaks = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
- try:
- mpileaks.package.validate_dependencies()
- except spack.package.InvalidPackageDependencyError, e:
- self.fail("validate_dependencies raised an exception: %s"
- % e.message)
# Normalize then add conflicting constraints to the DAG (this is an
# extremely unlikely scenario, but we test for it anyway)
@@ -242,12 +240,6 @@ class SpecDagTest(MockPackagesTest):
self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
- def test_unsatisfiable_variant(self):
- set_pkg_dep('mpileaks', 'mpich+debug')
- spec = Spec('mpileaks ^mpich~debug ^callpath ^dyninst ^libelf ^libdwarf')
- self.assertRaises(spack.spec.UnsatisfiableVariantSpecError, spec.normalize)
-
-
def test_unsatisfiable_architecture(self):
set_pkg_dep('mpileaks', 'mpich=bgqos_0')
spec = Spec('mpileaks ^mpich=sles_10_ppc64 ^callpath ^dyninst ^libelf ^libdwarf')
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 5fb09e68af..20df2603f5 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -33,8 +33,8 @@ class SpecSematicsTest(MockPackagesTest):
# ================================================================================
# Utility functions to set everything up.
# ================================================================================
- def check_satisfies(self, spec, anon_spec):
- left = Spec(spec)
+ def check_satisfies(self, spec, anon_spec, concrete=False):
+ left = Spec(spec, concrete=concrete)
right = parse_anonymous_spec(anon_spec, left.name)
# Satisfies is one-directional.
@@ -46,8 +46,8 @@ class SpecSematicsTest(MockPackagesTest):
right.copy().constrain(left)
- def check_unsatisfiable(self, spec, anon_spec):
- left = Spec(spec)
+ def check_unsatisfiable(self, spec, anon_spec, concrete=False):
+ left = Spec(spec, concrete=concrete)
right = parse_anonymous_spec(anon_spec, left.name)
self.assertFalse(left.satisfies(right))
@@ -64,6 +64,16 @@ class SpecSematicsTest(MockPackagesTest):
self.assertEqual(exp, spec)
+ def check_constrain_changed(self, spec, constraint):
+ spec = Spec(spec)
+ self.assertTrue(spec.constrain(constraint))
+
+
+ def check_constrain_not_changed(self, spec, constraint):
+ spec = Spec(spec)
+ self.assertFalse(spec.constrain(constraint))
+
+
def check_invalid_constraint(self, spec, constraint):
spec = Spec(spec)
constraint = Spec(constraint)
@@ -71,7 +81,7 @@ class SpecSematicsTest(MockPackagesTest):
# ================================================================================
- # Satisfiability and constraints
+ # Satisfiability
# ================================================================================
def test_satisfies(self):
self.check_satisfies('libelf@0.8.13', '@0:1')
@@ -96,6 +106,9 @@ class SpecSematicsTest(MockPackagesTest):
self.check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')
self.check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
+ self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
+ self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+
def test_satisfies_architecture(self):
self.check_satisfies('foo=chaos_5_x86_64_ib', '=chaos_5_x86_64_ib')
@@ -147,7 +160,40 @@ class SpecSematicsTest(MockPackagesTest):
self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
- def test_constrain(self):
+ def test_satisfies_matching_variant(self):
+ self.check_satisfies('mpich+foo', 'mpich+foo')
+ self.check_satisfies('mpich~foo', 'mpich~foo')
+
+
+ def test_satisfies_unconstrained_variant(self):
+ # only asked for mpich, no constraints. Either will do.
+ self.check_satisfies('mpich+foo', 'mpich')
+ self.check_satisfies('mpich~foo', 'mpich')
+
+
+ def test_unsatisfiable_variants(self):
+ # This case is different depending on whether the specs are concrete.
+
+ # 'mpich' is not concrete:
+ self.check_satisfies('mpich', 'mpich+foo', False)
+ self.check_satisfies('mpich', 'mpich~foo', False)
+
+ # 'mpich' is concrete:
+ self.check_unsatisfiable('mpich', 'mpich+foo', True)
+ self.check_unsatisfiable('mpich', 'mpich~foo', True)
+
+
+ def test_unsatisfiable_variant_mismatch(self):
+ # No match in specs
+ self.check_unsatisfiable('mpich~foo', 'mpich+foo')
+ self.check_unsatisfiable('mpich+foo', 'mpich~foo')
+
+
+
+ # ================================================================================
+ # Constraints
+ # ================================================================================
+ def test_constrain_variants(self):
self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')
self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6',
'libelf@0:2.5%gcc@2:4.6', 'libelf@2.1:3%gcc@4.5:4.7')
@@ -158,6 +204,13 @@ class SpecSematicsTest(MockPackagesTest):
self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo')
+
+ def test_constrain_arch(self):
+ self.check_constrain('libelf=bgqos_0', 'libelf=bgqos_0', 'libelf=bgqos_0')
+ self.check_constrain('libelf=bgqos_0', 'libelf', 'libelf=bgqos_0')
+
+
+ def test_constrain_compiler(self):
- self.check_constrain('libelf=bgqos_0', 'libelf=bgqos_0', 'libelf=bgqos_0')
- self.check_constrain('libelf=bgqos_0', 'libelf', 'libelf=bgqos_0')
+ self.check_constrain('libelf%gcc@4.5', 'libelf%gcc@4.5', 'libelf%gcc@4.5')
+ self.check_constrain('libelf%gcc@4.5', 'libelf', 'libelf%gcc@4.5')
@@ -172,6 +225,45 @@ class SpecSematicsTest(MockPackagesTest):
self.check_invalid_constraint('libelf=bgqos_0', 'libelf=x86_54')
- def test_compiler_satisfies(self):
- self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
- self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+ def test_constrain_changed(self):
+ self.check_constrain_changed('libelf', '@1.0')
+ self.check_constrain_changed('libelf', '@1.0:5.0')
+ self.check_constrain_changed('libelf', '%gcc')
+ self.check_constrain_changed('libelf%gcc', '%gcc@4.5')
+ self.check_constrain_changed('libelf', '+debug')
+ self.check_constrain_changed('libelf', '~debug')
+ self.check_constrain_changed('libelf', '=bgqos_0')
+
+
+ def test_constrain_not_changed(self):
+ self.check_constrain_not_changed('libelf', 'libelf')
+ self.check_constrain_not_changed('libelf@1.0', '@1.0')
+ self.check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')
+ self.check_constrain_not_changed('libelf%gcc', '%gcc')
+ self.check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')
+ self.check_constrain_not_changed('libelf+debug', '+debug')
+ self.check_constrain_not_changed('libelf~debug', '~debug')
+ self.check_constrain_not_changed('libelf=bgqos_0', '=bgqos_0')
+ self.check_constrain_not_changed('libelf^foo', 'libelf^foo')
+ self.check_constrain_not_changed('libelf^foo^bar', 'libelf^foo^bar')
+
+
+ def test_constrain_dependency_changed(self):
+ self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
+ self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo+debug')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo~debug')
+ self.check_constrain_changed('libelf^foo', 'libelf^foo=bgqos_0')
+
+
+ def test_constrain_dependency_not_changed(self):
+ self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')
+ self.check_constrain_not_changed('libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0')
+ self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
+ self.check_constrain_not_changed('libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5')
+ self.check_constrain_not_changed('libelf^foo+debug', 'libelf^foo+debug')
+ self.check_constrain_not_changed('libelf^foo~debug', 'libelf^foo~debug')
+ self.check_constrain_not_changed('libelf^foo=bgqos_0', 'libelf^foo=bgqos_0')
+
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
new file mode 100644
index 0000000000..869befc02a
--- /dev/null
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Test YAML serialization for specs.
+
+The YAML format preserves DAG information in the spec.
+
+"""
+from spack.spec import Spec
+from spack.test.mock_packages_test import *
+
+class SpecYamlTest(MockPackagesTest):
+
+ def check_yaml_round_trip(self, spec):
+ yaml_text = spec.to_yaml()
+ spec_from_yaml = Spec.from_yaml(yaml_text)
+ self.assertTrue(spec.eq_dag(spec_from_yaml))
+
+
+ def test_simple_spec(self):
+ spec = Spec('mpileaks')
+ self.check_yaml_round_trip(spec)
+
+
+ def test_normal_spec(self):
+ spec = Spec('mpileaks+debug~opt')
+ spec.normalize()
+ self.check_yaml_round_trip(spec)
+
+
+ def test_ambiguous_version_spec(self):
+ spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
+ spec.normalize()
+ self.check_yaml_round_trip(spec)
+
+
+ def test_concrete_spec(self):
+ spec = Spec('mpileaks+debug~opt')
+ spec.concretize()
+ self.check_yaml_round_trip(spec)
+
+
+ def test_yaml_subdag(self):
+ spec = Spec('mpileaks^mpich+debug')
+ spec.concretize()
+
+ yaml_spec = Spec.from_yaml(spec.to_yaml())
+
+ for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):
+ self.assertTrue(spec[dep].eq_dag(yaml_spec[dep]))
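The round-trip property under test, in isolation (a sketch that uses only the calls exercised above and assumes the mock package environment these tests run in):

    from spack.spec import Spec

    spec = Spec('mpileaks+debug~opt')
    spec.concretize()

    text  = spec.to_yaml()         # serializes the whole DAG, not just the root
    clone = Spec.from_yaml(text)   # rebuilds the nodes *and* the edges

    assert spec.eq_dag(clone)                    # DAG shape survives
    assert spec.dag_hash() == clone.dag_hash()   # and so does the hash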
diff --git a/lib/spack/spack/util/debug.py b/lib/spack/spack/util/debug.py
new file mode 100644
index 0000000000..37985eccdd
--- /dev/null
+++ b/lib/spack/spack/util/debug.py
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Debug signal handler: prints a stack trace and enters interpreter.
+
+``register_interrupt_handler()`` enables a ctrl-C handler that prints
+a stack trace and drops the user into an interpreter.
+
+"""
+import os
+import code
+import traceback
+import signal
+
+def debug_handler(sig, frame):
+ """Interrupt running process, and provide a python prompt for
+ interactive debugging."""
+ d = {'_frame':frame} # Allow access to frame object.
+ d.update(frame.f_globals) # Unless shadowed by global
+ d.update(frame.f_locals)
+
+ i = code.InteractiveConsole(d)
+ message = "Signal received : entering python shell.\nTraceback:\n"
+ message += ''.join(traceback.format_stack(frame))
+ i.interact(message)
+ os._exit(1) # Use os._exit so the exit isn't intercepted by the test harness.
+
+
+def register_interrupt_handler():
+ """Register a handler to print a stack trace and enter an interpreter on Ctrl-C"""
+ signal.signal(signal.SIGINT, debug_handler)
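Typical wiring, as a sketch: register the handler once at startup, and a Ctrl-C anywhere afterward prints the stack and opens an InteractiveConsole seeded with the interrupted frame's globals and locals (plus the frame itself as _frame):

    import time

    from spack.util.debug import register_interrupt_handler

    register_interrupt_handler()
    while True:
        time.sleep(1)   # press Ctrl-C here to get the traceback and prompt

Leaving the console (Ctrl-D) calls os._exit(1), so control never returns to the interrupted code.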
diff --git a/lib/spack/spack/variant.py b/lib/spack/spack/variant.py
new file mode 100644
index 0000000000..3d3e2b0f6d
--- /dev/null
+++ b/lib/spack/spack/variant.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Variant is a class describing flags on builds, or "variants".
+
+Variants could be generalized later to describe arbitrary parameters,
+but currently they are just boolean flags.
+
+"""
+
+class Variant(object):
+ """Represents a variant on a build. Can be either on or off."""
+ def __init__(self, default, description):
+ self.default = bool(default)
+ self.description = str(description)
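A Variant records only metadata; whether the flag is on or off for a particular build lives on the Spec (the concretize test above reads variants[name].enabled). A minimal sketch:

    from spack.variant import Variant

    debug = Variant(False, "Build with debug symbols")
    assert debug.default is False
    assert debug.description == "Build with debug symbols"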
diff --git a/lib/spack/spack/version.py b/lib/spack/spack/version.py
index cc83634137..35db05e018 100644
--- a/lib/spack/spack/version.py
+++ b/lib/spack/spack/version.py
@@ -93,12 +93,12 @@ def coerce_versions(a, b):
def coerced(method):
"""Decorator that ensures that argument types of a method are coerced."""
@wraps(method)
- def coercing_method(a, b):
+ def coercing_method(a, b, *args, **kwargs):
if type(a) == type(b) or a is None or b is None:
- return method(a, b)
+ return method(a, b, *args, **kwargs)
else:
ca, cb = coerce_versions(a, b)
- return getattr(ca, method.__name__)(cb)
+ return getattr(ca, method.__name__)(cb, *args, **kwargs)
return coercing_method
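The point of forwarding *args and **kwargs: coerced binary methods can now grow keyword options without losing them when one operand has to be coerced. A hedged sketch using the satisfies() change below:

    from spack.version import Version, VersionList

    vl = VersionList(['2.1:2.5'])

    # Mixed types: the Version is coerced up to a VersionList first.
    assert vl.satisfies(Version('2.3'))

    # The forwarded keyword: strict requires containment in the other list.
    assert vl.satisfies(VersionList(['2.0:3.0']), strict=True)
    assert not vl.satisfies(VersionList(['2.4:3.0']), strict=True)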
@@ -587,16 +587,42 @@ class VersionList(object):
return False
+ def to_dict(self):
+ """Generate human-readable dict for YAML."""
+ if self.concrete:
+ return { 'version' : str(self[0]) }
+ else:
+ return { 'versions' : [str(v) for v in self] }
+
+
+ @staticmethod
+ def from_dict(dictionary):
+ """Parse dict from to_dict."""
+ if 'versions' in dictionary:
+ return VersionList(dictionary['versions'])
+ elif 'version' in dictionary:
+ return VersionList([dictionary['version']])
+ else:
+ raise ValueError("Dict must have 'version' or 'versions' in it.")
+
+
@coerced
- def satisfies(self, other):
- """A VersionList satisfies another if some version in the list would
- would satisfy some version in the other list. This uses essentially
- the same algorithm as overlaps() does for VersionList, but it calls
- satisfies() on member Versions and VersionRanges.
+ def satisfies(self, other, strict=False):
+ """A VersionList satisfies another if some version in the list
+ would satisfy some version in the other list. This uses
+ essentially the same algorithm as overlaps() does for
+ VersionList, but it calls satisfies() on member Versions
+ and VersionRanges.
+
+ If strict is specified, this version list must lie entirely
+ *within* the other in order to satisfy it.
"""
if not other or not self:
return False
+ if strict:
+ return self in other
+
s = o = 0
while s < len(self) and o < len(other):
if self[s].satisfies(other[o]):
@@ -633,9 +659,14 @@ class VersionList(object):
@coerced
def intersect(self, other):
+ """Intersect this spec's list with other.
+
+ Return True if the spec changed as a result; False otherwise
+ """
isection = self.intersection(other)
+ changed = (isection.versions != self.versions)
self.versions = isection.versions
-
+ return changed
@coerced
def __contains__(self, other):
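Finally, the to_dict/from_dict pair round-trips cleanly; a sketch of the two shapes it emits (the list contents are illustrative):

    from spack.version import VersionList

    concrete = VersionList(['1.2.3'])
    assert concrete.to_dict() == {'version': '1.2.3'}

    ranged = VersionList(['1.0:5.0', '6.1'])
    assert ranged.to_dict() == {'versions': ['1.0:5.0', '6.1']}
    assert VersionList.from_dict(ranged.to_dict()) == ranged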