-rw-r--r--  lib/spack/external/__init__.py  5
-rw-r--r--  lib/spack/external/_vendoring/ruamel.pyi  1
-rw-r--r--  lib/spack/external/_vendoring/ruamel.yaml.LICENSE (renamed from lib/spack/external/ruamel/yaml/LICENSE)  12
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/__init__.py  57
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/anchor.py  20
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/comments.py  1267
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/compat.py  268
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/composer.py  243
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/configobjwalker.py  14
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/constructor.py  1845
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/cyaml.py  183
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/dumper.py  219
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/emitter.py  1772
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/error.py  332
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/events.py  196
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/loader.py  75
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/main.py  1667
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/nodes.py  135
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/parser.py  884
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/py.typed (renamed from lib/spack/external/ruamel/__init__.py)  0
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/reader.py  302
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/representer.py  1156
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/resolver.py (renamed from lib/spack/external/ruamel/yaml/resolver.py)  416
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/scalarbool.py  47
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/scalarfloat.py  124
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/scalarint.py  127
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/scalarstring.py  152
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/scanner.py  2444
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/serializer.py  241
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/timestamp.py  61
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/tokens.py  404
-rw-r--r--  lib/spack/external/_vendoring/ruamel/yaml/util.py  256
-rw-r--r--  lib/spack/external/ruamel/yaml/.ruamel/__init__.py  2
-rw-r--r--  lib/spack/external/ruamel/yaml/README.rst  38
-rw-r--r--  lib/spack/external/ruamel/yaml/__init__.py  85
-rw-r--r--  lib/spack/external/ruamel/yaml/comments.py  486
-rw-r--r--  lib/spack/external/ruamel/yaml/compat.py  123
-rw-r--r--  lib/spack/external/ruamel/yaml/composer.py  182
-rw-r--r--  lib/spack/external/ruamel/yaml/configobjwalker.py  9
-rw-r--r--  lib/spack/external/ruamel/yaml/constructor.py  1172
-rw-r--r--  lib/spack/external/ruamel/yaml/dumper.py  102
-rw-r--r--  lib/spack/external/ruamel/yaml/emitter.py  1282
-rw-r--r--  lib/spack/external/ruamel/yaml/error.py  85
-rw-r--r--  lib/spack/external/ruamel/yaml/events.py  106
-rw-r--r--  lib/spack/external/ruamel/yaml/loader.py  61
-rw-r--r--  lib/spack/external/ruamel/yaml/main.py  395
-rw-r--r--  lib/spack/external/ruamel/yaml/nodes.py  86
-rw-r--r--  lib/spack/external/ruamel/yaml/parser.py  675
-rw-r--r--  lib/spack/external/ruamel/yaml/reader.py  213
-rw-r--r--  lib/spack/external/ruamel/yaml/representer.py  888
-rw-r--r--  lib/spack/external/ruamel/yaml/scalarstring.py  60
-rw-r--r--  lib/spack/external/ruamel/yaml/scanner.py  1661
-rw-r--r--  lib/spack/external/ruamel/yaml/serializer.py  178
-rw-r--r--  lib/spack/external/ruamel/yaml/setup.cfg  5
-rw-r--r--  lib/spack/external/ruamel/yaml/tokens.py  195
-rw-r--r--  lib/spack/external/ruamel/yaml/util.py  139
-rw-r--r--  lib/spack/external/vendor.txt  1
-rw-r--r--  lib/spack/spack/binary_distribution.py  4
-rw-r--r--  lib/spack/spack/cmd/__init__.py  8
-rw-r--r--  lib/spack/spack/config.py  54
-rw-r--r--  lib/spack/spack/environment/environment.py  7
-rw-r--r--  lib/spack/spack/mirror.py  16
-rw-r--r--  lib/spack/spack/repo.py  5
-rw-r--r--  lib/spack/spack/spec.py  9
-rw-r--r--  lib/spack/spack/test/cmd/config.py  2
-rw-r--r--  lib/spack/spack/test/config.py  2
-rw-r--r--  lib/spack/spack/test/env.py  34
-rw-r--r--  lib/spack/spack/test/mirror.py  12
-rw-r--r--  lib/spack/spack/test/spack_yaml.py  52
-rw-r--r--  lib/spack/spack/test/spec_yaml.py  6
-rw-r--r--  lib/spack/spack/test/util/spack_yaml.py  2
-rw-r--r--  lib/spack/spack/util/spack_yaml.py  275
-rw-r--r--  pyproject.toml  3
73 files changed, 15044 insertions(+), 8601 deletions(-)
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index b30ecca8d4..a460c84675 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -101,10 +101,7 @@ ruamel.yaml
* Usage: Used for config files. Ruamel is based on PyYAML but is more
actively maintained and has more features, including round-tripping
comments read from config files.
-* Version: 0.11.15 (last version supporting Python 2.6)
-* Note: This package has been slightly modified to improve Python 2.6
- compatibility -- some ``{}`` format strings were replaced, and the
- import for ``OrderedDict`` was tweaked.
+* Version: 0.17.21

six
---
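The round-tripping called out above is the reason Spack pins this library for its config files. A minimal sketch of that behavior (shown with the plain ruamel.yaml import name and a made-up config snippet; 0.17 API):

import io
from ruamel.yaml import YAML

yaml = YAML()  # typ='rt' (round-trip) is the default
src = (
    '# build configuration\n'
    'packages:\n'
    '  zlib:\n'
    '    version: [1.2.11]  # pinned\n'
)
data = yaml.load(src)
data['packages']['zlib']['version'] = ['1.2.12']
buf = io.StringIO()
yaml.dump(data, buf)
# both comments survive the edit in buf.getvalue()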
diff --git a/lib/spack/external/_vendoring/ruamel.pyi b/lib/spack/external/_vendoring/ruamel.pyi
new file mode 100644
index 0000000000..c77c78e8e3
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel.pyi
@@ -0,0 +1 @@
+from ruamel import *
\ No newline at end of file
diff --git a/lib/spack/external/ruamel/yaml/LICENSE b/lib/spack/external/_vendoring/ruamel.yaml.LICENSE
index f6f753a366..678f5cc0e0 100644
--- a/lib/spack/external/ruamel/yaml/LICENSE
+++ b/lib/spack/external/_vendoring/ruamel.yaml.LICENSE
@@ -1,21 +1,21 @@
-
The MIT License (MIT)
+ Copyright (c) 2014-2022 Anthon van der Neut, Ruamel bvba
+
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-
+
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
-
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/__init__.py b/lib/spack/external/_vendoring/ruamel/yaml/__init__.py
new file mode 100644
index 0000000000..2a2572cd4a
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/__init__.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+
+if False: # MYPY
+ from typing import Dict, Any # NOQA
+
+_package_data = dict(
+ full_package_name='ruamel.yaml',
+ version_info=(0, 17, 21),
+ __version__='0.17.21',
+ version_timestamp='2022-02-12 09:49:22',
+ author='Anthon van der Neut',
+ author_email='a.van.der.neut@ruamel.eu',
+ description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA
+ entry_points=None,
+ since=2014,
+ extras_require={
+ ':platform_python_implementation=="CPython" and python_version<"3.11"': ['ruamel.yaml.clib>=0.2.6'], # NOQA
+ 'jinja2': ['ruamel.yaml.jinja2>=0.2'],
+ 'docs': ['ryd'],
+ },
+ classifiers=[
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup',
+ 'Typing :: Typed',
+ ],
+ keywords='yaml 1.2 parser round-trip preserve quotes order config',
+ read_the_docs='yaml',
+ supported=[(3, 5)], # minimum
+ tox=dict(
+ env='*f', # f for 3.5
+ fl8excl='_test/lib',
+ ),
+ # universal=True,
+ python_requires='>=3',
+ rtfd='yaml',
+) # type: Dict[Any, Any]
+
+
+version_info = _package_data['version_info']
+__version__ = _package_data['__version__']
+
+try:
+ from .cyaml import * # NOQA
+
+ __with_libyaml__ = True
+except (ImportError, ValueError): # for Jython
+ __with_libyaml__ = False
+
+from ruamel.yaml.main import * # NOQA
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/anchor.py b/lib/spack/external/_vendoring/ruamel/yaml/anchor.py
new file mode 100644
index 0000000000..1deea78412
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/anchor.py
@@ -0,0 +1,20 @@
+# coding: utf-8
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Iterator # NOQA
+
+anchor_attrib = '_yaml_anchor'
+
+
+class Anchor:
+ __slots__ = 'value', 'always_dump'
+ attrib = anchor_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.value = None
+ self.always_dump = False
+
+ def __repr__(self):
+ # type: () -> Any
+ ad = ', (always dump)' if self.always_dump else ""
+ return 'Anchor({!r}{})'.format(self.value, ad)
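Anchor objects are attached lazily to round-tripped nodes; always_dump forces the emitter to write the anchor even when no alias references it. A small sketch using the CommentedBase helpers defined in comments.py below:

import sys
from ruamel.yaml import YAML

yaml = YAML()
data = yaml.load('base: {a: 1}\n')
data['base'].yaml_set_anchor('defaults', always_dump=True)
yaml.dump(data, sys.stdout)  # expected output: base: &defaults {a: 1}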
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/comments.py b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
new file mode 100644
index 0000000000..892c868af3
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/comments.py
@@ -0,0 +1,1267 @@
+# coding: utf-8
+
+"""
+utilities to deal with comments and formatting on dict/list/ordereddict/set;
+these concerns are not really related, and formatting could be factored out
+into a separate base class
+"""
+
+import sys
+import copy
+
+
+from ruamel.yaml.compat import ordereddict
+from ruamel.yaml.compat import MutableSliceableSequence, _F, nprintf # NOQA
+from ruamel.yaml.scalarstring import ScalarString
+from ruamel.yaml.anchor import Anchor
+
+from collections.abc import MutableSet, Sized, Set, Mapping
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Iterator # NOQA
+
+# fmt: off
+__all__ = ['CommentedSeq', 'CommentedKeySeq',
+ 'CommentedMap', 'CommentedOrderedMap',
+ 'CommentedSet', 'comment_attrib', 'merge_attrib',
+ 'C_POST', 'C_PRE', 'C_SPLIT_ON_FIRST_BLANK', 'C_BLANK_LINE_PRESERVE_SPACE',
+ ]
+# fmt: on
+
+# splitting of comments by the scanner
+# an EOLC (End-Of-Line Comment) is preceded by some token
+# an FLC (Full Line Comment) is a comment not preceded by a token, i.e. # is
+# the first non-blank on line
+# a BL is a blank line i.e. empty or spaces/tabs only
+# bits 0 and 1 are combined, you can choose only one
+C_POST = 0b00
+C_PRE = 0b01
+C_SPLIT_ON_FIRST_BLANK = 0b10 # as C_POST, but if blank line then C_PRE all lines before
+# first blank goes to POST even if no following real FLC
+# (first blank -> first of post)
+# 0b11 -> reserved for future use
+C_BLANK_LINE_PRESERVE_SPACE = 0b100
+# C_EOL_PRESERVE_SPACE2 = 0b1000
+
+
+class IDX:
+ # temporary auto increment, so rearranging is easier
+ def __init__(self):
+ # type: () -> None
+ self._idx = 0
+
+ def __call__(self):
+ # type: () -> Any
+ x = self._idx
+ self._idx += 1
+ return x
+
+ def __str__(self):
+ # type: () -> Any
+ return str(self._idx)
+
+
+cidx = IDX()
+
+# more or less in order of subjective expected likelihood
+# the _POST and _PRE ones are lists themselves
+C_VALUE_EOL = C_ELEM_EOL = cidx()
+C_KEY_EOL = cidx()
+C_KEY_PRE = C_ELEM_PRE = cidx() # note: this is not value
+C_VALUE_POST = C_ELEM_POST = cidx() # note: this is not value
+C_VALUE_PRE = cidx()
+C_KEY_POST = cidx()
+C_TAG_EOL = cidx()
+C_TAG_POST = cidx()
+C_TAG_PRE = cidx()
+C_ANCHOR_EOL = cidx()
+C_ANCHOR_POST = cidx()
+C_ANCHOR_PRE = cidx()
+
+
+comment_attrib = '_yaml_comment'
+format_attrib = '_yaml_format'
+line_col_attrib = '_yaml_line_col'
+merge_attrib = '_yaml_merge'
+tag_attrib = '_yaml_tag'
+
+
+class Comment:
+ # using sys.getsizeof, tested the Comment objects: __slots__ makes them bigger
+ # and adding self.end did not matter
+ __slots__ = 'comment', '_items', '_post', '_pre'
+ attrib = comment_attrib
+
+ def __init__(self, old=True):
+ # type: (bool) -> None
+ self._pre = None if old else [] # type: ignore
+ self.comment = None # [post, [pre]]
+ # map key (mapping/omap/dict) or index (sequence/list) to a list of
+ # dict: post_key, pre_key, post_value, pre_value
+ # list: pre item, post item
+ self._items = {} # type: Dict[Any, Any]
+ # self._start = [] # should not put these on first item
+ self._post = [] # type: List[Any] # end of document comments
+
+ def __str__(self):
+ # type: () -> str
+ if bool(self._post):
+ end = ',\n end=' + str(self._post)
+ else:
+ end = ""
+ return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end)
+
+ def _old__repr__(self):
+ # type: () -> str
+ if bool(self._post):
+ end = ',\n end=' + str(self._post)
+ else:
+ end = ""
+ try:
+ ln = max([len(str(k)) for k in self._items]) + 1
+ except ValueError:
+ ln = '' # type: ignore
+ it = ' '.join(
+ ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()]
+ )
+ if it:
+ it = '\n ' + it + ' '
+ return 'Comment(\n start={},\n items={{{}}}{})'.format(self.comment, it, end)
+
+ def __repr__(self):
+ # type: () -> str
+ if self._pre is None:
+ return self._old__repr__()
+ if bool(self._post):
+ end = ',\n end=' + repr(self._post)
+ else:
+ end = ""
+ try:
+ ln = max([len(str(k)) for k in self._items]) + 1
+ except ValueError:
+ ln = '' # type: ignore
+ it = ' '.join(
+ ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()]
+ )
+ if it:
+ it = '\n ' + it + ' '
+ return 'Comment(\n pre={},\n items={{{}}}{})'.format(self.pre, it, end)
+
+ @property
+ def items(self):
+ # type: () -> Any
+ return self._items
+
+ @property
+ def end(self):
+ # type: () -> Any
+ return self._post
+
+ @end.setter
+ def end(self, value):
+ # type: (Any) -> None
+ self._post = value
+
+ @property
+ def pre(self):
+ # type: () -> Any
+ return self._pre
+
+ @pre.setter
+ def pre(self, value):
+ # type: (Any) -> None
+ self._pre = value
+
+ def get(self, item, pos):
+ # type: (Any, Any) -> Any
+ x = self._items.get(item)
+ if x is None or len(x) <= pos:
+ return None
+ return x[pos] # can be None
+
+ def set(self, item, pos, value):
+ # type: (Any, Any, Any) -> Any
+ x = self._items.get(item)
+ if x is None:
+ self._items[item] = x = [None] * (pos + 1)
+ else:
+ while len(x) <= pos:
+ x.append(None)
+ assert x[pos] is None
+ x[pos] = value
+
+ def __contains__(self, x):
+ # type: (Any) -> Any
+ # test if a substring is in any of the attached comments
+ if self.comment:
+ if self.comment[0] and x in self.comment[0].value:
+ return True
+ if self.comment[1]:
+ for c in self.comment[1]:
+ if x in c.value:
+ return True
+ for value in self.items.values():
+ if not value:
+ continue
+ for c in value:
+ if c and x in c.value:
+ return True
+ if self.end:
+ for c in self.end:
+ if x in c.value:
+ return True
+ return False
+
+
+# to distinguish key from None
+def NoComment():
+ # type: () -> None
+ pass
+
+
+class Format:
+ __slots__ = ('_flow_style',)
+ attrib = format_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self._flow_style = None # type: Any
+
+ def set_flow_style(self):
+ # type: () -> None
+ self._flow_style = True
+
+ def set_block_style(self):
+ # type: () -> None
+ self._flow_style = False
+
+ def flow_style(self, default=None):
+ # type: (Optional[Any]) -> Any
+ """if default (the flow_style) is None, the flow style tacked on to
+ the object explicitly will be taken. If that is None as well the
+ default flow style rules the format down the line, or the type
+ of the constituent values (simple -> flow, map/list -> block)"""
+ if self._flow_style is None:
+ return default
+ return self._flow_style
+
+
+class LineCol:
+ """
+ line and column information wrt document, values start at zero (0)
+ """
+
+ attrib = line_col_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.line = None
+ self.col = None
+ self.data = None # type: Optional[Dict[Any, Any]]
+
+ def add_kv_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+ def key(self, k):
+ # type: (Any) -> Any
+ return self._kv(k, 0, 1)
+
+ def value(self, k):
+ # type: (Any) -> Any
+ return self._kv(k, 2, 3)
+
+ def _kv(self, k, x0, x1):
+ # type: (Any, Any, Any) -> Any
+ if self.data is None:
+ return None
+ data = self.data[k]
+ return data[x0], data[x1]
+
+ def item(self, idx):
+ # type: (Any) -> Any
+ if self.data is None:
+ return None
+ return self.data[idx][0], self.data[idx][1]
+
+ def add_idx_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+ def __repr__(self):
+ # type: () -> str
+ return _F('LineCol({line}, {col})', line=self.line, col=self.col) # type: ignore
+
+
+class Tag:
+ """store tag information for roundtripping"""
+
+ __slots__ = ('value',)
+ attrib = tag_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.value = None
+
+ def __repr__(self):
+ # type: () -> Any
+ return '{0.__class__.__name__}({0.value!r})'.format(self)
+
+
+class CommentedBase:
+ @property
+ def ca(self):
+ # type: () -> Any
+ if not hasattr(self, Comment.attrib):
+ setattr(self, Comment.attrib, Comment())
+ return getattr(self, Comment.attrib)
+
+ def yaml_end_comment_extend(self, comment, clear=False):
+ # type: (Any, bool) -> None
+ if comment is None:
+ return
+ if clear or self.ca.end is None:
+ self.ca.end = []
+ self.ca.end.extend(comment)
+
+ def yaml_key_comment_extend(self, key, comment, clear=False):
+ # type: (Any, Any, bool) -> None
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[1] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[1] = comment[1]
+ else:
+ r[1].extend(comment[0])
+ r[0] = comment[0]
+
+ def yaml_value_comment_extend(self, key, comment, clear=False):
+ # type: (Any, Any, bool) -> None
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[3] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[3] = comment[1]
+ else:
+ r[3].extend(comment[0])
+ r[2] = comment[0]
+
+ def yaml_set_start_comment(self, comment, indent=0):
+ # type: (Any, Any) -> None
+ """overwrites any preceding comment lines on an object
+ expects comment to be without `#` and possible have multiple lines
+ """
+ from .error import CommentMark
+ from .tokens import CommentToken
+
+ pre_comments = self._yaml_clear_pre_comment() # type: ignore
+ if comment[-1] == '\n':
+ comment = comment[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ for com in comment.split('\n'):
+ c = com.strip()
+ if len(c) > 0 and c[0] != '#':
+ com = '# ' + com
+ pre_comments.append(CommentToken(com + '\n', start_mark))
+
+ def yaml_set_comment_before_after_key(
+ self, key, before=None, indent=0, after=None, after_indent=None
+ ):
+ # type: (Any, Any, Any, Any, Any) -> None
+ """
+ expects comment (before/after) to be without `#` and to possibly have multiple lines
+ """
+ from ruamel.yaml.error import CommentMark
+ from ruamel.yaml.tokens import CommentToken
+
+ def comment_token(s, mark):
+ # type: (Any, Any) -> Any
+ # handle empty lines as having no comment
+ return CommentToken(('# ' if s else "") + s + '\n', mark)
+
+ if after_indent is None:
+ after_indent = indent + 2
+ if before and (len(before) > 1) and before[-1] == '\n':
+ before = before[:-1] # strip final newline if there
+ if after and after[-1] == '\n':
+ after = after[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ c = self.ca.items.setdefault(key, [None, [], None, None])
+ if before is not None:
+ if c[1] is None:
+ c[1] = []
+ if before == '\n':
+ c[1].append(comment_token("", start_mark)) # type: ignore
+ else:
+ for com in before.split('\n'):
+ c[1].append(comment_token(com, start_mark)) # type: ignore
+ if after:
+ start_mark = CommentMark(after_indent)
+ if c[3] is None:
+ c[3] = []
+ for com in after.split('\n'):
+ c[3].append(comment_token(com, start_mark)) # type: ignore
+
+ @property
+ def fa(self):
+ # type: () -> Any
+ """format attribute
+
+ set_flow_style()/set_block_style()"""
+ if not hasattr(self, Format.attrib):
+ setattr(self, Format.attrib, Format())
+ return getattr(self, Format.attrib)
+
+ def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """
+ EOL comments should start with ' #' (although at the beginning of
+ a line the space before the '#' is not required). The column index
+ is for the '#' mark.
+ """
+ from .tokens import CommentToken
+ from .error import CommentMark
+
+ if column is None:
+ try:
+ column = self._yaml_get_column(key)
+ except AttributeError:
+ column = 0
+ if comment[0] != '#':
+ comment = '# ' + comment
+ if column is None:
+ if comment[0] == '#':
+ comment = ' ' + comment
+ column = 0
+ start_mark = CommentMark(column)
+ ct = [CommentToken(comment, start_mark), None]
+ self._yaml_add_eol_comment(ct, key=key)
+
+ @property
+ def lc(self):
+ # type: () -> Any
+ if not hasattr(self, LineCol.attrib):
+ setattr(self, LineCol.attrib, LineCol())
+ return getattr(self, LineCol.attrib)
+
+ def _yaml_set_line_col(self, line, col):
+ # type: (Any, Any) -> None
+ self.lc.line = line
+ self.lc.col = col
+
+ def _yaml_set_kv_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ self.lc.add_kv_line_col(key, data)
+
+ def _yaml_set_idx_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ self.lc.add_idx_line_col(key, data)
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ return self.anchor
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+ @property
+ def tag(self):
+ # type: () -> Any
+ if not hasattr(self, Tag.attrib):
+ setattr(self, Tag.attrib, Tag())
+ return getattr(self, Tag.attrib)
+
+ def yaml_set_tag(self, value):
+ # type: (Any) -> None
+ self.tag.value = value
+
+ def copy_attributes(self, t, memo=None):
+ # type: (Any, Any) -> None
+ # fmt: off
+ for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
+ Tag.attrib, merge_attrib]:
+ if hasattr(self, a):
+ if memo is not None:
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo))
+ else:
+ setattr(t, a, getattr(self, a))
+ # fmt: on
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ raise NotImplementedError
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ raise NotImplementedError
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ raise NotImplementedError
+
+
+class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore
+ __slots__ = (Comment.attrib, '_lst')
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ list.__init__(self, *args, **kw)
+
+ def __getsingleitem__(self, idx):
+ # type: (Any) -> Any
+ return list.__getitem__(self, idx)
+
+ def __setsingleitem__(self, idx, value):
+ # type: (Any, Any) -> None
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if idx < len(self):
+ if (
+ isinstance(value, str)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[idx], ScalarString)
+ ):
+ value = type(self[idx])(value)
+ list.__setitem__(self, idx, value)
+
+ def __delsingleitem__(self, idx=None):
+ # type: (Any) -> Any
+ list.__delitem__(self, idx)
+ self.ca.items.pop(idx, None) # might not be there -> default value
+ for list_index in sorted(self.ca.items):
+ if list_index < idx:
+ continue
+ self.ca.items[list_index - 1] = self.ca.items.pop(list_index)
+
+ def __len__(self):
+ # type: () -> int
+ return list.__len__(self)
+
+ def insert(self, idx, val):
+ # type: (Any, Any) -> None
+ """the comments after the insertion have to move forward"""
+ list.insert(self, idx, val)
+ for list_index in sorted(self.ca.items, reverse=True):
+ if list_index < idx:
+ break
+ self.ca.items[list_index + 1] = self.ca.items.pop(list_index)
+
+ def extend(self, val):
+ # type: (Any) -> None
+ list.extend(self, val)
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ return list.__eq__(self, other)
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ pre_comments = self.ca.comment[1]
+ return pre_comments
+
+ def _yaml_clear_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ res = self.__class__()
+ memo[id(self)] = res
+ for k in self:
+ res.append(copy.deepcopy(k, memo))
+ self.copy_attributes(res, memo=memo)
+ return res
+
+ def __add__(self, other):
+ # type: (Any) -> Any
+ return list.__add__(self, other)
+
+ def sort(self, key=None, reverse=False):
+ # type: (Any, bool) -> None
+ if key is None:
+ tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse)
+ list.__init__(self, [x[0] for x in tmp_lst])
+ else:
+ tmp_lst = sorted(
+ zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse
+ )
+ list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst])
+ itm = self.ca.items
+ self.ca._items = {}
+ for idx, x in enumerate(tmp_lst):
+ old_index = x[1]
+ if old_index in itm:
+ self.ca.items[idx] = itm[old_index]
+
+ def __repr__(self):
+ # type: () -> Any
+ return list.__repr__(self)
+
+
+class CommentedKeySeq(tuple, CommentedBase): # type: ignore
+ """This primarily exists to be able to roundtrip keys that are sequences"""
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ pre_comments = self.ca.comment[1]
+ return pre_comments
+
+ def _yaml_clear_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+
+class CommentedMapView(Sized):
+ __slots__ = ('_mapping',)
+
+ def __init__(self, mapping):
+ # type: (Any) -> None
+ self._mapping = mapping
+
+ def __len__(self):
+ # type: () -> int
+ count = len(self._mapping)
+ return count
+
+
+class CommentedMapKeysView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(self, it):
+ # type: (Any) -> Any
+ return set(it)
+
+ def __contains__(self, key):
+ # type: (Any) -> Any
+ return key in self._mapping
+
+ def __iter__(self):
+ # type: () -> Any # yield from self._mapping # not in py27, pypy
+ # for x in self._mapping._keys():
+ for x in self._mapping:
+ yield x
+
+
+class CommentedMapItemsView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(self, it):
+ # type: (Any) -> Any
+ return set(it)
+
+ def __contains__(self, item):
+ # type: (Any) -> Any
+ key, value = item
+ try:
+ v = self._mapping[key]
+ except KeyError:
+ return False
+ else:
+ return v == value
+
+ def __iter__(self):
+ # type: () -> Any
+ for key in self._mapping._keys():
+ yield (key, self._mapping[key])
+
+
+class CommentedMapValuesView(CommentedMapView):
+ __slots__ = ()
+
+ def __contains__(self, value):
+ # type: (Any) -> Any
+ for key in self._mapping:
+ if value == self._mapping[key]:
+ return True
+ return False
+
+ def __iter__(self):
+ # type: () -> Any
+ for key in self._mapping._keys():
+ yield self._mapping[key]
+
+
+class CommentedMap(ordereddict, CommentedBase):
+ __slots__ = (Comment.attrib, '_ok', '_ref')
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ self._ok = set() # type: MutableSet[Any] # own keys
+ self._ref = [] # type: List[CommentedMap]
+ ordereddict.__init__(self, *args, **kw)
+
+ def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """values is set to key to indicate a value attachment of comment"""
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ return
+ if value is not NoComment:
+ self.yaml_value_comment_extend(value, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ """add on the value line, with value specified by the key"""
+ self._yaml_add_comment(comment, value=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][2].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post, last = None, None, None
+ for x in self:
+ if pre is not None and x != key:
+ post = x
+ break
+ if x == key:
+ pre = last
+ last = x
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for k1 in self:
+ if k1 >= key:
+ break
+ if k1 not in self.ca.items:
+ continue
+ sel_idx = k1
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ pre_comments = self.ca.comment[1]
+ return pre_comments
+
+ def _yaml_clear_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def update(self, *vals, **kw):
+ # type: (Any, Any) -> None
+ try:
+ ordereddict.update(self, *vals, **kw)
+ except TypeError:
+ # probably a dict that is used
+ for x in vals[0]:
+ self[x] = vals[0][x]
+ if vals:
+ try:
+ self._ok.update(vals[0].keys()) # type: ignore
+ except AttributeError:
+ # assume one argument that is a list/tuple of two-element lists/tuples
+ for x in vals[0]:
+ self._ok.add(x[0])
+ if kw:
+ self._ok.update(kw.keys())
+
+ def insert(self, pos, key, value, comment=None):
+ # type: (Any, Any, Any, Optional[Any]) -> None
+ """insert key value into given position
+ attach comment if provided
+ """
+ keys = list(self.keys()) + [key]
+ ordereddict.insert(self, pos, key, value)
+ for keytmp in keys:
+ self._ok.add(keytmp)
+ for referer in self._ref:
+ for keytmp in keys:
+ referer.update_key_value(keytmp)
+ if comment is not None:
+ self.yaml_add_eol_comment(comment, key=key)
+
+ def mlget(self, key, default=None, list_ok=False):
+ # type: (Any, Any, Any) -> Any
+ """multi-level get that expects dicts within dicts"""
+ if not isinstance(key, list):
+ return self.get(key, default)
+ # assume that the key is a list of recursively accessible dicts
+
+ def get_one_level(key_list, level, d):
+ # type: (Any, Any, Any) -> Any
+ if not list_ok:
+ assert isinstance(d, dict)
+ if level >= len(key_list):
+ if level > len(key_list):
+ raise IndexError
+ return d[key_list[level - 1]]
+ return get_one_level(key_list, level + 1, d[key_list[level - 1]])
+
+ try:
+ return get_one_level(key, 1, self)
+ except KeyError:
+ return default
+ except (TypeError, IndexError):
+ if not list_ok:
+ raise
+ return default
+
+ def __getitem__(self, key):
+ # type: (Any) -> Any
+ try:
+ return ordereddict.__getitem__(self, key)
+ except KeyError:
+ for merged in getattr(self, merge_attrib, []):
+ if key in merged[1]:
+ return merged[1][key]
+ raise
+
+ def __setitem__(self, key, value):
+ # type: (Any, Any) -> None
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if key in self:
+ if (
+ isinstance(value, str)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[key], ScalarString)
+ ):
+ value = type(self[key])(value)
+ ordereddict.__setitem__(self, key, value)
+ self._ok.add(key)
+
+ def _unmerged_contains(self, key):
+ # type: (Any) -> Any
+ if key in self._ok:
+ return True
+ return None
+
+ def __contains__(self, key):
+ # type: (Any) -> bool
+ return bool(ordereddict.__contains__(self, key))
+
+ def get(self, key, default=None):
+ # type: (Any, Any) -> Any
+ try:
+ return self.__getitem__(key)
+ except: # NOQA
+ return default
+
+ def __repr__(self):
+ # type: () -> Any
+ return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict')
+
+ def non_merged_items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ if x in self._ok:
+ yield x, ordereddict.__getitem__(self, x)
+
+ def __delitem__(self, key):
+ # type: (Any) -> None
+ # for merged in getattr(self, merge_attrib, []):
+ # if key in merged[1]:
+ # value = merged[1][key]
+ # break
+ # else:
+ # # not found in merged in stuff
+ # ordereddict.__delitem__(self, key)
+ # for referer in self._ref:
+ # referer.update=_key_value(key)
+ # return
+ #
+ # ordereddict.__setitem__(self, key, value) # merge might have different value
+ # self._ok.discard(key)
+ self._ok.discard(key)
+ ordereddict.__delitem__(self, key)
+ for referer in self._ref:
+ referer.update_key_value(key)
+
+ def __iter__(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def _keys(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return int(ordereddict.__len__(self))
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ return bool(dict(self) == other)
+
+ def keys(self):
+ # type: () -> Any
+ return CommentedMapKeysView(self)
+
+ def values(self):
+ # type: () -> Any
+ return CommentedMapValuesView(self)
+
+ def _items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x, ordereddict.__getitem__(self, x)
+
+ def items(self):
+ # type: () -> Any
+ return CommentedMapItemsView(self)
+
+ @property
+ def merge(self):
+ # type: () -> Any
+ if not hasattr(self, merge_attrib):
+ setattr(self, merge_attrib, [])
+ return getattr(self, merge_attrib)
+
+ def copy(self):
+ # type: () -> Any
+ x = type(self)() # update doesn't work
+ for k, v in self._items():
+ x[k] = v
+ self.copy_attributes(x)
+ return x
+
+ def add_referent(self, cm):
+ # type: (Any) -> None
+ if cm not in self._ref:
+ self._ref.append(cm)
+
+ def add_yaml_merge(self, value):
+ # type: (Any) -> None
+ for v in value:
+ v[1].add_referent(self)
+ for k, v in v[1].items():
+ if ordereddict.__contains__(self, k):
+ continue
+ ordereddict.__setitem__(self, k, v)
+ self.merge.extend(value)
+
+ def update_key_value(self, key):
+ # type: (Any) -> None
+ if key in self._ok:
+ return
+ for v in self.merge:
+ if key in v[1]:
+ ordereddict.__setitem__(self, key, v[1][key])
+ return
+ ordereddict.__delitem__(self, key)
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ res = self.__class__()
+ memo[id(self)] = res
+ for k in self:
+ res[k] = copy.deepcopy(self[k], memo)
+ self.copy_attributes(res, memo=memo)
+ return res
+
+
+# based on brownie mappings
+@classmethod # type: ignore
+def raise_immutable(cls, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> None
+ raise TypeError('{} objects are immutable'.format(cls.__name__))
+
+
+class CommentedKeyMap(CommentedBase, Mapping): # type: ignore
+ """This primarily exists to be able to roundtrip keys that are mappings"""
+ __slots__ = Comment.attrib, '_od'
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ if hasattr(self, '_od'):
+ raise_immutable(self)
+ try:
+ self._od = ordereddict(*args, **kw)
+ except TypeError:
+ raise
+
+ __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable
+
+ # need to implement __getitem__, __iter__ and __len__
+ def __getitem__(self, index):
+ # type: (Any) -> Any
+ return self._od[index]
+
+ def __iter__(self):
+ # type: () -> Iterator[Any]
+ for x in self._od.__iter__():
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._od)
+
+ def __hash__(self):
+ # type: () -> Any
+ return hash(tuple(self.items()))
+
+ def __repr__(self):
+ # type: () -> Any
+ if not hasattr(self, merge_attrib):
+ return self._od.__repr__()
+ return 'ordereddict(' + repr(list(self._od.items())) + ')'
+
+ @classmethod
+ def fromkeys(cls, keys, v=None):
+ # type: (Any, Any, Any) -> Any
+ return CommentedKeyMap(dict.fromkeys(keys, v))
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+
+class CommentedOrderedMap(CommentedMap):
+ __slots__ = (Comment.attrib,)
+
+
+class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA
+ __slots__ = Comment.attrib, 'odict'
+
+ def __init__(self, values=None):
+ # type: (Any) -> None
+ self.odict = ordereddict()
+ MutableSet.__init__(self)
+ if values is not None:
+ self |= values # type: ignore
+
+ def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """values is set to key to indicate a value attachment of comment"""
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ return
+ if value is not NoComment:
+ self.yaml_value_comment_extend(value, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ """add on the value line, with value specified by the key"""
+ self._yaml_add_comment(comment, value=key)
+
+ def add(self, value):
+ # type: (Any) -> None
+ """Add an element."""
+ self.odict[value] = None
+
+ def discard(self, value):
+ # type: (Any) -> None
+ """Remove an element. Do not raise an exception if absent."""
+ self.odict.pop(value, None)
+
+ def __contains__(self, x):
+ # type: (Any) -> Any
+ return x in self.odict
+
+ def __iter__(self):
+ # type: () -> Any
+ for x in self.odict:
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return len(self.odict)
+
+ def __repr__(self):
+ # type: () -> str
+ return 'set({0!r})'.format(self.odict.keys())
+
+
+class TaggedScalar(CommentedBase):
+ # the value and style attributes are set during roundtrip construction
+ def __init__(self, value=None, style=None, tag=None):
+ # type: (Any, Any, Any) -> None
+ self.value = value
+ self.style = style
+ if tag is not None:
+ self.yaml_set_tag(tag)
+
+ def __str__(self):
+ # type: () -> Any
+ return self.value
+
+
+def dump_comments(d, name="", sep='.', out=sys.stdout):
+ # type: (Any, str, str, Any) -> None
+ """
+ recursively dump comments, all but the toplevel preceded by the path
+ in dotted form x.0.a
+ """
+ if isinstance(d, dict) and hasattr(d, 'ca'):
+ if name:
+ out.write('{} {}\n'.format(name, type(d)))
+ out.write('{!r}\n'.format(d.ca)) # type: ignore
+ for k in d:
+ dump_comments(d[k], name=(name + sep + str(k)) if name else k, sep=sep, out=out)
+ elif isinstance(d, list) and hasattr(d, 'ca'):
+ if name:
+ out.write('{} {}\n'.format(name, type(d)))
+ out.write('{!r}\n'.format(d.ca)) # type: ignore
+ for idx, k in enumerate(d):
+ dump_comments(
+ k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out
+ )
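Most of the machinery above is reached through the CommentedBase helper methods rather than instantiated directly. A short sketch of attaching comments to a loaded mapping (hypothetical keys; 0.17 API):

import sys
from ruamel.yaml import YAML

yaml = YAML()
data = yaml.load('a: 1\nb: 2\n')
data.yaml_add_eol_comment('seconds', key='a', column=8)
data.yaml_set_comment_before_after_key('b', before='next section')
yaml.dump(data, sys.stdout)
# expected output:
# a: 1    # seconds
# # next section
# b: 2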
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/compat.py b/lib/spack/external/_vendoring/ruamel/yaml/compat.py
new file mode 100644
index 0000000000..87d5e359d3
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/compat.py
@@ -0,0 +1,268 @@
+# coding: utf-8
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import io
+import traceback
+from abc import abstractmethod
+import collections.abc
+
+
+# fmt: off
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
+ from typing import Optional # NOQA
+# fmt: on
+
+_DEFAULT_YAML_VERSION = (1, 2)
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict # type: ignore
+
+ # needed to get the right class name; "import ... as ordereddict" would not rename it
+
+
+class ordereddict(OrderedDict): # type: ignore
+ if not hasattr(OrderedDict, 'insert'):
+
+ def insert(self, pos, key, value):
+ # type: (int, Any, Any) -> None
+ if pos >= len(self):
+ self[key] = value
+ return
+ od = ordereddict()
+ od.update(self)
+ for k in od:
+ del self[k]
+ for index, old_key in enumerate(od):
+ if pos == index:
+ self[key] = value
+ self[old_key] = od[old_key]
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+
+# replace with f-strings when 3.5 support is dropped
+# ft = '42'
+# assert _F('abc {ft!r}', ft=ft) == 'abc %r' % ft
+# 'abc %r' % ft -> _F('abc {ft!r}' -> f'abc {ft!r}'
+def _F(s, *superfluous, **kw):
+ # type: (Any, Any, Any) -> Any
+ if superfluous:
+ raise TypeError
+ return s.format(**kw)
+
+
+StringIO = io.StringIO
+BytesIO = io.BytesIO
+
+if False: # MYPY
+ # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO]
+ # StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore
+ StreamType = Any
+
+ StreamTextType = StreamType # Union[Text, StreamType]
+ VersionType = Union[List[int], str, Tuple[int, int]]
+
+builtins_module = 'builtins'
+
+
+def with_metaclass(meta, *bases):
+ # type: (Any, Any) -> Any
+ """Create a base class with a metaclass."""
+ return meta('NewBase', bases, {})
+
+
+DBG_TOKEN = 1
+DBG_EVENT = 2
+DBG_NODE = 4
+
+
+_debug = None # type: Optional[int]
+if 'RUAMELDEBUG' in os.environ:
+ _debugx = os.environ.get('RUAMELDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+
+
+if bool(_debug):
+
+ class ObjectCounter:
+ def __init__(self):
+ # type: () -> None
+ self.map = {} # type: Dict[Any, Any]
+
+ def __call__(self, k):
+ # type: (Any) -> None
+ self.map[k] = self.map.get(k, 0) + 1
+
+ def dump(self):
+ # type: () -> None
+ for k in sorted(self.map):
+ sys.stdout.write('{} -> {}'.format(k, self.map[k]))
+
+ object_counter = ObjectCounter()
+
+
+# used from yaml util when testing
+def dbg(val=None):
+ # type: (Any) -> Any
+ global _debug
+ if _debug is None:
+ # set to true or false
+ _debugx = os.environ.get('YAMLDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+ if val is None:
+ return _debug
+ return _debug & val
+
+
+class Nprint:
+ def __init__(self, file_name=None):
+ # type: (Any) -> None
+ self._max_print = None # type: Any
+ self._count = None # type: Any
+ self._file_name = file_name
+
+ def __call__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ if not bool(_debug):
+ return
+ out = sys.stdout if self._file_name is None else open(self._file_name, 'a')
+ dbgprint = print # to fool checking for print statements by dv utility
+ kw1 = kw.copy()
+ kw1['file'] = out
+ dbgprint(*args, **kw1)
+ out.flush()
+ if self._max_print is not None:
+ if self._count is None:
+ self._count = self._max_print
+ self._count -= 1
+ if self._count == 0:
+ dbgprint('forced exit\n')
+ traceback.print_stack()
+ out.flush()
+ sys.exit(0)
+ if self._file_name:
+ out.close()
+
+ def set_max_print(self, i):
+ # type: (int) -> None
+ self._max_print = i
+ self._count = None
+
+ def fp(self, mode='a'):
+ # type: (str) -> Any
+ out = sys.stdout if self._file_name is None else open(self._file_name, mode)
+ return out
+
+
+nprint = Nprint()
+nprintf = Nprint('/var/tmp/ruamel.yaml.log')
+
+# char checkers following production rules
+
+
+def check_namespace_char(ch):
+ # type: (Any) -> bool
+ if '\x21' <= ch <= '\x7E': # ! to ~
+ return True
+ if '\xA0' <= ch <= '\uD7FF':
+ return True
+ if ('\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': # excl. byte order mark
+ return True
+ if '\U00010000' <= ch <= '\U0010FFFF':
+ return True
+ return False
+
+
+def check_anchorname_char(ch):
+ # type: (Any) -> bool
+ if ch in ',[]{}':
+ return False
+ return check_namespace_char(ch)
+
+
+def version_tnf(t1, t2=None):
+ # type: (Any, Any) -> Any
+ """
+ return True if ruamel.yaml version_info < t1; None if t2 is specified and version_info < t2; otherwise False
+ """
+ from ruamel.yaml import version_info # NOQA
+
+ if version_info < t1:
+ return True
+ if t2 is not None and version_info < t2:
+ return None
+ return False
+
+
+class MutableSliceableSequence(collections.abc.MutableSequence): # type: ignore
+ __slots__ = ()
+
+ def __getitem__(self, index):
+ # type: (Any) -> Any
+ if not isinstance(index, slice):
+ return self.__getsingleitem__(index)
+ return type(self)([self[i] for i in range(*index.indices(len(self)))]) # type: ignore
+
+ def __setitem__(self, index, value):
+ # type: (Any, Any) -> None
+ if not isinstance(index, slice):
+ return self.__setsingleitem__(index, value)
+ assert iter(value)
+ # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+ if index.step is None:
+ del self[index.start : index.stop]
+ for elem in reversed(value):
+ self.insert(0 if index.start is None else index.start, elem)
+ else:
+ range_parms = index.indices(len(self))
+ nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1
+ # need to test before changing, in case TypeError is caught
+ if nr_assigned_items < len(value):
+ raise TypeError(
+ 'too many elements in value {} < {}'.format(nr_assigned_items, len(value))
+ )
+ elif nr_assigned_items > len(value):
+ raise TypeError(
+ 'not enough elements in value {} > {}'.format(
+ nr_assigned_items, len(value)
+ )
+ )
+ for idx, i in enumerate(range(*range_parms)):
+ self[i] = value[idx]
+
+ def __delitem__(self, index):
+ # type: (Any) -> None
+ if not isinstance(index, slice):
+ return self.__delsingleitem__(index)
+ # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+ for i in reversed(range(*index.indices(len(self)))):
+ del self[i]
+
+ @abstractmethod
+ def __getsingleitem__(self, index):
+ # type: (Any) -> Any
+ raise IndexError
+
+ @abstractmethod
+ def __setsingleitem__(self, index, value):
+ # type: (Any, Any) -> None
+ raise IndexError
+
+ @abstractmethod
+ def __delsingleitem__(self, index):
+ # type: (Any) -> None
+ raise IndexError
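MutableSliceableSequence asks subclasses for only the three single-item methods and synthesizes all slice handling from them, which is how CommentedSeq (in comments.py above) gets full slicing support. A behavioral sketch:

from ruamel.yaml.comments import CommentedSeq

s = CommentedSeq([0, 1, 2, 3, 4, 5])
assert s[1:4] == [1, 2, 3]   # built one __getsingleitem__ at a time
s[1:4] = ['a', 'b', 'c']     # plain slice: delete the range, insert the items
del s[::2]                   # extended slice: deleted right to left
assert list(s) == ['a', 'c', 5]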
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/composer.py b/lib/spack/external/_vendoring/ruamel/yaml/composer.py
new file mode 100644
index 0000000000..bad132aae8
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/composer.py
@@ -0,0 +1,243 @@
+# coding: utf-8
+
+import warnings
+
+from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
+from ruamel.yaml.compat import _F, nprint, nprintf # NOQA
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Composer', 'ComposerError']
+
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+
+class Composer:
+ def __init__(self, loader=None):
+ # type: (Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_composer', None) is None:
+ self.loader._composer = self
+ self.anchors = {} # type: Dict[Any, Any]
+
+ @property
+ def parser(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ self.loader.parser # property access lazily initializes self.loader._parser
+ return self.loader._parser
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ # assert self.loader._resolver is not None
+ if hasattr(self.loader, 'typ'):
+ self.loader.resolver # property access lazily initializes self.loader._resolver
+ return self.loader._resolver
+
+ def check_node(self):
+ # type: () -> Any
+ # Drop the STREAM-START event.
+ if self.parser.check_event(StreamStartEvent):
+ self.parser.get_event()
+
+ # Return whether more documents are available.
+ return not self.parser.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # type: () -> Any
+ # Get the root node of the next document.
+ if not self.parser.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # type: () -> Any
+ # Drop the STREAM-START event.
+ self.parser.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None # type: Any
+ if not self.parser.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.parser.check_event(StreamEndEvent):
+ event = self.parser.get_event()
+ raise ComposerError(
+ 'expected a single document in the stream',
+ document.start_mark,
+ 'but found another document',
+ event.start_mark,
+ )
+
+ # Drop the STREAM-END event.
+ self.parser.get_event()
+
+ return document
+
+ def compose_document(self):
+ # type: (Any) -> Any
+ # Drop the DOCUMENT-START event.
+ self.parser.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.parser.get_event()
+
+ self.anchors = {}
+ return node
+
+ def return_alias(self, a):
+ # type: (Any) -> Any
+ return a
+
+ def compose_node(self, parent, index):
+ # type: (Any, Any) -> Any
+ if self.parser.check_event(AliasEvent):
+ event = self.parser.get_event()
+ alias = event.anchor
+ if alias not in self.anchors:
+ raise ComposerError(
+ None,
+ None,
+ _F('found undefined alias {alias!r}', alias=alias),
+ event.start_mark,
+ )
+ return self.return_alias(self.anchors[alias])
+ event = self.parser.peek_event()
+ anchor = event.anchor
+ if anchor is not None: # have an anchor
+ if anchor in self.anchors:
+ # raise ComposerError(
+ # "found duplicate anchor %r; first occurrence"
+ # % (anchor), self.anchors[anchor].start_mark,
+ # "second occurrence", event.start_mark)
+ ws = (
+ '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
+ '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
+ )
+ warnings.warn(ws, ReusedAnchorWarning)
+ self.resolver.descend_resolver(parent, index)
+ if self.parser.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.parser.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.parser.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.resolver.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ # type: (Any) -> Any
+ event = self.parser.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(
+ tag,
+ event.value,
+ event.start_mark,
+ event.end_mark,
+ style=event.style,
+ comment=event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ # type: (Any) -> Any
+ start_event = self.parser.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.parser.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.parser.get_event()
+ if node.flow_style is True and end_event.comment is not None:
+ if node.comment is not None:
+ nprint(
+ 'Warning: unexpected end_event comment in sequence '
+ 'node {}'.format(node.flow_style)
+ )
+ node.comment = end_event.comment
+ node.end_mark = end_event.end_mark
+ self.check_end_doc_comment(end_event, node)
+ return node
+
+ def compose_mapping_node(self, anchor):
+ # type: (Any) -> Any
+ start_event = self.parser.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.parser.check_event(MappingEndEvent):
+ # key_event = self.parser.peek_event()
+ item_key = self.compose_node(node, None)
+ # if item_key in node.value:
+ # raise ComposerError("while composing a mapping",
+ # start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ # node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.parser.get_event()
+ if node.flow_style is True and end_event.comment is not None:
+ node.comment = end_event.comment
+ node.end_mark = end_event.end_mark
+ self.check_end_doc_comment(end_event, node)
+ return node
+
+ def check_end_doc_comment(self, end_event, node):
+ # type: (Any, Any) -> None
+ if end_event.comment and end_event.comment[1]:
+ # pre-comments on an end_event, with no following node to move them to
+ if node.comment is None:
+ node.comment = [None, None]
+ assert not isinstance(node, ScalarNode)
+ # this is a post comment on a mapping node, add as third element
+ # in the list
+ node.comment.append(end_event.comment[1])
+ end_event.comment[1] = None
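Note that compose_node merely warns on a reused anchor instead of raising, and the later definition wins for subsequent aliases. A sketch of that behavior:

import warnings
from ruamel.yaml import YAML
from ruamel.yaml.error import ReusedAnchorWarning

yaml = YAML()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    data = yaml.load('a: &x 1\nb: &x 2\nc: *x\n')
assert data['c'] == 2  # the alias resolves to the most recent &x
assert any(issubclass(w.category, ReusedAnchorWarning) for w in caught)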
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/configobjwalker.py b/lib/spack/external/_vendoring/ruamel/yaml/configobjwalker.py
new file mode 100644
index 0000000000..cbc6148038
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,14 @@
+# coding: utf-8
+
+import warnings
+
+from ruamel.yaml.util import configobj_walker as new_configobj_walker
+
+if False: # MYPY
+ from typing import Any # NOQA
+
+
+def configobj_walker(cfg):
+ # type: (Any) -> Any
+ warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
+ return new_configobj_walker(cfg)
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/constructor.py b/lib/spack/external/_vendoring/ruamel/yaml/constructor.py
new file mode 100644
index 0000000000..6aa6371779
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/constructor.py
@@ -0,0 +1,1845 @@
+# coding: utf-8
+
+import datetime
+import base64
+import binascii
+import sys
+import types
+import warnings
+from collections.abc import Hashable, MutableSequence, MutableMapping
+
+# fmt: off
+from ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning,
+ MantissaNoDotYAML1_1Warning)
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode)
+from ruamel.yaml.compat import (_F, builtins_module, # NOQA
+ nprint, nprintf, version_tnf)
+from ruamel.yaml.compat import ordereddict
+
+from ruamel.yaml.comments import * # NOQA
+from ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
+ CommentedKeySeq, CommentedSeq, TaggedScalar,
+ CommentedKeyMap,
+ C_KEY_PRE, C_KEY_EOL, C_KEY_POST,
+ C_VALUE_PRE, C_VALUE_EOL, C_VALUE_POST,
+ )
+from ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString,
+ LiteralScalarString, FoldedScalarString,
+ PlainScalarString, ScalarString,)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+from ruamel.yaml.util import timestamp_regexp, create_timestamp
+
+if False: # MYPY
+ from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError', 'RoundTripConstructor']
+# fmt: on
+
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+
+class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):
+ pass
+
+
+class DuplicateKeyError(MarkedYAMLError):
+ pass
+
+
+class BaseConstructor:
+
+ yaml_constructors = {} # type: Dict[Any, Any]
+ yaml_multi_constructors = {} # type: Dict[Any, Any]
+
+ def __init__(self, preserve_quotes=None, loader=None):
+ # type: (Optional[bool], Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_constructor', None) is None:
+ self.loader._constructor = self
+ self.yaml_base_dict_type = dict
+ self.yaml_base_list_type = list
+ self.constructed_objects = {} # type: Dict[Any, Any]
+ self.recursive_objects = {} # type: Dict[Any, Any]
+ self.state_generators = [] # type: List[Any]
+ self.deep_construct = False
+ self._preserve_quotes = preserve_quotes
+ self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16))
+
+ @property
+ def composer(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.composer
+ try:
+ return self.loader._composer
+ except AttributeError:
+ sys.stdout.write('slt {}\n'.format(type(self)))
+ sys.stdout.write('slc {}\n'.format(self.loader._composer))
+ sys.stdout.write('{}\n'.format(dir(self)))
+ raise
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ # needed to get to the expanded comments
+ if hasattr(self.loader, 'typ'):
+ return self.loader.scanner
+ return self.loader._scanner
+
+ def check_data(self):
+ # type: () -> Any
+        # check whether more documents are available
+ return self.composer.check_node()
+
+ def get_data(self):
+ # type: () -> Any
+ # Construct and return the next document.
+ if self.composer.check_node():
+ return self.construct_document(self.composer.get_node())
+
+ def get_single_data(self):
+ # type: () -> Any
+ # Ensure that the stream contains a single document and construct it.
+ node = self.composer.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ # type: (Any) -> Any
+ data = self.construct_object(node)
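+        # container constructors are generators: they yield the (empty) object
+        # first and fill it afterwards; drain any pending generators here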
+ while bool(self.state_generators):
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for _dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+ """
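+        # e.g. mapping keys are always constructed with deep=True, so that a
+        # list-valued key can immediately be converted to a (hashable) tuple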
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ return self.recursive_objects[node]
+ # raise ConstructorError(
+ # None, None, 'found unconstructable recursive node', node.start_mark
+ # )
+ self.recursive_objects[node] = None
+ data = self.construct_non_recursive_object(node)
+
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_non_recursive_object(self, node, tag=None):
+ # type: (Any, Optional[str]) -> Any
+ constructor = None # type: Any
+ tag_suffix = None
+ if tag is None:
+ tag = node.tag
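+        # lookup order: exact tag match, then a multi-constructor whose prefix
+        # matches, then the catch-all (None) entries, finally a default based
+        # on the node type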
+ if tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag.startswith(tag_prefix):
+ tag_suffix = tag[len(tag_prefix) :]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for _dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ return data
+
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a scalar node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+ """
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a sequence node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ return [self.construct_object(child, deep=deep) for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+ """
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a mapping node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ total_mapping = self.yaml_base_dict_type()
+ if getattr(node, 'merge', None) is not None:
+ todo = [(node.merge, False), (node.value, False)]
+ else:
+ todo = [(node.value, True)]
+ for values, check in todo:
+ mapping = self.yaml_base_dict_type() # type: Dict[Any, Any]
+ for key_node, value_node in values:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, list):
+ key = tuple(key)
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+
+ value = self.construct_object(value_node, deep=deep)
+ if check:
+ if self.check_mapping_key(node, key_node, mapping, key, value):
+ mapping[key] = value
+ else:
+ mapping[key] = value
+ total_mapping.update(mapping)
+ return total_mapping
+
+ def check_mapping_key(self, node, key_node, mapping, key, value):
+ # type: (Any, Any, Any, Any, Any) -> bool
+ """return True if key is unique"""
+ if key in mapping:
+ if not self.allow_duplicate_keys:
+ mk = mapping.get(key)
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}" with value "{}" '
+ '(original value: "{}")'.format(key, value, mk),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ return False
+ return True
+
+ def check_set_key(self, node, key_node, setting, key):
+        # type: (Any, Any, Any, Any) -> None
+ if key in setting:
+ if not self.allow_duplicate_keys:
+ args = [
+ 'while constructing a set',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+
+ def construct_pairs(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a mapping node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+
+class SafeConstructor(BaseConstructor):
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ # type: (Any) -> Any
+ """
+ This implements the merge key feature http://yaml.org/type/merge.html
+ by inserting keys from the merge dict/list of dicts if not yet
+ available in this node
+ """
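+        # e.g. given 'base: &b {x: 1}', a mapping '{<<: *b, y: 2}' ends up
+        # containing both x and y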
+ merge = [] # type: List[Any]
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ if merge: # double << key
+ if self.allow_duplicate_keys:
+ del node.value[index]
+ index += 1
+ continue
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key_node.value),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ _F(
+ 'expected a mapping for merging, but found {subnode_id!s}',
+ subnode_id=subnode.id,
+ ),
+ subnode.start_mark,
+ )
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ _F(
+ 'expected a mapping or list of mappings for merging, '
+ 'but found {value_node_id!s}',
+ value_node_id=value_node.id,
+ ),
+ value_node.start_mark,
+ )
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if bool(merge):
+            node.merge = merge  # keep merge keys separate so updates do not create duplicates
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+ """
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ # type: (Any) -> Any
+ self.construct_scalar(node)
+ return None
+
+    # the YAML 1.2 spec no longer mentions yes/no etc.; the 1.1 spec does
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'y': True,
+ 'n': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ # type: (Any) -> bool
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ # type: (Any) -> int
+ value_s = self.construct_scalar(node)
+ value_s = value_s.replace('_', "")
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '0':
+ return 0
+ elif value_s.startswith('0b'):
+ return sign * int(value_s[2:], 2)
+ elif value_s.startswith('0x'):
+ return sign * int(value_s[2:], 16)
+ elif value_s.startswith('0o'):
+ return sign * int(value_s[2:], 8)
+ elif self.resolver.processing_version == (1, 1) and value_s[0] == '0':
+ return sign * int(value_s, 8)
+ elif self.resolver.processing_version == (1, 1) and ':' in value_s:
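+            # YAML 1.1 sexagesimal integer, e.g. '1:30' parses as 1 * 60 + 30 == 90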
+ digits = [int(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ else:
+ return sign * int(value_s)
+
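+    # repeatedly square 1e300 until it overflows to float('inf')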
+ inf_value = 1e300
+ while inf_value != inf_value * inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ # type: (Any) -> float
+ value_so = self.construct_scalar(node)
+ value_s = value_so.replace('_', "").lower()
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '.inf':
+ return sign * self.inf_value
+ elif value_s == '.nan':
+ return self.nan_value
+ elif self.resolver.processing_version != (1, 2) and ':' in value_s:
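+            # YAML 1.1 sexagesimal float, e.g. '1:30.5' parses as 1 * 60 + 30.5 == 90.5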
+ digits = [float(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ else:
+ if self.resolver.processing_version != (1, 2) and 'e' in value_s:
+ # value_s is lower case independent of input
+ mantissa, exponent = value_s.split('e')
+ if '.' not in mantissa:
+ warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+ return sign * float(value_s)
+
+ def construct_yaml_binary(self, node):
+ # type: (Any) -> Any
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(
+ None,
+ None,
+ _F('failed to convert base64 data into ascii: {exc!s}', exc=exc),
+ node.start_mark,
+ )
+ try:
+ return base64.decodebytes(value)
+ except binascii.Error as exc:
+ raise ConstructorError(
+ None,
+ None,
+ _F('failed to decode base64 data: {exc!s}', exc=exc),
+ node.start_mark,
+ )
+
+ timestamp_regexp = timestamp_regexp # moved to util 0.17.17
+
+ def construct_yaml_timestamp(self, node, values=None):
+ # type: (Any, Any) -> Any
+ if values is None:
+ try:
+ match = self.timestamp_regexp.match(node.value)
+ except TypeError:
+ match = None
+ if match is None:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to construct timestamp from "{}"'.format(node.value),
+ node.start_mark,
+ )
+ values = match.groupdict()
+ return create_timestamp(**values)
+
+ def construct_yaml_omap(self, node):
+ # type: (Any) -> Any
+        # Note: duplicate keys are now checked for (see the assert below)
+ omap = ordereddict()
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F('expected a sequence, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F(
+ 'expected a mapping of length 1, but found {subnode_id!s}',
+ subnode_id=subnode.id,
+ ),
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F(
+ 'expected a single mapping item, but found {len_subnode_val:d} items',
+ len_subnode_val=len(subnode.value),
+ ),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ assert key not in omap
+ value = self.construct_object(value_node)
+ omap[key] = value
+
+ def construct_yaml_pairs(self, node):
+ # type: (Any) -> Any
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = [] # type: List[Any]
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ _F('expected a sequence, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ _F(
+ 'expected a mapping of length 1, but found {subnode_id!s}',
+ subnode_id=subnode.id,
+ ),
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ _F(
+ 'expected a single mapping item, but found {len_subnode_val:d} items',
+ len_subnode_val=len(subnode.value),
+ ),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ # type: (Any) -> Any
+ data = set() # type: Set[Any]
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ # type: (Any) -> Any
+ value = self.construct_scalar(node)
+ return value
+
+ def construct_yaml_seq(self, node):
+ # type: (Any) -> Any
+ data = self.yaml_base_list_type() # type: List[Any]
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ # type: (Any) -> Any
+ data = self.yaml_base_dict_type() # type: Dict[Any, Any]
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ # type: (Any, Any) -> Any
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ # type: (Any) -> None
+ raise ConstructorError(
+ None,
+ None,
+ _F(
+ 'could not determine a constructor for the tag {node_tag!r}', node_tag=node.tag
+ ),
+ node.start_mark,
+ )
+
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float
+)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary
+)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp
+)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs
+)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor('tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)
+
+
+class Constructor(SafeConstructor):
+ def construct_python_str(self, node):
+ # type: (Any) -> Any
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ # type: (Any) -> Any
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ # type: (Any) -> Any
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(
+ None,
+ None,
+ _F('failed to convert base64 data into ascii: {exc!s}', exc=exc),
+ node.start_mark,
+ )
+ try:
+ return base64.decodebytes(value)
+ except binascii.Error as exc:
+ raise ConstructorError(
+ None,
+ None,
+ _F('failed to decode base64 data: {exc!s}', exc=exc),
+ node.start_mark,
+ )
+
+ def construct_python_long(self, node):
+ # type: (Any) -> int
+ val = self.construct_yaml_int(node)
+ return val
+
+ def construct_python_complex(self, node):
+ # type: (Any) -> Any
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ # type: (Any) -> Any
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ # type: (Any, Any) -> Any
+ if not name:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ mark,
+ _F('cannot find module {name!r} ({exc!s})', name=name, exc=exc),
+ mark,
+ )
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ # type: (Any, Any) -> Any
+ if not name:
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
+ if '.' in name:
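+            # try progressively shorter module prefixes: for 'a.b.c', first
+            # import 'a.b' (object 'c'), then 'a' (object 'b.c')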
+ lname = name.split('.')
+ lmodule_name = lname
+ lobject_name = [] # type: List[Any]
+ while len(lmodule_name) > 1:
+ lobject_name.insert(0, lmodule_name.pop())
+ module_name = '.'.join(lmodule_name)
+ try:
+ __import__(module_name)
+ # object_name = '.'.join(object_name)
+ break
+ except ImportError:
+ continue
+ else:
+ module_name = builtins_module
+ lobject_name = [name]
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ _F(
+ 'cannot find module {module_name!r} ({exc!s})',
+ module_name=module_name,
+ exc=exc,
+ ),
+ mark,
+ )
+ module = sys.modules[module_name]
+ object_name = '.'.join(lobject_name)
+ obj = module
+ while lobject_name:
+ if not hasattr(obj, lobject_name[0]):
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ _F(
+ 'cannot find {object_name!r} in the module {module_name!r}',
+ object_name=object_name,
+ module_name=module.__name__,
+ ),
+ mark,
+ )
+ obj = getattr(obj, lobject_name.pop(0))
+ return obj
+
+ def construct_python_name(self, suffix, node):
+ # type: (Any, Any) -> Any
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError(
+ 'while constructing a Python name',
+ node.start_mark,
+ _F('expected the empty value, but found {value!r}', value=value),
+ node.start_mark,
+ )
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ # type: (Any, Any) -> Any
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ node.start_mark,
+ _F('expected the empty value, but found {value!r}', value=value),
+ node.start_mark,
+ )
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ # type: (Any, Any, Any, Any, bool) -> Any
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+        # type: (Any, Any) -> None
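+        # mirrors pickle's __setstate__ protocol: state is a plain dict, or a
+        # (dict, slot-dict) tuple for classes that use __slots__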
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {} # type: Dict[Any, Any]
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # type: (Any, Any) -> Any
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ self.recursive_objects[node] = instance
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # type: (Any, Any, bool) -> Any
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
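+        # e.g. '!!python/object/apply:collections.Counter [[a, a, b]]' calls
+        # collections.Counter(['a', 'a', 'b'])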
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {} # type: Dict[Any, Any]
+ state = {} # type: Dict[Any, Any]
+ listitems = [] # type: List[Any]
+ dictitems = {} # type: Dict[Any, Any]
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if bool(state):
+ self.set_python_instance_state(instance, state)
+ if bool(listitems):
+ instance.extend(listitems)
+ if bool(dictitems):
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ # type: (Any, Any) -> Any
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+
+Constructor.add_constructor('tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/str', Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode
+)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes
+)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/long', Constructor.construct_python_long)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/float', Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex
+)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple
+)
+
+Constructor.add_constructor('tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:', Constructor.construct_python_name
+)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:', Constructor.construct_python_module
+)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:', Constructor.construct_python_object
+)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply
+)
+
+Constructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new
+)
+
+
+class RoundTripConstructor(SafeConstructor):
+ """need to store the comments on the node itself,
+ as well as on the items
+ """
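+
+    # usage sketch via the public API (typ='rt', the default, selects this class):
+    #     yaml = ruamel.yaml.YAML()
+    #     data = yaml.load(src)    # comments and anchors are preserved
+    #     yaml.dump(data, out)     # ... and written back out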
+
+ def comment(self, idx):
+ # type: (Any) -> Any
+ assert self.loader.comment_handling is not None
+ x = self.scanner.comments[idx]
+ x.set_assigned()
+ return x
+
+ def comments(self, list_of_comments, idx=None):
+ # type: (Any, Optional[Any]) -> Any
+        # yield the comments; if idx is given, only that segment (pre, eol or post)
+ if list_of_comments is None:
+ return []
+ if idx is not None:
+ if list_of_comments[idx] is None:
+ return []
+ list_of_comments = list_of_comments[idx]
+ for x in list_of_comments:
+ yield self.comment(x)
+
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a scalar node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+
+ if node.style == '|' and isinstance(node.value, str):
+ lss = LiteralScalarString(node.value, anchor=node.anchor)
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment and node.comment[1]:
+ lss.comment = node.comment[1][0] # type: ignore
+ else:
+ # NEWCMNT
+ if node.comment is not None and node.comment[1]:
+ # nprintf('>>>>nc1', node.comment)
+ # EOL comment after |
+ lss.comment = self.comment(node.comment[1][0]) # type: ignore
+ return lss
+ if node.style == '>' and isinstance(node.value, str):
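+            # the scanner marks fold positions in folded scalars with '\a';
+            # record the positions, then strip the markers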
+ fold_positions = [] # type: List[int]
+ idx = -1
+ while True:
+ idx = node.value.find('\a', idx + 1)
+ if idx < 0:
+ break
+ fold_positions.append(idx - len(fold_positions))
+ fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor)
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment and node.comment[1]:
+ fss.comment = node.comment[1][0] # type: ignore
+ else:
+ # NEWCMNT
+ if node.comment is not None and node.comment[1]:
+ # nprintf('>>>>nc2', node.comment)
+ # EOL comment after >
+ fss.comment = self.comment(node.comment[1][0]) # type: ignore
+ if fold_positions:
+ fss.fold_pos = fold_positions # type: ignore
+ return fss
+ elif bool(self._preserve_quotes) and isinstance(node.value, str):
+ if node.style == "'":
+ return SingleQuotedScalarString(node.value, anchor=node.anchor)
+ if node.style == '"':
+ return DoubleQuotedScalarString(node.value, anchor=node.anchor)
+ if node.anchor:
+ return PlainScalarString(node.value, anchor=node.anchor)
+ return node.value
+
+ def construct_yaml_int(self, node):
+ # type: (Any) -> Any
+ width = None # type: Any
+ value_su = self.construct_scalar(node)
+ try:
+ sx = value_su.rstrip('_')
+ underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any
+ except ValueError:
+ underscore = None
+ except IndexError:
+ underscore = None
+ value_s = value_su.replace('_', "")
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '0':
+ return 0
+ elif value_s.startswith('0b'):
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return BinaryInt(
+ sign * int(value_s[2:], 2),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif value_s.startswith('0x'):
+ # default to lower-case if no a-fA-F in string
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ hex_fun = HexInt # type: Any
+ for ch in value_s[2:]:
+ if ch in 'ABCDEF': # first non-digit is capital
+ hex_fun = HexCapsInt
+ break
+ if ch in 'abcdef':
+ break
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return hex_fun(
+ sign * int(value_s[2:], 16),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif value_s.startswith('0o'):
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return OctalInt(
+ sign * int(value_s[2:], 8),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif self.resolver.processing_version != (1, 2) and value_s[0] == '0':
+ return sign * int(value_s, 8)
+ elif self.resolver.processing_version != (1, 2) and ':' in value_s:
+ digits = [int(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ elif self.resolver.processing_version > (1, 1) and value_s[0] == '0':
+ # not an octal, an integer with leading zero(s)
+ if underscore is not None:
+ # cannot have a leading underscore
+ underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
+ return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore)
+ elif underscore:
+ # cannot have a leading underscore
+ underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
+ return ScalarInt(
+ sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor
+ )
+ elif node.anchor:
+ return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor)
+ else:
+ return sign * int(value_s)
+
+ def construct_yaml_float(self, node):
+ # type: (Any) -> Any
+ def leading_zeros(v):
+ # type: (Any) -> int
+ lead0 = 0
+ idx = 0
+ while idx < len(v) and v[idx] in '0.':
+ if v[idx] == '0':
+ lead0 += 1
+ idx += 1
+ return lead0
+
+ # underscore = None
+ m_sign = False # type: Any
+ value_so = self.construct_scalar(node)
+ value_s = value_so.replace('_', "").lower()
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ m_sign = value_s[0]
+ value_s = value_s[1:]
+ if value_s == '.inf':
+ return sign * self.inf_value
+ if value_s == '.nan':
+ return self.nan_value
+ if self.resolver.processing_version != (1, 2) and ':' in value_s:
+ digits = [float(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ if 'e' in value_s:
+ try:
+ mantissa, exponent = value_so.split('e')
+ exp = 'e'
+ except ValueError:
+ mantissa, exponent = value_so.split('E')
+ exp = 'E'
+ if self.resolver.processing_version != (1, 2):
+ # value_s is lower case independent of input
+ if '.' not in mantissa:
+ warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+ lead0 = leading_zeros(mantissa)
+ width = len(mantissa)
+ prec = mantissa.find('.')
+ if m_sign:
+ width -= 1
+ e_width = len(exponent)
+ e_sign = exponent[0] in '+-'
+ # nprint('sf', width, prec, m_sign, exp, e_width, e_sign)
+ return ScalarFloat(
+ sign * float(value_s),
+ width=width,
+ prec=prec,
+ m_sign=m_sign,
+ m_lead0=lead0,
+ exp=exp,
+ e_width=e_width,
+ e_sign=e_sign,
+ anchor=node.anchor,
+ )
+ width = len(value_so)
+        prec = value_so.index('.')  # index() is safe: it could not be a float without a dot
+ lead0 = leading_zeros(value_so)
+ return ScalarFloat(
+ sign * float(value_s),
+ width=width,
+ prec=prec,
+ m_sign=m_sign,
+ m_lead0=lead0,
+ anchor=node.anchor,
+ )
+
+ def construct_yaml_str(self, node):
+ # type: (Any) -> Any
+        # construct_scalar already returns a ScalarString subclass where
+        # appropriate, so the value can be returned as-is
+        return self.construct_scalar(node)
+
+ def construct_rt_sequence(self, node, seqtyp, deep=False):
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a sequence node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ ret_val = []
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment:
+ seqtyp._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ # this happens e.g. if you have a sequence element that is a flow-style
+ # mapping and that has no EOL comment but a following commentline or
+ # empty line
+ seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
+ else:
+ # NEWCMNT
+ if node.comment:
+ nprintf('nc3', node.comment)
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ seqtyp.yaml_set_anchor(node.anchor)
+ for idx, child in enumerate(node.value):
+ if child.comment:
+ seqtyp._yaml_add_comment(child.comment, key=idx)
+ child.comment = None # if moved to sequence remove from child
+ ret_val.append(self.construct_object(child, deep=deep))
+ seqtyp._yaml_set_idx_line_col(
+ idx, [child.start_mark.line, child.start_mark.column]
+ )
+ return ret_val
+
+ def flatten_mapping(self, node):
+ # type: (Any) -> Any
+ """
+ This implements the merge key feature http://yaml.org/type/merge.html
+ by inserting keys from the merge dict/list of dicts if not yet
+ available in this node
+ """
+
+ def constructed(value_node):
+ # type: (Any) -> Any
+ # If the contents of a merge are defined within the
+ # merge marker, then they won't have been constructed
+ # yet. But if they were already constructed, we need to use
+ # the existing object.
+ if value_node in self.constructed_objects:
+ value = self.constructed_objects[value_node]
+ else:
+ value = self.construct_object(value_node, deep=False)
+ return value
+
+ # merge = []
+ merge_map_list = [] # type: List[Any]
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ if merge_map_list: # double << key
+ if self.allow_duplicate_keys:
+ del node.value[index]
+ index += 1
+ continue
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key_node.value),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ merge_map_list.append((index, constructed(value_node)))
+ # self.flatten_mapping(value_node)
+ # merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ # submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ _F(
+ 'expected a mapping for merging, but found {subnode_id!s}',
+ subnode_id=subnode.id,
+ ),
+ subnode.start_mark,
+ )
+ merge_map_list.append((index, constructed(subnode)))
+ # self.flatten_mapping(subnode)
+ # submerge.append(subnode.value)
+ # submerge.reverse()
+ # for value in submerge:
+ # merge.extend(value)
+ else:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ _F(
+ 'expected a mapping or list of mappings for merging, '
+ 'but found {value_node_id!s}',
+ value_node_id=value_node.id,
+ ),
+ value_node.start_mark,
+ )
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ return merge_map_list
+ # if merge:
+ # node.value = merge + node.value
+
+ def _sentinel(self):
+ # type: () -> None
+ pass
+
+ def construct_mapping(self, node, maptyp, deep=False): # type: ignore
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a mapping node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ merge_map = self.flatten_mapping(node)
+ # mapping = {}
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment:
+ maptyp._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
+ else:
+ # NEWCMNT
+ if node.comment:
+ # nprintf('nc4', node.comment, node.start_mark)
+ if maptyp.ca.pre is None:
+ maptyp.ca.pre = []
+ for cmnt in self.comments(node.comment, 0):
+ maptyp.ca.pre.append(cmnt)
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ maptyp.yaml_set_anchor(node.anchor)
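+        # self._sentinel distinguishes 'no value seen yet' from an explicit None
+        # in the comment relocation below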
+ last_key, last_value = None, self._sentinel
+ for key_node, value_node in node.value:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, MutableSequence):
+ key_s = CommentedKeySeq(key)
+ if key_node.flow_style is True:
+ key_s.fa.set_flow_style()
+ elif key_node.flow_style is False:
+ key_s.fa.set_block_style()
+ key = key_s
+ elif isinstance(key, MutableMapping):
+ key_m = CommentedKeyMap(key)
+ if key_node.flow_style is True:
+ key_m.fa.set_flow_style()
+ elif key_node.flow_style is False:
+ key_m.fa.set_block_style()
+ key = key_m
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+ value = self.construct_object(value_node, deep=deep)
+ if self.check_mapping_key(node, key_node, maptyp, key, value):
+ if self.loader and self.loader.comment_handling is None:
+ if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]:
+ if last_value is None:
+ key_node.comment[0] = key_node.comment.pop(4)
+ maptyp._yaml_add_comment(key_node.comment, value=last_key)
+ else:
+ key_node.comment[2] = key_node.comment.pop(4)
+ maptyp._yaml_add_comment(key_node.comment, key=key)
+ key_node.comment = None
+ if key_node.comment:
+ maptyp._yaml_add_comment(key_node.comment, key=key)
+ if value_node.comment:
+ maptyp._yaml_add_comment(value_node.comment, value=key)
+ else:
+ # NEWCMNT
+ if key_node.comment:
+ nprintf('nc5a', key, key_node.comment)
+ if key_node.comment[0]:
+ maptyp.ca.set(key, C_KEY_PRE, key_node.comment[0])
+ if key_node.comment[1]:
+ maptyp.ca.set(key, C_KEY_EOL, key_node.comment[1])
+ if key_node.comment[2]:
+ maptyp.ca.set(key, C_KEY_POST, key_node.comment[2])
+ if value_node.comment:
+ nprintf('nc5b', key, value_node.comment)
+ if value_node.comment[0]:
+ maptyp.ca.set(key, C_VALUE_PRE, value_node.comment[0])
+ if value_node.comment[1]:
+ maptyp.ca.set(key, C_VALUE_EOL, value_node.comment[1])
+ if value_node.comment[2]:
+ maptyp.ca.set(key, C_VALUE_POST, value_node.comment[2])
+ maptyp._yaml_set_kv_line_col(
+ key,
+ [
+ key_node.start_mark.line,
+ key_node.start_mark.column,
+ value_node.start_mark.line,
+ value_node.start_mark.column,
+ ],
+ )
+ maptyp[key] = value
+ last_key, last_value = key, value # could use indexing
+        # do this last, or a '<<:' merge key before a key will prevent insertion in
+        # instances of collections.OrderedDict (as they have no __contains__)
+ if merge_map:
+ maptyp.add_yaml_merge(merge_map)
+
+ def construct_setting(self, node, typ, deep=False):
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None,
+ None,
+ _F('expected a mapping node, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment:
+ typ._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ typ.yaml_end_comment_extend(node.comment[2], clear=True)
+ else:
+ # NEWCMNT
+ if node.comment:
+ nprintf('nc6', node.comment)
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ typ.yaml_set_anchor(node.anchor)
+ for key_node, value_node in node.value:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, list):
+ key = tuple(key)
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+ # construct but should be null
+ value = self.construct_object(value_node, deep=deep) # NOQA
+ self.check_set_key(node, key_node, typ, key)
+ if self.loader and self.loader.comment_handling is None:
+ if key_node.comment:
+ typ._yaml_add_comment(key_node.comment, key=key)
+ if value_node.comment:
+ typ._yaml_add_comment(value_node.comment, value=key)
+ else:
+ # NEWCMNT
+ if key_node.comment:
+ nprintf('nc7a', key_node.comment)
+ if value_node.comment:
+ nprintf('nc7b', value_node.comment)
+ typ.add(key)
+
+ def construct_yaml_seq(self, node):
+ # type: (Any) -> Any
+ data = CommentedSeq()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ # if node.comment:
+ # data._yaml_add_comment(node.comment)
+ yield data
+ data.extend(self.construct_rt_sequence(node, data))
+ self.set_collection_style(data, node)
+
+ def construct_yaml_map(self, node):
+ # type: (Any) -> Any
+ data = CommentedMap()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ yield data
+ self.construct_mapping(node, data, deep=True)
+ self.set_collection_style(data, node)
+
+ def set_collection_style(self, data, node):
+ # type: (Any, Any) -> None
+ if len(data) == 0:
+ return
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+
+ def construct_yaml_object(self, node, cls):
+ # type: (Any, Any) -> Any
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = SafeConstructor.construct_mapping(self, node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = SafeConstructor.construct_mapping(self, node)
+ if hasattr(data, '__attrs_attrs__'): # issue 394
+ data.__init__(**state)
+ else:
+ data.__dict__.update(state)
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+ from ruamel.yaml.anchor import Anchor
+
+ if not templated_id(node.anchor):
+ if not hasattr(data, Anchor.attrib):
+ a = Anchor()
+ setattr(data, Anchor.attrib, a)
+ else:
+ a = getattr(data, Anchor.attrib)
+ a.value = node.anchor
+
+ def construct_yaml_omap(self, node):
+ # type: (Any) -> Any
+        # Note: duplicate keys are now checked for (see the assert below)
+ omap = CommentedOrderedMap()
+ omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ omap.fa.set_flow_style()
+ elif node.flow_style is False:
+ omap.fa.set_block_style()
+ yield omap
+ if self.loader and self.loader.comment_handling is None:
+ if node.comment:
+ omap._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ omap.yaml_end_comment_extend(node.comment[2], clear=True)
+ else:
+ # NEWCMNT
+ if node.comment:
+ nprintf('nc8', node.comment)
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F('expected a sequence, but found {node_id!s}', node_id=node.id),
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F(
+ 'expected a mapping of length 1, but found {subnode_id!s}',
+ subnode_id=subnode.id,
+ ),
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ _F(
+ 'expected a single mapping item, but found {len_subnode_val:d} items',
+ len_subnode_val=len(subnode.value),
+ ),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ assert key not in omap
+ value = self.construct_object(value_node)
+ if self.loader and self.loader.comment_handling is None:
+ if key_node.comment:
+ omap._yaml_add_comment(key_node.comment, key=key)
+ if subnode.comment:
+ omap._yaml_add_comment(subnode.comment, key=key)
+ if value_node.comment:
+ omap._yaml_add_comment(value_node.comment, value=key)
+ else:
+ # NEWCMNT
+ if key_node.comment:
+ nprintf('nc9a', key_node.comment)
+ if subnode.comment:
+ nprintf('nc9b', subnode.comment)
+ if value_node.comment:
+ nprintf('nc9c', value_node.comment)
+ omap[key] = value
+
+ def construct_yaml_set(self, node):
+ # type: (Any) -> Any
+ data = CommentedSet()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ yield data
+ self.construct_setting(node, data)
+
+ def construct_undefined(self, node):
+ # type: (Any) -> Any
+ try:
+ if isinstance(node, MappingNode):
+ data = CommentedMap()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+ data.yaml_set_tag(node.tag)
+ yield data
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ data.yaml_set_anchor(node.anchor)
+ self.construct_mapping(node, data)
+ return
+ elif isinstance(node, ScalarNode):
+ data2 = TaggedScalar()
+ data2.value = self.construct_scalar(node)
+ data2.style = node.style
+ data2.yaml_set_tag(node.tag)
+ yield data2
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ data2.yaml_set_anchor(node.anchor, always_dump=True)
+ return
+ elif isinstance(node, SequenceNode):
+ data3 = CommentedSeq()
+ data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ data3.fa.set_flow_style()
+ elif node.flow_style is False:
+ data3.fa.set_block_style()
+ data3.yaml_set_tag(node.tag)
+ yield data3
+ if node.anchor:
+ from ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ data3.yaml_set_anchor(node.anchor)
+ data3.extend(self.construct_sequence(node))
+ return
+ except: # NOQA
+ pass
+ raise ConstructorError(
+ None,
+ None,
+ _F(
+ 'could not determine a constructor for the tag {node_tag!r}', node_tag=node.tag
+ ),
+ node.start_mark,
+ )
+
+ def construct_yaml_timestamp(self, node, values=None):
+ # type: (Any, Any) -> Any
+ try:
+ match = self.timestamp_regexp.match(node.value)
+ except TypeError:
+ match = None
+ if match is None:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to construct timestamp from "{}"'.format(node.value),
+ node.start_mark,
+ )
+ values = match.groupdict()
+ if not values['hour']:
+ return create_timestamp(**values)
+ # return SafeConstructor.construct_yaml_timestamp(self, node, values)
+ for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']:
+ if values[part]:
+ break
+ else:
+ return create_timestamp(**values)
+ # return SafeConstructor.construct_yaml_timestamp(self, node, values)
+ dd = create_timestamp(**values) # this has delta applied
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ minutes = values['tz_minute']
+ tz_minute = int(minutes) if minutes else 0
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+        # TODO: should check for None and solve issue 366 (should be tzinfo=delta)
+ data = TimeStamp(
+ dd.year, dd.month, dd.day, dd.hour, dd.minute, dd.second, dd.microsecond
+ )
+ if delta:
+ data._yaml['delta'] = delta
+ tz = values['tz_sign'] + values['tz_hour']
+ if values['tz_minute']:
+ tz += ':' + values['tz_minute']
+ data._yaml['tz'] = tz
+ else:
+ if values['tz']: # no delta
+ data._yaml['tz'] = values['tz']
+
+ if values['t']:
+ data._yaml['t'] = True
+ return data
+
+ def construct_yaml_bool(self, node):
+ # type: (Any) -> Any
+ b = SafeConstructor.construct_yaml_bool(self, node)
+ if node.anchor:
+ return ScalarBoolean(b, anchor=node.anchor)
+ return b
+
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq
+)
+
+RoundTripConstructor.add_constructor(
+ 'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map
+)
+
+RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined)
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/cyaml.py b/lib/spack/external/_vendoring/ruamel/yaml/cyaml.py
new file mode 100644
index 0000000000..0ab2828dc2
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/cyaml.py
@@ -0,0 +1,183 @@
+# coding: utf-8
+
+from _ruamel_yaml import CParser, CEmitter # type: ignore
+
+from ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor
+from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter
+from ruamel.yaml.resolver import Resolver, BaseResolver
+
+if False: # MYPY
+ from typing import Any, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+
+# this includes some hacks to work around the resolver being used by
+# lower-level parts of the parser
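+#
+# usage sketch with the old-style API (requires the compiled _ruamel_yaml
+# extension; CSafeLoader is re-exported from the ruamel.yaml package):
+#     data = ruamel.yaml.load(stream, Loader=ruamel.yaml.CSafeLoader)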
+
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ BaseConstructor.__init__(self, loader=self)
+ BaseResolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ SafeConstructor.__init__(self, loader=self)
+ Resolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CLoader(CParser, Constructor, Resolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ Constructor.__init__(self, loader=self)
+ Resolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ BaseResolver.__init__(self, loadumper=self)
+
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ self._emitter = self._serializer = self._representer = self
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ SafeRepresenter.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
+ Resolver.__init__(self)
+
+
+class CDumper(CEmitter, Representer, Resolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ Representer.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
+ Resolver.__init__(self)
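+
+# Editorial usage sketch (not upstream code; assumes the optional
+# `_ruamel_yaml` C extension is installed -- the import at the top of this
+# module fails without it). The C variants are drop-in stand-ins for the
+# pure-Python classes in the old-style API:
+#
+#     import ruamel.yaml
+#     from ruamel.yaml.cyaml import CSafeLoader, CSafeDumper
+#     data = ruamel.yaml.load('a: 1\n', Loader=CSafeLoader)  # {'a': 1}
+#     text = ruamel.yaml.dump(data, Dumper=CSafeDumper)      # 'a: 1\n'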
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/dumper.py b/lib/spack/external/_vendoring/ruamel/yaml/dumper.py
new file mode 100644
index 0000000000..7e9bf01ded
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/dumper.py
@@ -0,0 +1,219 @@
+# coding: utf-8
+
+from ruamel.yaml.emitter import Emitter
+from ruamel.yaml.serializer import Serializer
+from ruamel.yaml.representer import (
+ Representer,
+ SafeRepresenter,
+ BaseRepresenter,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamType, VersionType # NOQA
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
+
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ BaseResolver.__init__(self, loadumper=self)
+
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ Resolver.__init__(self, loadumper=self)
+
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ Representer.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ Resolver.__init__(self, loadumper=self)
+
+
+class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ RoundTripRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ VersionedResolver.__init__(self, loader=self)
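+
+# Editorial usage sketch (not upstream code): each dumper class above bundles
+# an emitter, serializer, representer and resolver into a single object; with
+# the old-style API it is selected via the `Dumper` argument:
+#
+#     import sys
+#     import ruamel.yaml
+#     ruamel.yaml.dump({'a': 1}, sys.stdout, Dumper=RoundTripDumper)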
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/emitter.py b/lib/spack/external/_vendoring/ruamel/yaml/emitter.py
new file mode 100644
index 0000000000..f9611ee191
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/emitter.py
@@ -0,0 +1,1772 @@
+# coding: utf-8
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
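+# As an illustration (editorial comment, not upstream code): dumping the
+# document `a: 1` corresponds to the event stream
+#
+#   STREAM-START
+#     DOCUMENT-START
+#       MAPPING-START SCALAR('a') SCALAR('1') MAPPING-END
+#     DOCUMENT-END
+#   STREAM-END
+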
+import sys
+from ruamel.yaml.error import YAMLError, YAMLStreamError
+from ruamel.yaml.events import * # NOQA
+
+# fmt: off
+from ruamel.yaml.compat import _F, nprint, dbg, DBG_EVENT, \
+ check_anchorname_char, nprintf # NOQA
+# fmt: on
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
+ from ruamel.yaml.compat import StreamType # NOQA
+
+__all__ = ['Emitter', 'EmitterError']
+
+
+class EmitterError(YAMLError):
+ pass
+
+
+class ScalarAnalysis:
+ def __init__(
+ self,
+ scalar,
+ empty,
+ multiline,
+ allow_flow_plain,
+ allow_block_plain,
+ allow_single_quoted,
+ allow_double_quoted,
+ allow_block,
+ ):
+ # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+
+class Indents:
+ # replacement for the list-based stack of None/int
+ def __init__(self):
+ # type: () -> None
+ self.values = [] # type: List[Tuple[Any, bool]]
+
+ def append(self, val, seq):
+ # type: (Any, Any) -> None
+ self.values.append((val, seq))
+
+ def pop(self):
+ # type: () -> Any
+ return self.values.pop()[0]
+
+ def last_seq(self):
+ # type: () -> bool
+ # return the seq(uence) value for the element added before the last one
+ # in increase_indent()
+ try:
+ return self.values[-2][1]
+ except IndexError:
+ return False
+
+ def seq_flow_align(self, seq_indent, column, pre_comment=False):
+ # type: (int, int, Optional[bool]) -> int
+ # extra spaces because of dash
+ # nprint('seq_flow_align', self.values, pre_comment)
+ if len(self.values) < 2 or not self.values[-1][1]:
+ if len(self.values) == 0 or not pre_comment:
+ return 0
+ base = self.values[-1][0] if self.values[-1][0] is not None else 0
+ if pre_comment:
+ return base + seq_indent # type: ignore
+ # return (len(self.values)) * seq_indent
+ # -1 for the dash
+ return base + seq_indent - column - 1 # type: ignore
+
+ def __len__(self):
+ # type: () -> int
+ return len(self.values)
+
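+# Editorial note (not upstream code): the stack pairs the indent that was
+# active *before* each push with an is-sequence flag for the new collection.
+# While emitting the inner mapping of
+#
+#   a:
+#   - b: 1
+#
+# values is roughly [(None, False), (0, True), (0, False)], with the live
+# indent held in Emitter.indent (here 2).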
+
+class Emitter:
+ # fmt: off
+ DEFAULT_TAG_PREFIXES = {
+ '!': '!',
+ 'tag:yaml.org,2002:': '!!',
+ }
+ # fmt: on
+
+ MAX_SIMPLE_KEY_LENGTH = 128
+
+ def __init__(
+ self,
+ stream,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ brace_single_entry_mapping_in_flow_sequence=None,
+ dumper=None,
+ ):
+ # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA
+ self.dumper = dumper
+ if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
+ self.dumper._emitter = self
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None # type: Optional[Text]
+ self.allow_space_break = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = [] # type: List[Any]
+ self.state = self.expect_stream_start # type: Any
+
+ # Current event and the event queue.
+ self.events = [] # type: List[Any]
+ self.event = None # type: Any
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = Indents()
+ self.indent = None # type: Optional[int]
+
+ # flow_context is an expanding/shrinking list consisting of '{' and '['
+ # for each unclosed flow context. An empty list means block context.
+ self.flow_context = [] # type: List[Text]
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+ self.compact_seq_seq = True # dash after dash
+ self.compact_seq_map = True # key after dash
+ # self.compact_ms = False # dash after key, only when explicit key with ?
+ self.no_newline = None # type: Optional[bool] # set if directly after `- `
+
+ # Whether the document requires an explicit document end indicator
+ self.open_ended = False
+
+ # colon handling
+ self.colon = ':'
+ self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
+ # single entry mappings in flow sequence
+ self.brace_single_entry_mapping_in_flow_sequence = (
+ brace_single_entry_mapping_in_flow_sequence # NOQA
+ )
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
+ self.unicode_supplementary = sys.maxunicode > 0xFFFF
+ self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
+ self.top_level_colon_align = top_level_colon_align
+ self.best_sequence_indent = 2
+ self.requested_indent = indent # specific for literal zero indent
+ if indent and 1 < indent < 10:
+ self.best_sequence_indent = indent
+ self.best_map_indent = self.best_sequence_indent
+ # if self.best_sequence_indent < self.sequence_dash_offset + 1:
+ # self.best_sequence_indent = self.sequence_dash_offset + 1
+ self.best_width = 80
+ if width and width > self.best_sequence_indent * 2:
+ self.best_width = width
+ self.best_line_break = '\n' # type: Any
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None # type: Any
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None # type: Any
+ self.prepared_tag = None # type: Any
+
+ # Scalar analysis and style.
+ self.analysis = None # type: Any
+ self.style = None # type: Any
+
+ self.scalar_after_indicator = True # write a scalar on the same line as `---`
+
+ self.alt_null = 'null'
+
+ @property
+ def stream(self):
+ # type: () -> Any
+ try:
+ return self._stream
+ except AttributeError:
+ raise YAMLStreamError('output stream needs to be specified')
+
+ @stream.setter
+ def stream(self, val):
+ # type: (Any) -> None
+ if val is None:
+ return
+ if not hasattr(val, 'write'):
+ raise YAMLStreamError('stream argument needs to have a write() method')
+ self._stream = val
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ def dispose(self):
+ # type: () -> None
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ # type: (Any) -> None
+ if dbg(DBG_EVENT):
+ nprint(event)
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ # type: () -> bool
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ # type: (int) -> bool
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return len(self.events) < count + 1
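+
+ # Buffering example (editorial comment): a DocumentStartEvent is held
+ # until 1 more event is queued (is the document empty?), a
+ # SequenceStartEvent until 2 and a MappingStartEvent until 3, so the
+ # check_empty_*() helpers below can peek at the closing event before
+ # anything is written.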
+
+ def increase_indent(self, flow=False, sequence=None, indentless=False):
+ # type: (bool, Optional[bool], bool) -> None
+ self.indents.append(self.indent, sequence)
+ if self.indent is None: # top level
+ if flow:
+ # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
+ # self.best_map_indent
+ # self.indent = self.best_sequence_indent
+ self.indent = self.requested_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += (
+ self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
+ )
+ # if self.indents.last_seq():
+ # if self.indent == 0: # top level block sequence
+ # self.indent = self.best_sequence_indent - self.sequence_dash_offset
+ # else:
+ # self.indent += self.best_sequence_indent
+ # else:
+ # self.indent += self.best_map_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ # type: () -> None
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError(
+ _F('expected StreamStartEvent, but got {self_event!s}', self_event=self.event)
+ )
+
+ def expect_nothing(self):
+ # type: () -> None
+ raise EmitterError(
+ _F('expected nothing, but got {self_event!s}', self_event=self.event)
+ )
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ # type: () -> Any
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ # type: (bool) -> None
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (
+ first
+ and not self.event.explicit
+ and not self.canonical
+ and not self.event.version
+ and not self.event.tags
+ and not self.check_empty_document()
+ )
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError(
+ _F(
+ 'expected DocumentStartEvent, but got {self_event!s}',
+ self_event=self.event,
+ )
+ )
+
+ def expect_document_end(self):
+ # type: () -> None
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError(
+ _F('expected DocumentEndEvent, but got {self_event!s}', self_event=self.event)
+ )
+
+ def expect_document_root(self):
+ # type: () -> None
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
+ # type: (bool, bool, bool, bool) -> None
+ self.root_context = root
+ self.sequence_context = sequence # not used in PyYAML
+ force_flow_indent = False
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ if (
+ self.process_anchor('&')
+ and isinstance(self.event, ScalarEvent)
+ and self.sequence_context
+ ):
+ self.sequence_context = False
+ if (
+ root
+ and isinstance(self.event, ScalarEvent)
+ and not self.scalar_after_indicator
+ ):
+ self.write_indent()
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ i2, n2 = self.indention, self.no_newline # NOQA
+ if self.event.comment:
+ if self.event.flow_style is False:
+ if self.write_post_comment(self.event):
+ self.indention = False
+ self.no_newline = True
+ if self.event.flow_style:
+ column = self.column
+ if self.write_pre_comment(self.event):
+ if self.event.flow_style:
+ # force_flow_indent = True
+ force_flow_indent = not self.indents.values[-1][1]
+ self.indention = i2
+ self.no_newline = not self.indention
+ if self.event.flow_style:
+ self.column = column
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_sequence()
+ ):
+ self.expect_flow_sequence(force_flow_indent)
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.event.flow_style is False and self.event.comment:
+ self.write_post_comment(self.event)
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ if self.event.flow_style:
+ force_flow_indent = not self.indents.values[-1][1]
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_mapping()
+ ):
+ self.expect_flow_mapping(single=self.event.nr_items == 1,
+ force_flow_indent=force_flow_indent)
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError(
+ _F('expected NodeEvent, but got {self_event!s}', self_event=self.event)
+ )
+
+ def expect_alias(self):
+ # type: () -> None
+ if self.event.anchor is None:
+ raise EmitterError('anchor is not specified for alias')
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ # type: () -> None
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self, force_flow_indent=False):
+ # type: (Optional[bool]) -> None
+ if force_flow_indent:
+ self.increase_indent(flow=True, sequence=True)
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column,
+ force_flow_indent)
+ self.write_indicator(' ' * ind + '[', True, whitespace=True)
+ if not force_flow_indent:
+ self.increase_indent(flow=True, sequence=True)
+ self.flow_context.append('[')
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ self.write_indicator(']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty flow sequence
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow sequence
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self, single=False, force_flow_indent=False):
+ # type: (Optional[bool], Optional[bool]) -> None
+ if force_flow_indent:
+ self.increase_indent(flow=True, sequence=False)
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column,
+ force_flow_indent)
+ map_init = '{'
+ if (
+ single
+ and self.flow_level
+ and self.flow_context[-1] == '['
+ and not self.canonical
+ and not self.brace_single_entry_mapping_in_flow_sequence
+ ):
+ # single map item with flow context, no curly braces necessary
+ map_init = ''
+ self.write_indicator(' ' * ind + map_init, True, whitespace=True)
+ self.flow_context.append(map_init)
+ if not force_flow_indent:
+ self.increase_indent(flow=True, sequence=False)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '{' # empty flow mapping
+ self.write_indicator('}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty mapping
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ # if self.event.comment and self.event.comment[1]:
+ # self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped in ['{', '']
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ if popped != '':
+ self.write_indicator('}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow mapping, never reached on empty mappings
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ # type: () -> None
+ self.write_indicator(self.prefixed_colon, False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ # type: () -> None
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ # type: () -> None
+ if self.mapping_context:
+ indentless = not self.indention
+ else:
+ indentless = False
+ if not self.compact_seq_seq and self.column != 0:
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=True, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ # type: () -> Any
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ # type: (bool) -> None
+ if not first and isinstance(self.event, SequenceEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments on a block list e.g. empty line
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ self.no_newline = False
+ else:
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ nonl = self.no_newline if self.column == 0 else False
+ self.write_indent()
+ ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0
+ self.write_indicator(' ' * ind + '-', True, indention=True)
+ if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
+ self.no_newline = True
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ # type: () -> None
+ if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ # type: () -> Any
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ # type: (Any) -> None
+ if not first and isinstance(self.event, MappingEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.write_indent()
+ if self.check_simple_key():
+ if not isinstance(
+ self.event, (SequenceStartEvent, MappingStartEvent)
+ ): # sequence keys
+ try:
+ if self.event.style == '?':
+ self.write_indicator('?', True, indention=True)
+ except AttributeError: # aliases have no style
+ pass
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ # test on style for alias in !!set
+ if isinstance(self.event, AliasEvent) and not self.event.style == '?':
+ self.stream.write(' ')
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ # type: () -> None
+ if getattr(self.event, 'style', None) != '?':
+ # prefix = ''
+ if self.indent == 0 and self.top_level_colon_align is not None:
+ # write non-prefixed colon
+ c = ' ' * (self.top_level_colon_align - self.column) + self.colon
+ else:
+ c = self.prefixed_colon
+ self.write_indicator(c, False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ # type: () -> None
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, SequenceStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], SequenceEndEvent)
+ )
+
+ def check_empty_mapping(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, MappingStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], MappingEndEvent)
+ )
+
+ def check_empty_document(self):
+ # type: () -> bool
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (
+ isinstance(event, ScalarEvent)
+ and event.anchor is None
+ and event.tag is None
+ and event.implicit
+ and event.value == ""
+ )
+
+ def check_simple_key(self):
+ # type: () -> bool
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if (
+ isinstance(self.event, (ScalarEvent, CollectionStartEvent))
+ and self.event.tag is not None
+ ):
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return length < self.MAX_SIMPLE_KEY_LENGTH and (
+ isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
+ or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
+ or (
+ isinstance(self.event, ScalarEvent)
+ # if there is an explicit style for an empty string, it is a simple key
+ and not (self.analysis.empty and self.style and self.style not in '\'"')
+ and not self.analysis.multiline
+ )
+ or self.check_empty_sequence()
+ or self.check_empty_mapping()
+ )
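+
+ # For example (editorial comment): a key whose prepared length reaches
+ # MAX_SIMPLE_KEY_LENGTH (128) fails this check, so the block mapping
+ # handlers fall back to the explicit form:
+ #
+ # ? <very long key>
+ # : value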
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ # type: (Any) -> bool
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return False
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator + self.prepared_anchor, True)
+ # issue 288
+ self.no_newline = False
+ self.prepared_anchor = None
+ return True
+
+ def process_tag(self):
+ # type: () -> None
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if (
+ self.event.value == ''
+ and self.style == "'"
+ and tag == 'tag:yaml.org,2002:null'
+ and self.alt_null is not None
+ ):
+ self.event.value = self.alt_null
+ self.analysis = None
+ self.style = self.choose_scalar_style()
+ if (not self.canonical or tag is None) and (
+ (self.style == "" and self.event.implicit[0])
+ or (self.style != "" and self.event.implicit[1])
+ ):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError('tag is not specified')
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ if (
+ self.sequence_context
+ and not self.flow_level
+ and isinstance(self.event, ScalarEvent)
+ ):
+ self.no_newline = True
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ # type: () -> Any
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if (not self.event.style or self.event.style == '?') and (
+ self.event.implicit[0] or not self.event.implicit[2]
+ ):
+ if not (
+ self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
+ ) and (
+ self.flow_level
+ and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain)
+ ):
+ return ""
+ self.analysis.allow_block = True
+ if self.event.style and self.event.style in '|>':
+ if (
+ not self.flow_level
+ and not self.simple_key_context
+ and self.analysis.allow_block
+ ):
+ return self.event.style
+ if not self.event.style and self.analysis.allow_double_quoted:
+ if "'" in self.event.value or '\n' in self.event.value:
+ return '"'
+ if not self.event.style or self.event.style == "'":
+ if self.analysis.allow_single_quoted and not (
+ self.simple_key_context and self.analysis.multiline
+ ):
+ return "'"
+ return '"'
+
+ def process_scalar(self):
+ # type: () -> None
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = not self.simple_key_context
+ # if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ # nprint('xx', self.sequence_context, self.flow_level)
+ if self.sequence_context and not self.flow_level:
+ self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == "'":
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ if (
+ self.event.comment
+ and self.event.comment[0]
+ and self.event.comment[0].column >= self.indent
+ ):
+ # comment following a folded scalar must dedent (issue 376)
+ self.event.comment[0].column = self.indent - 1 # type: ignore
+ elif self.style == '|':
+ # self.write_literal(self.analysis.scalar, self.event.comment)
+ try:
+ cmx = self.event.comment[1][0]
+ except (IndexError, TypeError):
+ cmx = ""
+ self.write_literal(self.analysis.scalar, cmx)
+ if (
+ self.event.comment
+ and self.event.comment[0]
+ and self.event.comment[0].column >= self.indent
+ ):
+ # comment following a literal scalar must dedent (issue 376)
+ self.event.comment[0].column = self.indent - 1 # type: ignore
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+ if self.event.comment:
+ self.write_post_comment(self.event)
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ # type: (Any) -> Any
+ major, minor = version
+ if major != 1:
+ raise EmitterError(
+ _F('unsupported YAML version: {major:d}.{minor:d}', major=major, minor=minor)
+ )
+ return _F('{major:d}.{minor:d}', major=major, minor=minor)
+
+ def prepare_tag_handle(self, handle):
+ # type: (Any) -> Any
+ if not handle:
+ raise EmitterError('tag handle must not be empty')
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError(
+ _F("tag handle must start and end with '!': {handle!r}", handle=handle)
+ )
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_'):
+ raise EmitterError(
+ _F(
+ 'invalid character {ch!r} in the tag handle: {handle!r}',
+ ch=ch,
+ handle=handle,
+ )
+ )
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ # type: (Any) -> Any
+ if not prefix:
+ raise EmitterError('tag prefix must not be empty')
+ chunks = [] # type: List[Any]
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ ch_set = "-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += '#'
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in ch_set:
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end + 1
+ data = ch
+ for ch in data:
+ chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return "".join(chunks)
+
+ def prepare_tag(self, tag):
+ # type: (Any) -> Any
+ if not tag:
+ raise EmitterError('tag must not be empty')
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix) :]
+ chunks = [] # type: List[Any]
+ start = end = 0
+ ch_set = "-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += '#'
+ while end < len(suffix):
+ ch = suffix[end]
+ if (
+ '0' <= ch <= '9'
+ or 'A' <= ch <= 'Z'
+ or 'a' <= ch <= 'z'
+ or ch in ch_set
+ or (ch == '!' and handle != '!')
+ ):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end + 1
+ data = ch
+ for ch in data:
+ chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = "".join(chunks)
+ if handle:
+ return _F('{handle!s}{suffix_text!s}', handle=handle, suffix_text=suffix_text)
+ else:
+ return _F('!<{suffix_text!s}>', suffix_text=suffix_text)
+
+ def prepare_anchor(self, anchor):
+ # type: (Any) -> Any
+ if not anchor:
+ raise EmitterError('anchor must not be empty')
+ for ch in anchor:
+ if not check_anchorname_char(ch):
+ raise EmitterError(
+ _F(
+ 'invalid character {ch!r} in the anchor: {anchor!r}',
+ ch=ch,
+ anchor=anchor,
+ )
+ )
+ return anchor
+
+ def analyze_scalar(self, scalar):
+ # type: (Any) -> Any
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=True,
+ multiline=False,
+ allow_flow_plain=False,
+ allow_block_plain=True,
+ allow_single_quoted=True,
+ allow_double_quoted=True,
+ allow_block=False,
+ )
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029'
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:': # ToDo
+ if self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ elif len(scalar) == 1: # single character
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in ',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859
+ flow_indicators = True
+ if ch == '?' and self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ if ch == ':':
+ if followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ if ch == '#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (
+ ch == '\x85'
+ or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'
+ or (self.unicode_supplementary and ('\U00010000' <= ch <= '\U0010FFFF'))
+ ) and ch != '\uFEFF':
+ # unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar) - 1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar) - 1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = ch in '\0 \t\r\n\x85\u2028\u2029'
+ followed_by_whitespace = (
+ index + 1 >= len(scalar) or scalar[index + 1] in '\0 \t\r\n\x85\u2028\u2029'
+ )
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if leading_space or leading_break or trailing_space or trailing_break:
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if special_characters:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
+ elif space_break:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+ if not self.allow_space_break:
+ allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=False,
+ multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block,
+ )
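+
+ # For example (editorial comment): analyze_scalar('hello world') allows
+ # both plain styles; ' hello' (leading space) forbids plain entirely,
+ # and 'hello ' (trailing space) additionally forbids block scalars.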
+
+ # Writers.
+
+ def flush_stream(self):
+ # type: () -> None
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # type: () -> None
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ # type: () -> None
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
+ # type: (Any, Any, bool, bool) -> None
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' ' + indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ # type: () -> None
+ indent = self.indent or 0
+ if (
+ not self.indention
+ or self.column > indent
+ or (self.column == indent and not self.whitespace)
+ ):
+ if bool(self.no_newline):
+ self.no_newline = False
+ else:
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' ' * (indent - self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding) # type: ignore
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ # type: (Any) -> None
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ # type: (Any) -> None
+ data = _F('%YAML {version_text!s}', version_text=version_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ # type: (Any, Any) -> None
+ data = _F(
+ '%TAG {handle_text!s} {prefix_text!s}',
+ handle_text=handle_text,
+ prefix_text=prefix_text,
+ )
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ self.write_indicator("'", True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if (
+ start + 1 == end
+ and self.column > self.best_width
+ and split
+ and start != 0
+ and end != len(text)
+ ):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == "'":
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == "'":
+ data = "''"
+ self.column += 2
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = ch == ' '
+ breaks = ch in '\n\x85\u2028\u2029'
+ end += 1
+ self.write_indicator("'", False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '"': '"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
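+
+ # For example (editorial comment): write_double_quoted('a\tb\u2028')
+ # emits "a\tb\L" -- short escapes come from the table above; anything
+ # else falls back to \xXX, \uXXXX or \UXXXXXXXX below.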
+
+ def write_double_quoted(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if (
+ ch is None
+ or ch in '"\\\x85\u2028\u2029\uFEFF'
+ or not (
+ '\x20' <= ch <= '\x7E'
+ or (
+ self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD')
+ )
+ )
+ ):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\' + self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = _F('\\x{ord_ch:02X}', ord_ch=ord(ch))
+ elif ch <= '\uFFFF':
+ data = _F('\\u{ord_ch:04X}', ord_ch=ord(ch))
+ else:
+ data = _F('\\U{ord_ch:08X}', ord_ch=ord(ch))
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if (
+ 0 < end < len(text) - 1
+ and (ch == ' ' or start >= end)
+ and self.column + (end - start) > self.best_width
+ and split
+ ):
+ data = text[start:end] + '\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ # type: (Any) -> Any
+ indent = 0
+ indicator = ''
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ indent = self.best_sequence_indent
+ hints += str(indent)
+ elif self.root_context:
+ for end in ['\n---', '\n...']:
+ pos = 0
+ while True:
+ pos = text.find(end, pos)
+ if pos == -1:
+ break
+ try:
+ if text[pos + 4] in ' \r\n':
+ break
+ except IndexError:
+ pass
+ pos += 1
+ if pos > -1:
+ break
+ if pos > 0:
+ indent = self.best_sequence_indent
+ if text[-1] not in '\n\x85\u2028\u2029':
+ indicator = '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ indicator = '+'
+ hints += indicator
+ return hints, indent, indicator
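+
+ # Hint examples (editorial comment): ' x\n' -> ('2', 2, '') since a
+ # leading space forces an explicit indent indicator; 'x' (no final
+ # newline) -> ('-', 0, '-'); 'x\n\n' -> ('+', 0, '+').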
+
+ def write_folded(self, text):
+ # type: (Any) -> None
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ self.write_indicator('>' + hints, True)
+ if _indicator == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029\a':
+ if (
+ not leading_space
+ and ch is not None
+ and ch != ' '
+ and text[start] == '\n'
+ ):
+ self.write_line_break()
+ leading_space = ch == ' '
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start + 1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029\a':
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch == '\a':
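+ # editorial comment: '\a' is an internal marker left by the
+ # round-trip representer at positions where the original folded
+ # scalar folded; re-create the fold and skip the marker plus the
+ # space that was inserted with it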
+ if end < (len(text) - 1) and not text[end + 2].isspace():
+ self.write_line_break()
+ self.write_indent()
+ end += 2 # \a and the space that is inserted on the fold
+ else:
+ raise EmitterError('unexpected fold indicator \\a before space')
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in '\n\x85\u2028\u2029'
+ spaces = ch == ' '
+ end += 1
+
+ def write_literal(self, text, comment=None):
+ # type: (Any, Any) -> None
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ # if comment is not None:
+ # try:
+ # hints += comment[1][0]
+ # except (TypeError, IndexError) as e:
+ # pass
+ if not isinstance(comment, str):
+ comment = ''
+ self.write_indicator('|' + hints + comment, True)
+ # try:
+ # nprintf('selfev', comment)
+ # cmx = comment[1][0]
+ # if cmx:
+ # self.stream.write(cmx)
+ # except (TypeError, IndexError) as e:
+ # pass
+ if _indicator == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ if self.root_context:
+ idnx = self.indent if self.indent is not None else 0
+ self.stream.write(' ' * (_indent + idnx))
+ else:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in '\n\x85\u2028\u2029'
+ end += 1
+
+ def write_plain(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ else:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding) # type: ignore
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start + 1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding) # type: ignore
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029': # type: ignore
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding) # type: ignore
+ try:
+ self.stream.write(data)
+ except: # NOQA
+ sys.stdout.write(repr(data) + '\n')
+ raise
+ start = end
+ if ch is not None:
+ spaces = ch == ' '
+ breaks = ch in '\n\x85\u2028\u2029'
+ end += 1
+
+ def write_comment(self, comment, pre=False):
+ # type: (Any, bool) -> None
+ value = comment.value
+ # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
+ if not pre and value[-1] == '\n':
+ value = value[:-1]
+ try:
+ # get original column position
+ col = comment.start_mark.column
+ if comment.value and comment.value.startswith('\n'):
+ # never inject extra spaces if the comment starts with a newline
+ # and is not a real comment (e.g. an empty line following a key-value pair)
+ col = self.column
+ elif col < self.column + 1:
+ raise ValueError
+ except ValueError:
+ col = self.column + 1
+ # nprint('post_comment', self.line, self.column, value)
+ try:
+ # at least one space if the current column >= the start column of the comment
+ # but not at the start of a line
+ nr_spaces = col - self.column
+ if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
+ nr_spaces = 1
+ value = ' ' * nr_spaces + value
+ try:
+ if bool(self.encoding):
+ value = value.encode(self.encoding)
+ except UnicodeDecodeError:
+ pass
+ self.stream.write(value)
+ except TypeError:
+ raise
+ if not pre:
+ self.write_line_break()
+
+ def write_pre_comment(self, event):
+ # type: (Any) -> bool
+ comments = event.comment[1]
+ if comments is None:
+ return False
+ try:
+ start_events = (MappingStartEvent, SequenceStartEvent)
+ for comment in comments:
+ if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
+ continue
+ if self.column != 0:
+ self.write_line_break()
+ self.write_comment(comment, pre=True)
+ if isinstance(event, start_events):
+ comment.pre_done = True
+ except TypeError:
+ sys.stdout.write('eventtt {} {}'.format(type(event), event))
+ raise
+ return True
+
+ def write_post_comment(self, event):
+ # type: (Any) -> bool
+ if event.comment[0] is None:
+ return False
+ comment = event.comment[0]
+ self.write_comment(comment)
+ return True
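A minimal usage sketch of the emitter paths above (illustrative only, not part of the
vendored file): a LiteralScalarString is emitted via write_literal(), and round-trip
mode exercises write_comment()/write_post_comment() so comments survive a load/dump
cycle:

    import sys
    from ruamel.yaml import YAML
    from ruamel.yaml.scalarstring import LiteralScalarString

    yaml = YAML()  # round-trip mode, the default
    data = yaml.load("key: 1  # re-emitted by write_post_comment()\n")
    data['text'] = LiteralScalarString('line one\nline two\n')
    yaml.dump(data, sys.stdout)
    # key: 1  # re-emitted by write_post_comment()
    # text: |
    #   line one
    #   line two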
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/error.py b/lib/spack/external/_vendoring/ruamel/yaml/error.py
new file mode 100644
index 0000000000..30b114a01b
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/error.py
@@ -0,0 +1,332 @@
+# coding: utf-8
+
+import warnings
+import textwrap
+
+from ruamel.yaml.compat import _F
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+
+
+__all__ = [
+ 'FileMark',
+ 'StringMark',
+ 'CommentMark',
+ 'YAMLError',
+ 'MarkedYAMLError',
+ 'ReusedAnchorWarning',
+ 'UnsafeLoaderWarning',
+ 'MarkedYAMLWarning',
+ 'MarkedYAMLFutureWarning',
+]
+
+
+class StreamMark:
+ __slots__ = 'name', 'index', 'line', 'column'
+
+ def __init__(self, name, index, line, column):
+ # type: (Any, int, int, int) -> None
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+
+ def __str__(self):
+ # type: () -> Any
+ where = _F(
+ ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
+ sname=self.name,
+ sline1=self.line + 1,
+ scolumn1=self.column + 1,
+ )
+ return where
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ if self.line != other.line or self.column != other.column:
+ return False
+ if self.name != other.name or self.index != other.index:
+ return False
+ return True
+
+ def __ne__(self, other):
+ # type: (Any) -> bool
+ return not self.__eq__(other)
+
+
+class FileMark(StreamMark):
+ __slots__ = ()
+
+
+class StringMark(StreamMark):
+ __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ # type: (Any, int, int, int, Any, Any) -> None
+ StreamMark.__init__(self, name, index, line, column)
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ # type: (int, int) -> Any
+ if self.buffer is None: # always False
+ return None
+ head = ""
+ start = self.pointer
+ while start > 0 and self.buffer[start - 1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer - start > max_length / 2 - 1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ""
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end - self.pointer > max_length / 2 - 1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ caret = '^ (line: {})'.format(self.line + 1)
+ return (
+ ' ' * indent
+ + head
+ + snippet
+ + tail
+ + '\n'
+ + ' ' * (indent + self.pointer - start + len(head))
+ + caret
+ )
+
+ def __str__(self):
+ # type: () -> Any
+ snippet = self.get_snippet()
+ where = _F(
+ ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
+ sname=self.name,
+ sline1=self.line + 1,
+ scolumn1=self.column + 1,
+ )
+ if snippet is not None:
+ where += ':\n' + snippet
+ return where
+
+ def __repr__(self):
+ # type: () -> Any
+ snippet = self.get_snippet()
+ where = _F(
+ ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}',
+ sname=self.name,
+ sline1=self.line + 1,
+ scolumn1=self.column + 1,
+ )
+ if snippet is not None:
+ where += ':\n' + snippet
+ return where
+
+
+class CommentMark:
+ __slots__ = ('column',)
+
+ def __init__(self, column):
+ # type: (Any) -> None
+ self.column = column
+
+
+class YAMLError(Exception):
+ pass
+
+
+class MarkedYAMLError(YAMLError):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ # warn is ignored
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ return '\n'.join(lines)
+
+
+class YAMLStreamError(Exception):
+ pass
+
+
+class YAMLWarning(Warning):
+ pass
+
+
+class MarkedYAMLWarning(YAMLWarning):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ self.warn = warn
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ if self.warn is not None and self.warn:
+ warn = textwrap.dedent(self.warn)
+ lines.append(warn)
+ return '\n'.join(lines)
+
+
+class ReusedAnchorWarning(YAMLWarning):
+ pass
+
+
+class UnsafeLoaderWarning(YAMLWarning):
+ text = """
+The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
+Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
+Alternatively include the following in your code:
+
+ import warnings
+ warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
+
+In most other cases you should consider using 'safe_load(stream)'"""
+
+
+warnings.simplefilter('once', UnsafeLoaderWarning)
+
+
+class MantissaNoDotYAML1_1Warning(YAMLWarning):
+ def __init__(self, node, flt_str):
+ # type: (Any, Any) -> None
+ self.node = node
+ self.flt = flt_str
+
+ def __str__(self):
+ # type: () -> Any
+ line = self.node.start_mark.line
+ col = self.node.start_mark.column
+ return """
+In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
+See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
+( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
+
+Correct your float: "{}" on line: {}, column: {}
+
+or alternatively include the following in your code:
+
+ import warnings
+ warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
+
+""".format(
+ self.flt, line, col
+ )
+
+
+warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
+
+
+class YAMLFutureWarning(Warning):
+ pass
+
+
+class MarkedYAMLFutureWarning(YAMLFutureWarning):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ self.warn = warn
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ if self.warn is not None and self.warn:
+ warn = textwrap.dedent(self.warn)
+ lines.append(warn)
+ return '\n'.join(lines)
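A sketch of how these mark classes surface to callers (illustrative only): a parse
failure raises a MarkedYAMLError subclass whose string form embeds the snippet and
caret built by StringMark.get_snippet():

    from ruamel.yaml import YAML
    from ruamel.yaml.error import YAMLError

    yaml = YAML(typ='safe', pure=True)
    try:
        yaml.load('key: [unclosed, flow, sequence\n')
    except YAMLError as exc:
        print(exc)  # context/problem marks, with a caret pointing at the column
        mark = getattr(exc, 'problem_mark', None)
        if mark is not None:
            print('line', mark.line + 1, 'column', mark.column + 1)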
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/events.py b/lib/spack/external/_vendoring/ruamel/yaml/events.py
new file mode 100644
index 0000000000..2a895ff359
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/events.py
@@ -0,0 +1,196 @@
+# coding: utf-8
+
+from ruamel.yaml.compat import _F
+
+# Abstract classes.
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+SHOW_LINES = False
+
+
+def CommentCheck():
+ # type: () -> None
+ pass
+
+
+class Event:
+ __slots__ = 'start_mark', 'end_mark', 'comment'
+
+ def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
+ # type: (Any, Any, Any) -> None
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ # assert comment is not CommentCheck
+ if comment is CommentCheck:
+ comment = None
+ self.comment = comment
+
+ def __repr__(self):
+ # type: () -> Any
+ if True:
+ arguments = []
+ if hasattr(self, 'value'):
+ # if you use repr(getattr(self, 'value')) then flake8 complains about
+ # abuse of getattr with a constant. When you change to self.value
+ # then mypy throws an error
+ arguments.append(repr(self.value)) # type: ignore
+ for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']:
+ v = getattr(self, key, None)
+ if v is not None:
+ arguments.append(_F('{key!s}={v!r}', key=key, v=v))
+ if self.comment not in [None, CommentCheck]:
+ arguments.append('comment={!r}'.format(self.comment))
+ if SHOW_LINES:
+ arguments.append(
+ '({}:{}/{}:{})'.format(
+ self.start_mark.line,
+ self.start_mark.column,
+ self.end_mark.line,
+ self.end_mark.column,
+ )
+ )
+ arguments = ', '.join(arguments) # type: ignore
+ else:
+ attributes = [
+ key
+ for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']
+ if hasattr(self, key)
+ ]
+ arguments = ', '.join(
+ [_F('{k!s}={attr!r}', k=key, attr=getattr(self, key)) for key in attributes]
+ )
+ if self.comment not in [None, CommentCheck]:
+ arguments += ', comment={!r}'.format(self.comment)
+ return _F(
+ '{self_class_name!s}({arguments!s})',
+ self_class_name=self.__class__.__name__,
+ arguments=arguments,
+ )
+
+
+class NodeEvent(Event):
+ __slots__ = ('anchor',)
+
+ def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.anchor = anchor
+
+
+class CollectionStartEvent(NodeEvent):
+ __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ nr_items=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
+ NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+ self.tag = tag
+ self.implicit = implicit
+ self.flow_style = flow_style
+ self.nr_items = nr_items
+
+
+class CollectionEndEvent(Event):
+ __slots__ = ()
+
+
+# Implementations.
+
+
+class StreamStartEvent(Event):
+ __slots__ = ('encoding',)
+
+ def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.encoding = encoding
+
+
+class StreamEndEvent(Event):
+ __slots__ = ()
+
+
+class DocumentStartEvent(Event):
+ __slots__ = 'explicit', 'version', 'tags'
+
+ def __init__(
+ self,
+ start_mark=None,
+ end_mark=None,
+ explicit=None,
+ version=None,
+ tags=None,
+ comment=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+
+class DocumentEndEvent(Event):
+ __slots__ = ('explicit',)
+
+ def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.explicit = explicit
+
+
+class AliasEvent(NodeEvent):
+ __slots__ = ('style',)
+
+ def __init__(self, anchor, start_mark=None, end_mark=None, style=None, comment=None):
+ # type: (Any, Any, Any, Any, Any) -> None
+ NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+ self.style = style
+
+
+class ScalarEvent(NodeEvent):
+ __slots__ = 'tag', 'implicit', 'value', 'style'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ value,
+ start_mark=None,
+ end_mark=None,
+ style=None,
+ comment=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
+ NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.style = style
+
+
+class SequenceStartEvent(CollectionStartEvent):
+ __slots__ = ()
+
+
+class SequenceEndEvent(CollectionEndEvent):
+ __slots__ = ()
+
+
+class MappingStartEvent(CollectionStartEvent):
+ __slots__ = ()
+
+
+class MappingEndEvent(CollectionEndEvent):
+ __slots__ = ()
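These event classes are what YAML.parse() (main.py, below) yields in document order;
a short sketch (illustrative only) for stream-level tooling:

    from ruamel.yaml import YAML

    yaml = YAML(typ='safe', pure=True)
    for event in yaml.parse('a: 1\nb: [x, y]\n'):
        print(type(event).__name__, getattr(event, 'value', ''))
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent 'a', ScalarEvent '1', ScalarEvent 'b',
    # SequenceStartEvent, ... MappingEndEvent, DocumentEndEvent, StreamEndEvent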
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/loader.py b/lib/spack/external/_vendoring/ruamel/yaml/loader.py
new file mode 100644
index 0000000000..7234ee1214
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/loader.py
@@ -0,0 +1,75 @@
+# coding: utf-8
+
+from ruamel.yaml.reader import Reader
+from ruamel.yaml.scanner import Scanner, RoundTripScanner
+from ruamel.yaml.parser import Parser, RoundTripParser
+from ruamel.yaml.composer import Composer
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.resolver import VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from ruamel.yaml.compat import StreamTextType, VersionType # NOQA
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
+
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ self.comment_handling = None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ BaseConstructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ self.comment_handling = None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ SafeConstructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ self.comment_handling = None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ Constructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class RoundTripLoader(
+ Reader,
+ RoundTripScanner,
+ RoundTripParser,
+ Composer,
+ RoundTripConstructor,
+ VersionedResolver,
+):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ # self.reader = Reader.__init__(self, stream)
+ self.comment_handling = None # issue 385
+ Reader.__init__(self, stream, loader=self)
+ RoundTripScanner.__init__(self, loader=self)
+ RoundTripParser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
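An illustration (not part of the vendored file) of how these loader stacks are picked
through the YAML facade defined in main.py below: the round-trip stack constructs
CommentedMap and keeps comments, while the safe stack returns plain containers:

    from ruamel.yaml import YAML

    src = 'answer: 42  # kept only in round-trip mode\n'
    rt = YAML()              # 'rt': RoundTripScanner/Parser/Constructor
    safe = YAML(typ='safe')  # SafeConstructor (C parser when available)
    print(type(rt.load(src)).__name__)    # CommentedMap
    print(type(safe.load(src)).__name__)  # dict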
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/main.py b/lib/spack/external/_vendoring/ruamel/yaml/main.py
new file mode 100644
index 0000000000..20bd8d3eb1
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/main.py
@@ -0,0 +1,1667 @@
+# coding: utf-8
+
+import sys
+import os
+import warnings
+import glob
+from importlib import import_module
+
+
+import ruamel.yaml
+from ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA
+
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+
+from ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
+from ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
+from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, nprint, nprintf # NOQA
+from ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA
+from ruamel.yaml.representer import (
+ BaseRepresenter,
+ SafeRepresenter,
+ Representer,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from ruamel.yaml.loader import Loader as UnsafeLoader
+from ruamel.yaml.comments import CommentedMap, CommentedSeq, C_PRE
+
+if False: # MYPY
+ from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
+ from ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA
+ from pathlib import Path
+
+try:
+ from _ruamel_yaml import CParser, CEmitter # type: ignore
+except: # NOQA
+ CParser = CEmitter = None
+
+# import io
+
+
+# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
+# subset of abbreviations, which should be all caps according to PEP8
+
+
+class YAML:
+ def __init__(self, *, typ=None, pure=False, output=None, plug_ins=None): # input=None,
+ # type: (Any, Optional[Text], Any, Any, Any) -> None
+ """
+ typ: 'rt'/None -> RoundTripLoader/RoundTripDumper (default),
+ 'safe' -> SafeLoader/SafeDumper,
+ 'unsafe' -> normal/unsafe Loader/Dumper,
+ 'base' -> BaseLoader/BaseDumper
+ pure: if True only use Python modules
+ input/output: needed to work as context manager
+ plug_ins: a list of plug-in files
+ """
+
+ self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
+ self.pure = pure
+
+ # self._input = input
+ self._output = output
+ self._context_manager = None # type: Any
+
+ self.plug_ins = [] # type: List[Any]
+ for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
+ file_name = pu.replace(os.sep, '.')
+ self.plug_ins.append(import_module(file_name))
+ self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
+ self.allow_unicode = True
+ self.Reader = None # type: Any
+ self.Representer = None # type: Any
+ self.Constructor = None # type: Any
+ self.Scanner = None # type: Any
+ self.Serializer = None # type: Any
+ self.default_flow_style = None # type: Any
+ self.comment_handling = None
+ typ_found = 1
+ setup_rt = False
+ if 'rt' in self.typ:
+ setup_rt = True
+ elif 'safe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.SafeRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.SafeConstructor
+ elif 'base' in self.typ:
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Representer = ruamel.yaml.representer.BaseRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.BaseConstructor
+ elif 'unsafe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.Representer
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.Constructor
+ elif 'rtsc' in self.typ:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScannerSC
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParserSC
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ self.comment_handling = C_PRE
+ else:
+ setup_rt = True
+ typ_found = 0
+ if setup_rt:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScanner
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ del setup_rt
+ self.stream = None
+ self.canonical = None
+ self.old_indent = None
+ self.width = None
+ self.line_break = None
+
+ self.map_indent = None
+ self.sequence_indent = None
+ self.sequence_dash_offset = 0
+ self.compact_seq_seq = None
+ self.compact_seq_map = None
+ self.sort_base_mapping_type_on_output = None # default: sort
+
+ self.top_level_colon_align = None
+ self.prefix_colon = None
+ self.version = None
+ self.preserve_quotes = None
+ self.allow_duplicate_keys = False # duplicate keys in map, set
+ self.encoding = 'utf-8'
+ self.explicit_start = None
+ self.explicit_end = None
+ self.tags = None
+ self.default_style = None
+ self.top_level_block_style_scalar_no_indent_error_1_1 = False
+ # directives end indicator with single scalar document
+ self.scalar_after_indicator = None
+ # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
+ self.brace_single_entry_mapping_in_flow_sequence = False
+ for module in self.plug_ins:
+ if getattr(module, 'typ', None) in self.typ:
+ typ_found += 1
+ module.init_typ(self)
+ break
+ if typ_found == 0:
+ raise NotImplementedError(
+ 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
+ )
+
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._reader # type: ignore
+ except AttributeError:
+ self._reader = self.Reader(None, loader=self)
+ return self._reader
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ try:
+ return self._scanner # type: ignore
+ except AttributeError:
+ self._scanner = self.Scanner(loader=self)
+ return self._scanner
+
+ @property
+ def parser(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Parser is not CParser:
+ setattr(self, attr, self.Parser(loader=self))
+ else:
+ if getattr(self, '_stream', None) is None:
+ # wait for the stream
+ return None
+ else:
+ # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
+ # # pathlib.Path() instance
+ # setattr(self, attr, CParser(self._stream))
+ # else:
+ setattr(self, attr, CParser(self._stream))
+ # self._parser = self._composer = self
+ # nprint('scanner', self.loader.scanner)
+
+ return getattr(self, attr)
+
+ @property
+ def composer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Composer(loader=self))
+ return getattr(self, attr)
+
+ @property
+ def constructor(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
+ cnst.allow_duplicate_keys = self.allow_duplicate_keys
+ setattr(self, attr, cnst)
+ return getattr(self, attr)
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Resolver(version=self.version, loader=self))
+ return getattr(self, attr)
+
+ @property
+ def emitter(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Emitter is not CEmitter:
+ _emitter = self.Emitter(
+ None,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ prefix_colon=self.prefix_colon,
+ brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA
+ dumper=self,
+ )
+ setattr(self, attr, _emitter)
+ if self.map_indent is not None:
+ _emitter.best_map_indent = self.map_indent
+ if self.sequence_indent is not None:
+ _emitter.best_sequence_indent = self.sequence_indent
+ if self.sequence_dash_offset is not None:
+ _emitter.sequence_dash_offset = self.sequence_dash_offset
+ # _emitter.block_seq_indent = self.sequence_dash_offset
+ if self.compact_seq_seq is not None:
+ _emitter.compact_seq_seq = self.compact_seq_seq
+ if self.compact_seq_map is not None:
+ _emitter.compact_seq_map = self.compact_seq_map
+ else:
+ # CEmitter: nothing to cache here, wait for the stream
+ return None
+ return getattr(self, attr)
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(
+ self,
+ attr,
+ self.Serializer(
+ encoding=self.encoding,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ dumper=self,
+ ),
+ )
+ return getattr(self, attr)
+
+ @property
+ def representer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ repres = self.Representer(
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ dumper=self,
+ )
+ if self.sort_base_mapping_type_on_output is not None:
+ repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
+ setattr(self, attr, repres)
+ return getattr(self, attr)
+
+ def scan(self, stream):
+ # type: (StreamTextType) -> Any
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.scan(fp)
+ _, parser = self.get_constructor_parser(stream)
+ try:
+ while self.scanner.check_token():
+ yield self.scanner.get_token()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def parse(self, stream):
+ # type: (StreamTextType) -> Any
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.parse(fp)
+ _, parser = self.get_constructor_parser(stream)
+ try:
+ while parser.check_event():
+ yield parser.get_event()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def compose(self, stream):
+ # type: (Union[Path, StreamTextType]) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.compose(fp)
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ return constructor.composer.get_single_node()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def compose_all(self, stream):
+ # type: (Union[Path, StreamTextType]) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.composer.check_node():
+ yield constructor.composer.get_node()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ # separate output resolver?
+
+ # def load(self, stream=None):
+ # if self._context_manager:
+ # if not self._input:
+ # raise TypeError("Missing input stream while dumping from context manager")
+ # for data in self._context_manager.load():
+ # yield data
+ # return
+ # if stream is None:
+ # raise TypeError("Need a stream argument when not loading from context manager")
+ # return self.load_one(stream)
+
+ def load(self, stream):
+ # type: (Union[Path, StreamTextType]) -> Any
+ """
+ at this point you either have the non-pure Parser (which has its own reader and
+ scanner) or you have the pure Parser.
+ If the pure Parser is set, then set the Reader and Scanner, if not already set.
+ If either the Scanner or Reader is set, you cannot use the non-pure Parser,
+ so reset it to the pure parser and set the Reader resp. Scanner if necessary
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.load(fp)
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ return constructor.get_single_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def load_all(self, stream): # *, skip=None):
+ # type: (Union[Path, StreamTextType]) -> Any
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('r') as fp:
+ for d in self.load_all(fp):
+ yield d
+ return
+ # if skip is None:
+ # skip = []
+ # elif isinstance(skip, int):
+ # skip = [skip]
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.check_data():
+ yield constructor.get_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def get_constructor_parser(self, stream):
+ # type: (StreamTextType) -> Any
+ """
+ the old cyaml needs special setup, and therefore the stream
+ """
+ if self.Parser is not CParser:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.reader.stream = stream
+ else:
+ if self.Reader is not None:
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ elif self.Scanner is not None:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ else:
+ # combined C level reader>scanner>parser
+ # does some calls to the resolver, e.g. BaseResolver.descend_resolver
+ # if you just initialise the CParser, too much of resolver.py
+ # is actually used
+ rslvr = self.Resolver
+ # if rslvr is ruamel.yaml.resolver.VersionedResolver:
+ # rslvr = ruamel.yaml.resolver.Resolver
+
+ class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
+ def __init__(selfx, stream, version=self.version, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA
+ CParser.__init__(selfx, stream)
+ selfx._parser = selfx._composer = selfx
+ self.Constructor.__init__(selfx, loader=selfx)
+ selfx.allow_duplicate_keys = self.allow_duplicate_keys
+ rslvr.__init__(selfx, version=version, loadumper=selfx)
+
+ self._stream = stream
+ loader = XLoader(stream)
+ return loader, loader
+ return self.constructor, self.parser
+
+ def emit(self, events, stream):
+ # type: (Any, Any) -> None
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ _, _, emitter = self.get_serializer_representer_emitter(stream, None)
+ try:
+ for event in events:
+ emitter.emit(event)
+ finally:
+ try:
+ emitter.dispose()
+ except AttributeError:
+ raise
+
+ def serialize(self, node, stream):
+ # type: (Any, Optional[StreamType]) -> Any
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ self.serialize_all([node], stream)
+
+ def serialize_all(self, nodes, stream):
+ # type: (Any, Optional[StreamType]) -> Any
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ serializer, _, emitter = self.get_serializer_representer_emitter(stream, None)
+ try:
+ serializer.open()
+ for node in nodes:
+ serializer.serialize(node)
+ serializer.close()
+ finally:
+ try:
+ emitter.dispose()
+ except AttributeError:
+ raise
+
+ def dump(self, data, stream=None, *, transform=None):
+ # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+ if self._context_manager:
+ if not self._output:
+ raise TypeError('Missing output stream while dumping from context manager')
+ if transform is not None:
+ raise TypeError(
+ '{}.dump() in the context manager cannot have transform keyword '
+ ''.format(self.__class__.__name__)
+ )
+ self._context_manager.dump(data)
+ else: # old style
+ if stream is None:
+ raise TypeError('Need a stream argument when not dumping from context manager')
+ return self.dump_all([data], stream, transform=transform)
+
+ def dump_all(self, documents, stream, *, transform=None):
+ # type: (Any, Union[Path, StreamType], Any) -> Any
+ if self._context_manager:
+ raise NotImplementedError
+ self._output = stream
+ self._context_manager = YAMLContextManager(self, transform=transform)
+ for data in documents:
+ self._context_manager.dump(data)
+ self._context_manager.teardown_output()
+ self._output = None
+ self._context_manager = None
+
+ def Xdump_all(self, documents, stream, *, transform=None):
+ # type: (Any, Any, Any) -> Any
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ """
+ if not hasattr(stream, 'write') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('w') as fp:
+ return self.dump_all(documents, fp, transform=transform)
+ # The stream should have the methods `write` and possibly `flush`.
+ if self.top_level_colon_align is True:
+ tlca = max([len(str(x)) for x in documents[0]]) # type: Any
+ else:
+ tlca = self.top_level_colon_align
+ if transform is not None:
+ fstream = stream
+ if self.encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ serializer, representer, emitter = self.get_serializer_representer_emitter(
+ stream, tlca
+ )
+ try:
+ self.serializer.open()
+ for data in documents:
+ try:
+ self.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+ self.serializer.close()
+ finally:
+ try:
+ self.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ delattr(self, '_serializer')
+ delattr(self, '_emitter')
+ if transform:
+ val = stream.getvalue()
+ if self.encoding:
+ val = val.decode(self.encoding)
+ if fstream is None:
+ transform(val)
+ else:
+ fstream.write(transform(val))
+ return None
+
+ def get_serializer_representer_emitter(self, stream, tlca):
+ # type: (StreamType, Any) -> Any
+ # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
+ if self.Emitter is not CEmitter:
+ if self.Serializer is None:
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ if self.Serializer is not None:
+ # cannot set serializer with CEmitter
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ # C routines
+
+ rslvr = (
+ ruamel.yaml.resolver.BaseResolver
+ if 'base' in self.typ
+ else ruamel.yaml.resolver.Resolver
+ )
+
+ class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
+ def __init__(
+ selfx,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ selfx,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ selfx._emitter = selfx._serializer = selfx._representer = selfx
+ self.Representer.__init__(
+ selfx, default_style=default_style, default_flow_style=default_flow_style
+ )
+ rslvr.__init__(selfx)
+
+ self._stream = stream
+ dumper = XDumper(
+ stream,
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ )
+ self._emitter = self._serializer = dumper
+ return dumper, dumper, dumper
+
+ # basic types
+ def map(self, **kw):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ return CommentedMap(**kw)
+ else:
+ return dict(**kw)
+
+ def seq(self, *args):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ return CommentedSeq(*args)
+ else:
+ return list(*args)
+
+ # helpers
+ def official_plug_ins(self):
+ # type: () -> Any
+ """search for list of subdirs that are plug-ins, if __file__ is not available, e.g.
+ single file installers that are not properly emulating a file-system (issue 324)
+ no plug-ins will be found. If any are packaged, you know which file that are
+ and you can explicitly provide it during instantiation:
+ yaml = ruamel.yaml.YAML(plug_ins=['ruamel/yaml/jinja2/__plug_in__'])
+ """
+ try:
+ bd = os.path.dirname(__file__)
+ except NameError:
+ return []
+ gpbd = os.path.dirname(os.path.dirname(bd))
+ res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
+ return res
+
+ def register_class(self, cls):
+ # type:(Any) -> Any
+ """
+ register a class for dumping/loading
+ - if it has attribute yaml_tag use that to register, else use class name
+ - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
+ as mapping
+ """
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ self.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer, data):
+ # type: (Any, Any) -> Any
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
+
+ self.representer.add_representer(cls, t_y)
+ try:
+ self.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor, node):
+ # type: (Any, Any) -> Any
+ return constructor.construct_yaml_object(node, cls)
+
+ self.constructor.add_constructor(tag, f_y)
+ return cls
+
+ # ### context manager
+
+ def __enter__(self):
+ # type: () -> Any
+ self._context_manager = YAMLContextManager(self)
+ return self
+
+ def __exit__(self, typ, value, traceback):
+ # type: (Any, Any, Any) -> None
+ if typ:
+ nprint('typ', typ)
+ self._context_manager.teardown_output()
+ # self._context_manager.teardown_input()
+ self._context_manager = None
+
+ # ### backwards compatibility
+ def _indent(self, mapping=None, sequence=None, offset=None):
+ # type: (Any, Any, Any) -> None
+ if mapping is not None:
+ self.map_indent = mapping
+ if sequence is not None:
+ self.sequence_indent = sequence
+ if offset is not None:
+ self.sequence_dash_offset = offset
+
+ @property
+ def indent(self):
+ # type: () -> Any
+ return self._indent
+
+ @indent.setter
+ def indent(self, val):
+ # type: (Any) -> None
+ self.old_indent = val
+
+ @property
+ def block_seq_indent(self):
+ # type: () -> Any
+ return self.sequence_dash_offset
+
+ @block_seq_indent.setter
+ def block_seq_indent(self, val):
+ # type: (Any) -> None
+ self.sequence_dash_offset = val
+
+ def compact(self, seq_seq=None, seq_map=None):
+ # type: (Any, Any) -> None
+ self.compact_seq_seq = seq_seq
+ self.compact_seq_map = seq_map
+
+
+class YAMLContextManager:
+ def __init__(self, yaml, transform=None):
+ # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None
+ self._yaml = yaml
+ self._output_inited = False
+ self._output_path = None
+ self._output = self._yaml._output
+ self._transform = transform
+
+ # self._input_inited = False
+ # self._input = input
+ # self._input_path = None
+ # self._transform = yaml.transform
+ # self._fstream = None
+
+ if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
+ # pathlib.Path() instance, open with the same mode
+ self._output_path = self._output
+ self._output = self._output_path.open('w')
+
+ # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
+ # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
+ # # pathlib.Path() instance, open with the same mode
+ # self._input_path = self._input
+ # self._input = self._input_path.open('r')
+
+ if self._transform is not None:
+ self._fstream = self._output
+ if self._yaml.encoding is None:
+ self._output = StringIO()
+ else:
+ self._output = BytesIO()
+
+ def teardown_output(self):
+ # type: () -> None
+ if self._output_inited:
+ self._yaml.serializer.close()
+ else:
+ return
+ try:
+ self._yaml.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ try:
+ delattr(self._yaml, '_serializer')
+ delattr(self._yaml, '_emitter')
+ except AttributeError:
+ raise
+ if self._transform:
+ val = self._output.getvalue()
+ if self._yaml.encoding:
+ val = val.decode(self._yaml.encoding)
+ if self._fstream is None:
+ self._transform(val)
+ else:
+ self._fstream.write(self._transform(val))
+ self._fstream.flush()
+ self._output = self._fstream # maybe not necessary
+ if self._output_path is not None:
+ self._output.close()
+
+ def init_output(self, first_data):
+ # type: (Any) -> None
+ if self._yaml.top_level_colon_align is True:
+ tlca = max([len(str(x)) for x in first_data]) # type: Any
+ else:
+ tlca = self._yaml.top_level_colon_align
+ self._yaml.get_serializer_representer_emitter(self._output, tlca)
+ self._yaml.serializer.open()
+ self._output_inited = True
+
+ def dump(self, data):
+ # type: (Any) -> None
+ if not self._output_inited:
+ self.init_output(data)
+ try:
+ self._yaml.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+
+ # def teardown_input(self):
+ # pass
+ #
+ # def init_input(self):
+ # # set the constructor and parser on YAML() instance
+ # self._yaml.get_constructor_parser(stream)
+ #
+ # def load(self):
+ # if not self._input_inited:
+ # self.init_input()
+ # try:
+ # while self._yaml.constructor.check_data():
+ # yield self._yaml.constructor.get_data()
+ # finally:
+ # parser.dispose()
+ # try:
+ # self._reader.reset_reader() # type: ignore
+ # except AttributeError:
+ # pass
+ # try:
+ # self._scanner.reset_scanner() # type: ignore
+ # except AttributeError:
+ # pass
+
+
+def yaml_object(yml):
+ # type: (Any) -> Any
+ """ decorator for classes that needs to dump/load objects
+ The tag for such objects is taken from the class attribute yaml_tag (or the
+ class name in lowercase in case unavailable)
+ If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
+ loading, default routines (dumping a mapping of the attributes) used otherwise.
+ """
+
+ def yo_deco(cls):
+ # type: (Any) -> Any
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ yml.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer, data):
+ # type: (Any, Any) -> Any
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
+
+ yml.representer.add_representer(cls, t_y)
+ try:
+ yml.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor, node):
+ # type: (Any, Any) -> Any
+ return constructor.construct_yaml_object(node, cls)
+
+ yml.constructor.add_constructor(tag, f_y)
+ return cls
+
+ return yo_deco
+
+
+########################################################################################
+def warn_deprecation(fun, method, arg=''):
+ # type: (Any, Any, str) -> None
+ from ruamel.yaml.compat import _F
+
+ warnings.warn(
+ _F(
+ '\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA
+ fun=fun,
+ method=method,
+ arg=arg,
+ ),
+ PendingDeprecationWarning, # this will show when testing with pytest/tox
+ stacklevel=3,
+ )
+
+
+########################################################################################
+
+
+def scan(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader.scanner.check_token():
+ yield loader.scanner.get_token()
+ finally:
+ loader._parser.dispose()
+
+
+def parse(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader._parser.check_event():
+ yield loader._parser.get_event()
+ finally:
+ loader._parser.dispose()
+
+
+def compose(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+
+def compose_all(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ warn_deprecation('compose_all', 'compose_all', arg="typ='unsafe', pure=True")
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader._composer.get_node()
+ finally:
+ loader._parser.dispose()
+
+
+def load(stream, Loader=None, version=None, preserve_quotes=None):
+ # type: (Any, Any, Any, Any) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ warn_deprecation('load', 'load', arg="typ='unsafe', pure=True")
+ if Loader is None:
+ warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+ Loader = UnsafeLoader
+ loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any
+ try:
+ return loader._constructor.get_single_data()
+ finally:
+ loader._parser.dispose()
+ try:
+ loader._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ loader._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+
+def load_all(stream, Loader=None, version=None, preserve_quotes=None):
+ # type: (Any, Any, Any, Any) -> Any # NOQA
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True")
+ if Loader is None:
+ warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+ Loader = UnsafeLoader
+ loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any
+ try:
+ while loader._constructor.check_data():
+ yield loader._constructor.get_data()
+ finally:
+ loader._parser.dispose()
+ try:
+ loader._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ loader._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+
+def safe_load(stream, version=None):
+ # type: (StreamTextType, Optional[VersionType]) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True")
+ return load(stream, SafeLoader, version)
+
+
+def safe_load_all(stream, version=None):
+ # type: (StreamTextType, Optional[VersionType]) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True")
+ return load_all(stream, SafeLoader, version)
+
+
+def round_trip_load(stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ warn_deprecation('round_trip_load', 'load')
+ return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def round_trip_load_all(stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ warn_deprecation('round_trip_load_all', 'load_all')
+ return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def emit(
+ events,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+):
+ # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('emit', 'emit', arg="typ='safe', pure=True")
+ getvalue = None
+ if stream is None:
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ )
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+
+
+enc = None
+
+
+def serialize_all(
+ nodes,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True")
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ version=version,
+ tags=tags,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ )
+ try:
+ dumper._serializer.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper._serializer.close()
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ # type: (Any, Optional[StreamType], Any, Any) -> Any
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True")
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+
+def dump_all(
+ documents,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Any # NOQA
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True")
+ getvalue = None
+ if top_level_colon_align is True:
+ top_level_colon_align = max([len(str(x)) for x in documents[0]])
+ if stream is None:
+ if encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
+ try:
+ dumper._serializer.open()
+ for data in documents:
+ try:
+ dumper._representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+ dumper._serializer.close()
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+ return None
+
+
+def dump(
+ data,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[Any] # NOQA
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+
+ default_style ∈ None, '', '"', "'", '|', '>'
+
+ """
+ warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True")
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ )
+
+
+def safe_dump_all(documents, stream=None, **kwds):
+ # type: (Any, Optional[StreamType], Any) -> Optional[Any]
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('safe_dump_all', 'dump_all', arg="typ='safe', pure=True")
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+
+def safe_dump(data, stream=None, **kwds):
+ # type: (Any, Optional[StreamType], Any) -> Optional[Any]
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True")
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+
+def round_trip_dump(
+ data,
+ stream=None,
+ Dumper=RoundTripDumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[Any] # NOQA
+ allow_unicode = True if allow_unicode is None else allow_unicode
+ warn_deprecation('round_trip_dump', 'dump')
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
+
+
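The deprecation warnings in the dump family above all point at the new instance API; a minimal sketch of the equivalent calls (the stream argument is required there; names as in the public ruamel.yaml package):

    import sys
    from ruamel.yaml import YAML

    data = {'a': 1, 'b': [2, 3]}

    yaml = YAML(typ='safe', pure=True)  # replaces safe_dump/safe_dump_all
    yaml.dump(data, sys.stdout)

    yaml = YAML()                       # round-trip type, replaces round_trip_dump
    yaml.dump(data, sys.stdout)
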
+# Loader/Dumper are no longer composites; to get to the associated
+# Resolver()/Representer(), etc., you need to instantiate the class
+
+
+def add_implicit_resolver(
+ tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
+):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ if Loader is None and Dumper is None:
+ resolver.add_implicit_resolver(tag, regexp, first)
+ return
+ if Loader:
+ if hasattr(Loader, 'add_implicit_resolver'):
+ Loader.add_implicit_resolver(tag, regexp, first)
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
+ Resolver.add_implicit_resolver(tag, regexp, first)
+ else:
+ raise NotImplementedError
+ if Dumper:
+ if hasattr(Dumper, 'add_implicit_resolver'):
+ Dumper.add_implicit_resolver(tag, regexp, first)
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
+ Resolver.add_implicit_resolver(tag, regexp, first)
+ else:
+ raise NotImplementedError
+
+
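For illustration, registering an implicit resolver through the module-level helper above, with a hypothetical '!celsius' tag; a matching constructor/representer would still be needed before such scalars can actually be loaded or dumped:

    import re
    import ruamel.yaml

    ruamel.yaml.add_implicit_resolver(
        '!celsius',
        re.compile(r'^-?\d+C$'),    # plain scalars such as 25C or -3C
        first=list('-0123456789'),  # possible initial characters
    )
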
+# this code currently not tested
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ if Loader is None and Dumper is None:
+ resolver.add_path_resolver(tag, path, kind)
+ return
+ if Loader:
+ if hasattr(Loader, 'add_path_resolver'):
+ Loader.add_path_resolver(tag, path, kind)
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
+ Resolver.add_path_resolver(tag, path, kind)
+ else:
+ raise NotImplementedError
+ if Dumper:
+ if hasattr(Dumper, 'add_path_resolver'):
+ Dumper.add_path_resolver(tag, path, kind)
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
+ Resolver.add_path_resolver(tag, path, kind)
+ else:
+ raise NotImplementedError
+
+
+def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add an object constructor for the given tag.
+ object_constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ constructor.add_constructor(tag, object_constructor)
+ else:
+ if hasattr(Loader, 'add_constructor'):
+ Loader.add_constructor(tag, object_constructor)
+ return
+ if issubclass(Loader, BaseLoader):
+ BaseConstructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, SafeLoader):
+ SafeConstructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, ruamel.yaml.loader.Loader):
+ Constructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, RoundTripLoader):
+ RoundTripConstructor.add_constructor(tag, object_constructor)
+ else:
+ raise NotImplementedError
+
+
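A hedged sketch of the constructor hook, using a hypothetical '!point' tag and the new-style instance API rather than the deprecated module function:

    from ruamel.yaml import YAML

    def construct_point(constructor, node):
        # build an (x, y) tuple from a '!point [x, y]' sequence node
        return tuple(constructor.construct_sequence(node))

    yaml = YAML(typ='safe', pure=True)
    yaml.constructor.add_constructor('!point', construct_point)
    print(yaml.load('!point [1, 2]'))  # -> (1, 2)
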
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a multi-constructor for the given tag prefix.
+ multi_constructor is called for a node if its tag starts with tag_prefix.
+ It accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ constructor.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ if hasattr(Loader, 'add_multi_constructor'):
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+ return
+ if issubclass(Loader, BaseLoader):
+ BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, SafeLoader):
+ SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, ruamel.yaml.loader.Loader):
+ Constructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, RoundTripLoader):
+ RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ raise NotImplementedError
+
+
+def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a representer for the given type.
+ object_representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ if Dumper is None:
+ representer.add_representer(data_type, object_representer)
+ else:
+ if hasattr(Dumper, 'add_representer'):
+ Dumper.add_representer(data_type, object_representer)
+ return
+ if issubclass(Dumper, BaseDumper):
+ BaseRepresenter.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, SafeDumper):
+ SafeRepresenter.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+ Representer.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, RoundTripDumper):
+ RoundTripRepresenter.add_representer(data_type, object_representer)
+ else:
+ raise NotImplementedError
+
+
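And the mirror image for dumping, again a sketch with a hypothetical Point class:

    import sys
    from ruamel.yaml import YAML

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(representer, data):
        # emit as a '!point' flow-style sequence
        return representer.represent_sequence('!point', [data.x, data.y], flow_style=True)

    yaml = YAML(typ='safe', pure=True)
    yaml.representer.add_representer(Point, represent_point)
    yaml.dump({'p': Point(1, 2)}, sys.stdout)  # p: !point [1, 2]
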
+# this code currently not tested
+def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a representer for the given type.
+ multi_representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ if Dumper is None:
+ representer.add_multi_representer(data_type, multi_representer)
+ else:
+ if hasattr(Dumper, 'add_multi_representer'):
+ Dumper.add_multi_representer(data_type, multi_representer)
+ return
+ if issubclass(Dumper, BaseDumper):
+ BaseRepresenter.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, SafeDumper):
+ SafeRepresenter.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+ Representer.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, RoundTripDumper):
+ RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
+ else:
+ raise NotImplementedError
+
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+
+ def __init__(cls, name, bases, kwds):
+ # type: (Any, Any, Any) -> None
+ super().__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore
+ cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
+
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_constructor = Constructor
+ yaml_representer = Representer
+
+ yaml_tag = None # type: Any
+ yaml_flow_style = None # type: Any
+
+ @classmethod
+ def from_yaml(cls, constructor, node):
+ # type: (Any, Any) -> Any
+ """
+ Convert a representation node to a Python object.
+ """
+ return constructor.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, representer, data):
+ # type: (Any, Any) -> Any
+ """
+ Convert a Python object to a representation node.
+ """
+ return representer.represent_yaml_object(
+ cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
+ )
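A self-registering subclass, for illustration (the Monster class and tag are hypothetical): setting yaml_tag triggers the metaclass registration above, and since the defaults are Constructor/Representer this goes through the unsafe typ.

    import sys
    from ruamel.yaml import YAML, YAMLObject

    class Monster(YAMLObject):
        yaml_tag = '!Monster'

        def __init__(self, name, hp):
            self.name = name
            self.hp = hp

    yaml = YAML(typ='unsafe', pure=True)
    monster = yaml.load('!Monster {name: Cave lizard, hp: 16}')
    yaml.dump(monster, sys.stdout)  # dumps as !Monster with the two attributes
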
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/nodes.py b/lib/spack/external/_vendoring/ruamel/yaml/nodes.py
new file mode 100644
index 0000000000..c76bb4e2cc
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/nodes.py
@@ -0,0 +1,135 @@
+# coding: utf-8
+
+import sys
+
+from ruamel.yaml.compat import _F
+
+if False: # MYPY
+ from typing import Dict, Any, Text # NOQA
+
+
+class Node:
+ __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'
+
+ def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.comment = comment
+ self.anchor = anchor
+
+ def __repr__(self):
+ # type: () -> Any
+ value = self.value
+ # if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = f'<{len(value)} items>'
+ # else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return _F(
+ '{class_name!s}(tag={self_tag!r}, value={value!s})',
+ class_name=self.__class__.__name__,
+ self_tag=self.tag,
+ value=value,
+ )
+
+ def dump(self, indent=0):
+ # type: (int) -> None
+ if isinstance(self.value, str):
+ sys.stdout.write(
+ '{}{}(tag={!r}, value={!r})\n'.format(
+ ' ' * indent, self.__class__.__name__, self.tag, self.value
+ )
+ )
+ if self.comment:
+ sys.stdout.write(' {}comment: {}\n'.format(' ' * indent, self.comment))
+ return
+ sys.stdout.write(
+ '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag)
+ )
+ if self.comment:
+ sys.stdout.write(' {}comment: {}\n'.format(' ' * indent, self.comment))
+ for v in self.value:
+ if isinstance(v, tuple):
+ for v1 in v:
+ v1.dump(indent + 1)
+ elif isinstance(v, Node):
+ v.dump(indent + 1)
+ else:
+ sys.stdout.write('Node value type? {}\n'.format(type(v)))
+
+
+class ScalarNode(Node):
+ """
+ styles:
+ ? -> set() ? key, no value
+ " -> double quoted
+ ' -> single quoted
+ | -> literal style
+ > -> folding style
+ """
+
+ __slots__ = ('style',)
+ id = 'scalar'
+
+ def __init__(
+ self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor)
+ self.style = style
+
+
+class CollectionNode(Node):
+ __slots__ = ('flow_style',)
+
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
+ self.flow_style = flow_style
+ self.anchor = anchor
+
+
+class SequenceNode(CollectionNode):
+ __slots__ = ()
+ id = 'sequence'
+
+
+class MappingNode(CollectionNode):
+ __slots__ = ('merge',)
+ id = 'mapping'
+
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ CollectionNode.__init__(
+ self, tag, value, start_mark, end_mark, flow_style, comment, anchor
+ )
+ self.merge = None
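To see these node classes in action, a small hand-built representation tree printed with the Node.dump() debug helper above:

    from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode

    key = ScalarNode('tag:yaml.org,2002:str', 'numbers')
    seq = SequenceNode('tag:yaml.org,2002:seq', [
        ScalarNode('tag:yaml.org,2002:int', '1'),
        ScalarNode('tag:yaml.org,2002:int', '2'),
    ])
    MappingNode('tag:yaml.org,2002:map', [(key, seq)]).dump()
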
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/parser.py b/lib/spack/external/_vendoring/ruamel/yaml/parser.py
new file mode 100644
index 0000000000..a2fab43ce7
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/parser.py
@@ -0,0 +1,884 @@
+# coding: utf-8
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document*
+# STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content |
+# indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+# BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+# FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+
+# the import needs the full path, as pkg_resources tries to load parser.py in __init__.py
+# only to do nothing with the package afterwards;
+# the same is needed for Jython
+
+
+from ruamel.yaml.error import MarkedYAMLError
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA
+from ruamel.yaml.scanner import BlankLineComment
+from ruamel.yaml.comments import C_PRE, C_POST, C_SPLIT_ON_FIRST_BLANK
+from ruamel.yaml.compat import _F, nprint, nprintf # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
+def xprintf(*args, **kw):
+ # type: (Any, Any) -> Any
+ return nprintf(*args, **kw)
+
+
+class ParserError(MarkedYAMLError):
+ pass
+
+
+class Parser:
+ # Since writing a recursive-descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {'!': '!', '!!': 'tag:yaml.org,2002:'}
+
+ def __init__(self, loader):
+ # type: (Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_parser', None) is None:
+ self.loader._parser = self
+ self.reset_parser()
+
+ def reset_parser(self):
+ # type: () -> None
+ # Reset the state attributes (to clear self-references)
+ self.current_event = self.last_event = None
+ self.tag_handles = {} # type: Dict[Any, Any]
+ self.states = [] # type: List[Any]
+ self.marks = [] # type: List[Any]
+ self.state = self.parse_stream_start # type: Any
+
+ def dispose(self):
+ # type: () -> None
+ self.reset_parser()
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.scanner
+ return self.loader._scanner
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ def check_event(self, *choices):
+ # type: (Any) -> bool
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # type: () -> Any
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # type: () -> Any
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ # assert self.current_event is not None
+ # if self.current_event.end_mark.line != self.peek_event().start_mark.line:
+ xprintf('get_event', repr(self.current_event), self.peek_event().start_mark.line)
+ self.last_event = value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document*
+ # STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+ # type: () -> Any
+ # Parse the stream start.
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+ # type: () -> Any
+ # Parse an implicit document.
+ if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+ # type: () -> Any
+ # Parse any extra document end indicators.
+ while self.scanner.check_token(DocumentEndToken):
+ self.scanner.get_token()
+ # Parse an explicit document.
+ if not self.scanner.check_token(StreamEndToken):
+ version, tags = self.process_directives()
+ if not self.scanner.check_token(DocumentStartToken):
+ raise ParserError(
+ None,
+ None,
+ _F(
+ "expected '<document start>', but found {pt!r}",
+ pt=self.scanner.peek_token().id,
+ ),
+ self.scanner.peek_token().start_mark,
+ )
+ token = self.scanner.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ # if self.loader is not None and \
+ # end_mark.line != self.scanner.peek_token().start_mark.line:
+ # self.loader.scalar_after_indicator = False
+ event = DocumentStartEvent(
+ start_mark, end_mark, explicit=True, version=version, tags=tags,
+ comment=token.comment
+ ) # type: Any
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.scanner.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+ # type: () -> Any
+ # Parse the document end.
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.scanner.check_token(DocumentEndToken):
+ token = self.scanner.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
+
+ # Prepare the next state.
+ if self.resolver.processing_version == (1, 1):
+ self.state = self.parse_document_start
+ else:
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_document_content(self):
+ # type: () -> Any
+ if self.scanner.check_token(
+ DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken
+ ):
+ event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ # type: () -> Any
+ yaml_version = None
+ self.tag_handles = {}
+ while self.scanner.check_token(DirectiveToken):
+ token = self.scanner.get_token()
+ if token.name == 'YAML':
+ if yaml_version is not None:
+ raise ParserError(
+ None, None, 'found duplicate YAML directive', token.start_mark
+ )
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(
+ None,
+ None,
+ 'found incompatible YAML document (version 1.* is required)',
+ token.start_mark,
+ )
+ yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(
+ None,
+ None,
+ _F('duplicate tag handle {handle!r}', handle=handle),
+ token.start_mark,
+ )
+ self.tag_handles[handle] = prefix
+ if bool(self.tag_handles):
+ value = yaml_version, self.tag_handles.copy() # type: Any
+ else:
+ value = yaml_version, None
+ if self.loader is not None and hasattr(self.loader, 'tags'):
+ self.loader.version = yaml_version
+ if self.loader.tags is None:
+ self.loader.tags = {}
+ for k in self.tag_handles:
+ self.loader.tags[k] = self.tag_handles[k]
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ # type: () -> Any
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ # type: () -> Any
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ # type: () -> Any
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def transform_tag(self, handle, suffix):
+ # type: (Any, Any) -> Any
+ return self.tag_handles[handle] + suffix
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ # type: (bool, bool) -> Any
+ if self.scanner.check_token(AliasToken):
+ token = self.scanner.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark) # type: Any
+ self.state = self.states.pop()
+ return event
+
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.scanner.check_token(AnchorToken):
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.scanner.check_token(TagToken):
+ token = self.scanner.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.scanner.check_token(TagToken):
+ token = self.scanner.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.scanner.check_token(AnchorToken):
+ token = self.scanner.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError(
+ 'while parsing a node',
+ start_mark,
+ _F('found undefined tag handle {handle!r}', handle=handle),
+ tag_mark,
+ )
+ tag = self.transform_tag(handle, suffix)
+ else:
+ tag = suffix
+ # if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
+ # and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.scanner.peek_token().start_mark
+ event = None
+ implicit = tag is None or tag == '!'
+ if indentless_sequence and self.scanner.check_token(BlockEntryToken):
+ comment = None
+ pt = self.scanner.peek_token()
+ if self.loader and self.loader.comment_handling is None:
+ if pt.comment and pt.comment[0]:
+ comment = [pt.comment[0], []]
+ pt.comment[0] = None
+ elif self.loader:
+ if pt.comment:
+ comment = pt.comment
+ end_mark = self.scanner.peek_token().end_mark
+ event = SequenceStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_indentless_sequence_entry
+ return event
+
+ if self.scanner.check_token(ScalarToken):
+ token = self.scanner.get_token()
+ # self.scanner.peek_token_same_line_comment(token)
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ # nprint('se', token.value, token.comment)
+ event = ScalarEvent(
+ anchor,
+ tag,
+ implicit,
+ token.value,
+ start_mark,
+ end_mark,
+ style=token.style,
+ comment=token.comment,
+ )
+ self.state = self.states.pop()
+ elif self.scanner.check_token(FlowSequenceStartToken):
+ pt = self.scanner.peek_token()
+ end_mark = pt.end_mark
+ event = SequenceStartEvent(
+ anchor,
+ tag,
+ implicit,
+ start_mark,
+ end_mark,
+ flow_style=True,
+ comment=pt.comment,
+ )
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.scanner.check_token(FlowMappingStartToken):
+ pt = self.scanner.peek_token()
+ end_mark = pt.end_mark
+ event = MappingStartEvent(
+ anchor,
+ tag,
+ implicit,
+ start_mark,
+ end_mark,
+ flow_style=True,
+ comment=pt.comment,
+ )
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.scanner.check_token(BlockSequenceStartToken):
+ end_mark = self.scanner.peek_token().start_mark
+ # should inserting the comment be dependent on the
+ # indentation?
+ pt = self.scanner.peek_token()
+ comment = pt.comment
+ # nprint('pt0', type(pt))
+ if comment is None or comment[1] is None:
+ comment = pt.split_old_comment()
+ # nprint('pt1', comment)
+ event = SequenceStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.scanner.check_token(BlockMappingStartToken):
+ end_mark = self.scanner.peek_token().start_mark
+ comment = self.scanner.peek_token().comment
+ event = MappingStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.scanner.peek_token()
+ raise ParserError(
+ _F('while parsing a {node!s} node', node=node),
+ start_mark,
+ _F('expected the node content, but found {token_id!r}', token_id=token.id),
+ token.start_mark,
+ )
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+ # BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ # move any comment from start token
+ # self.move_token_comment(token)
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ # type: () -> Any
+ if self.scanner.check_token(BlockEntryToken):
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.scanner.check_token(BlockEndToken):
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a block collection',
+ self.marks[-1],
+ _F('expected <block end>, but found {token_id!r}', token_id=token.id),
+ token.start_mark,
+ )
+ token = self.scanner.get_token() # BlockEndToken
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ # indentless_sequence?
+ # sequence:
+ # - entry
+ # - nested
+
+ def parse_indentless_sequence_entry(self):
+ # type: () -> Any
+ if self.scanner.check_token(BlockEntryToken):
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ if not self.scanner.check_token(
+ BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+ ):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.scanner.peek_token()
+ c = None
+ if self.loader and self.loader.comment_handling is None:
+ c = token.comment
+ start_mark = token.start_mark
+ else:
+ start_mark = self.last_event.end_mark # type: ignore
+ c = self.distribute_comment(token.comment, start_mark.line) # type: ignore
+ event = SequenceEndEvent(start_mark, start_mark, comment=c)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ # type: () -> Any
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+ if not self.scanner.check_token(BlockEndToken):
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a block mapping',
+ self.marks[-1],
+ _F('expected <block end>, but found {token_id!r}', token_id=token.id),
+ token.start_mark,
+ )
+ token = self.scanner.get_token()
+ self.move_token_comment(token)
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ # the value token might have a post comment; move it to e.g. the block
+ if self.scanner.check_token(ValueToken):
+ self.move_token_comment(token)
+ else:
+ if not self.scanner.check_token(KeyToken):
+ self.move_token_comment(token, empty=True)
+ # else: empty value for this key cannot move token.comment
+ if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ comment = token.comment
+ if comment is None:
+ token = self.scanner.peek_token()
+ comment = token.comment
+ if comment:
+ token._comment = [None, comment[1]]
+ comment = [comment[0], None]
+ return self.process_empty_scalar(token.end_mark, comment=comment)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+# generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ # type: (bool) -> Any
+ if not self.scanner.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.scanner.check_token(FlowEntryToken):
+ self.scanner.get_token()
+ else:
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a flow sequence',
+ self.marks[-1],
+ _F("expected ',' or ']', but got {token_id!r}", token_id=token.id),
+ token.start_mark,
+ )
+
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.peek_token()
+ event = MappingStartEvent(
+ None, None, True, token.start_mark, token.end_mark, flow_style=True
+ ) # type: Any
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.scanner.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.scanner.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ # type: () -> Any
+ self.state = self.parse_flow_sequence_entry
+ token = self.scanner.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ # type: (Any) -> Any
+ if not self.scanner.check_token(FlowMappingEndToken):
+ if not first:
+ if self.scanner.check_token(FlowEntryToken):
+ self.scanner.get_token()
+ else:
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a flow mapping',
+ self.marks[-1],
+ _F("expected ',' or '}}', but got {token_id!r}", token_id=token.id),
+ token.start_mark,
+ )
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(
+ ValueToken, FlowEntryToken, FlowMappingEndToken
+ ):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
+ ValueToken
+ ):
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(self.scanner.peek_token().end_mark)
+ elif not self.scanner.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.scanner.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ # type: () -> Any
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark, comment=None):
+ # type: (Any, Any) -> Any
+ return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
+ def move_token_comment(self, token, nt=None, empty=False):
+ # type: (Any, Optional[Any], Optional[bool]) -> Any
+ pass
+
+
+class RoundTripParser(Parser):
+ """roundtrip is a safe loader, that wants to see the unmangled tag"""
+
+ def transform_tag(self, handle, suffix):
+ # type: (Any, Any) -> Any
+ # return self.tag_handles[handle]+suffix
+ if handle == '!!' and suffix in (
+ 'null',
+ 'bool',
+ 'int',
+ 'float',
+ 'binary',
+ 'timestamp',
+ 'omap',
+ 'pairs',
+ 'set',
+ 'str',
+ 'seq',
+ 'map',
+ ):
+ return Parser.transform_tag(self, handle, suffix)
+ return handle + suffix
+
+ def move_token_comment(self, token, nt=None, empty=False):
+ # type: (Any, Optional[Any], Optional[bool]) -> Any
+ token.move_old_comment(self.scanner.peek_token() if nt is None else nt, empty=empty)
+
+
+class RoundTripParserSC(RoundTripParser):
+ """roundtrip is a safe loader, that wants to see the unmangled tag"""
+
+ # some of the differences are based on the superclass testing
+ # if self.loader.comment_handling is not None
+
+ def move_token_comment(self, token, nt=None, empty=False):
+ # type: (Any, Optional[Any], Optional[bool]) -> None
+ token.move_new_comment(self.scanner.peek_token() if nt is None else nt, empty=empty)
+
+ def distribute_comment(self, comment, line):
+ # type: (Any, Any) -> Any
+ # ToDo, look at indentation of the comment to determine attachment
+ if comment is None:
+ return None
+ if not comment[0]:
+ return None
+ if comment[0][0] != line + 1:
+ nprintf('>>>dcxxx', comment, line)
+ assert comment[0][0] == line + 1
+ # if comment[0] - line > 1:
+ # return
+ typ = self.loader.comment_handling & 0b11
+ # nprintf('>>>dca', comment, line, typ)
+ if typ == C_POST:
+ return None
+ if typ == C_PRE:
+ c = [None, None, comment[0]]
+ comment[0] = None
+ return c
+ # nprintf('>>>dcb', comment[0])
+ for _idx, cmntidx in enumerate(comment[0]):
+ # nprintf('>>>dcb', cmntidx)
+ if isinstance(self.scanner.comments[cmntidx], BlankLineComment):
+ break
+ else:
+ return None # no space found
+ if _idx == 0:
+ return None # first line was blank
+ # nprintf('>>>dcc', idx)
+ if typ == C_SPLIT_ON_FIRST_BLANK:
+ c = [None, None, comment[0][:_idx]]
+ comment[0] = comment[0][_idx:]
+ return c
+ raise NotImplementedError # reserved
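The parser above is normally driven through the composer, but its event stream can be inspected directly; a sketch using the public parse() entry point of the new-style API:

    from ruamel.yaml import YAML

    yaml = YAML(typ='safe', pure=True)
    for event in yaml.parse('- a\n- {b: 1}\n'):
        print(event)
    # StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent, ...
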
diff --git a/lib/spack/external/ruamel/__init__.py b/lib/spack/external/_vendoring/ruamel/yaml/py.typed
index e69de29bb2..e69de29bb2 100644
--- a/lib/spack/external/ruamel/__init__.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/py.typed
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/reader.py b/lib/spack/external/_vendoring/ruamel/yaml/reader.py
new file mode 100644
index 0000000000..4aac40a4f7
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/reader.py
@@ -0,0 +1,302 @@
+# coding: utf-8
+
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions ahead
+# reader.forward(length=1) - move the current position `length`
+# characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current
+# character.
+
+import codecs
+
+from ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError
+from ruamel.yaml.compat import _F # NOQA
+from ruamel.yaml.util import RegExp
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Text, Tuple # NOQA
+# from ruamel.yaml.compat import StreamTextType # NOQA
+
+__all__ = ['Reader', 'ReaderError']
+
+
+class ReaderError(YAMLError):
+ def __init__(self, name, position, character, encoding, reason):
+ # type: (Any, Any, Any, Any, Any) -> None
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ # type: () -> Any
+ if isinstance(self.character, bytes):
+ return _F(
+ "'{self_encoding!s}' codec can't decode byte #x{ord_self_character:02x}: "
+ '{self_reason!s}\n'
+ ' in "{self_name!s}", position {self_position:d}',
+ self_encoding=self.encoding,
+ ord_self_character=ord(self.character),
+ self_reason=self.reason,
+ self_name=self.name,
+ self_position=self.position,
+ )
+ else:
+ return _F(
+ 'unacceptable character #x{self_character:04x}: {self_reason!s}\n'
+ ' in "{self_name!s}", position {self_position:d}',
+ self_character=self.character,
+ self_reason=self.reason,
+ self_name=self.name,
+ self_position=self.position,
+ )
+
+
+class Reader:
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream, loader=None):
+ # type: (Any, Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_reader', None) is None:
+ self.loader._reader = self
+ self.reset_reader()
+ self.stream = stream # type: Any # as .read is called
+
+ def reset_reader(self):
+ # type: () -> None
+ self.name = None # type: Any
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ""
+ self.pointer = 0
+ self.raw_buffer = None # type: Any
+ self.raw_decode = None
+ self.encoding = None # type: Optional[Text]
+ self.index = 0
+ self.line = 0
+ self.column = 0
+
+ @property
+ def stream(self):
+ # type: () -> Any
+ try:
+ return self._stream
+ except AttributeError:
+ raise YAMLStreamError('input stream needs to be specified')
+
+ @stream.setter
+ def stream(self, val):
+ # type: (Any) -> None
+ if val is None:
+ return
+ self._stream = None
+ if isinstance(val, str):
+ self.name = '<unicode string>'
+ self.check_printable(val)
+ self.buffer = val + '\0'
+ elif isinstance(val, bytes):
+ self.name = '<byte string>'
+ self.raw_buffer = val
+ self.determine_encoding()
+ else:
+ if not hasattr(val, 'read'):
+ raise YAMLStreamError('stream argument needs to have a read() method')
+ self._stream = val
+ self.name = getattr(self.stream, 'name', '<file>')
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ # type: (int) -> Text
+ try:
+ return self.buffer[self.pointer + index]
+ except IndexError:
+ self.update(index + 1)
+ return self.buffer[self.pointer + index]
+
+ def prefix(self, length=1):
+ # type: (int) -> Any
+ if self.pointer + length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer : self.pointer + length]
+
+ def forward_1_1(self, length=1):
+ # type: (int) -> None
+ if self.pointer + length + 1 >= len(self.buffer):
+ self.update(length + 1)
+ while length != 0:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' or (
+ ch == '\r' and self.buffer[self.pointer] != '\n'
+ ):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def forward(self, length=1):
+ # type: (int) -> None
+ if self.pointer + length + 1 >= len(self.buffer):
+ self.update(length + 1)
+ while length != 0:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch == '\n' or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ # type: () -> Any
+ if self.stream is None:
+ return StringMark(
+ self.name, self.index, self.line, self.column, self.buffer, self.pointer
+ )
+ else:
+ return FileMark(self.name, self.index, self.line, self.column)
+
+ def determine_encoding(self):
+ # type: () -> None
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode # type: ignore
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode # type: ignore
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode # type: ignore
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = RegExp(
+ '[^\x09\x0A\x0D\x20-\x7E\x85' '\xA0-\uD7FF' '\uE000-\uFFFD' '\U00010000-\U0010FFFF' ']'
+ )
+
+ _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii')
+
+ @classmethod
+ def _get_non_printable_ascii(cls, data): # type: ignore
+ # type: (Text, bytes) -> Optional[Tuple[int, Text]]
+ ascii_bytes = data.encode('ascii') # type: ignore
+ non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore
+ if not non_printables:
+ return None
+ non_printable = non_printables[:1]
+ return ascii_bytes.index(non_printable), non_printable.decode('ascii')
+
+ @classmethod
+ def _get_non_printable_regex(cls, data):
+ # type: (Text) -> Optional[Tuple[int, Text]]
+ match = cls.NON_PRINTABLE.search(data)
+ if not bool(match):
+ return None
+ return match.start(), match.group()
+
+ @classmethod
+ def _get_non_printable(cls, data):
+ # type: (Text) -> Optional[Tuple[int, Text]]
+ try:
+ return cls._get_non_printable_ascii(data) # type: ignore
+ except UnicodeEncodeError:
+ return cls._get_non_printable_regex(data)
+
+ def check_printable(self, data):
+ # type: (Any) -> None
+ non_printable_match = self._get_non_printable(data)
+ if non_printable_match is not None:
+ start, character = non_printable_match
+ position = self.index + (len(self.buffer) - self.pointer) + start
+ raise ReaderError(
+ self.name,
+ position,
+ ord(character),
+ 'unicode',
+ 'special characters are not allowed',
+ )
+
+ def update(self, length):
+ # type: (int) -> None
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer :]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer - len(self.raw_buffer) + exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character, exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=None):
+ # type: (Optional[int]) -> None
+ if size is None:
+ size = 4096
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+
+# try:
+# import psyco
+# psyco.bind(Reader)
+# except ImportError:
+# pass
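A minimal sketch of the Reader above, driving peek()/forward() by hand over a unicode string (no loader attached):

    from ruamel.yaml.reader import Reader

    reader = Reader('a: 1\nb: 2\n')
    chars = []
    while reader.peek() != '\0':       # the buffer is terminated with '\0'
        chars.append(reader.peek())
        reader.forward()
    print(''.join(chars), end='')      # the original text
    print(reader.line, reader.column)  # position tracking: 2 0
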
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/representer.py b/lib/spack/external/_vendoring/ruamel/yaml/representer.py
new file mode 100644
index 0000000000..820559b5c4
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/representer.py
@@ -0,0 +1,1156 @@
+# coding: utf-8
+
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.compat import ordereddict
+from ruamel.yaml.compat import _F, nprint, nprintf # NOQA
+from ruamel.yaml.scalarstring import (
+ LiteralScalarString,
+ FoldedScalarString,
+ SingleQuotedScalarString,
+ DoubleQuotedScalarString,
+ PlainScalarString,
+)
+from ruamel.yaml.comments import (
+ CommentedMap,
+ CommentedOrderedMap,
+ CommentedSeq,
+ CommentedKeySeq,
+ CommentedKeyMap,
+ CommentedSet,
+ comment_attrib,
+ merge_attrib,
+ TaggedScalar,
+)
+from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from ruamel.yaml.scalarfloat import ScalarFloat
+from ruamel.yaml.scalarbool import ScalarBoolean
+from ruamel.yaml.timestamp import TimeStamp
+from ruamel.yaml.anchor import Anchor
+
+import datetime
+import sys
+import types
+
+import copyreg
+import base64
+
+if False: # MYPY
+ from typing import Dict, List, Any, Union, Text, Optional # NOQA
+
+# fmt: off
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
+
+
+class RepresenterError(YAMLError):
+ pass
+
+
+class BaseRepresenter:
+
+ yaml_representers = {} # type: Dict[Any, Any]
+ yaml_multi_representers = {} # type: Dict[Any, Any]
+
+ def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+ # type: (Any, Any, Any) -> None
+ self.dumper = dumper
+ if self.dumper is not None:
+ self.dumper._representer = self
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {} # type: Dict[Any, Any]
+ self.object_keeper = [] # type: List[Any]
+ self.alias_key = None # type: Optional[int]
+ self.sort_base_mapping_type_on_output = True
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ def represent(self, data):
+ # type: (Any) -> None
+ node = self.represent_data(data)
+ self.serializer.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ # type: (Any) -> Any
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ # if node is None:
+ # raise RepresenterError(
+ # f"recursive objects are not allowed: {data!r}")
+ return node
+ # self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ # if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def represent_key(self, data):
+ # type: (Any) -> Any
+ """
+ David Fraser: Extract a method to represent keys in mappings, so that
+ a subclass can choose not to quote them (for example)
+ used in represent_mapping
+ https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
+ """
+ return self.represent_data(data)
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ # type: (Any, Any) -> None
+ if 'yaml_representers' not in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ # type: (Any, Any) -> None
+ if 'yaml_multi_representers' not in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
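The two classmethods above implement a copy-on-write registry: the first registration on a subclass copies the inherited dict into that subclass's own __dict__, so later registrations never leak into the base class. A minimal sketch of feeding this registry through the public API (the Point class and represent_point function are hypothetical names used only for illustration):

import sys
from ruamel.yaml import YAML

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def represent_point(representer, data):
    # any node-producing helper works; represent_mapping is defined above
    return representer.represent_mapping('!point', {'x': data.x, 'y': data.y})

yaml = YAML()
yaml.representer.add_representer(Point, represent_point)
yaml.dump({'origin': Point(0, 0)}, sys.stdout)  # emits a '!point' tagged mapping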
+ def represent_scalar(self, tag, value, style=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if style is None:
+ style = self.default_style
+ comment = None
+ if style and style[0] in '|>':
+ comment = getattr(value, 'comment', None)
+ if comment:
+ comment = [None, [comment]]
+ node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_omap(self, tag, omap, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item_key in omap:
+ item_val = omap[item_key]
+ node_item = self.represent_data({item_key: item_val})
+ # if not (isinstance(node_item, ScalarNode) \
+ # and not node_item.style):
+ # best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_base_mapping_type_on_output:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ return False
+
+
+class SafeRepresenter(BaseRepresenter):
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
+ # "i.e. two occurrences of the empty tuple may or may not yield the same object"
+ # so "data is ()" should not be used
+ if data is None or (isinstance(data, tuple) and data == ()):
+ return True
+ if isinstance(data, (bytes, str, bool, int, float)):
+ return True
+ return False
+
+ def represent_none(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ # type: (Any) -> Any
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+            # Python 2 fallback (base64.encodestring was removed in Python 3.9)
+ data = base64.encodestring(data).decode('ascii') # type: ignore
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data, anchor=None):
+ # type: (Any, Optional[Any]) -> Any
+ try:
+ value = self.dumper.boolean_representation[bool(data)]
+ except AttributeError:
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor)
+
+ def represent_int(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+    # compute positive infinity portably: keep squaring until repr() stabilizes
+    inf_value = 1e300
+ while repr(inf_value) != repr(inf_value * inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ # type: (Any) -> Any
+        # data != data catches NaN; the second test catches platforms where NaN
+        # compares equal to everything
+        if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ if getattr(self.serializer, 'use_version', None) == (1, 1):
+ if '.' not in value and 'e' in value:
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag in YAML 1.1. We fix
+ # this by adding '.0' before the 'e' symbol.
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ # type: (Any) -> Any
+ # pairs = (len(data) > 0 and isinstance(data, list))
+ # if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ # if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+
+ # value = []
+ # for item_key, item_value in data:
+ # value.append(self.represent_mapping('tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ # return SequenceNode('tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ # type: (Any) -> Any
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_ordereddict(self, data):
+ # type: (Any) -> Any
+ return self.represent_omap('tag:yaml.org,2002:omap', data)
+
+ def represent_set(self, data):
+ # type: (Any) -> Any
+ value = {} # type: Dict[Any, None]
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ # type: (Any) -> Any
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ # type: (Any) -> Any
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ # type: (Any) -> None
+ raise RepresenterError(_F('cannot represent an object: {data!s}', data=data))
+
+
+SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
+
+if sys.version_info >= (2, 7):  # always true on Python 3; retained from the Python 2 era
+ import collections
+
+ SafeRepresenter.add_representer(
+ collections.OrderedDict, SafeRepresenter.represent_ordereddict
+ )
+
+SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
+
+
+class Representer(SafeRepresenter):
+ def represent_complex(self, data):
+ # type: (Any) -> Any
+ if data.imag == 0.0:
+ data = repr(data.real)
+ elif data.real == 0.0:
+ data = _F('{data_imag!r}j', data_imag=data.imag)
+ elif data.imag > 0:
+ data = _F('{data_real!r}+{data_imag!r}j', data_real=data.real, data_imag=data.imag)
+ else:
+ data = _F('{data_real!r}{data_imag!r}j', data_real=data.real, data_imag=data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ # type: (Any) -> Any
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ # type: (Any) -> Any
+ try:
+ name = _F(
+ '{modname!s}.{qualname!s}', modname=data.__module__, qualname=data.__qualname__
+ )
+ except AttributeError:
+ # ToDo: check if this can be reached in Py3
+ name = _F('{modname!s}.{name!s}', modname=data.__module__, name=data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:' + name, "")
+
+ def represent_module(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar('tag:yaml.org,2002:python/module:' + data.__name__, "")
+
+ def represent_object(self, data):
+ # type: (Any) -> Any
+        # We use the __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table: # type: ignore
+ reduce = copyreg.dispatch_table[cls](data) # type: ignore
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError(_F('cannot represent object: {data!r}', data=data))
+ reduce = (list(reduce) + [None] * 5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ try:
+ function_name = _F(
+ '{fun!s}.{qualname!s}', fun=function.__module__, qualname=function.__qualname__
+ )
+ except AttributeError:
+ # ToDo: check if this can be reached in Py3
+ function_name = _F(
+ '{fun!s}.{name!s}', fun=function.__module__, name=function.__name__
+ )
+ if not args and not listitems and not dictitems and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:' + function_name, state
+ )
+ if not listitems and not dictitems and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag + function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag + function_name, value)
+
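To make the comment block in represent_object concrete: for an ordinary class, protocol-2 __reduce_ex__ returns a __newobj__-style tuple, which is why plain objects usually take the tag:yaml.org,2002:python/object: shortcut branch. A sketch, with Frob a hypothetical class (the exact tuple contents are a CPython detail):

class Frob:
    def __init__(self, n):
        self.n = n

reduce = (list(Frob(3).__reduce_ex__(2)) + [None] * 5)[:5]
function, args, state, listitems, dictitems = reduce
# on CPython: function.__name__ == '__newobj__', args == (Frob,), state == {'n': 3},
# so represent_object emits a mapping under 'tag:yaml.org,2002:python/object:...Frob'
print(function.__name__, args, state)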
+
+Representer.add_representer(complex, Representer.represent_complex)
+
+Representer.add_representer(tuple, Representer.represent_tuple)
+
+Representer.add_representer(type, Representer.represent_name)
+
+Representer.add_representer(types.FunctionType, Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)
+
+Representer.add_representer(types.ModuleType, Representer.represent_module)
+
+Representer.add_multi_representer(object, Representer.represent_object)
+
+Representer.add_multi_representer(type, Representer.represent_name)
+
+
+class RoundTripRepresenter(SafeRepresenter):
+ # need to add type here and write out the .comment
+ # in serializer and emitter
+
+ def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+ # type: (Any, Any, Any) -> None
+ if not hasattr(dumper, 'typ') and default_flow_style is None:
+ default_flow_style = False
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=dumper,
+ )
+
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ try:
+ if data.anchor is not None and data.anchor.value is not None:
+ return False
+ except AttributeError:
+ pass
+ return SafeRepresenter.ignore_aliases(self, data)
+
+ def represent_none(self, data):
+ # type: (Any) -> Any
+ if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
+ # this will be open ended (although it is not yet)
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+ return self.represent_scalar('tag:yaml.org,2002:null', "")
+
+ def represent_literal_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '|'
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ represent_preserved_scalarstring = represent_literal_scalarstring
+
+ def represent_folded_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '>'
+ anchor = data.yaml_anchor(any=True)
+ for fold_pos in reversed(getattr(data, 'fold_pos', [])):
+ if (
+ data[fold_pos] == ' '
+ and (fold_pos > 0 and not data[fold_pos - 1].isspace())
+ and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
+ ):
+ data = data[:fold_pos] + '\a' + data[fold_pos:]
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_single_quoted_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = "'"
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_double_quoted_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '"'
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_plain_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = ''
+ anchor = data.yaml_anchor(any=True)
+ tag = 'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def insert_underscore(self, prefix, s, underscore, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if underscore is None:
+ return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+ if underscore[0]:
+ sl = list(s)
+ pos = len(s) - underscore[0]
+ while pos > 0:
+ sl.insert(pos, '_')
+ pos -= underscore[0]
+ s = "".join(sl)
+ if underscore[1]:
+ s = '_' + s
+ if underscore[2]:
+ s += '_'
+ return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+
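The underscore triple used by insert_underscore unpacks as (digit-group spacing from the right, leading underscore?, trailing underscore?). A standalone rerun of the same logic, purely to illustrate the triple (the show helper is not part of the library API):

def show(prefix, s, underscore):
    # mirrors insert_underscore above, minus the node construction
    if underscore is None:
        return prefix + s
    if underscore[0]:
        sl = list(s)
        pos = len(s) - underscore[0]
        while pos > 0:
            sl.insert(pos, '_')
            pos -= underscore[0]
        s = ''.join(sl)
    if underscore[1]:
        s = '_' + s
    if underscore[2]:
        s += '_'
    return prefix + s

print(show('0x', '1fff', [2, False, False]))  # -> 0x1f_ff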
+ def represent_scalar_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ s = '{:0{}d}'.format(data, data._width)
+ else:
+ s = format(data, 'd')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore("", s, data._underscore, anchor=anchor)
+
+ def represent_binary_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}b}', that strips the zeros
+ s = '{:0{}b}'.format(data, data._width)
+ else:
+ s = format(data, 'b')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
+
+ def represent_octal_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}o}', that strips the zeros
+ s = '{:0{}o}'.format(data, data._width)
+ else:
+ s = format(data, 'o')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0o', s, data._underscore, anchor=anchor)
+
+ def represent_hex_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}x}', that strips the zeros
+ s = '{:0{}x}'.format(data, data._width)
+ else:
+ s = format(data, 'x')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+ def represent_hex_caps_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}X}', that strips the zeros
+ s = '{:0{}X}'.format(data, data._width)
+ else:
+ s = format(data, 'X')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+ def represent_scalar_float(self, data):
+ # type: (Any) -> Any
+        """reconstructing the original presentation of a float is considerably more involved"""
+ value = None
+ anchor = data.yaml_anchor(any=True)
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ if value:
+ return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
+ if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
+ # no exponent, but trailing dot
+ value = '{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data)))
+ elif data._exp is None:
+ # no exponent, "normal" dot
+ prec = data._prec
+ ms = data._m_sign if data._m_sign else ""
+ # -1 for the dot
+ value = '{}{:0{}.{}f}'.format(
+ ms, abs(data), data._width - len(ms), data._width - prec - 1
+ )
+ if prec == 0 or (prec == 1 and ms != ""):
+ value = value.replace('0.', '.')
+ while len(value) < data._width:
+ value += '0'
+ else:
+ # exponent
+ m, es = '{:{}.{}e}'.format(
+ # data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
+ data,
+ data._width,
+ data._width + (1 if data._m_sign else 0),
+ ).split('e')
+ w = data._width if data._prec > 0 else (data._width + 1)
+ if data < 0:
+ w += 1
+ m = m[:w]
+ e = int(es)
+ m1, m2 = m.split('.') # always second?
+ while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
+ m2 += '0'
+ if data._m_sign and data > 0:
+ m1 = '+' + m1
+ esgn = '+' if data._e_sign else ""
+ if data._prec < 0: # mantissa without dot
+ if m2 != '0':
+ e -= len(m2)
+ else:
+ m2 = ""
+ while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
+ m2 += '0'
+ e -= 1
+ value = m1 + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
+ elif data._prec == 0: # mantissa with trailing dot
+ e -= len(m2)
+ value = m1 + m2 + '.' + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
+ else:
+ if data._m_lead0 > 0:
+ m2 = '0' * (data._m_lead0 - 1) + m1 + m2
+ m1 = '0'
+ m2 = m2[: -data._m_lead0] # these should be zeros
+ e += data._m_lead0
+ while len(m1) < data._prec:
+ m1 += m2[0]
+ m2 = m2[1:]
+ e -= 1
+ value = m1 + '.' + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width)
+
+ if value is None:
+ value = repr(data).lower()
+ return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor)
+
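All of the _width/_prec/_exp bookkeeping above exists so that a float's original spelling can be reproduced on output. A sketch of the intended effect in round-trip mode (exact preservation depends on the attributes captured at load time):

import sys
from ruamel.yaml import YAML

yaml = YAML()  # round-trip mode by default
data = yaml.load('pi: 3.14000\nbig: 1.0e+3\n')
yaml.dump(data, sys.stdout)  # trailing zeros and the exponent form should survive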
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+        # if flow_style is None, the flow style explicitly attached to the
+        # object is used; if that is also None, the default flow style
+        # applies
+ try:
+ flow_style = sequence.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = sequence.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ try:
+ comment = getattr(sequence, comment_attrib)
+ node.comment = comment.comment
+            # reset any 'comment already printed' information
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ item_comments = comment.items
+ if node.comment is None:
+ node.comment = comment.comment
+ else:
+ # as we are potentially going to extend this, make a new list
+ node.comment = comment.comment[:]
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for idx, item in enumerate(sequence):
+ node_item = self.represent_data(item)
+ self.merge_comments(node_item, item_comments.get(idx))
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if len(sequence) != 0 and self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def merge_comments(self, node, comments):
+ # type: (Any, Any) -> Any
+ if comments is None:
+ assert hasattr(node, 'comment')
+ return node
+ if getattr(node, 'comment', None) is not None:
+ for idx, val in enumerate(comments):
+ if idx >= len(node.comment):
+ continue
+ nc = node.comment[idx]
+ if nc is not None:
+ assert val is None or val == nc
+ comments[idx] = nc
+ node.comment = comments
+ return node
+
+ def represent_key(self, data):
+ # type: (Any) -> Any
+ if isinstance(data, CommentedKeySeq):
+ self.alias_key = None
+ return self.represent_sequence('tag:yaml.org,2002:seq', data, flow_style=True)
+ if isinstance(data, CommentedKeyMap):
+ self.alias_key = None
+ return self.represent_mapping('tag:yaml.org,2002:map', data, flow_style=True)
+ return SafeRepresenter.represent_key(self, data)
+
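The represent_key override above is what lets flow-style collection keys survive a round trip. A quick sketch, assuming the default round-trip YAML() instance:

import sys
from ruamel.yaml import YAML

yaml = YAML()
data = yaml.load('? [x, y]\n: point\n')
yaml.dump(data, sys.stdout)  # the sequence key is re-emitted in flow style: [x, y]: point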
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ try:
+ flow_style = mapping.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = mapping.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+        # no sorting!!
+ try:
+ comment = getattr(mapping, comment_attrib)
+ if node.comment is None:
+ node.comment = comment.comment
+ else:
+ # as we are potentially going to extend this, make a new list
+ node.comment = comment.comment[:]
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ if self.dumper.comment_handling is None:
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ else:
+ # NEWCMNT
+ pass
+ except AttributeError:
+ item_comments = {}
+ merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
+ try:
+ merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
+ except IndexError:
+ merge_pos = 0
+ item_count = 0
+ if bool(merge_list):
+ items = mapping.non_merged_items()
+ else:
+ items = mapping.items()
+ for item_key, item_value in items:
+ item_count += 1
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(item_value)
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ # assert getattr(node_key, 'comment', None) is None
+                # issue 351 triggered this assert, because the comment from the
+                # list item had been moved to the dict
+ node_key.comment = item_comment[:2]
+ nvc = getattr(node_value, 'comment', None)
+ if nvc is not None: # end comment already there
+ nvc[0] = item_comment[2]
+ nvc[1] = item_comment[3]
+ else:
+ node_value.comment = item_comment[2:]
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ if bool(merge_list):
+ # because of the call to represent_data here, the anchors
+ # are marked as being used and thereby created
+ if len(merge_list) == 1:
+ arg = self.represent_data(merge_list[0])
+ else:
+ arg = self.represent_data(merge_list)
+ arg.flow_style = True
+ value.insert(merge_pos, (ScalarNode('tag:yaml.org,2002:merge', '<<'), arg))
+ return node
+
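The merge_list/merge_pos handling above is what re-inserts '<<' merge keys at their original position when dumping. A sketch in round-trip mode:

import sys
from ruamel.yaml import YAML

yaml = YAML()
data = yaml.load(
    'base: &defaults\n'
    '  retries: 3\n'
    'prod:\n'
    '  <<: *defaults\n'
    '  host: example.com\n'
)
yaml.dump(data, sys.stdout)  # the merge key and its anchor are reproduced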
+ def represent_omap(self, tag, omap, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ try:
+ flow_style = omap.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = omap.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ try:
+ comment = getattr(omap, comment_attrib)
+ if node.comment is None:
+ node.comment = comment.comment
+ else:
+ # as we are potentially going to extend this, make a new list
+ node.comment = comment.comment[:]
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for item_key in omap:
+ item_val = omap[item_key]
+ node_item = self.represent_data({item_key: item_val})
+ # node_item.flow_style = False
+ # node item has two scalars in value: node_key and node_value
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ if item_comment[1]:
+ node_item.comment = [None, item_comment[1]]
+ assert getattr(node_item.value[0][0], 'comment', None) is None
+ node_item.value[0][0].comment = [item_comment[0], None]
+ nvc = getattr(node_item.value[0][1], 'comment', None)
+ if nvc is not None: # end comment already there
+ nvc[0] = item_comment[2]
+ nvc[1] = item_comment[3]
+ else:
+ node_item.value[0][1].comment = item_comment[2:]
+ # if not (isinstance(node_item, ScalarNode) \
+ # and not node_item.style):
+ # best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_set(self, setting):
+ # type: (Any) -> Any
+ flow_style = False
+ tag = 'tag:yaml.org,2002:set'
+ # return self.represent_mapping(tag, value)
+ value = [] # type: List[Any]
+ flow_style = setting.fa.flow_style(flow_style)
+ try:
+ anchor = setting.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+        # no sorting!!
+ try:
+ comment = getattr(setting, comment_attrib)
+ if node.comment is None:
+ node.comment = comment.comment
+ else:
+ # as we are potentially going to extend this, make a new list
+ node.comment = comment.comment[:]
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for item_key in setting.odict:
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(None)
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ assert getattr(node_key, 'comment', None) is None
+ node_key.comment = item_comment[:2]
+ node_key.style = node_value.style = '?'
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ best_style = best_style
+ return node
+
+ def represent_dict(self, data):
+ # type: (Any) -> Any
+ """write out tag if saved on loading"""
+ try:
+ t = data.tag.value
+ except AttributeError:
+ t = None
+ if t:
+ if t.startswith('!!'):
+ tag = 'tag:yaml.org,2002:' + t[2:]
+ else:
+ tag = t
+ else:
+ tag = 'tag:yaml.org,2002:map'
+ return self.represent_mapping(tag, data)
+
+ def represent_list(self, data):
+ # type: (Any) -> Any
+ try:
+ t = data.tag.value
+ except AttributeError:
+ t = None
+ if t:
+ if t.startswith('!!'):
+ tag = 'tag:yaml.org,2002:' + t[2:]
+ else:
+ tag = t
+ else:
+ tag = 'tag:yaml.org,2002:seq'
+ return self.represent_sequence(tag, data)
+
+ def represent_datetime(self, data):
+ # type: (Any) -> Any
+ inter = 'T' if data._yaml['t'] else ' '
+ _yaml = data._yaml
+ if _yaml['delta']:
+ data += _yaml['delta']
+ value = data.isoformat(inter)
+ else:
+ value = data.isoformat(inter)
+ if _yaml['tz']:
+ value += _yaml['tz']
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_tagged_scalar(self, data):
+ # type: (Any) -> Any
+ try:
+ tag = data.tag.value
+ except AttributeError:
+ tag = None
+ try:
+ anchor = data.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
+
+ def represent_scalar_bool(self, data):
+ # type: (Any) -> Any
+ try:
+ anchor = data.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ return SafeRepresenter.represent_bool(self, data, anchor=anchor)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ # type: (Any, Any, Any, Optional[Any]) -> Any
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ anchor = state.pop(Anchor.attrib, None)
+ res = self.represent_mapping(tag, state, flow_style=flow_style)
+ if anchor is not None:
+ res.anchor = anchor
+ return res
+
+
+RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
+
+RoundTripRepresenter.add_representer(
+ LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
+)
+
+# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
+
+RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)
+
+RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)
+
+RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
+
+RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
+
+RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)
+
+RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)
+
+RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool)
+
+RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
+
+RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
+
+RoundTripRepresenter.add_representer(
+ CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
+)
+
+if sys.version_info >= (2, 7):
+ import collections
+
+ RoundTripRepresenter.add_representer(
+ collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
+ )
+
+RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
+
+RoundTripRepresenter.add_representer(
+ TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
+)
+
+RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
diff --git a/lib/spack/external/ruamel/yaml/resolver.py b/lib/spack/external/_vendoring/ruamel/yaml/resolver.py
index 84227072e0..969b9a928d 100644
--- a/lib/spack/external/ruamel/yaml/resolver.py
+++ b/lib/spack/external/_vendoring/ruamel/yaml/resolver.py
@@ -1,54 +1,166 @@
# coding: utf-8
-from __future__ import absolute_import
-
import re
-try:
- from .error import * # NOQA
- from .nodes import * # NOQA
- from .compat import string_types
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import * # NOQA
- from ruamel.yaml.nodes import * # NOQA
- from ruamel.yaml.compat import string_types
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Optional # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+from ruamel.yaml.compat import _DEFAULT_YAML_VERSION, _F # NOQA
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA
+from ruamel.yaml.util import RegExp # NOQA
__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
-_DEFAULT_VERSION = (1, 2)
+# fmt: off
+# resolvers consist of
+# - a list of applicable versions
+# - a tag
+# - a regexp
+# - a list of first characters to match
+implicit_resolvers = [
+ ([(1, 2)],
+ 'tag:yaml.org,2002:bool',
+ RegExp('''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
+ list('tTfF')),
+ ([(1, 1)],
+ 'tag:yaml.org,2002:bool',
+ RegExp('''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO')),
+ ([(1, 2)],
+ 'tag:yaml.org,2002:float',
+ RegExp('''^(?:
+ [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+ |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+ |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?\\.(?:inf|Inf|INF)
+ |\\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.')),
+ ([(1, 1)],
+ 'tag:yaml.org,2002:float',
+ RegExp('''^(?:
+ [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+ |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+ |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float
+ |[-+]?\\.(?:inf|Inf|INF)
+ |\\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.')),
+ ([(1, 2)],
+ 'tag:yaml.org,2002:int',
+ RegExp('''^(?:[-+]?0b[0-1_]+
+ |[-+]?0o?[0-7_]+
+ |[-+]?[0-9_]+
+ |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
+ list('-+0123456789')),
+ ([(1, 1)],
+ 'tag:yaml.org,2002:int',
+ RegExp('''^(?:[-+]?0b[0-1_]+
+ |[-+]?0?[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int
+ list('-+0123456789')),
+ ([(1, 2), (1, 1)],
+ 'tag:yaml.org,2002:merge',
+ RegExp('^(?:<<)$'),
+ ['<']),
+ ([(1, 2), (1, 1)],
+ 'tag:yaml.org,2002:null',
+ RegExp('''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', '']),
+ ([(1, 2), (1, 1)],
+ 'tag:yaml.org,2002:timestamp',
+ RegExp('''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \\t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+ (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789')),
+ ([(1, 2), (1, 1)],
+ 'tag:yaml.org,2002:value',
+ RegExp('^(?:=)$'),
+ ['=']),
+ # The following resolver is only for documentation purposes. It cannot work
+ # because plain scalars cannot start with '!', '&', or '*'.
+ ([(1, 2), (1, 1)],
+ 'tag:yaml.org,2002:yaml',
+ RegExp('^(?:!|&|\\*)$'),
+ list('!&*')),
+]
+# fmt: on
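Because each entry above carries the list of YAML versions it applies to, the same plain scalar can resolve to different tags depending on the document's declared version. A sketch using a %YAML directive (round-trip loader assumed):

from ruamel.yaml import YAML

yaml = YAML()
print(type(yaml.load('flag: yes\n')['flag']))                  # str: 1.2 drops yes/no bools
print(type(yaml.load('%YAML 1.1\n---\nflag: yes\n')['flag']))  # bool under the 1.1 rules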
class ResolverError(YAMLError):
pass
-class BaseResolver(object):
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {} # type: Dict[Any, Any]
+ yaml_path_resolvers = {} # type: Dict[Any, Any]
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+ def __init__(self, loadumper=None):
+ # type: (Any, Any) -> None
+ self.loadumper = loadumper
+ if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
+ self.loadumper._resolver = self.loadumper
+ self._loader_version = None # type: Any
+ self.resolver_exact_paths = [] # type: List[Any]
+ self.resolver_prefix_paths = [] # type: List[Any]
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
+ @property
+ def parser(self):
+ # type: () -> Any
+ if self.loadumper is not None:
+ if hasattr(self.loadumper, 'typ'):
+ return self.loadumper.parser
+ return self.loadumper._parser
+ return None
- def __init__(self):
- self._loader_version = None
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
+ @classmethod
+ def add_implicit_resolver_base(cls, tag, regexp, first):
+ # type: (Any, Any, Any) -> None
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ # deepcopy doesn't work here
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
+ # type: (Any, Any, Any) -> None
if 'yaml_implicit_resolvers' not in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ # deepcopy doesn't work here
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
if first is None:
first = [None]
for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append(
- (tag, regexp))
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
+
+ # @classmethod
+ # def add_implicit_resolver(cls, tag, regexp, first):
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
+ # type: (Any, Any, Any) -> None
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
@@ -63,7 +175,7 @@ class BaseResolver(object):
# against a sequence value with the index equal to `index_check`.
if 'yaml_path_resolvers' not in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
+ new_path = [] # type: List[Any]
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
@@ -72,7 +184,9 @@ class BaseResolver(object):
node_check = element[0]
index_check = True
else:
- raise ResolverError("Invalid path element: %s" % element)
+ raise ResolverError(
+ _F('Invalid path element: {element!s}', element=element)
+ )
else:
node_check = None
index_check = element
@@ -82,13 +196,18 @@ class BaseResolver(object):
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, string_types) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (string_types, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
+ elif (
+ node_check not in [ScalarNode, SequenceNode, MappingNode]
+ and not isinstance(node_check, str)
+ and node_check is not None
+ ):
+ raise ResolverError(
+ _F('Invalid node checker: {node_check!s}', node_check=node_check)
+ )
+ if not isinstance(index_check, (str, int)) and index_check is not None:
+ raise ResolverError(
+ _F('Invalid index checker: {index_check!s}', index_check=index_check)
+ )
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
@@ -96,12 +215,12 @@ class BaseResolver(object):
kind = SequenceNode
elif kind is dict:
kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
+ raise ResolverError(_F('Invalid node kind: {kind!s}', kind=kind))
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
+ # type: (Any, Any) -> None
if not self.yaml_path_resolvers:
return
exact_paths = {}
@@ -109,13 +228,11 @@ class BaseResolver(object):
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
+ if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
- exact_paths[kind] = self.yaml_path_resolvers[path,
- kind]
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
@@ -126,39 +243,40 @@ class BaseResolver(object):
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
+ # type: () -> None
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, string_types):
+ def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
+ # type: (int, Any, Any, Any, Any) -> bool
+ node_check, index_check = path[depth - 1]
+ if isinstance(node_check, str):
if current_node.tag != node_check:
- return
+ return False
elif node_check is not None:
if not isinstance(current_node, node_check):
- return
+ return False
if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, string_types):
- if not (isinstance(current_index, ScalarNode) and
- index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check,
- bool):
+ return False
+ if (index_check is False or index_check is None) and current_index is None:
+ return False
+ if isinstance(index_check, str):
+ if not (
+ isinstance(current_index, ScalarNode) and index_check == current_index.value
+ ):
+ return False
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
- return
+ return False
return True
def resolve(self, kind, value, implicit):
+ # type: (Any, Any, Any) -> Any
if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ if value == "":
+ resolvers = self.yaml_implicit_resolvers.get("", [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
@@ -166,7 +284,7 @@ class BaseResolver(object):
if regexp.match(value):
return tag
implicit = implicit[1]
- if self.yaml_path_resolvers:
+ if bool(self.yaml_path_resolvers):
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
@@ -181,158 +299,37 @@ class BaseResolver(object):
@property
def processing_version(self):
+ # type: () -> Any
return None
class Resolver(BaseResolver):
pass
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(u'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(u'''^(?:
- [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
- |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
- |\\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
- |[-+]?\\.(?:inf|Inf|INF)
- |\\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(u'''^(?:[-+]?0b[0-1_]+
- |[-+]?0o?[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(u'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(u'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \\t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
- (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(u'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(u'^(?:!|&|\\*)$'),
- list(u'!&*'))
-# resolvers consist of
-# - a list of applicable version
-# - a tag
-# - a regexp
-# - a list of first characters to match
-implicit_resolvers = [
- ([(1, 2)],
- u'tag:yaml.org,2002:bool',
- re.compile(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
- list(u'tTfF')),
- ([(1, 1)],
- u'tag:yaml.org,2002:bool',
- re.compile(u'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO')),
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:float',
- re.compile(u'''^(?:
- [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
- |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
- |\\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
- |[-+]?\\.(?:inf|Inf|INF)
- |\\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.')),
- ([(1, 2)],
- u'tag:yaml.org,2002:int',
- re.compile(u'''^(?:[-+]?0b[0-1_]+
- |[-+]?0o?[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
- list(u'-+0123456789')),
- ([(1, 1)],
- u'tag:yaml.org,2002:int',
- re.compile(u'''^(?:[-+]?0b[0-1_]+
- |[-+]?0o?[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789')),
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:merge',
- re.compile(u'^(?:<<)$'),
- [u'<']),
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:null',
- re.compile(u'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u'']),
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:timestamp',
- re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \\t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
- (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789')),
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:value',
- re.compile(u'^(?:=)$'),
- [u'=']),
- # The following resolver is only for documentation purposes. It cannot work
- # because plain scalars cannot start with '!', '&', or '*'.
- ([(1, 2), (1, 1)],
- u'tag:yaml.org,2002:yaml',
- re.compile(u'^(?:!|&|\\*)$'),
- list(u'!&*')),
-]
+for ir in implicit_resolvers:
+ if (1, 2) in ir[0]:
+ Resolver.add_implicit_resolver_base(*ir[1:])
class VersionedResolver(BaseResolver):
"""
contrary to the "normal" resolver, the smart resolver delays loading
the pattern matching rules. That way it can decide to load 1.1 rules
- or the (default) 1.2 that no longer support octal without 0o, sexagesimals
+    or the (default) 1.2 rules, which no longer support octal without a 0o prefix, sexagesimals,
and Yes/No/On/Off booleans.
"""
- def __init__(self, version=None):
- BaseResolver.__init__(self)
+ def __init__(self, version=None, loader=None, loadumper=None):
+ # type: (Optional[VersionType], Any, Any) -> None
+ if loader is None and loadumper is not None:
+ loader = loadumper
+ BaseResolver.__init__(self, loader)
self._loader_version = self.get_loader_version(version)
- self._version_implicit_resolver = {}
+ self._version_implicit_resolver = {} # type: Dict[Any, Any]
def add_version_implicit_resolver(self, version, tag, regexp, first):
+ # type: (VersionType, Any, Any, Any) -> None
if first is None:
first = [None]
impl_resolver = self._version_implicit_resolver.setdefault(version, {})
@@ -340,19 +337,23 @@ class VersionedResolver(BaseResolver):
impl_resolver.setdefault(ch, []).append((tag, regexp))
def get_loader_version(self, version):
+ # type: (Optional[VersionType]) -> Any
if version is None or isinstance(version, tuple):
return version
if isinstance(version, list):
return tuple(version)
# assume string
- return tuple(map(int, version.split(u'.')))
+ return tuple(map(int, version.split('.')))
@property
- def resolver(self):
+ def versioned_resolver(self):
+ # type: () -> Any
"""
select the resolver based on the version we are parsing
"""
version = self.processing_version
+ if isinstance(version, str):
+ version = tuple(map(int, version.split('.')))
if version not in self._version_implicit_resolver:
for x in implicit_resolvers:
if version in x[0]:
@@ -360,17 +361,18 @@ class VersionedResolver(BaseResolver):
return self._version_implicit_resolver[version]
def resolve(self, kind, value, implicit):
+ # type: (Any, Any, Any) -> Any
if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.resolver.get(u'', [])
+ if value == "":
+ resolvers = self.versioned_resolver.get("", [])
else:
- resolvers = self.resolver.get(value[0], [])
- resolvers += self.resolver.get(None, [])
+ resolvers = self.versioned_resolver.get(value[0], [])
+ resolvers += self.versioned_resolver.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
- if self.yaml_path_resolvers:
+ if bool(self.yaml_path_resolvers):
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
@@ -385,13 +387,19 @@ class VersionedResolver(BaseResolver):
@property
def processing_version(self):
+ # type: () -> Any
try:
- version = self.yaml_version
+ version = self.loadumper._scanner.yaml_version
except AttributeError:
- # dumping
- version = self.use_version
+ try:
+ if hasattr(self.loadumper, 'typ'):
+ version = self.loadumper.version
+ else:
+ version = self.loadumper._serializer.use_version # dumping
+ except AttributeError:
+ version = None
if version is None:
version = self._loader_version
if version is None:
- version = _DEFAULT_VERSION
+ version = _DEFAULT_YAML_VERSION
return version
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/scalarbool.py b/lib/spack/external/_vendoring/ruamel/yaml/scalarbool.py
new file mode 100644
index 0000000000..60242b4383
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/scalarbool.py
@@ -0,0 +1,47 @@
+# coding: utf-8
+
+"""
+You cannot subclass bool, yet subclassing is necessary for round-tripping anchored
+bool values (and also if you want to preserve the original way of writing).
+
+bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well.
+
+You can use these in an if statement, but not when testing identity or exact type
+(a ScalarBoolean is an int subclass, not a bool)
+"""
+
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarBoolean']
+
+
+class ScalarBoolean(int):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ anchor = kw.pop('anchor', None)
+ b = int.__new__(cls, *args, **kw)
+ if anchor is not None:
+ b.yaml_set_anchor(anchor, always_dump=True)
+ return b
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
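A quick sketch of the trade-off the module docstring describes: a ScalarBoolean behaves like a bool in boolean contexts, but identity and type checks see an int subclass, while the anchor is kept for round-tripping:

from ruamel.yaml.scalarbool import ScalarBoolean

b = ScalarBoolean(True, anchor='flag')
if b:                                  # truthiness works (nonzero int)
    print('truthy')
print(b == True)                       # True: plain int equality
print(b is True, isinstance(b, bool))  # False False: int subclass, not bool
print(b.yaml_anchor().value)           # 'flag', dumped because always_dump is set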
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/scalarfloat.py b/lib/spack/external/_vendoring/ruamel/yaml/scalarfloat.py
new file mode 100644
index 0000000000..b9f8bdfb88
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/scalarfloat.py
@@ -0,0 +1,124 @@
+# coding: utf-8
+
+import sys
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
+
+
+class ScalarFloat(float):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ width = kw.pop('width', None)
+ prec = kw.pop('prec', None)
+ m_sign = kw.pop('m_sign', None)
+ m_lead0 = kw.pop('m_lead0', 0)
+ exp = kw.pop('exp', None)
+ e_width = kw.pop('e_width', None)
+ e_sign = kw.pop('e_sign', None)
+ underscore = kw.pop('underscore', None)
+ anchor = kw.pop('anchor', None)
+ v = float.__new__(cls, *args, **kw)
+ v._width = width
+ v._prec = prec
+ v._m_sign = m_sign
+ v._m_lead0 = m_lead0
+ v._exp = exp
+ v._e_width = e_width
+ v._e_sign = e_sign
+ v._underscore = underscore
+ if anchor is not None:
+ v.yaml_set_anchor(anchor, always_dump=True)
+ return v
+
+ def __iadd__(self, a): # type: ignore
+ # type: (Any) -> Any
+        # NOTE: this early return disables format preservation; the
+        # width/underscore copying below is unreachable (the other in-place
+        # operators of this class follow the same pattern)
+        return float(self) + a
+ x = type(self)(self + a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __ifloordiv__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) // a
+ x = type(self)(self // a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __imul__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) * a
+ x = type(self)(self * a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ x._prec = self._prec # check for others
+ return x
+
+ def __ipow__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) ** a
+ x = type(self)(self ** a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __isub__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) - a
+ x = type(self)(self - a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+ def dump(self, out=sys.stdout):
+ # type: (Any) -> Any
+ out.write(
+ 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format(
+ self,
+ self._width, # type: ignore
+ self._prec, # type: ignore
+ self._m_sign, # type: ignore
+ self._m_lead0, # type: ignore
+ self._underscore, # type: ignore
+ self._exp, # type: ignore
+ self._e_width, # type: ignore
+ self._e_sign, # type: ignore
+ )
+ )
+
+
+class ExponentialFloat(ScalarFloat):
+ def __new__(cls, value, width=None, underscore=None):
+ # type: (Any, Any, Any) -> Any
+ return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
+
+
+class ExponentialCapsFloat(ScalarFloat):
+ def __new__(cls, value, width=None, underscore=None):
+ # type: (Any, Any, Any) -> Any
+ return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/scalarint.py b/lib/spack/external/_vendoring/ruamel/yaml/scalarint.py
new file mode 100644
index 0000000000..1572b0f16c
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/scalarint.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']
+
+
+class ScalarInt(int):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ width = kw.pop('width', None)
+ underscore = kw.pop('underscore', None)
+ anchor = kw.pop('anchor', None)
+ v = int.__new__(cls, *args, **kw)
+ v._width = width
+ v._underscore = underscore
+ if anchor is not None:
+ v.yaml_set_anchor(anchor, always_dump=True)
+ return v
+
+ def __iadd__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self + a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __ifloordiv__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self // a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __imul__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self * a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __ipow__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self ** a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __isub__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self - a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+
+class BinaryInt(ScalarInt):
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class OctalInt(ScalarInt):
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+# Mixed casing of A-F is not supported; when loading, the first
+# non-digit determines the case.
+
+
+class HexInt(ScalarInt):
+ """uses lower case (a-f)"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class HexCapsInt(ScalarInt):
+ """uses upper case (A-F)"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class DecimalInt(ScalarInt):
+ """needed if anchor"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
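+
+
+# Illustrative usage sketch (not part of the vendored module): the
+# subclasses let the representer re-emit an int in the notation it was
+# read in, with width recording the zero-padded digit count, e.g.
+#
+#     from ruamel.yaml.scalarint import HexInt
+#     n = HexInt(255, width=4)   # would typically dump as 0x00ff
+#     n + 1                      # plain int 256; formatting dropped
+#     n += 1                     # __iadd__ returns a HexInt, width kept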
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/scalarstring.py b/lib/spack/external/_vendoring/ruamel/yaml/scalarstring.py
new file mode 100644
index 0000000000..7538ab7155
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/scalarstring.py
@@ -0,0 +1,152 @@
+# coding: utf-8
+
+from ruamel.yaml.anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = [
+ 'ScalarString',
+ 'LiteralScalarString',
+ 'FoldedScalarString',
+ 'SingleQuotedScalarString',
+ 'DoubleQuotedScalarString',
+ 'PlainScalarString',
+ # PreservedScalarString is the old name, as it was the first scalar type to be preserved on round-trip;
+ # use LiteralScalarString instead
+ 'PreservedScalarString',
+]
+
+
+class ScalarString(str):
+ __slots__ = Anchor.attrib
+
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any) -> Any
+ anchor = kw.pop('anchor', None)
+ ret_val = str.__new__(cls, *args, **kw)
+ if anchor is not None:
+ ret_val.yaml_set_anchor(anchor, always_dump=True)
+ return ret_val
+
+ def replace(self, old, new, maxreplace=-1):
+ # type: (Any, Any, int) -> Any
+ return type(self)(str.replace(self, old, new, maxreplace))
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+
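+# Illustrative sketch (not part of the vendored module): the anchor API
+# attaches an explicit YAML anchor to a scalar, e.g.
+#
+#     s = DoubleQuotedScalarString('hello', anchor='greet')
+#     s.yaml_anchor()          # Anchor 'greet', dumped even if unused
+#     s.yaml_set_anchor('hi')  # rename; always_dump is now False
+#     s.yaml_anchor()          # None -- only dumped on demand
+#     s.yaml_anchor(any=True)  # the Anchor object regardless
+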
+class LiteralScalarString(ScalarString):
+ __slots__ = 'comment' # the comment after the | on the first line
+
+ style = '|'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+PreservedScalarString = LiteralScalarString
+
+
+class FoldedScalarString(ScalarString):
+ __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line
+
+ style = '>'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class SingleQuotedScalarString(ScalarString):
+ __slots__ = ()
+
+ style = "'"
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class DoubleQuotedScalarString(ScalarString):
+ __slots__ = ()
+
+ style = '"'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class PlainScalarString(ScalarString):
+ __slots__ = ()
+
+ style = ''
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+def preserve_literal(s):
+ # type: (Text) -> Text
+ return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n'))
+
+
+def walk_tree(base, map=None):
+ # type: (Any, Any) -> None
+ """
+ the routine walks over a simple YAML tree (recursing into dict
+ values and list items) and converts strings that have multiple
+ lines to literal scalars
+
+ You can also provide an explicit (ordered) mapping of transforms;
+ the first transform whose key occurs in the string is applied:
+ map = ruamel.yaml.compat.ordereddict
+ map['\n'] = preserve_literal
+ map[':'] = SingleQuotedScalarString
+ walk_tree(data, map=map)
+ """
+ from collections.abc import MutableMapping, MutableSequence
+
+ if map is None:
+ map = {'\n': preserve_literal}
+
+ if isinstance(base, MutableMapping):
+ for k in base:
+ v = base[k] # type: Text
+ if isinstance(v, str):
+ for ch in map:
+ if ch in v:
+ base[k] = map[ch](v)
+ break
+ else:
+ walk_tree(v, map=map)
+ elif isinstance(base, MutableSequence):
+ for idx, elem in enumerate(base):
+ if isinstance(elem, str):
+ for ch in map:
+ if ch in elem:
+ base[idx] = map[ch](elem)
+ break
+ else:
+ walk_tree(elem, map=map)
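+
+
+# Illustrative sketch (not part of the vendored module):
+#
+#     data = {'a': 'one\ntwo\n', 'b': 'flat'}
+#     walk_tree(data)
+#
+# data['a'] is now a LiteralScalarString (dumped in '|' block style with
+# its newlines preserved), while data['b'] remains a plain str.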
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/scanner.py b/lib/spack/external/_vendoring/ruamel/yaml/scanner.py
new file mode 100644
index 0000000000..61cae63103
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/scanner.py
@@ -0,0 +1,2444 @@
+# coding: utf-8
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# RoundTripScanner
+# COMMENT(value)
+#
+# Read comments in the Scanner code for more details.
+#
+
+import inspect
+from ruamel.yaml.error import MarkedYAMLError, CommentMark # NOQA
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.compat import _F, check_anchorname_char, nprint, nprintf # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Text # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
+
+
+_THE_END = '\n\0\r\x85\u2028\u2029'
+_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029'
+_SPACE_TAB = ' \t'
+
+
+def xprintf(*args, **kw):
+ # type: (Any, Any) -> Any
+ return nprintf(*args, **kw)
+
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ # type: (Any, Any, int, int, int, Any) -> None
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+
+class Scanner:
+ def __init__(self, loader=None):
+ # type: (Any) -> None
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds a NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer
+
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_scanner', None) is None:
+ self.loader._scanner = self
+ self.reset_scanner()
+ self.first_time = False
+ self.yaml_version = None # type: Any
+
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ def reset_scanner(self):
+ # type: () -> None
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # flow_context is an expanding/shrinking list consisting of '{' and '['
+ # for each unclosed flow context; an empty list means block context.
+ self.flow_context = [] # type: List[Text]
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = [] # type: List[Any]
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = [] # type: List[int]
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {} # type: Dict[Any, Any]
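+
+ # For example, while scanning "a: 1" the plain scalar 'a' is saved
+ # as a possible simple key; when the following ':' is fetched,
+ # fetch_value() retroactively inserts a KEY token at the saved
+ # token number.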
+
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._scanner_reader # type: ignore
+ except AttributeError:
+ if hasattr(self.loader, 'typ'):
+ self._scanner_reader = self.loader.reader
+ else:
+ self._scanner_reader = self.loader._reader
+ return self._scanner_reader
+
+ @property
+ def scanner_processing_version(self): # prefix until un-composited
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver.processing_version
+ return self.loader.processing_version
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # type: (Any) -> bool
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # type: () -> Any
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ return self.tokens[0]
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ # type: () -> bool
+ if self.done:
+ return False
+ if len(self.tokens) == 0:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+ return False
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ raise NotImplementedError
+
+ def fetch_more_tokens(self):
+ # type: () -> Any
+ # Eat whitespaces and comments until we reach the next token.
+ comment = self.scan_to_next_token()
+ if comment is not None: # never happens for base scanner
+ return self.fetch_comment(comment)
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.reader.column)
+
+ # Peek the next character.
+ ch = self.reader.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ # if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == "'":
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError(
+ 'while scanning for the next token',
+ None,
+ _F('found character {ch!r} that cannot start any token', ch=ch),
+ self.reader.get_mark(),
+ )
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # type: () -> Any
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # type: () -> None
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.reader.line or self.reader.index - key.index > 1024:
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # type: () -> None
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.reader.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken + len(self.tokens)
+ key = SimpleKey(
+ token_number,
+ required,
+ self.reader.index,
+ self.reader.line,
+ self.reader.column,
+ self.reader.get_mark(),
+ )
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # type: () -> None
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+ # type: (Any) -> None
+ # In flow context, tokens should respect indentation.
+ # Actually the condition should be `self.indent >= column` according to
+ # the spec. But this condition will prohibit intuitively correct
+ # constructions such as
+ # key : {
+ # }
+ # ####
+ # if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.reader.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if bool(self.flow_level):
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.reader.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # type: (int) -> bool
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # type: () -> None
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+ def fetch_stream_end(self):
+ # type: () -> None
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+ # type: () -> None
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+ # type: (Any) -> None
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward(3)
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[')
+
+ def fetch_flow_mapping_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{')
+
+ def fetch_flow_collection_start(self, TokenClass, to_push):
+ # type: (Any, Text) -> None
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+ # Increase the flow level.
+ self.flow_context.append(to_push)
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+ # type: (Any) -> None
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Decrease the flow level.
+ try:
+ popped = self.flow_context.pop() # NOQA
+ except IndexError:
+ # We are not inside a flow sequence or mapping;
+ # defer error handling to the parser.
+ pass
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+ # type: () -> None
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Add FLOW-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'sequence entries are not allowed here', self.reader.get_mark()
+ )
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'mapping keys are not allowed here', self.reader.get_mark()
+ )
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+ # type: () -> None
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(
+ key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark)
+ )
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(
+ key.token_number - self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark),
+ )
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None,
+ None,
+ 'mapping values are not allowed here',
+ self.reader.get_mark(),
+ )
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+ # type: () -> None
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+ # type: () -> None
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+ # type: () -> None
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+ # type: (Any) -> None
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style="'")
+
+ def fetch_double(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+ # type: (Any) -> None
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+ # type: () -> None
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+ # type: () -> Any
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.reader.column == 0:
+ return True
+ return None
+
+ def check_document_start(self):
+ # type: () -> Any
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_document_end(self):
+ # type: () -> Any
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_block_entry(self):
+ # type: () -> Any
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_key(self):
+ # type: () -> Any
+ # KEY(flow context): '?'
+ if bool(self.flow_level):
+ return True
+ # KEY(block context): '?' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_value(self):
+ # type: () -> Any
+ # VALUE(flow context): ':'
+ if self.scanner_processing_version == (1, 1):
+ if bool(self.flow_level):
+ return True
+ else:
+ if bool(self.flow_level):
+ if self.flow_context[-1] == '[':
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ elif self.tokens and isinstance(self.tokens[-1], ValueToken):
+ # mapping flow context scanning a value token
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ return True
+ # VALUE(block context): ':' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
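+ # For example, under YAML 1.2 the flow sequence "[a:1]" contains the
+ # single plain scalar "a:1" (the ':' is not followed by a space or
+ # line end), whereas under YAML 1.1 any ':' in flow context acts as
+ # a value indicator.
+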
+ def check_plain(self):
+ # type: () -> Any
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ srp = self.reader.peek
+ ch = srp()
+ if self.scanner_processing_version == (1, 1):
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or (
+ srp(1) not in _THE_END_SPACE_TAB
+ and (ch == '-' or (not self.flow_level and ch in '?:'))
+ )
+ # YAML 1.2
+ if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`':
+ return True
+ ch1 = srp(1)
+ if ch == '-' and ch1 not in _THE_END_SPACE_TAB:
+ return True
+ if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB:
+ return True
+
+ return srp(1) not in _THE_END_SPACE_TAB and (
+ ch == '-' or (not self.flow_level and ch in '?:')
+ )
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # type: () -> Any
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ _the_end = _THE_END
+ while not found:
+ while srp() == ' ':
+ srf()
+ if srp() == '#':
+ while srp() not in _the_end:
+ srf()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+ return None
+
+ def scan_directive(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ start_mark = self.reader.get_mark()
+ srf()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ else:
+ end_mark = self.reader.get_mark()
+ while srp() not in _THE_END:
+ srf()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ length = 0
+ srp = self.reader.peek
+ ch = srp(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.':
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ major = self.scan_yaml_directive_number(start_mark)
+ if srp() != '.':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()),
+ self.reader.get_mark(),
+ )
+ srf()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if srp() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()),
+ self.reader.get_mark(),
+ )
+ self.yaml_version = (major, minor)
+ return self.yaml_version
+
+ def scan_yaml_directive_number(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ ch = srp()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F('expected a digit, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ length = 0
+ while '0' <= srp(length) <= '9':
+ length += 1
+ value = int(self.reader.prefix(length))
+ srf(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while srp() == ' ':
+ srf()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.reader.peek()
+ if ch != ' ':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F("expected ' ', but found {ch!r}", ch=ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.reader.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F("expected ' ', but found {ch!r}", ch=ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # type: (Any) -> None
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ if srp() == '#':
+ while srp() not in _THE_END:
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ _F('expected a comment or a line break, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # type: (Any) -> Any
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ indicator = srp()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.reader.forward()
+ length = 0
+ ch = srp(length)
+ # while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ # or ch in '-_':
+ while check_anchorname_char(ch):
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name),
+ start_mark,
+ _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ # ch1 = ch
+ # ch = srp() # no need to peek, ch is already set
+ # assert ch1 == ch
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name),
+ start_mark,
+ _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ end_mark = self.reader.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ ch = srp(1)
+ if ch == '<':
+ handle = None
+ self.reader.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if srp() != '>':
+ raise ScannerError(
+ 'while parsing a tag',
+ start_mark,
+ _F("expected '>', but found {srp_call!r}", srp_call=srp()),
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in _THE_END_SPACE_TAB:
+ handle = None
+ suffix = '!'
+ self.reader.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = srp(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.reader.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a tag',
+ start_mark,
+ _F("expected ' ', but found {ch!r}", ch=ch),
+ self.reader.get_mark(),
+ )
+ value = (handle, suffix)
+ end_mark = self.reader.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style, rt=False):
+ # type: (Any, Optional[bool]) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+
+ # Scan the header.
+ self.reader.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ # block scalar comment e.g. : |+ # comment text
+ block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent + 1
+ if increment is None:
+ # no increment and top level, min_indent could be 0
+ if min_indent < 1 and (
+ style not in '|>'
+ or (self.scanner_processing_version == (1, 1))
+ and getattr(
+ self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False
+ )
+ ):
+ min_indent = 1
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ if min_indent < 1:
+ min_indent = 1
+ indent = min_indent + increment - 1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ""
+
+ # Scan the inner part of the block scalar.
+ while self.reader.column == indent and srp() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = srp() not in ' \t'
+ length = 0
+ while srp(length) not in _THE_END:
+ length += 1
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if style in '|>' and min_indent == 0:
+ # at the beginning of a line in block style, check whether a
+ # new document starts or the current one ends
+ if self.check_document_start() or self.check_document_end():
+ break
+ if self.reader.column == indent and srp() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if rt and folded and line_break == '\n':
+ chunks.append('\a')
+ if folded and line_break == '\n' and leading_non_space and srp() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ # if folded and line_break == '\n':
+ # if not breaks:
+ # if srp() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ # else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Process trailing line breaks. The 'chomping' setting determines
+ # whether they are included in the value.
+ trailing = [] # type: List[Any]
+ if chomping in [None, True]:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+ elif chomping in [None, False]:
+ trailing.extend(breaks)
+
+ # We are done.
+ token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', False)
+ if comment_handler is None:
+ if block_scalar_comment is not None:
+ token.add_pre_comments([block_scalar_comment])
+ if len(trailing) > 0:
+ # Eat whitespaces and comments until we reach the next token.
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', None)
+ if comment_handler is not None:
+ line = end_mark.line - len(trailing)
+ for x in trailing:
+ assert x[-1] == '\n'
+ self.comments.add_blank_line(x, 0, line) # type: ignore
+ line += 1
+ comment = self.scan_to_next_token()
+ while comment:
+ trailing.append(' ' * comment[1].column + comment[0])
+ comment = self.scan_to_next_token()
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', False)
+ if comment_handler is None:
+ # Keep track of the trailing whitespace and following comments
+ # as a comment token, if it isn't all included in the actual value.
+ comment_end_mark = self.reader.get_mark()
+ comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
+ token.add_post_comment(comment)
+ return token
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ chomping = None
+ increment = None
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ _F('expected chomping or indentation indicators, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ return chomping, increment
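+
+ # For example the header "|2-" yields (chomping=False, increment=2):
+ # strip trailing line breaks and use an explicit indent increment of
+ # 2; "|+" yields (chomping=True, increment=None): keep trailing breaks.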
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ prefix = ''
+ comment = None
+ while srp() == ' ':
+ prefix += srp()
+ srf()
+ if srp() == '#':
+ comment = prefix
+ while srp() not in _THE_END:
+ comment += srp()
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ _F('expected a comment or a line break, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+ return comment
+
+ def scan_block_scalar_indentation(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = []
+ max_indent = 0
+ end_mark = self.reader.get_mark()
+ while srp() in ' \r\n\x85\u2028\u2029':
+ if srp() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.reader.get_mark()
+ else:
+ srf()
+ if self.reader.column > max_indent:
+ max_indent = self.reader.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # type: (int) -> Any
+ # See the specification for details.
+ chunks = []
+ srp = self.reader.peek
+ srf = self.reader.forward
+ end_mark = self.reader.get_mark()
+ while self.reader.column < indent and srp() == ' ':
+ srf()
+ while srp() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.reader.get_mark()
+ while self.reader.column < indent and srp() == ' ':
+ srf()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # type: (Any) -> Any
+ # See the specification for details.
+ # Note that we loosen the indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark their beginning and end. Therefore we are less restrictive than
+ # the specification requires. We only need to check that document
+ # separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ srp = self.reader.peek
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+ quote = srp()
+ self.reader.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while srp() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '"': '"',
+ '/': '/', # as per http://www.json.org/
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
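+ # e.g. '\x41' decodes to 'A' (2 hex digits), '\u263a' to U+263A
+ # (4 digits), and '\U0001F600' to that code point (8 digits)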
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ chunks = [] # type: List[Any]
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while True:
+ length = 0
+ while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029':
+ length += 1
+ if length != 0:
+ chunks.append(self.reader.prefix(length))
+ srf(length)
+ ch = srp()
+ if not double and ch == "'" and srp(1) == "'":
+ chunks.append("'")
+ srf(2)
+ elif (double and ch == "'") or (not double and ch in '"\\'):
+ chunks.append(ch)
+ srf()
+ elif double and ch == '\\':
+ srf()
+ ch = srp()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ srf()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ srf()
+ for k in range(length):
+ if srp(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError(
+ 'while scanning a double-quoted scalar',
+ start_mark,
+ _F(
+ 'expected escape sequence of {length:d} hexadecimal '
+ 'digits, but found {srp_call!r}',
+ length=length,
+ srp_call=srp(k),
+ ),
+ self.reader.get_mark(),
+ )
+ code = int(self.reader.prefix(length), 16)
+ chunks.append(chr(code))
+ srf(length)
+ elif ch in '\n\r\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError(
+ 'while scanning a double-quoted scalar',
+ start_mark,
+ _F('found unknown escape character {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ chunks = []
+ length = 0
+ while srp(length) in ' \t':
+ length += 1
+ whitespaces = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch == '\0':
+ raise ScannerError(
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected end of stream',
+ self.reader.get_mark(),
+ )
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ chunks = [] # type: List[Any]
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ raise ScannerError(
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected document separator',
+ self.reader.get_mark(),
+ )
+ while srp() in ' \t':
+ srf()
+ if srp() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # type: () -> Any
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ': ' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+ end_mark = start_mark
+ indent = self.indent + 1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ # if indent == 0:
+ # indent = 1
+ spaces = [] # type: List[Any]
+ while True:
+ length = 0
+ if srp() == '#':
+ break
+ while True:
+ ch = srp(length)
+ if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB:
+ pass
+ elif ch == '?' and self.scanner_processing_version != (1, 1):
+ pass
+ elif (
+ ch in _THE_END_SPACE_TAB
+ or (
+ not self.flow_level
+ and ch == ':'
+ and srp(length + 1) in _THE_END_SPACE_TAB
+ )
+ or (self.flow_level and ch in ',:?[]{}')
+ ):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (
+ self.flow_level
+ and ch == ':'
+ and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
+ ):
+ srf(length)
+ raise ScannerError(
+ 'while scanning a plain scalar',
+ start_mark,
+ "found unexpected ':'",
+ self.reader.get_mark(),
+ 'Please check '
+ 'http://pyyaml.org/wiki/YAMLColonInFlowContext '
+ 'for details.',
+ )
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.reader.prefix(length))
+ srf(length)
+ end_mark = self.reader.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if (
+ not spaces
+ or srp() == '#'
+ or (not self.flow_level and self.reader.column < indent)
+ ):
+ break
+
+ token = ScalarToken("".join(chunks), True, start_mark, end_mark)
+ # the getattr default (False) ensures that a C-type loader, which
+ # cannot handle comments, will not get a CommentToken made
+ if self.loader is not None:
+ comment_handler = getattr(self.loader, 'comment_handling', False)
+ if comment_handler is None:
+ if spaces and spaces[0] == '\n':
+ # Create a comment token to preserve the trailing line breaks.
+ comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
+ token.add_post_comment(comment)
+ elif comment_handler is not False:
+ line = start_mark.line + 1
+ for ch in spaces:
+ if ch == '\n':
+ self.comments.add_blank_line('\n', 0, line) # type: ignore
+ line += 1
+
+ return token
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = []
+ length = 0
+ while srp(length) in ' ':
+ length += 1
+ whitespaces = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ return
+ breaks = []
+ while srp() in ' \r\n\x85\u2028\u2029':
+ if srp() == ' ':
+ srf()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ srp = self.reader.peek
+ ch = srp()
+ if ch != '!':
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name),
+ start_mark,
+ _F("expected '!', but found {ch!r}", ch=ch),
+ self.reader.get_mark(),
+ )
+ length = 1
+ ch = srp(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
+ length += 1
+ ch = srp(length)
+ if ch != '!':
+ self.reader.forward(length)
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name),
+ start_mark,
+ _F("expected '!', but found {ch!r}", ch=ch),
+ self.reader.get_mark(),
+ )
+ length += 1
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ srp = self.reader.peek
+ chunks = []
+ length = 0
+ ch = srp(length)
+ while (
+ '0' <= ch <= '9'
+ or 'A' <= ch <= 'Z'
+ or 'a' <= ch <= 'z'
+ or ch in "-;/?:@&=+$,_.!~*'()[]%"
+ or ((self.scanner_processing_version > (1, 1)) and ch == '#')
+ ):
+ if ch == '%':
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = srp(length)
+ if length != 0:
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError(
+ _F('while parsing an {name!s}', name=name),
+ start_mark,
+ _F('expected URI, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ return "".join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ code_bytes = [] # type: List[Any]
+ mark = self.reader.get_mark()
+ while srp() == '%':
+ srf()
+ for k in range(2):
+ if srp(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name),
+ start_mark,
+ _F(
+                        'expected URI escape sequence of 2 hexadecimal numbers,'
+ ' but found {srp_call!r}',
+ srp_call=srp(k),
+ ),
+ self.reader.get_mark(),
+ )
+ code_bytes.append(int(self.reader.prefix(2), 16))
+ srf(2)
+ try:
+ value = bytes(code_bytes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError(
+ _F('while scanning an {name!s}', name=name), start_mark, str(exc), mark
+ )
+ return value
+
+ def scan_line_break(self):
+ # type: () -> Any
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+    # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.reader.peek()
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ return ""
+
+
+class RoundTripScanner(Scanner):
+ def check_token(self, *choices):
+ # type: (Any) -> bool
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if len(self.tokens) > 0:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if len(self.tokens) > 0:
+ return self.tokens[0]
+ return None
+
+ def _gather_comments(self):
+ # type: () -> Any
+ """combine multiple comment lines and assign to next non-comment-token"""
+ comments = [] # type: List[Any]
+ if not self.tokens:
+ return comments
+ if isinstance(self.tokens[0], CommentToken):
+ comment = self.tokens.pop(0)
+ self.tokens_taken += 1
+ comments.append(comment)
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if not self.tokens:
+ return comments
+ if isinstance(self.tokens[0], CommentToken):
+ self.tokens_taken += 1
+ comment = self.tokens.pop(0)
+ # nprint('dropping2', comment)
+ comments.append(comment)
+ if len(comments) >= 1:
+ self.tokens[0].add_pre_comments(comments)
+ # pull in post comment on e.g. ':'
+ if not self.done and len(self.tokens) < 2:
+ self.fetch_more_tokens()
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if len(self.tokens) > 0:
+ # nprint('tk', self.tokens)
+            # only add a post comment to single-line tokens: scalar, value
+            # token, FlowXEndToken. Otherwise hidden stream tokens could get
+            # them (leave them and they will be pre comments for the next
+            # map/seq)
+ if (
+ len(self.tokens) > 1
+ and isinstance(
+ self.tokens[0],
+ (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
+ )
+ and isinstance(self.tokens[1], CommentToken)
+ and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+ ):
+ self.tokens_taken += 1
+ c = self.tokens.pop(1)
+ self.fetch_more_tokens()
+ while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+ self.tokens_taken += 1
+ c1 = self.tokens.pop(1)
+ c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+ self.fetch_more_tokens()
+ self.tokens[0].add_post_comment(c)
+ elif (
+ len(self.tokens) > 1
+ and isinstance(self.tokens[0], ScalarToken)
+ and isinstance(self.tokens[1], CommentToken)
+ and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
+ ):
+ self.tokens_taken += 1
+ c = self.tokens.pop(1)
+ c.value = (
+ '\n' * (c.start_mark.line - self.tokens[0].end_mark.line)
+ + (' ' * c.start_mark.column)
+ + c.value
+ )
+ self.tokens[0].add_post_comment(c)
+ self.fetch_more_tokens()
+ while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+ self.tokens_taken += 1
+ c1 = self.tokens.pop(1)
+ c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+ self.fetch_more_tokens()
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+ return None
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ value, start_mark, end_mark = comment
+ while value and value[-1] == ' ':
+ # empty line within indented key context
+ # no need to update end-mark, that is not used
+ value = value[:-1]
+ self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+ # scanner
+
+ def scan_to_next_token(self):
+ # type: () -> Any
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ while not found:
+ while srp() == ' ':
+ srf()
+ ch = srp()
+ if ch == '#':
+ start_mark = self.reader.get_mark()
+ comment = ch
+ srf()
+ while ch not in _THE_END:
+ ch = srp()
+ if ch == '\0': # don't gobble the end-of-stream character
+                        # but add an explicit newline, as "YAML processors should
+                        # terminate the stream with an explicit line break"
+ # https://yaml.org/spec/1.2/spec.html#id2780069
+ comment += '\n'
+ break
+ comment += ch
+ srf()
+ # gather any blank lines following the comment too
+ ch = self.scan_line_break()
+ while len(ch) > 0:
+ comment += ch
+ ch = self.scan_line_break()
+ end_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ return comment, start_mark, end_mark
+ if self.scan_line_break() != '':
+ start_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ ch = srp()
+ if ch == '\n': # empty toplevel lines
+ start_mark = self.reader.get_mark()
+ comment = ""
+ while ch:
+ ch = self.scan_line_break(empty_line=True)
+ comment += ch
+ if srp() == '#':
+ # empty line followed by indented real comment
+ comment = comment.rsplit('\n', 1)[0] + '\n'
+ end_mark = self.reader.get_mark()
+ return comment, start_mark, end_mark
+ else:
+ found = True
+ return None
+
+ def scan_line_break(self, empty_line=False):
+ # type: (bool) -> Text
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+    # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.reader.peek() # type: Text
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ elif empty_line and ch in '\t ':
+ self.reader.forward()
+ return ch
+ return ""
+
+ def scan_block_scalar(self, style, rt=True):
+ # type: (Any, Optional[bool]) -> Any
+ return Scanner.scan_block_scalar(self, style, rt=rt)
+
+
+# comment handling 2021, differentiation not needed
+
+VALUECMNT = 0
+KEYCMNT = 0 # 1
+# TAGCMNT = 2
+# ANCHORCMNT = 3
+
+
+class CommentBase:
+ __slots__ = ('value', 'line', 'column', 'used', 'function', 'fline', 'ufun', 'uline')
+
+ def __init__(self, value, line, column):
+ # type: (Any, Any, Any) -> None
+ self.value = value
+ self.line = line
+ self.column = column
+ self.used = ' '
+ info = inspect.getframeinfo(inspect.stack()[3][0])
+ self.function = info.function
+ self.fline = info.lineno
+ self.ufun = None
+ self.uline = None
+
+ def set_used(self, v='+'):
+ # type: (Any) -> None
+ self.used = v
+ info = inspect.getframeinfo(inspect.stack()[1][0])
+ self.ufun = info.function # type: ignore
+ self.uline = info.lineno # type: ignore
+
+ def set_assigned(self):
+ # type: () -> None
+ self.used = '|'
+
+ def __str__(self):
+ # type: () -> str
+ return _F('{value}', value=self.value) # type: ignore
+
+ def __repr__(self):
+ # type: () -> str
+ return _F('{value!r}', value=self.value) # type: ignore
+
+ def info(self):
+ # type: () -> str
+ return _F( # type: ignore
+ '{name}{used} {line:2}:{column:<2} "{value:40s} {function}:{fline} {ufun}:{uline}',
+ name=self.name, # type: ignore
+ line=self.line,
+ column=self.column,
+ value=self.value + '"',
+ used=self.used,
+ function=self.function,
+ fline=self.fline,
+ ufun=self.ufun,
+ uline=self.uline,
+ )
+
+
+class EOLComment(CommentBase):
+ name = 'EOLC'
+
+ def __init__(self, value, line, column):
+ # type: (Any, Any, Any) -> None
+ super().__init__(value, line, column)
+
+
+class FullLineComment(CommentBase):
+ name = 'FULL'
+
+ def __init__(self, value, line, column):
+ # type: (Any, Any, Any) -> None
+ super().__init__(value, line, column)
+
+
+class BlankLineComment(CommentBase):
+ name = 'BLNK'
+
+ def __init__(self, value, line, column):
+ # type: (Any, Any, Any) -> None
+ super().__init__(value, line, column)
+
+
+class ScannedComments:
+ def __init__(self):
+        # type: () -> None
+ self.comments = {} # type: ignore
+ self.unused = [] # type: ignore
+
+ def add_eol_comment(self, comment, column, line):
+ # type: (Any, Any, Any) -> Any
+ # info = inspect.getframeinfo(inspect.stack()[1][0])
+ if comment.count('\n') == 1:
+ assert comment[-1] == '\n'
+ else:
+ assert '\n' not in comment
+ self.comments[line] = retval = EOLComment(comment[:-1], line, column)
+ self.unused.append(line)
+ return retval
+
+ def add_blank_line(self, comment, column, line):
+ # type: (Any, Any, Any) -> Any
+ # info = inspect.getframeinfo(inspect.stack()[1][0])
+ assert comment.count('\n') == 1 and comment[-1] == '\n'
+ assert line not in self.comments
+ self.comments[line] = retval = BlankLineComment(comment[:-1], line, column)
+ self.unused.append(line)
+ return retval
+
+ def add_full_line_comment(self, comment, column, line):
+ # type: (Any, Any, Any) -> Any
+ # info = inspect.getframeinfo(inspect.stack()[1][0])
+ assert comment.count('\n') == 1 and comment[-1] == '\n'
+ # if comment.startswith('# C12'):
+ # raise
+        # this raises in line 2127 for 330
+ self.comments[line] = retval = FullLineComment(comment[:-1], line, column)
+ self.unused.append(line)
+ return retval
+
+ def __getitem__(self, idx):
+ # type: (Any) -> Any
+ return self.comments[idx]
+
+ def __str__(self):
+ # type: () -> Any
+ return (
+ 'ParsedComments:\n '
+ + '\n '.join(
+ (
+ _F('{lineno:2} {x}', lineno=lineno, x=x.info())
+ for lineno, x in self.comments.items()
+ )
+ )
+ + '\n'
+ )
+
+ def last(self):
+ # type: () -> str
+ lineno, x = list(self.comments.items())[-1]
+ return _F('{lineno:2} {x}\n', lineno=lineno, x=x.info()) # type: ignore
+
+ def any_unprocessed(self):
+ # type: () -> bool
+ # ToDo: might want to differentiate based on lineno
+ return len(self.unused) > 0
+ # for lno, comment in reversed(self.comments.items()):
+ # if comment.used == ' ':
+ # return True
+ # return False
+
+ def unprocessed(self, use=False):
+ # type: (Any) -> Any
+ while len(self.unused) > 0:
+ first = self.unused.pop(0) if use else self.unused[0]
+ info = inspect.getframeinfo(inspect.stack()[1][0])
+ xprintf('using', first, self.comments[first].value, info.function, info.lineno)
+ yield first, self.comments[first]
+ if use:
+ self.comments[first].set_used()
+
+ def assign_pre(self, token):
+ # type: (Any) -> Any
+ token_line = token.start_mark.line
+ info = inspect.getframeinfo(inspect.stack()[1][0])
+ xprintf('assign_pre', token_line, self.unused, info.function, info.lineno)
+ gobbled = False
+ while self.unused and self.unused[0] < token_line:
+ gobbled = True
+ first = self.unused.pop(0)
+ xprintf('assign_pre < ', first)
+ self.comments[first].set_used()
+ token.add_comment_pre(first)
+ return gobbled
+
+ def assign_eol(self, tokens):
+ # type: (Any) -> Any
+ try:
+ comment_line = self.unused[0]
+ except IndexError:
+ return
+ if not isinstance(self.comments[comment_line], EOLComment):
+ return
+ idx = 1
+ while tokens[-idx].start_mark.line > comment_line or isinstance(
+ tokens[-idx], ValueToken
+ ):
+ idx += 1
+ xprintf('idx1', idx)
+ if (
+ len(tokens) > idx
+ and isinstance(tokens[-idx], ScalarToken)
+ and isinstance(tokens[-(idx + 1)], ScalarToken)
+ ):
+ return
+ try:
+ if isinstance(tokens[-idx], ScalarToken) and isinstance(
+ tokens[-(idx + 1)], KeyToken
+ ):
+ try:
+ eol_idx = self.unused.pop(0)
+ self.comments[eol_idx].set_used()
+ xprintf('>>>>>a', idx, eol_idx, KEYCMNT)
+ tokens[-idx].add_comment_eol(eol_idx, KEYCMNT)
+ except IndexError:
+ raise NotImplementedError
+ return
+ except IndexError:
+ xprintf('IndexError1')
+ pass
+ try:
+ if isinstance(tokens[-idx], ScalarToken) and isinstance(
+ tokens[-(idx + 1)], (ValueToken, BlockEntryToken)
+ ):
+ try:
+ eol_idx = self.unused.pop(0)
+ self.comments[eol_idx].set_used()
+ tokens[-idx].add_comment_eol(eol_idx, VALUECMNT)
+ except IndexError:
+ raise NotImplementedError
+ return
+ except IndexError:
+ xprintf('IndexError2')
+ pass
+ for t in tokens:
+ xprintf('tt-', t)
+ xprintf('not implemented EOL', type(tokens[-idx]))
+ import sys
+
+ sys.exit(0)
+
+ def assign_post(self, token):
+ # type: (Any) -> Any
+ token_line = token.start_mark.line
+ info = inspect.getframeinfo(inspect.stack()[1][0])
+ xprintf('assign_post', token_line, self.unused, info.function, info.lineno)
+ gobbled = False
+ while self.unused and self.unused[0] < token_line:
+ gobbled = True
+ first = self.unused.pop(0)
+ xprintf('assign_post < ', first)
+ self.comments[first].set_used()
+ token.add_comment_post(first)
+ return gobbled
+
+ def str_unprocessed(self):
+ # type: () -> Any
+ return ''.join(
+ (
+ _F(' {ind:2} {x}\n', ind=ind, x=x.info())
+ for ind, x in self.comments.items()
+ if x.used == ' '
+ )
+ )
+
+
+class RoundTripScannerSC(Scanner): # RoundTripScanner Split Comments
+ def __init__(self, *arg, **kw):
+ # type: (Any, Any) -> None
+ super().__init__(*arg, **kw)
+ assert self.loader is not None
+        # self.comments is initialised in .need_more_tokens and persists as
+        # self.loader.parsed_comments
+ self.comments = None
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if len(self.tokens) > 0:
+ if isinstance(self.tokens[0], BlockEndToken):
+ self.comments.assign_post(self.tokens[0]) # type: ignore
+ else:
+ self.comments.assign_pre(self.tokens[0]) # type: ignore
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ def need_more_tokens(self):
+ # type: () -> bool
+ if self.comments is None:
+ self.loader.parsed_comments = self.comments = ScannedComments() # type: ignore
+ if self.done:
+ return False
+ if len(self.tokens) == 0:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+ if len(self.tokens) < 2:
+ return True
+ if self.tokens[0].start_mark.line == self.tokens[-1].start_mark.line:
+ return True
+ if True:
+ xprintf('-x--', len(self.tokens))
+ for t in self.tokens:
+ xprintf(t)
+ # xprintf(self.comments.last())
+ xprintf(self.comments.str_unprocessed()) # type: ignore
+ self.comments.assign_pre(self.tokens[0]) # type: ignore
+ self.comments.assign_eol(self.tokens) # type: ignore
+ return False
+
+ def scan_to_next_token(self):
+ # type: () -> None
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ start_mark = self.reader.get_mark()
+ # xprintf('current_mark', start_mark.line, start_mark.column)
+ found = False
+ while not found:
+ while srp() == ' ':
+ srf()
+ ch = srp()
+ if ch == '#':
+ comment_start_mark = self.reader.get_mark()
+ comment = ch
+            srf() # skip the '#'
+ while ch not in _THE_END:
+ ch = srp()
+ if ch == '\0': # don't gobble the end-of-stream character
+                        # but add an explicit newline, as "YAML processors should
+                        # terminate the stream with an explicit line break"
+ # https://yaml.org/spec/1.2/spec.html#id2780069
+ comment += '\n'
+ break
+ comment += ch
+ srf()
+ # we have a comment
+ if start_mark.column == 0:
+ self.comments.add_full_line_comment( # type: ignore
+ comment, comment_start_mark.column, comment_start_mark.line
+ )
+ else:
+ self.comments.add_eol_comment( # type: ignore
+ comment, comment_start_mark.column, comment_start_mark.line
+ )
+ comment = ""
+ # gather any blank lines or full line comments following the comment as well
+ self.scan_empty_or_full_line_comments()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ return
+ if bool(self.scan_line_break()):
+ # start_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ self.scan_empty_or_full_line_comments()
+ return None
+ ch = srp()
+ if ch == '\n': # empty toplevel lines
+ start_mark = self.reader.get_mark()
+ comment = ""
+ while ch:
+ ch = self.scan_line_break(empty_line=True)
+ comment += ch
+ if srp() == '#':
+ # empty line followed by indented real comment
+ comment = comment.rsplit('\n', 1)[0] + '\n'
+ _ = self.reader.get_mark() # gobble end_mark
+ return None
+ else:
+ found = True
+ return None
+
+ def scan_empty_or_full_line_comments(self):
+ # type: () -> None
+ blmark = self.reader.get_mark()
+ assert blmark.column == 0
+ blanks = ""
+ comment = None
+ mark = None
+ ch = self.reader.peek()
+ while True:
+ # nprint('ch', repr(ch), self.reader.get_mark().column)
+ if ch in '\r\n\x85\u2028\u2029':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ if comment is not None:
+ comment += '\n'
+ self.comments.add_full_line_comment(comment, mark.column, mark.line)
+ comment = None
+ else:
+ blanks += '\n'
+ self.comments.add_blank_line(blanks, blmark.column, blmark.line) # type: ignore # NOQA
+ blanks = ""
+ blmark = self.reader.get_mark()
+ ch = self.reader.peek()
+ continue
+ if comment is None:
+ if ch in ' \t':
+ blanks += ch
+ elif ch == '#':
+ mark = self.reader.get_mark()
+ comment = '#'
+ else:
+ # xprintf('breaking on', repr(ch))
+ break
+ else:
+ comment += ch
+ self.reader.forward()
+ ch = self.reader.peek()
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ prefix = ''
+ comment = None
+ while srp() == ' ':
+ prefix += srp()
+ srf()
+ if srp() == '#':
+ comment = ''
+ mark = self.reader.get_mark()
+ while srp() not in _THE_END:
+ comment += srp()
+ srf()
+ comment += '\n' # type: ignore
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ _F('expected a comment or a line break, but found {ch!r}', ch=ch),
+ self.reader.get_mark(),
+ )
+ if comment is not None:
+ self.comments.add_eol_comment(comment, mark.column, mark.line) # type: ignore
+ self.scan_line_break()
+ return None
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/serializer.py b/lib/spack/external/_vendoring/ruamel/yaml/serializer.py
new file mode 100644
index 0000000000..cc2329dcc4
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/serializer.py
@@ -0,0 +1,241 @@
+# coding: utf-8
+
+from ruamel.yaml.error import YAMLError
+from ruamel.yaml.compat import nprint, DBG_NODE, dbg, nprintf # NOQA
+from ruamel.yaml.util import RegExp
+
+from ruamel.yaml.events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+ DocumentStartEvent,
+ DocumentEndEvent,
+)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Union, Text, Optional # NOQA
+ from ruamel.yaml.compat import VersionType # NOQA
+
+__all__ = ['Serializer', 'SerializerError']
+
+
+class SerializerError(YAMLError):
+ pass
+
+
+class Serializer:
+
+ # 'id' and 3+ numbers, but not 000
+ ANCHOR_TEMPLATE = 'id%03d'
+ ANCHOR_RE = RegExp('id(?!000$)\\d{3,}')
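+    # illustrative (not upstream): ANCHOR_RE matches 'id001' and 'id1234',
+    # but rejects 'id000' (negative lookahead) and 'id01' (fewer than 3 digits)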
+
+ def __init__(
+ self,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ dumper=None,
+ ):
+ # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA
+ self.dumper = dumper
+ if self.dumper is not None:
+ self.dumper._serializer = self
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ if isinstance(version, str):
+ self.use_version = tuple(map(int, version.split('.')))
+ else:
+ self.use_version = version # type: ignore
+ self.use_tags = tags
+ self.serialized_nodes = {} # type: Dict[Any, Any]
+ self.anchors = {} # type: Dict[Any, Any]
+ self.last_anchor_id = 0
+ self.closed = None # type: Optional[bool]
+ self._templated_id = None
+
+ @property
+ def emitter(self):
+ # type: () -> Any
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.emitter
+ return self.dumper._emitter
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.dumper, 'typ'):
+            return self.dumper.resolver
+ return self.dumper._resolver
+
+ def open(self):
+ # type: () -> None
+ if self.closed is None:
+ self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError('serializer is closed')
+ else:
+ raise SerializerError('serializer is already opened')
+
+ def close(self):
+ # type: () -> None
+ if self.closed is None:
+ raise SerializerError('serializer is not opened')
+ elif not self.closed:
+ self.emitter.emit(StreamEndEvent())
+ self.closed = True
+
+ # def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ # type: (Any) -> None
+ if dbg(DBG_NODE):
+ nprint('Serializing nodes')
+ node.dump()
+ if self.closed is None:
+ raise SerializerError('serializer is not opened')
+ elif self.closed:
+ raise SerializerError('serializer is closed')
+ self.emitter.emit(
+ DocumentStartEvent(
+ explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
+ )
+ )
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ # type: (Any) -> None
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ anchor = None
+ try:
+ if node.anchor.always_dump:
+ anchor = node.anchor.value
+ except: # NOQA
+ pass
+ self.anchors[node] = anchor
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ # type: (Any) -> Any
+ try:
+ anchor = node.anchor.value
+ except: # NOQA
+ anchor = None
+ if anchor is None:
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+ return anchor
+
+ def serialize_node(self, node, parent, index):
+ # type: (Any, Any, Any) -> None
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ node_style = getattr(node, 'style', None)
+ if node_style != '?':
+ node_style = None
+ self.emitter.emit(AliasEvent(alias, style=node_style))
+ else:
+ self.serialized_nodes[node] = True
+ self.resolver.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+                # check whether node.tag equals the tag that would result from
+                # parsing; if they are not equal, quoting is necessary for strings
+ detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
+ implicit = (
+ (node.tag == detected_tag),
+ (node.tag == default_tag),
+ node.tag.startswith('tag:yaml.org,2002:'),
+ )
+ self.emitter.emit(
+ ScalarEvent(
+ alias,
+ node.tag,
+ implicit,
+ node.value,
+ style=node.style,
+ comment=node.comment,
+ )
+ )
+ elif isinstance(node, SequenceNode):
+ implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
+ comment = node.comment
+ end_comment = None
+ seq_comment = None
+ if node.flow_style is True:
+ if comment: # eol comment on flow style sequence
+ seq_comment = comment[0]
+ # comment[0] = None
+ if comment and len(comment) > 2:
+ end_comment = comment[2]
+ else:
+ end_comment = None
+ self.emitter.emit(
+ SequenceStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ )
+ )
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
+ elif isinstance(node, MappingNode):
+ implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
+ comment = node.comment
+ end_comment = None
+ map_comment = None
+ if node.flow_style is True:
+                if comment: # eol comment on flow style mapping
+ map_comment = comment[0]
+ # comment[0] = None
+ if comment and len(comment) > 2:
+ end_comment = comment[2]
+ self.emitter.emit(
+ MappingStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ nr_items=len(node.value),
+ )
+ )
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
+ self.resolver.ascend_resolver()
+
+
+def templated_id(s):
+ # type: (Text) -> Any
+ return Serializer.ANCHOR_RE.match(s)
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/timestamp.py b/lib/spack/external/_vendoring/ruamel/yaml/timestamp.py
new file mode 100644
index 0000000000..58eef04dc8
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/timestamp.py
@@ -0,0 +1,61 @@
+# coding: utf-8
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object
+# a more complete datetime might be used by safe loading as well
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+
+class TimeStamp(datetime.datetime):
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any]
+
+ def __new__(cls, *args, **kw): # datetime is immutable
+ # type: (Any, Any) -> Any
+ return datetime.datetime.__new__(cls, *args, **kw)
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second)
+ ts._yaml = copy.deepcopy(self._yaml)
+ return ts
+
+ def replace(
+ self,
+ year=None,
+ month=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+ microsecond=None,
+ tzinfo=True,
+ fold=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
+ if year is None:
+ year = self.year
+ if month is None:
+ month = self.month
+ if day is None:
+ day = self.day
+ if hour is None:
+ hour = self.hour
+ if minute is None:
+ minute = self.minute
+ if second is None:
+ second = self.second
+ if microsecond is None:
+ microsecond = self.microsecond
+ if tzinfo is True:
+ tzinfo = self.tzinfo
+ if fold is None:
+ fold = self.fold
+ ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold)
+ ts._yaml = copy.deepcopy(self._yaml)
+ return ts
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/tokens.py b/lib/spack/external/_vendoring/ruamel/yaml/tokens.py
new file mode 100644
index 0000000000..bc302ba316
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/tokens.py
@@ -0,0 +1,404 @@
+# coding: utf-8
+
+from ruamel.yaml.compat import _F, nprintf # NOQA
+
+if False: # MYPY
+ from typing import Text, Any, Dict, Optional, List # NOQA
+ from .error import StreamMark # NOQA
+
+SHOW_LINES = True
+
+
+class Token:
+ __slots__ = 'start_mark', 'end_mark', '_comment'
+
+ def __init__(self, start_mark, end_mark):
+ # type: (StreamMark, StreamMark) -> None
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+ def __repr__(self):
+ # type: () -> Any
+ # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
+ # hasattr('self', key)]
+ attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
+ attributes.sort()
+ # arguments = ', '.join(
+ # [_F('{key!s}={gattr!r})', key=key, gattr=getattr(self, key)) for key in attributes]
+ # )
+ arguments = [
+ _F('{key!s}={gattr!r}', key=key, gattr=getattr(self, key)) for key in attributes
+ ]
+ if SHOW_LINES:
+ try:
+ arguments.append('line: ' + str(self.start_mark.line))
+ except: # NOQA
+ pass
+ try:
+ arguments.append('comment: ' + str(self._comment))
+ except: # NOQA
+ pass
+ return '{}({})'.format(self.__class__.__name__, ', '.join(arguments))
+
+ @property
+ def column(self):
+ # type: () -> int
+ return self.start_mark.column
+
+ @column.setter
+ def column(self, pos):
+ # type: (Any) -> None
+ self.start_mark.column = pos
+
+ # old style ( <= 0.17) is a TWO element list with first being the EOL
+ # comment concatenated with following FLC/BLNK; and second being a list of FLC/BLNK
+ # preceding the token
+ # new style ( >= 0.17 ) is a THREE element list with the first being a list of
+ # preceding FLC/BLNK, the second EOL and the third following FLC/BLNK
+ # note that new style has differing order, and does not consist of CommentToken(s)
+ # but of CommentInfo instances
+ # any non-assigned values in new style are None, but first and last can be empty list
+ # new style routines add one comment at a time
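+    # a sketch of the two layouts described above (illustrative only):
+    #   old (<= 0.17): token._comment == [eol_comment, [pre_comments]]
+    #   new (>= 0.17): token._comment == [[pre...], eol_by_type, [post...]]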
+
+ # going to be deprecated in favour of add_comment_eol/post
+ def add_post_comment(self, comment):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None]
+ else:
+ assert len(self._comment) in [2, 5] # make sure it is version 0
+ # if isinstance(comment, CommentToken):
+ # if comment.value.startswith('# C09'):
+ # raise
+ self._comment[0] = comment
+
+ # going to be deprecated in favour of add_comment_pre
+ def add_pre_comments(self, comments):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None]
+ else:
+ assert len(self._comment) == 2 # make sure it is version 0
+ assert self._comment[1] is None
+ self._comment[1] = comments
+ return
+
+ # new style
+ def add_comment_pre(self, comment):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [[], None, None] # type: ignore
+ else:
+ assert len(self._comment) == 3
+ if self._comment[0] is None:
+ self._comment[0] = [] # type: ignore
+ self._comment[0].append(comment) # type: ignore
+
+ def add_comment_eol(self, comment, comment_type):
+ # type: (Any, Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None, None]
+ else:
+ assert len(self._comment) == 3
+ assert self._comment[1] is None
+ if self.comment[1] is None:
+ self._comment[1] = [] # type: ignore
+ self._comment[1].extend([None] * (comment_type + 1 - len(self.comment[1]))) # type: ignore # NOQA
+ # nprintf('commy', self.comment, comment_type)
+ self._comment[1][comment_type] = comment # type: ignore
+
+ def add_comment_post(self, comment):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None, []] # type: ignore
+ else:
+ assert len(self._comment) == 3
+ if self._comment[2] is None:
+ self._comment[2] = [] # type: ignore
+ self._comment[2].append(comment) # type: ignore
+
+ # def get_comment(self):
+ # # type: () -> Any
+ # return getattr(self, '_comment', None)
+
+ @property
+ def comment(self):
+ # type: () -> Any
+ return getattr(self, '_comment', None)
+
+ def move_old_comment(self, target, empty=False):
+ # type: (Any, bool) -> Any
+ """move a comment from this token to target (normally next token)
+ used to combine e.g. comments before a BlockEntryToken to the
+ ScalarToken that follows it
+        empty is a special case for empty values -> comment after key
+ """
+ c = self.comment
+ if c is None:
+ return
+ # don't push beyond last element
+ if isinstance(target, (StreamEndToken, DocumentStartToken)):
+ return
+ delattr(self, '_comment')
+ tc = target.comment
+        if not tc: # no target comment, just insert
+ # special for empty value in key: value issue 25
+ if empty:
+ c = [c[0], c[1], None, None, c[0]]
+ target._comment = c
+ # nprint('mco2:', self, target, target.comment, empty)
+ return self
+ if c[0] and tc[0] or c[1] and tc[1]:
+ raise NotImplementedError(_F('overlap in comment {c!r} {tc!r}', c=c, tc=tc))
+ if c[0]:
+ tc[0] = c[0]
+ if c[1]:
+ tc[1] = c[1]
+ return self
+
+ def split_old_comment(self):
+ # type: () -> Any
+ """ split the post part of a comment, and return it
+ as comment to be added. Delete second part if [None, None]
+ abc: # this goes to sequence
+ # this goes to first element
+ - first element
+ """
+ comment = self.comment
+ if comment is None or comment[0] is None:
+ return None # nothing to do
+ ret_val = [comment[0], None]
+ if comment[1] is None:
+ delattr(self, '_comment')
+ return ret_val
+
+ def move_new_comment(self, target, empty=False):
+ # type: (Any, bool) -> Any
+ """move a comment from this token to target (normally next token)
+ used to combine e.g. comments before a BlockEntryToken to the
+ ScalarToken that follows it
+        empty is a special case for empty values -> comment after key
+ """
+ c = self.comment
+ if c is None:
+ return
+ # don't push beyond last element
+ if isinstance(target, (StreamEndToken, DocumentStartToken)):
+ return
+ delattr(self, '_comment')
+ tc = target.comment
+        if not tc: # no target comment, just insert
+ # special for empty value in key: value issue 25
+ if empty:
+ c = [c[0], c[1], c[2]]
+ target._comment = c
+ # nprint('mco2:', self, target, target.comment, empty)
+ return self
+ # if self and target have both pre, eol or post comments, something seems wrong
+ for idx in range(3):
+ if c[idx] is not None and tc[idx] is not None:
+ raise NotImplementedError(_F('overlap in comment {c!r} {tc!r}', c=c, tc=tc))
+ # move the comment parts
+ for idx in range(3):
+ if c[idx]:
+ tc[idx] = c[idx]
+ return self
+
+
+# class BOMToken(Token):
+# id = '<byte order mark>'
+
+
+class DirectiveToken(Token):
+ __slots__ = 'name', 'value'
+ id = '<directive>'
+
+ def __init__(self, name, value, start_mark, end_mark):
+ # type: (Any, Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.name = name
+ self.value = value
+
+
+class DocumentStartToken(Token):
+ __slots__ = ()
+ id = '<document start>'
+
+
+class DocumentEndToken(Token):
+ __slots__ = ()
+ id = '<document end>'
+
+
+class StreamStartToken(Token):
+ __slots__ = ('encoding',)
+ id = '<stream start>'
+
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.encoding = encoding
+
+
+class StreamEndToken(Token):
+ __slots__ = ()
+ id = '<stream end>'
+
+
+class BlockSequenceStartToken(Token):
+ __slots__ = ()
+ id = '<block sequence start>'
+
+
+class BlockMappingStartToken(Token):
+ __slots__ = ()
+ id = '<block mapping start>'
+
+
+class BlockEndToken(Token):
+ __slots__ = ()
+ id = '<block end>'
+
+
+class FlowSequenceStartToken(Token):
+ __slots__ = ()
+ id = '['
+
+
+class FlowMappingStartToken(Token):
+ __slots__ = ()
+ id = '{'
+
+
+class FlowSequenceEndToken(Token):
+ __slots__ = ()
+ id = ']'
+
+
+class FlowMappingEndToken(Token):
+ __slots__ = ()
+ id = '}'
+
+
+class KeyToken(Token):
+ __slots__ = ()
+ id = '?'
+
+ # def x__repr__(self):
+ # return 'KeyToken({})'.format(
+ # self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
+class ValueToken(Token):
+ __slots__ = ()
+ id = ':'
+
+
+class BlockEntryToken(Token):
+ __slots__ = ()
+ id = '-'
+
+
+class FlowEntryToken(Token):
+ __slots__ = ()
+ id = ','
+
+
+class AliasToken(Token):
+ __slots__ = ('value',)
+ id = '<alias>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class AnchorToken(Token):
+ __slots__ = ('value',)
+ id = '<anchor>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class TagToken(Token):
+ __slots__ = ('value',)
+ id = '<tag>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class ScalarToken(Token):
+ __slots__ = 'value', 'plain', 'style'
+ id = '<scalar>'
+
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ # type: (Any, Any, Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+ self.plain = plain
+ self.style = style
+
+
+class CommentToken(Token):
+ __slots__ = '_value', 'pre_done'
+ id = '<comment>'
+
+ def __init__(self, value, start_mark=None, end_mark=None, column=None):
+ # type: (Any, Any, Any, Any) -> None
+ if start_mark is None:
+ assert column is not None
+ self._column = column
+ Token.__init__(self, start_mark, None) # type: ignore
+ self._value = value
+
+ @property
+ def value(self):
+ # type: () -> str
+ if isinstance(self._value, str):
+ return self._value
+ return "".join(self._value)
+
+ @value.setter
+ def value(self, val):
+ # type: (Any) -> None
+ self._value = val
+
+ def reset(self):
+ # type: () -> None
+ if hasattr(self, 'pre_done'):
+ delattr(self, 'pre_done')
+
+ def __repr__(self):
+ # type: () -> Any
+ v = '{!r}'.format(self.value)
+ if SHOW_LINES:
+ try:
+ v += ', line: ' + str(self.start_mark.line)
+ except: # NOQA
+ pass
+ try:
+ v += ', col: ' + str(self.start_mark.column)
+ except: # NOQA
+ pass
+ return 'CommentToken({})'.format(v)
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ if self.start_mark != other.start_mark:
+ return False
+ if self.end_mark != other.end_mark:
+ return False
+ if self.value != other.value:
+ return False
+ return True
+
+ def __ne__(self, other):
+ # type: (Any) -> bool
+ return not self.__eq__(other)
diff --git a/lib/spack/external/_vendoring/ruamel/yaml/util.py b/lib/spack/external/_vendoring/ruamel/yaml/util.py
new file mode 100644
index 0000000000..9ff51bdee2
--- /dev/null
+++ b/lib/spack/external/_vendoring/ruamel/yaml/util.py
@@ -0,0 +1,256 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+import datetime
+from functools import partial
+import re
+
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+ from .compat import StreamTextType # NOQA
+
+
+class LazyEval:
+ """
+ Lightweight wrapper around lazily evaluated func(*args, **kwargs).
+
+ func is only evaluated when any attribute of its return value is accessed.
+ Every attribute access is passed through to the wrapped value.
+ (This only excludes special cases like method-wrappers, e.g., __hash__.)
+ The sole additional attribute is the lazy_self function which holds the
+ return value (or, prior to evaluation, func and arguments), in its closure.
+ """
+
+ def __init__(self, func, *args, **kwargs):
+ # type: (Any, Any, Any) -> None
+ def lazy_self():
+ # type: () -> Any
+ return_value = func(*args, **kwargs)
+ object.__setattr__(self, 'lazy_self', lambda: return_value)
+ return return_value
+
+ object.__setattr__(self, 'lazy_self', lazy_self)
+
+ def __getattribute__(self, name):
+ # type: (Any) -> Any
+ lazy_self = object.__getattribute__(self, 'lazy_self')
+ if name == 'lazy_self':
+ return lazy_self
+ return getattr(lazy_self(), name)
+
+ def __setattr__(self, name, value):
+ # type: (Any, Any) -> None
+ setattr(self.lazy_self(), name, value)
+
+
+RegExp = partial(LazyEval, re.compile)
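+# minimal usage sketch (illustrative, not part of the module): compilation is
+# deferred until the first attribute access on the wrapper
+#   pat = RegExp('[0-9]+')   # no re.compile() has run yet
+#   pat.match('42')          # compiles the pattern, then delegates to it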
+
+timestamp_regexp = RegExp(
+ """^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+    (?:((?P<t>[Tt])|[ \\t]+) # explicitly not retaining extra spaces
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\\.(?P<fraction>[0-9]*))?
+ (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+ re.X,
+)
+
+
+def create_timestamp(
+ year, month, day, t, hour, minute, second, fraction, tz, tz_sign, tz_hour, tz_minute
+):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
+ # create a timestamp from match against timestamp_regexp
+ MAX_FRAC = 999999
+ year = int(year)
+ month = int(month)
+ day = int(day)
+ if not hour:
+ return datetime.date(year, month, day)
+ hour = int(hour)
+ minute = int(minute)
+ second = int(second)
+ frac = 0
+ if fraction:
+ frac_s = fraction[:6]
+ while len(frac_s) < 6:
+ frac_s += '0'
+ frac = int(frac_s)
+ if len(fraction) > 6 and int(fraction[6]) > 4:
+ frac += 1
+ if frac > MAX_FRAC:
+ fraction = 0
+ else:
+ fraction = frac
+ else:
+ fraction = 0
+ delta = None
+ if tz_sign:
+ tz_hour = int(tz_hour)
+ tz_minute = int(tz_minute) if tz_minute else 0
+ delta = datetime.timedelta(
+ hours=tz_hour, minutes=tz_minute, seconds=1 if frac > MAX_FRAC else 0
+ )
+ if tz_sign == '-':
+ delta = -delta
+ elif frac > MAX_FRAC:
+ delta = -datetime.timedelta(seconds=1)
+    # should do something else instead (or hook this up to the preceding if
+    # statement in reverse)
+ # if delta is None:
+ # return datetime.datetime(year, month, day, hour, minute, second, fraction)
+ # return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ # datetime.timezone.utc)
+    # the above is not good enough though, it should provide tzinfo. In Python3
+    # that is easily doable; drop that kind of support for Python2 as it has no
+    # native tzinfo
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
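+# worked example (illustrative, not upstream): the canonical YAML timestamp
+#   m = timestamp_regexp.match('2001-12-14 21:59:43.10 -5')
+#   create_timestamp(**m.groupdict())
+# yields datetime.datetime(2001, 12, 15, 2, 59, 43, 100000), i.e. the UTC
+# moment after applying the -5 hour offset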
+
+# originally as comment
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test in your test suite
+# that checks this routine's output against a known piece of your YAML
+# before upgrades to this code break your round-tripped YAML
+def load_yaml_guess_indent(stream, **kw):
+ # type: (StreamTextType, Any) -> Any
+ """guess the indent and block sequence indent of yaml stream/string
+
+ returns round_trip_loaded stream, indent level, block sequence indent
+ - block sequence indent is the number of spaces before a dash relative to previous indent
+ - if there are no block sequences, indent is taken from nested mappings, block sequence
+ indent is unset (None) in that case
+ """
+ from .main import YAML
+
+    # load a YAML document and guess the indentation; if you use TABs you are on your own
+ def leading_spaces(line):
+ # type: (Any) -> int
+ idx = 0
+ while idx < len(line) and line[idx] == ' ':
+ idx += 1
+ return idx
+
+ if isinstance(stream, str):
+ yaml_str = stream # type: Any
+ elif isinstance(stream, bytes):
+ # most likely, but the Reader checks BOM for this
+ yaml_str = stream.decode('utf-8')
+ else:
+ yaml_str = stream.read()
+ map_indent = None
+ indent = None # default if not found for some reason
+ block_seq_indent = None
+ prev_line_key_only = None
+ key_indent = 0
+ for line in yaml_str.splitlines():
+ rline = line.rstrip()
+ lline = rline.lstrip()
+ if lline.startswith('- '):
+ l_s = leading_spaces(line)
+ block_seq_indent = l_s - key_indent
+ idx = l_s + 1
+ while line[idx] == ' ': # this will end as we rstripped
+ idx += 1
+ if line[idx] == '#': # comment after -
+ continue
+ indent = idx - key_indent
+ break
+ if map_indent is None and prev_line_key_only is not None and rline:
+ idx = 0
+ while line[idx] in ' -':
+ idx += 1
+ if idx > prev_line_key_only:
+ map_indent = idx - prev_line_key_only
+ if rline.endswith(':'):
+ key_indent = leading_spaces(line)
+ idx = 0
+ while line[idx] == ' ': # this will end on ':'
+ idx += 1
+ prev_line_key_only = idx
+ continue
+ prev_line_key_only = None
+ if indent is None and map_indent is not None:
+ indent = map_indent
+ yaml = YAML()
+ return yaml.load(yaml_str, **kw), indent, block_seq_indent # type: ignore
+
+
+def configobj_walker(cfg):
+ # type: (Any) -> Any
+ """
+ walks over a ConfigObj (INI file with comments) generating
+    corresponding YAML output (including comments)
+ """
+ from configobj import ConfigObj # type: ignore
+
+ assert isinstance(cfg, ConfigObj)
+ for c in cfg.initial_comment:
+ if c.strip():
+ yield c
+ for s in _walk_section(cfg):
+ if s.strip():
+ yield s
+ for c in cfg.final_comment:
+ if c.strip():
+ yield c
+
+
+def _walk_section(s, level=0):
+ # type: (Any, int) -> Any
+ from configobj import Section
+
+ assert isinstance(s, Section)
+ indent = ' ' * level
+ for name in s.scalars:
+ for c in s.comments[name]:
+ yield indent + c.strip()
+ x = s[name]
+ if '\n' in x:
+ i = indent + ' '
+ x = '|\n' + i + x.strip().replace('\n', '\n' + i)
+ elif ':' in x:
+ x = "'" + x.replace("'", "''") + "'"
+ line = '{0}{1}: {2}'.format(indent, name, x)
+ c = s.inline_comments[name]
+ if c:
+ line += ' ' + c
+ yield line
+ for name in s.sections:
+ for c in s.comments[name]:
+ yield indent + c.strip()
+ line = '{0}{1}:'.format(indent, name)
+ c = s.inline_comments[name]
+ if c:
+ line += ' ' + c
+ yield line
+ for val in _walk_section(s[name], level=level + 1):
+ yield val
+
+
+# def config_obj_2_rt_yaml(cfg):
+# from .comments import CommentedMap, CommentedSeq
+# from configobj import ConfigObj
+# assert isinstance(cfg, ConfigObj)
+# #for c in cfg.initial_comment:
+# # if c.strip():
+# # pass
+# cm = CommentedMap()
+# for name in s.sections:
+# cm[name] = d = CommentedMap()
+#
+#
+# #for c in cfg.final_comment:
+# # if c.strip():
+# # yield c
+# return cm
diff --git a/lib/spack/external/ruamel/yaml/.ruamel/__init__.py b/lib/spack/external/ruamel/yaml/.ruamel/__init__.py
deleted file mode 100644
index ece379ce2f..0000000000
--- a/lib/spack/external/ruamel/yaml/.ruamel/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-import pkg_resources
-pkg_resources.declare_namespace(__name__)
diff --git a/lib/spack/external/ruamel/yaml/README.rst b/lib/spack/external/ruamel/yaml/README.rst
deleted file mode 100644
index 993cf35542..0000000000
--- a/lib/spack/external/ruamel/yaml/README.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-
-ruamel.yaml
-===========
-
-``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
-
-* `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
-* `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
-* `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
-* `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
-* `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
-
-.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
- :target: https://yaml.readthedocs.org/en/stable
-
-ChangeLog
-=========
-
-::
-
- 0.11.15 (2016-XX-XX):
- - Change to prevent FutureWarning in NumPy, as reported by tgehring
- ("comparison to None will result in an elementwise object comparison in the future")
-
- 0.11.14 (2016-07-06):
- - fix preserve_quotes missing on original Loaders (as reported
- by Leynos, bitbucket issue 38)
-
- 0.11.13 (2016-07-06):
- - documentation only, automated linux wheels
-
- 0.11.12 (2016-07-06):
- - added support for roundtrip of single/double quoted scalars using:
- ruamel.yaml.round_trip_load(stream, preserve_quotes=True)
-
- 0.11.0 (2016-02-18):
- - RoundTripLoader loads 1.2 by default (no sexagesimals, 012 octals nor
- yes/no/on/off booleans
diff --git a/lib/spack/external/ruamel/yaml/__init__.py b/lib/spack/external/ruamel/yaml/__init__.py
deleted file mode 100644
index b77032fc83..0000000000
--- a/lib/spack/external/ruamel/yaml/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# coding: utf-8
-
-from __future__ import print_function
-from __future__ import absolute_import
-
-# install_requires of ruamel.base is not really required but the old
-# ruamel.base installed __init__.py, and thus a new version should
-# be installed at some point
-
-_package_data = dict(
- full_package_name="ruamel.yaml",
- version_info=(0, 11, 15),
- author="Anthon van der Neut",
- author_email="a.van.der.neut@ruamel.eu",
- description="ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order", # NOQA
- entry_points=None,
- install_requires=dict(
- any=[],
- py26=["ruamel.ordereddict"],
- py27=["ruamel.ordereddict"]
- ),
- ext_modules=[dict(
- name="_ruamel_yaml",
- src=["ext/_ruamel_yaml.c", "ext/api.c", "ext/writer.c", "ext/dumper.c",
- "ext/loader.c", "ext/reader.c", "ext/scanner.c", "ext/parser.c",
- "ext/emitter.c"],
- lib=[],
- # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n' # NOQA
- )
- ],
- classifiers=[
- "Programming Language :: Python :: 2.6",
- "Programming Language :: Python :: 2.7",
- "Programming Language :: Python :: 3.3",
- "Programming Language :: Python :: 3.4",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: Implementation :: CPython",
- "Programming Language :: Python :: Implementation :: PyPy",
- "Programming Language :: Python :: Implementation :: Jython",
- "Topic :: Software Development :: Libraries :: Python Modules",
- "Topic :: Text Processing :: Markup"
- ],
- windows_wheels=True,
- read_the_docs='yaml',
- many_linux='libyaml-devel',
-)
-
-
-# < from ruamel.util.new import _convert_version
-def _convert_version(tup):
- """create a PEP 386 pseudo-format conformant string from tuple tup"""
- ret_val = str(tup[0]) # first is always digit
- next_sep = "." # separator for next extension, can be "" or "."
- for x in tup[1:]:
- if isinstance(x, int):
- ret_val += next_sep + str(x)
- next_sep = '.'
- continue
- first_letter = x[0].lower()
- next_sep = ''
- if first_letter in 'abcr':
- ret_val += 'rc' if first_letter == 'r' else first_letter
- elif first_letter in 'pd':
- ret_val += '.post' if first_letter == 'p' else '.dev'
- return ret_val
-
-
-# <
-version_info = _package_data['version_info']
-__version__ = _convert_version(version_info)
-
-del _convert_version
-
-try:
- from .cyaml import * # NOQA
- __with_libyaml__ = True
-except (ImportError, ValueError): # for Jython
- __with_libyaml__ = False
-
-
-# body extracted to main.py
-try:
- from .main import * # NOQA
-except ImportError:
- from ruamel.yaml.main import * # NOQA
diff --git a/lib/spack/external/ruamel/yaml/comments.py b/lib/spack/external/ruamel/yaml/comments.py
deleted file mode 100644
index a517072087..0000000000
--- a/lib/spack/external/ruamel/yaml/comments.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-"""
-stuff to deal with comments and formatting on dict/list/ordereddict/set
-these are not really related, formatting could be factored out as
-a separate base
-"""
-
-import sys
-
-if sys.version_info >= (3, 3):
- from collections.abc import MutableSet
-else:
- from collections import MutableSet
-
-__all__ = ["CommentedSeq", "CommentedMap", "CommentedOrderedMap",
- "CommentedSet", 'comment_attrib', 'merge_attrib']
-
-
-try:
- from .compat import ordereddict
-except ImportError:
- from ruamel.yaml.compat import ordereddict
-
-comment_attrib = '_yaml_comment'
-format_attrib = '_yaml_format'
-line_col_attrib = '_yaml_line_col'
-anchor_attrib = '_yaml_anchor'
-merge_attrib = '_yaml_merge'
-tag_attrib = '_yaml_tag'
-
-
-class Comment(object):
- # sys.getsize tested the Comment objects, __slots__ make them bigger
- # and adding self.end did not matter
- attrib = comment_attrib
-
- def __init__(self):
- self.comment = None # [post, [pre]]
- # map key (mapping/omap/dict) or index (sequence/list) to a list of
- # dict: post_key, pre_key, post_value, pre_value
- # list: pre item, post item
- self._items = {}
- # self._start = [] # should not put these on first item
- self._end = [] # end of document comments
-
- def __str__(self):
- if self._end:
- end = ',\n end=' + str(self._end)
- else:
- end = ''
- return "Comment(comment={0},\n items={1}{2})".format(
- self.comment, self._items, end)
-
- @property
- def items(self):
- return self._items
-
- @property
- def end(self):
- return self._end
-
- @end.setter
- def end(self, value):
- self._end = value
-
- @property
- def start(self):
- return self._start
-
- @start.setter
- def start(self, value):
- self._start = value
-
-
-# to distinguish key from None
-def NoComment():
- pass
-
-
-class Format(object):
- attrib = format_attrib
-
- def __init__(self):
- self._flow_style = None
-
- def set_flow_style(self):
- self._flow_style = True
-
- def set_block_style(self):
- self._flow_style = False
-
- def flow_style(self, default=None):
- """if default (the flow_style) is None, the flow style tacked on to
- the object explicitly will be taken. If that is None as well the
- default flow style rules the format down the line, or the type
- of the constituent values (simple -> flow, map/list -> block)"""
- if self._flow_style is None:
- return default
- return self._flow_style
-
-
-class LineCol(object):
- attrib = line_col_attrib
-
- def __init__(self):
- self.line = None
- self.col = None
- self.data = None
-
- def add_kv_line_col(self, key, data):
- if self.data is None:
- self.data = {}
- self.data[key] = data
-
- def key(self, k):
- return self._kv(k, 0, 1)
-
- def value(self, k):
- return self._kv(k, 2, 3)
-
- def _kv(self, k, x0, x1):
- if self.data is None:
- return None
- data = self.data[k]
- return data[x0], data[x1]
-
- def item(self, idx):
- if self.data is None:
- return None
- return self.data[idx][0], self.data[idx][1]
-
- def add_idx_line_col(self, key, data):
- if self.data is None:
- self.data = {}
- self.data[key] = data
-
-
-class Anchor(object):
- attrib = anchor_attrib
-
- def __init__(self):
- self.value = None
- self.always_dump = False
-
-
-class Tag(object):
- """store tag information for roundtripping"""
- attrib = tag_attrib
-
- def __init__(self):
- self.value = None
-
-
-class CommentedBase(object):
- @property
- def ca(self):
- if not hasattr(self, Comment.attrib):
- setattr(self, Comment.attrib, Comment())
- return getattr(self, Comment.attrib)
-
- def yaml_end_comment_extend(self, comment, clear=False):
- if clear:
- self.ca.end = []
- self.ca.end.extend(comment)
-
- def yaml_key_comment_extend(self, key, comment, clear=False):
- l = self.ca._items.setdefault(key, [None, None, None, None])
- if clear or l[1] is None:
- if comment[1] is not None:
- assert isinstance(comment[1], list)
- l[1] = comment[1]
- else:
- l[1].extend(comment[0])
- l[0] = comment[0]
-
- def yaml_value_comment_extend(self, key, comment, clear=False):
- l = self.ca._items.setdefault(key, [None, None, None, None])
- if clear or l[3] is None:
- if comment[1] is not None:
- assert isinstance(comment[1], list)
- l[3] = comment[1]
- else:
- l[3].extend(comment[0])
- l[2] = comment[0]
-
- def yaml_set_start_comment(self, comment, indent=0):
- """overwrites any preceding comment lines on an object
- expects comment to be without `#` and possible have mutlple lines
- """
- from .error import Mark
- from .tokens import CommentToken
- pre_comments = self._yaml_get_pre_comment()
- if comment[-1] == '\n':
- comment = comment[:-1] # strip final newline if there
- start_mark = Mark(None, None, None, indent, None, None)
- for com in comment.split('\n'):
- pre_comments.append(CommentToken('# ' + com + '\n', start_mark, None))
-
- @property
- def fa(self):
- """format attribute
-
- set_flow_style()/set_block_style()"""
- if not hasattr(self, Format.attrib):
- setattr(self, Format.attrib, Format())
- return getattr(self, Format.attrib)
-
- def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
- """
- there is a problem as eol comments should start with ' #'
- (but at the beginning of the line the space doesn't have to be before
- the #. The column index is for the # mark
- """
- from .tokens import CommentToken
- from .error import Mark
- if column is None:
- column = self._yaml_get_column(key)
- if comment[0] != '#':
- comment = '# ' + comment
- if column is None:
- if comment[0] == '#':
- comment = ' ' + comment
- column = 0
- start_mark = Mark(None, None, None, column, None, None)
- ct = [CommentToken(comment, start_mark, None), None]
- self._yaml_add_eol_comment(ct, key=key)
-
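A sketch of attaching an end-of-line comment at a fixed column:

    import ruamel.yaml

    doc = ruamel.yaml.round_trip_load("a: 1\nb: 2\n")
    doc.yaml_add_eol_comment("seconds", key='a', column=10)
    print(ruamel.yaml.round_trip_dump(doc))
    # a: 1      # seconds
    # b: 2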
- @property
- def lc(self):
- if not hasattr(self, LineCol.attrib):
- setattr(self, LineCol.attrib, LineCol())
- return getattr(self, LineCol.attrib)
-
- def _yaml_set_line_col(self, line, col):
- self.lc.line = line
- self.lc.col = col
-
- def _yaml_set_kv_line_col(self, key, data):
- self.lc.add_kv_line_col(key, data)
-
- def _yaml_set_idx_line_col(self, key, data):
- self.lc.add_idx_line_col(key, data)
-
- @property
- def anchor(self):
- if not hasattr(self, Anchor.attrib):
- setattr(self, Anchor.attrib, Anchor())
- return getattr(self, Anchor.attrib)
-
- def yaml_anchor(self):
- if not hasattr(self, Anchor.attrib):
- return None
- return self.anchor
-
- def yaml_set_anchor(self, value, always_dump=False):
- self.anchor.value = value
- self.anchor.always_dump = always_dump
-
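Anchors captured at load time survive a dump; always_dump forces the anchor to be emitted even when nothing aliases it. A sketch:

    import ruamel.yaml

    doc = ruamel.yaml.round_trip_load("base: &defaults {x: 1}\nref: *defaults\n")
    print(doc['base'].yaml_anchor().value)    # 'defaults'
    print(ruamel.yaml.round_trip_dump(doc))   # anchor and alias round-trip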
- @property
- def tag(self):
- if not hasattr(self, Tag.attrib):
- setattr(self, Tag.attrib, Tag())
- return getattr(self, Tag.attrib)
-
- def yaml_set_tag(self, value):
- self.tag.value = value
-
-
-class CommentedSeq(list, CommentedBase):
- __slots__ = [Comment.attrib, ]
-
- def _yaml_add_comment(self, comment, key=NoComment):
- if key is not NoComment:
- self.yaml_key_comment_extend(key, comment)
- else:
- self.ca.comment = comment
-
- def _yaml_add_eol_comment(self, comment, key):
- self._yaml_add_comment(comment, key=key)
-
- def _yaml_get_columnX(self, key):
- return self.ca.items[key][0].start_mark.column
-
- def insert(self, idx, val):
- """the comments after the insertion have to move forward"""
- list.insert(self, idx, val)
- for list_index in sorted(self.ca.items, reverse=True):
- if list_index < idx:
- break
- self.ca.items[list_index+1] = self.ca.items.pop(list_index)
-
- def pop(self, idx):
- res = list.pop(self, idx)
- self.ca.items.pop(idx, None) # might not be there -> default value
- for list_index in sorted(self.ca.items):
- if list_index < idx:
- continue
- self.ca.items[list_index-1] = self.ca.items.pop(list_index)
- return res
-
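Because comments are keyed by list index, insert and pop above reindex them so an end-of-line comment stays attached to its item. A sketch:

    import ruamel.yaml

    seq = ruamel.yaml.round_trip_load("- a  # first\n- b  # second\n")
    seq.insert(1, 'x')
    print(ruamel.yaml.round_trip_dump(seq))
    # - a  # first
    # - x
    # - b  # second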
- def _yaml_get_column(self, key):
- column = None
- sel_idx = None
- pre, post = key-1, key+1
- if pre in self.ca.items:
- sel_idx = pre
- elif post in self.ca.items:
- sel_idx = post
- else:
- # self.ca.items is not ordered
- for row_idx, k1 in enumerate(self):
- if row_idx >= key:
- break
- if row_idx not in self.ca.items:
- continue
- sel_idx = row_idx
- if sel_idx is not None:
- column = self._yaml_get_columnX(sel_idx)
- return column
-
- def _yaml_get_pre_comment(self):
- if self.ca.comment is None:
- pre_comments = []
- self.ca.comment = [None, pre_comments]
- else:
- pre_comments = self.ca.comment[1] = []
- return pre_comments
-
-
-class CommentedMap(ordereddict, CommentedBase):
- __slots__ = [Comment.attrib, ]
-
- def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
- """values is set to key to indicate a value attachment of comment"""
- if key is not NoComment:
- self.yaml_key_comment_extend(key, comment)
- return
- if value is not NoComment:
- self.yaml_value_comment_extend(value, comment)
- else:
- self.ca.comment = comment
-
- def _yaml_add_eol_comment(self, comment, key):
- """add on the value line, with value specified by the key"""
- self._yaml_add_comment(comment, value=key)
-
- def _yaml_get_columnX(self, key):
- return self.ca.items[key][2].start_mark.column
-
- def _yaml_get_column(self, key):
- column = None
- sel_idx = None
- pre, post, last = None, None, None
- for x in self:
- if pre is not None and x != key:
- post = x
- break
- if x == key:
- pre = last
- last = x
- if pre in self.ca.items:
- sel_idx = pre
- elif post in self.ca.items:
- sel_idx = post
- else:
- # self.ca.items is not ordered
- for row_idx, k1 in enumerate(self):
- if k1 >= key:
- break
- if k1 not in self.ca.items:
- continue
- sel_idx = k1
- if sel_idx is not None:
- column = self._yaml_get_columnX(sel_idx)
- return column
-
- def _yaml_get_pre_comment(self):
- if self.ca.comment is None:
- pre_comments = []
- self.ca.comment = [None, pre_comments]
- else:
- pre_comments = self.ca.comment[1] = []
- return pre_comments
-
- def update(self, *vals, **kwds):
- try:
- ordereddict.update(self, *vals, **kwds)
- except TypeError:
- # probably a plain dict passed as positional argument
- for x in vals[0]:
- self[x] = vals[0][x]
-
- def insert(self, pos, key, value, comment=None):
- """insert key value into given position
- attach comment if provided
- """
- ordereddict.insert(self, pos, key, value)
- if comment is not None:
- self.yaml_add_eol_comment(comment, key=key)
-
- def mlget(self, key, default=None, list_ok=False):
- """multi-level get that expects dicts within dicts"""
- if not isinstance(key, list):
- return self.get(key, default)
- # assume that the key is a list of recursively accessible dicts
-
- def get_one_level(key_list, level, d):
- if not list_ok:
- assert isinstance(d, dict)
- if level >= len(key_list):
- if level > len(key_list):
- raise IndexError
- return d[key_list[level-1]]
- return get_one_level(key_list, level+1, d[key_list[level-1]])
-
- try:
- return get_one_level(key, 1, self)
- except KeyError:
- return default
- except (TypeError, IndexError):
- if not list_ok:
- raise
- return default
-
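A sketch of the multi-level lookup:

    import ruamel.yaml

    doc = ruamel.yaml.round_trip_load("a:\n  b:\n    c: 3\n")
    print(doc.mlget(['a', 'b', 'c']))      # 3
    print(doc.mlget(['a', 'missing'], 0))  # 0, the default on KeyError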
- def __getitem__(self, key):
- try:
- return ordereddict.__getitem__(self, key)
- except KeyError:
- for merged in getattr(self, merge_attrib, []):
- if key in merged[1]:
- return merged[1][key]
- raise
-
- def get(self, key, default=None):
- try:
- return self.__getitem__(key)
- except KeyError:
- return default
-
- @property
- def merge(self):
- if not hasattr(self, merge_attrib):
- setattr(self, merge_attrib, [])
- return getattr(self, merge_attrib)
-
- def add_yaml_merge(self, value):
- self.merge.extend(value)
-
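Keys pulled in through a YAML merge key are kept in this separate merge list rather than copied into the mapping, and __getitem__ above falls back to them. A sketch:

    import ruamel.yaml

    src = "base: &b\n  x: 1\nchild:\n  <<: *b\n  y: 2\n"
    doc = ruamel.yaml.round_trip_load(src)
    print(doc['child']['x'])   # 1, resolved through the merge list
    print(doc['child']['y'])   # 2, stored directly on the mapping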
-
-class CommentedOrderedMap(CommentedMap):
- __slots__ = [Comment.attrib, ]
-
-
-class CommentedSet(MutableSet, CommentedMap):
- __slots__ = [Comment.attrib, 'odict']
-
- def __init__(self, values=None):
- self.odict = ordereddict()
- MutableSet.__init__(self)
- if values is not None:
- self |= values
-
- def add(self, value):
- """Add an element."""
- self.odict[value] = None
-
- def discard(self, value):
- """Remove an element. Do not raise an exception if absent."""
- if value in self.odict:
- del self.odict[value]
-
- def __contains__(self, x):
- return x in self.odict
-
- def __iter__(self):
- for x in self.odict:
- yield x
-
- def __len__(self):
- return len(self.odict)
-
- def __repr__(self):
- return 'set({0!r})'.format(self.odict.keys())
diff --git a/lib/spack/external/ruamel/yaml/compat.py b/lib/spack/external/ruamel/yaml/compat.py
deleted file mode 100644
index 28a981dc43..0000000000
--- a/lib/spack/external/ruamel/yaml/compat.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# coding: utf-8
-
-from __future__ import print_function
-
-# partially from package six by Benjamin Peterson
-
-import sys
-import os
-import types
-
-try:
- from ruamel.ordereddict import ordereddict
-except ImportError:
- try:
- from collections import OrderedDict
- except ImportError:
- from ordereddict import OrderedDict
- # `import ... as ordereddict` doesn't rename the class itself,
- # so subclass to get the right name
-
- class ordereddict(OrderedDict):
- if not hasattr(OrderedDict, 'insert'):
- def insert(self, pos, key, value):
- if pos >= len(self):
- self[key] = value
- return
- od = ordereddict()
- od.update(self)
- for k in od:
- del self[k]
- for index, old_key in enumerate(od):
- if pos == index:
- self[key] = value
- self[old_key] = od[old_key]
-
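The shim gives the pure-Python OrderedDict the positional insert that the ruamel.ordereddict C extension provides natively. A sketch of the semantics it emulates (assuming this compat module is importable):

    from ruamel.yaml.compat import ordereddict

    od = ordereddict()
    od['a'] = 1
    od['c'] = 3
    od.insert(1, 'b', 2)
    print(list(od))   # ['a', 'b', 'c']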
-
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- def utf8(s):
- return s
-
- def to_str(s):
- return s
-
- def to_unicode(s):
- return s
-
-else:
- def utf8(s):
- return s.encode('utf-8')
-
- def to_str(s):
- return str(s)
-
- def to_unicode(s):
- return unicode(s)
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
- unichr = chr
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
-
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- unichr = unichr # to allow importing
- import StringIO
- StringIO = StringIO.StringIO
- import cStringIO
- BytesIO = cStringIO.StringIO
-
-if PY3:
- builtins_module = 'builtins'
-else:
- builtins_module = '__builtin__'
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- return meta("NewBase", bases, {})
-
-DBG_TOKEN = 1
-DBG_EVENT = 2
-DBG_NODE = 4
-
-
-_debug = None
-
-
-# used from yaml util when testing
-def dbg(val=None):
- global _debug
- if _debug is None:
- # read once from the YAMLDEBUG environment variable
- _debug = os.environ.get('YAMLDEBUG')
- if _debug is None:
- _debug = 0
- else:
- _debug = int(_debug)
- if val is None:
- return _debug
- return _debug & val
-
-
-def nprint(*args, **kw):
- if dbg():
- print(*args, **kw)
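Debug output is gated by the YAMLDEBUG environment variable, read once on the first dbg() call; the value is a bitmask of the DBG_* flags above. A sketch (the variable must be set before the first call):

    import os
    os.environ['YAMLDEBUG'] = '2'   # DBG_EVENT
    from ruamel.yaml.compat import dbg, nprint, DBG_EVENT

    if dbg(DBG_EVENT):
        nprint('event tracing is on')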
diff --git a/lib/spack/external/ruamel/yaml/composer.py b/lib/spack/external/ruamel/yaml/composer.py
deleted file mode 100644
index fb0a55c759..0000000000
--- a/lib/spack/external/ruamel/yaml/composer.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-
-try:
- from .error import MarkedYAMLError
- from .compat import utf8
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import MarkedYAMLError
- from ruamel.yaml.compat import utf8
-
-from ruamel.yaml.events import (
- StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
- SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
-)
-from ruamel.yaml.nodes import (
- MappingNode, ScalarNode, SequenceNode,
-)
-
-__all__ = ['Composer', 'ComposerError']
-
-
-class ComposerError(MarkedYAMLError):
- pass
-
-
-class Composer(object):
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError(
- "expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- alias = event.anchor
- if alias not in self.anchors:
- raise ComposerError(
- None, None, "found undefined alias %r"
- % utf8(alias), event.start_mark)
- return self.anchors[alias]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None: # have an anchor
- if anchor in self.anchors:
- raise ComposerError(
- "found duplicate anchor %r; first occurence"
- % utf8(anchor), self.anchors[anchor].start_mark,
- "second occurence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style,
- comment=event.comment)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style,
- comment=start_event.comment, anchor=anchor)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- if node.flow_style is True and end_event.comment is not None:
- if node.comment is not None:
- print('Warning: unexpected end_event comment in sequence '
- 'node {0}'.format(node.flow_style))
- node.comment = end_event.comment
- node.end_mark = end_event.end_mark
- self.check_end_doc_comment(end_event, node)
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style,
- comment=start_event.comment, anchor=anchor)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- # key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- # if item_key in node.value:
- # raise ComposerError("while composing a mapping",
- # start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- # node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- if node.flow_style is True and end_event.comment is not None:
- node.comment = end_event.comment
- node.end_mark = end_event.end_mark
- self.check_end_doc_comment(end_event, node)
- return node
-
- def check_end_doc_comment(self, end_event, node):
- if end_event.comment and end_event.comment[1]:
- # pre comments on an end_event; there is no following node to move them to
- if node.comment is None:
- node.comment = [None, None]
- assert not isinstance(node, ScalarEvent)
- # this is a post comment on a mapping node, add as third element
- # in the list
- node.comment.append(end_event.comment[1])
- end_event.comment[1] = None
diff --git a/lib/spack/external/ruamel/yaml/configobjwalker.py b/lib/spack/external/ruamel/yaml/configobjwalker.py
deleted file mode 100644
index bab910cb11..0000000000
--- a/lib/spack/external/ruamel/yaml/configobjwalker.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# coding: utf-8
-
-import warnings
-from ruamel.yaml.util import configobj_walker as new_configobj_walker
-
-
-def configobj_walker(cfg):
- warnings.warn("configobj_walker has move to ruamel.yaml.util, please update your code")
- return new_configobj_walker(cfg)
diff --git a/lib/spack/external/ruamel/yaml/constructor.py b/lib/spack/external/ruamel/yaml/constructor.py
deleted file mode 100644
index 69ad0a74ac..0000000000
--- a/lib/spack/external/ruamel/yaml/constructor.py
+++ /dev/null
@@ -1,1172 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import datetime
-import base64
-import binascii
-import re
-import sys
-import types
-
-try:
- from .error import * # NOQA
- from .nodes import * # NOQA
- from .compat import utf8, builtins_module, to_str, PY2, PY3, ordereddict, text_type
- from .comments import * # NOQA
- from .scalarstring import * # NOQA
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import * # NOQA
- from ruamel.yaml.nodes import * # NOQA
- from ruamel.yaml.compat import (utf8, builtins_module, to_str, PY2, PY3,
- ordereddict, text_type)
- from ruamel.yaml.comments import * # NOQA
- from ruamel.yaml.scalarstring import * # NOQA
-
-
-if sys.version_info >= (3, 3):
- from collections.abc import Hashable
-else:
- from collections import Hashable
-
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError', 'RoundTripConstructor']
-
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self, preserve_quotes=None):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
- self._preserve_quotes = preserve_quotes
-
- def check_data(self):
- # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- """deep is True when creating an object/mapping recursively,
- in that case we want the underlying elements available during construction
- """
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(
- None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = next(generator)
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(
- None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- """deep is True when creating an object/mapping recursively,
- in that case we want the underlying elements available during construction
- """
- if not isinstance(node, SequenceNode):
- raise ConstructorError(
- None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- """deep is True when creating an object/mapping recursively,
- in that case we want the underlying elements available during construction
- """
- if not isinstance(node, MappingNode):
- raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- # keys can be list -> deep
- key = self.construct_object(key_node, deep=True)
- # lists are not hashable, but tuples are
- if not isinstance(key, Hashable):
- if isinstance(key, list):
- key = tuple(key)
- if PY2:
- try:
- hash(key)
- except TypeError as exc:
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
- else:
- if not isinstance(key, Hashable):
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
-
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- @classmethod
- def add_constructor(cls, tag, constructor):
- if 'yaml_constructors' not in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
-
- @classmethod
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if 'yaml_multi_constructors' not in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
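Constructors are registered per tag on the class, copy-on-write, so a subclass does not mutate its parent's table. A sketch registering a handler for a hypothetical !point tag (the tag and helper are illustrative, not part of the library):

    def construct_point(loader, node):
        # hypothetical example: build a dict from a two-element sequence
        x, y = loader.construct_sequence(node)
        return {'x': x, 'y': y}

    SafeConstructor.add_constructor(u'!point', construct_point)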
-
-class SafeConstructor(BaseConstructor):
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- """
- This implements the merge key feature http://yaml.org/type/merge.html
- by inserting keys from the merge dict/list of dicts if not yet
- available in this node
- """
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError(
- "while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, "
- "but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
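In practice the merge key lets one mapping inherit and override another. A sketch (assuming the old module-level safe_load):

    import ruamel.yaml

    src = (
        "defaults: &defaults\n"
        "  retries: 3\n"
        "  timeout: 10\n"
        "job:\n"
        "  <<: *defaults\n"
        "  timeout: 30\n"
    )
    data = ruamel.yaml.safe_load(src)
    print(data['job'])   # {'retries': 3, 'timeout': 30}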
- def construct_mapping(self, node, deep=False):
- """deep is True when creating an object/mapping recursively,
- in that case we want the underlying elements available during construction
- """
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = to_str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value.startswith('0o'):
- return sign*int(value[2:], 8)
- elif self.processing_version != (1, 2) and value[0] == '0':
- return sign*int(value, 8)
- elif self.processing_version != (1, 2) and ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
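The constructor accepts underscores, binary/octal/hex prefixes and, for YAML 1.1 streams, sexagesimal colon notation. A sketch, assuming safe_load accepts a version argument as the vendored main module did:

    import ruamel.yaml

    print(ruamel.yaml.safe_load('n: 0b1010')['n'])               # 10
    print(ruamel.yaml.safe_load('t: 1:30', version='1.1')['t'])  # 90, i.e. 1*60 + 30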
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = to_str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- if PY3:
- def construct_yaml_binary(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(
- None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
- else:
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return to_str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError) as exc:
- raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- u'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \\t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\\.(?P<fraction>[0-9]*))?
- (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node) # NOQA
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second,
- fraction)
- if delta:
- data -= delta
- return data
-
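A sketch using the canonical timestamp example from the YAML spec; the timezone offset is folded into a naive UTC datetime:

    import ruamel.yaml

    print(ruamel.yaml.safe_load('2001-12-14 21:59:43.10 -5'))
    # datetime.datetime(2001, 12, 15, 2, 59, 43, 100000)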
- def construct_yaml_omap(self, node):
- # Note: we now check for duplicate keys (enforced by the assert below)
- omap = ordereddict()
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- assert key not in omap
- value = self.construct_object(value_node)
- omap[key] = value
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- if PY3:
- return value
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(
- None, None,
- "could not determine a constructor for the tag %r" %
- utf8(node.tag),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(
- None, SafeConstructor.construct_undefined)
-
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return utf8(self.construct_scalar(node))
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- if PY3:
- def construct_python_bytes(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(
- None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- def construct_python_long(self, node):
- val = self.construct_yaml_int(node)
- if PY3:
- return val
- return int(val)
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError(
- "while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError as exc:
- raise ConstructorError(
- "while constructing a Python module", mark,
- "cannot find module %r (%s)" % (utf8(name), exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError(
- "while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = builtins_module
- object_name = name
- try:
- __import__(module_name)
- except ImportError as exc:
- raise ConstructorError(
- "while constructing a Python object", mark,
- "cannot find module %r (%s)" % (utf8(module_name), exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError(
- "while constructing a Python object", mark,
- "cannot find %r in the module %r" % (utf8(object_name),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError(
- "while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % utf8(value),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError(
- "while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % utf8(value),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- if PY2:
- class classobj:
- pass
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if PY3:
- if newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
- else:
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
- setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
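These python/* tags call arbitrary Python constructors, so they are only registered on the full (unsafe) Constructor, never on SafeConstructor. A sketch using the old module-level load API:

    import ruamel.yaml

    src = "!!python/object/apply:complex [1, 2]"
    print(ruamel.yaml.load(src, Loader=ruamel.yaml.Loader))   # (1+2j)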
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-if PY3:
- Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
-
-class RoundTripConstructor(SafeConstructor):
- """need to store the comments on the node itself,
- as well as on the items
- """
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(
- None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
-
- if node.style == '|' and isinstance(node.value, text_type):
- return PreservedScalarString(node.value)
- elif self._preserve_quotes and isinstance(node.value, text_type):
- if node.style == "'":
- return SingleQuotedScalarString(node.value)
- if node.style == '"':
- return DoubleQuotedScalarString(node.value)
- return node.value
-
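This is what makes quoting and block style survive a round trip. A sketch, assuming round_trip_load threads through the preserve_quotes flag this constructor takes:

    import ruamel.yaml

    doc = ruamel.yaml.round_trip_load("a: 'one'\nb: |\n  two\n",
                                      preserve_quotes=True)
    print(type(doc['a']).__name__)           # SingleQuotedScalarString
    print(ruamel.yaml.round_trip_dump(doc))  # quotes and block style kept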
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- if isinstance(value, ScalarString):
- return value
- if PY3:
- return value
- try:
- return value.encode('ascii')
- except AttributeError:
- # in case you replace the node dynamically e.g. with a dict
- return value
- except UnicodeEncodeError:
- return value
-
- def construct_sequence(self, node, seqtyp, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(
- None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- ret_val = []
- if node.comment:
- seqtyp._yaml_add_comment(node.comment[:2])
- if len(node.comment) > 2:
- seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
- if node.anchor:
- from ruamel.yaml.serializer import templated_id
- if not templated_id(node.anchor):
- seqtyp.yaml_set_anchor(node.anchor)
- for idx, child in enumerate(node.value):
- ret_val.append(self.construct_object(child, deep=deep))
- if child.comment:
- seqtyp._yaml_add_comment(child.comment, key=idx)
- seqtyp._yaml_set_idx_line_col(
- idx, [child.start_mark.line, child.start_mark.column])
- return ret_val
-
- def flatten_mapping(self, node):
- """
- This implements the merge key feature http://yaml.org/type/merge.html
- by inserting keys from the merge dict/list of dicts if not yet
- available in this node
- """
-
- def constructed(value_node):
- # If the contents of a merge are defined within the
- # merge marker, then they won't have been constructed
- # yet. But if they were already constructed, we need to use
- # the existing object.
- if value_node in self.constructed_objects:
- value = self.constructed_objects[value_node]
- else:
- value = self.construct_object(value_node, deep=False)
- return value
-
- # merge = []
- merge_map_list = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- merge_map_list.append(
- (index, constructed(value_node)))
- # self.flatten_mapping(value_node)
- # merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- # submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError(
- "while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- merge_map_list.append(
- (index, constructed(subnode)))
- # self.flatten_mapping(subnode)
- # submerge.append(subnode.value)
- # submerge.reverse()
- # for value in submerge:
- # merge.extend(value)
- else:
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, "
- "but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- # print ('merge_map_list', merge_map_list)
- return merge_map_list
- # if merge:
- # node.value = merge + node.value
-
- def construct_mapping(self, node, maptyp, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- merge_map = self.flatten_mapping(node)
- if merge_map:
- maptyp.add_yaml_merge(merge_map)
- # mapping = {}
- if node.comment:
- maptyp._yaml_add_comment(node.comment[:2])
- if len(node.comment) > 2:
- maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
- if node.anchor:
- from ruamel.yaml.serializer import templated_id
- if not templated_id(node.anchor):
- maptyp.yaml_set_anchor(node.anchor)
- for key_node, value_node in node.value:
- # keys can be list -> deep
- key = self.construct_object(key_node, deep=True)
- # lists are not hashable, but tuples are
- if not isinstance(key, Hashable):
- if isinstance(key, list):
- key = tuple(key)
- if PY2:
- try:
- hash(key)
- except TypeError as exc:
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
- else:
- if not isinstance(key, Hashable):
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- if key_node.comment:
- maptyp._yaml_add_comment(key_node.comment, key=key)
- if value_node.comment:
- maptyp._yaml_add_comment(value_node.comment, value=key)
- maptyp._yaml_set_kv_line_col(
- key, [key_node.start_mark.line, key_node.start_mark.column,
- value_node.start_mark.line, value_node.start_mark.column])
- maptyp[key] = value
-
- def construct_setting(self, node, typ, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- if node.comment:
- typ._yaml_add_comment(node.comment[:2])
- if len(node.comment) > 2:
- typ.yaml_end_comment_extend(node.comment[2], clear=True)
- if node.anchor:
- from ruamel.yaml.serializer import templated_id
- if not templated_id(node.anchor):
- typ.yaml_set_anchor(node.anchor)
- for key_node, value_node in node.value:
- # keys can be list -> deep
- key = self.construct_object(key_node, deep=True)
- # lists are not hashable, but tuples are
- if not isinstance(key, Hashable):
- if isinstance(key, list):
- key = tuple(key)
- if PY2:
- try:
- hash(key)
- except TypeError as exc:
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
- else:
- if not isinstance(key, Hashable):
- raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep) # NOQA
- if key_node.comment:
- typ._yaml_add_comment(key_node.comment, key=key)
- if value_node.comment:
- typ._yaml_add_comment(value_node.comment, value=key)
- typ.add(key)
-
- def construct_yaml_seq(self, node):
- data = CommentedSeq()
- data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
- if node.flow_style is True:
- data.fa.set_flow_style()
- elif node.flow_style is False:
- data.fa.set_block_style()
- if node.comment:
- data._yaml_add_comment(node.comment)
- yield data
- data.extend(self.construct_sequence(node, data))
-
- def construct_yaml_map(self, node):
- data = CommentedMap()
- data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
- if node.flow_style is True:
- data.fa.set_flow_style()
- elif node.flow_style is False:
- data.fa.set_block_style()
- yield data
- self.construct_mapping(node, data)
-
- def construct_yaml_omap(self, node):
- # Note: we now check for duplicate keys (enforced by the assert below)
- omap = CommentedOrderedMap()
- omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
- if node.flow_style is True:
- omap.fa.set_flow_style()
- elif node.flow_style is False:
- omap.fa.set_block_style()
- yield omap
- if node.comment:
- omap._yaml_add_comment(node.comment[:2])
- if len(node.comment) > 2:
- omap.yaml_end_comment_extend(node.comment[2], clear=True)
- if not isinstance(node, SequenceNode):
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- assert key not in omap
- value = self.construct_object(value_node)
- if key_node.comment:
- omap._yaml_add_comment(key_node.comment, key=key)
- if subnode.comment:
- omap._yaml_add_comment(subnode.comment, key=key)
- if value_node.comment:
- omap._yaml_add_comment(value_node.comment, value=key)
- omap[key] = value
-
- def construct_yaml_set(self, node):
- data = CommentedSet()
- data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
- yield data
- self.construct_setting(node, data)
-
- def construct_undefined(self, node):
- try:
- data = CommentedMap()
- data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
- if node.flow_style is True:
- data.fa.set_flow_style()
- elif node.flow_style is False:
- data.fa.set_block_style()
- data.yaml_set_tag(node.tag)
- yield data
- self.construct_mapping(node, data)
- except Exception:
- raise ConstructorError(
- None, None,
- "could not determine a constructor for the tag %r" %
- utf8(node.tag),
- node.start_mark)
-
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- RoundTripConstructor.construct_yaml_null)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- RoundTripConstructor.construct_yaml_bool)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- RoundTripConstructor.construct_yaml_int)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- RoundTripConstructor.construct_yaml_float)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- RoundTripConstructor.construct_yaml_binary)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- RoundTripConstructor.construct_yaml_timestamp)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- RoundTripConstructor.construct_yaml_omap)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- RoundTripConstructor.construct_yaml_pairs)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- RoundTripConstructor.construct_yaml_set)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- RoundTripConstructor.construct_yaml_str)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- RoundTripConstructor.construct_yaml_seq)
-
-RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- RoundTripConstructor.construct_yaml_map)
-
-RoundTripConstructor.add_constructor(
- None, RoundTripConstructor.construct_undefined)
diff --git a/lib/spack/external/ruamel/yaml/dumper.py b/lib/spack/external/ruamel/yaml/dumper.py
deleted file mode 100644
index 90e2ca9d1b..0000000000
--- a/lib/spack/external/ruamel/yaml/dumper.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
-
-try:
- from .emitter import * # NOQA
- from .serializer import * # NOQA
- from .representer import * # NOQA
- from .resolver import * # NOQA
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.emitter import * # NOQA
- from ruamel.yaml.serializer import * # NOQA
- from ruamel.yaml.representer import * # NOQA
- from ruamel.yaml.resolver import * # NOQA
-
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-
-class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align,
- prefix_colon=prefix_colon)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
- RoundTripRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- VersionedResolver.__init__(self)
diff --git a/lib/spack/external/ruamel/yaml/emitter.py b/lib/spack/external/ruamel/yaml/emitter.py
deleted file mode 100644
index b754bc04e1..0000000000
--- a/lib/spack/external/ruamel/yaml/emitter.py
+++ /dev/null
@@ -1,1282 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
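The emitter can be driven directly with a stream of events obeying this grammar. A minimal sketch under Python 3, assuming the vendored package layout:

    import io
    from ruamel.yaml.emitter import Emitter
    from ruamel.yaml.events import (
        StreamStartEvent, StreamEndEvent, DocumentStartEvent,
        DocumentEndEvent, ScalarEvent)

    out = io.StringIO()
    emitter = Emitter(out)
    for ev in (StreamStartEvent(), DocumentStartEvent(),
               ScalarEvent(None, None, (True, False), u'hello'),
               DocumentEndEvent(), StreamEndEvent()):
        emitter.emit(ev)
    print(out.getvalue())   # prints 'hello'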
-__all__ = ['Emitter', 'EmitterError']
-
-try:
- from .error import YAMLError
- from .events import * # NOQA
- from .compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import YAMLError
- from ruamel.yaml.events import * # NOQA
- from ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT
-
-
-class EmitterError(YAMLError):
- pass
-
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-
-class Emitter(object):
- DEFAULT_TAG_PREFIXES = {
- u'!': u'!',
- u'tag:yaml.org,2002:': u'!!',
- }
-
- MAX_SIMPLE_KEY_LENGTH = 128
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
- # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
- self.no_newline = None # set if directly after `- `
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # colon handling
- self.colon = u':'
- self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.block_seq_indent = block_seq_indent if block_seq_indent else 0
- self.top_level_colon_align = top_level_colon_align
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- # if self.best_indent < self.block_seq_indent + 1:
- # self.best_indent = self.block_seq_indent + 1
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- if dbg(DBG_EVENT):
- nprint(event)
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, sequence=None, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
- # if self.sequence_context and (self.block_seq_indent + 2) > self.best_indent:
- # self.indent = self.block_seq_indent + 2
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if PY2:
- if self.event.encoding \
- and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- else:
- if self.event.encoding \
- and not hasattr(self.stream, 'encoding'):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s" %
- self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = sorted(self.event.tags.keys())
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and
- not self.event.explicit and
- not self.canonical and
- not self.event.version and
- not self.event.tags and
- not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s" %
- self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s" %
- self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence # not used in PyYAML
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.event.comment:
- self.write_pre_comment(self.event)
- if self.event.flow_style is False and self.event.comment:
- self.write_post_comment(self.event)
- # print('seq event', self.event)
- if self.flow_level or self.canonical or self.event.flow_style or \
- self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.event.flow_style is False and self.event.comment:
- self.write_post_comment(self.event)
- if self.event.comment and self.event.comment[1]:
- self.write_pre_comment(self.event)
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True, sequence=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- if self.event.comment and self.event.comment[0]:
- # eol comment on flow sequence
- self.write_post_comment(self.event)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True, sequence=False)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- # if self.event.comment and self.event.comment[0]:
- # # eol comment on flow sequence
- # self.write_post_comment(self.event)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- # if self.event.comment and self.event.comment[1]:
- # self.write_pre_comment(self.event)
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- if self.event.comment and self.event.comment[0]:
- # eol comment on flow mapping
- self.write_post_comment(self.event)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(self.prefixed_colon, False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(self.prefixed_colon, True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, sequence=True, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- if self.event.comment and self.event.comment[1]:
- # final comments from a doc
- self.write_pre_comment(self.event)
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.event.comment and self.event.comment[1]:
- self.write_pre_comment(self.event)
- self.write_indent()
- self.write_indicator((u' ' * self.block_seq_indent) + u'-', True, indention=True)
- if self.block_seq_indent + 2 > self.best_indent:
- self.no_newline = True
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False, sequence=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- if self.event.comment and self.event.comment[1]:
- # final comments from a doc
- self.write_pre_comment(self.event)
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- if self.event.comment and self.event.comment[1]:
- # final comments from a doc
- self.write_pre_comment(self.event)
- self.write_indent()
- if self.check_simple_key():
- if self.event.style == '?':
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- if getattr(self.event, 'style', None) != '?':
- # prefix = u''
- if self.indent == 0 and self.top_level_colon_align is not None:
- # write non-prefixed colon
- c = u' ' * (self.top_level_colon_align - self.column) + self.colon
- else:
- c = self.prefixed_colon
- self.write_indicator(c, False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(self.prefixed_colon, True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events and
- isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events and
- isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None and
- event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < self.MAX_SIMPLE_KEY_LENGTH and (
- isinstance(self.event, AliasEvent) or
- (isinstance(self.event, ScalarEvent) and
- not self.analysis.empty and not self.analysis.multiline) or
- self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0]) or
- (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if (not self.event.style or self.event.style == '?') and \
- self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline)) and
- (self.flow_level and self.analysis.allow_flow_plain or
- (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context and
- self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- # if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.sequence_context and not self.flow_level:
- self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
- if self.event.comment:
- self.write_post_comment(self.event)
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" %
- (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (utf8(handle)))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or
- u'a' <= ch <= u'z' or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (utf8(ch), utf8(handle)))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = utf8(ch)
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = sorted(self.tag_prefixes.keys())
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = utf8(ch)
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or
- u'a' <= ch <= u'z' or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (utf8(ch), utf8(anchor)))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(
- scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear inside a scalar, either.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'#' and preceded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' or
- u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
- # unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (
- index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- if self.no_newline:
- self.no_newline = False
- else:
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E' or
- (self.allow_unicode and
- (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += text_type(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width \
- and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_comment(self, comment):
- value = comment.value
- # print('{:02d} {:02d} {}'.format(self.column, comment.start_mark.column, value))
- if value[-1] == '\n':
- value = value[:-1]
- try:
- # get original column position
- col = comment.start_mark.column
- if col < self.column + 1:
- raise ValueError
- except ValueError:
- col = self.column + 1
- # print('post_comment', self.line, self.column, value)
- try:
- # at least one space if the current column >= the start column of the comment
- # but not at the start of a line
- nr_spaces = col - self.column
- if self.column and value.strip() and nr_spaces < 1:
- nr_spaces = 1
- value = ' ' * nr_spaces + value
- try:
- if self.encoding:
- value = value.encode(self.encoding)
- except UnicodeDecodeError:
- pass
- self.stream.write(value)
- except TypeError:
- raise
- self.write_line_break()
-
- def write_pre_comment(self, event):
- comments = event.comment[1]
- if comments is None:
- return
- try:
- for comment in comments:
- if isinstance(event, MappingStartEvent) and \
- getattr(comment, 'pre_done', None):
- continue
- if self.column != 0:
- self.write_line_break()
- self.write_comment(comment)
- if isinstance(event, MappingStartEvent):
- comment.pre_done = True
- except TypeError:
- print('event', type(event), event)
- raise
-
- def write_post_comment(self, event):
- if self.event.comment[0] is None:
- return
- comment = event.comment[0]
- self.write_comment(comment)
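
The event grammar documented at the top of the deleted emitter.py can be exercised directly through emit() from the (also deleted) main.py. A hedged sketch, again assuming the old vendored package is importable:

    # Sketch: a hand-built event stream obeying
    #   stream ::= STREAM-START document* STREAM-END
    #   document ::= DOCUMENT-START node DOCUMENT-END
    import ruamel.yaml
    from ruamel.yaml.events import (
        StreamStartEvent, DocumentStartEvent, ScalarEvent,
        DocumentEndEvent, StreamEndEvent)

    events = [
        StreamStartEvent(),
        DocumentStartEvent(explicit=False),
        # implicit=(True, False): a plain-style scalar needs no explicit tag
        ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'hello'),
        DocumentEndEvent(explicit=False),
        StreamEndEvent(),
    ]
    print(ruamel.yaml.emit(events))  # a single plain-scalar document
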
diff --git a/lib/spack/external/ruamel/yaml/error.py b/lib/spack/external/ruamel/yaml/error.py
deleted file mode 100644
index 1ec77e60ec..0000000000
--- a/lib/spack/external/ruamel/yaml/error.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-try:
- from .compat import utf8
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.compat import utf8
-
-
-class Mark(object):
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while (start > 0 and
- self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029'):
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while (end < len(self.buffer) and
- self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = utf8(self.buffer[start:end])
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-
-class YAMLError(Exception):
- pass
-
-
-class MarkedYAMLError(YAMLError):
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None or
- self.context_mark.name != self.problem_mark.name or
- self.context_mark.line != self.problem_mark.line or
- self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
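
Mark.get_snippet() above is what produces the caret diagnostics in error messages, and MarkedYAMLError.__str__ stitches the context and problem marks together. For example (assuming the old vendored package):

    # Sketch: a malformed document surfaces as a MarkedYAMLError subclass;
    # str(err) includes the Mark snippet with the '^' pointer.
    import ruamel.yaml
    from ruamel.yaml.error import MarkedYAMLError

    try:
        ruamel.yaml.safe_load("a: [1, 2\n")  # unterminated flow sequence
    except MarkedYAMLError as err:
        print(err.problem_mark.line + 1, err.problem_mark.column + 1)
        print(err)
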
diff --git a/lib/spack/external/ruamel/yaml/events.py b/lib/spack/external/ruamel/yaml/events.py
deleted file mode 100644
index 7667c016be..0000000000
--- a/lib/spack/external/ruamel/yaml/events.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# coding: utf-8
-
-# Abstract classes.
-
-
-def CommentCheck():
- pass
-
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
- self.start_mark = start_mark
- self.end_mark = end_mark
- # assert comment is not CommentCheck
- if comment is CommentCheck:
- comment = None
- self.comment = comment
-
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value',
- 'flow_style', 'style']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- if self.comment not in [None, CommentCheck]:
- arguments += ', comment={!r}'.format(self.comment)
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
- Event.__init__(self, start_mark, end_mark, comment)
- self.anchor = anchor
-
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None, comment=None):
- Event.__init__(self, start_mark, end_mark, comment)
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.flow_style = flow_style
-
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None,
- comment=None):
- Event.__init__(self, start_mark, end_mark, comment)
- self.encoding = encoding
-
-
-class StreamEndEvent(Event):
- pass
-
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None, comment=None):
- Event.__init__(self, start_mark, end_mark, comment)
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, comment=None):
- Event.__init__(self, start_mark, end_mark, comment)
- self.explicit = explicit
-
-
-class AliasEvent(NodeEvent):
- pass
-
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None, comment=None):
- NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.style = style
-
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-
-class MappingEndEvent(CollectionEndEvent):
- pass
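
These event classes are what parse() in the deleted main.py yields, and their __repr__ prints only the attributes each class actually carries. A quick sketch:

    # Sketch: streaming the event objects defined above out of a document.
    import ruamel.yaml

    for event in ruamel.yaml.parse("- a\n- b\n"):
        print(event)
    # StreamStartEvent(), DocumentStartEvent(), SequenceStartEvent(...),
    # ScalarEvent(..., value='a'), ..., StreamEndEvent()
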
diff --git a/lib/spack/external/ruamel/yaml/loader.py b/lib/spack/external/ruamel/yaml/loader.py
deleted file mode 100644
index b5ba20a0a1..0000000000
--- a/lib/spack/external/ruamel/yaml/loader.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
-
-try:
- from .reader import * # NOQA
- from .scanner import * # NOQA
- from .parser import * # NOQA
- from .composer import * # NOQA
- from .constructor import * # NOQA
- from .resolver import * # NOQA
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.reader import * # NOQA
- from ruamel.yaml.scanner import * # NOQA
- from ruamel.yaml.parser import * # NOQA
- from ruamel.yaml.composer import * # NOQA
- from ruamel.yaml.constructor import * # NOQA
- from ruamel.yaml.resolver import * # NOQA
-
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
- def __init__(self, stream, version=None, preserve_quotes=None):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
- def __init__(self, stream, version=None, preserve_quotes=None):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
- def __init__(self, stream, version=None, preserve_quotes=None):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-
-class RoundTripLoader(Reader, RoundTripScanner, RoundTripParser, Composer,
- RoundTripConstructor, VersionedResolver):
- def __init__(self, stream, version=None, preserve_quotes=None):
- Reader.__init__(self, stream)
- RoundTripScanner.__init__(self)
- RoundTripParser.__init__(self)
- Composer.__init__(self)
- RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes)
- VersionedResolver.__init__(self, version)
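
Each loader above is the same Reader/Scanner/Parser/Composer pipeline with a different constructor/resolver pair mixed in; note that only RoundTripLoader actually forwards the version and preserve_quotes arguments. A sketch of the difference, under the same assumption about the old vendored package:

    # Sketch: SafeLoader normalizes scalars; RoundTripLoader can keep quoting.
    import ruamel.yaml

    plain = ruamel.yaml.load("a: 'x'", Loader=ruamel.yaml.SafeLoader)
    print(plain)  # {'a': 'x'} -- quotes normalized away

    rt = ruamel.yaml.load("a: 'x'", Loader=ruamel.yaml.RoundTripLoader,
                          preserve_quotes=True)
    print(ruamel.yaml.round_trip_dump(rt))  # a: 'x' -- quoting kept
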
diff --git a/lib/spack/external/ruamel/yaml/main.py b/lib/spack/external/ruamel/yaml/main.py
deleted file mode 100644
index 797bdcde65..0000000000
--- a/lib/spack/external/ruamel/yaml/main.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-
-from ruamel.yaml.error import * # NOQA
-
-from ruamel.yaml.tokens import * # NOQA
-from ruamel.yaml.events import * # NOQA
-from ruamel.yaml.nodes import * # NOQA
-
-from ruamel.yaml.loader import * # NOQA
-from ruamel.yaml.dumper import * # NOQA
-from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3
-
-# import io
-
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-
-def load(stream, Loader=Loader, version=None, preserve_quotes=None):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream, version, preserve_quotes=preserve_quotes)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-
-def load_all(stream, Loader=Loader, version=None):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream, version)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-
-def safe_load(stream, version=None):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader, version)
-
-
-def safe_load_all(stream, version=None):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader, version)
-
-
-def round_trip_load(stream, version=None, preserve_quotes=None):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
-
-
-def round_trip_load_all(stream, version=None, preserve_quotes=None):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
-
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-enc = None if PY3 else 'utf-8'
-
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = StringIO()
- else:
- stream = BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if top_level_colon_align is True:
- top_level_colon_align = max([len(str(x)) for x in documents[0]])
- if stream is None:
- if encoding is None:
- stream = StringIO()
- else:
- stream = BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end, version=version,
- tags=tags, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon,
- )
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-
-def dump(data, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
-
- default_style ∈ None, '', '"', "'", '|', '>'
-
- """
- return dump_all([data], stream, Dumper=Dumper,
- default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode,
- line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags, block_seq_indent=block_seq_indent)
-
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-
-def round_trip_dump(data, stream=None, Dumper=RoundTripDumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- allow_unicode = True if allow_unicode is None else allow_unicode
- return dump_all([data], stream, Dumper=Dumper,
- default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode,
- line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon)
-
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-
-class YAMLObject(with_metaclass(YAMLObjectMetaclass)):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- @classmethod
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
-
- @classmethod
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
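
The formatting keywords in the helpers above (block_seq_indent, top_level_colon_align, prefix_colon) are threaded straight down into the Emitter shown earlier. A sketch of their effect, still assuming the old vendored package:

    # Sketch: Emitter formatting knobs exposed through round_trip_dump().
    import ruamel.yaml

    data = ruamel.yaml.round_trip_load("x:\n- 1\n- 2\n")
    print(ruamel.yaml.round_trip_dump(data, block_seq_indent=2))
    # x:
    #   - 1
    #   - 2
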
diff --git a/lib/spack/external/ruamel/yaml/nodes.py b/lib/spack/external/ruamel/yaml/nodes.py
deleted file mode 100644
index 26c6d77ae6..0000000000
--- a/lib/spack/external/ruamel/yaml/nodes.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# coding: utf-8
-
-from __future__ import print_function
-
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark, comment=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.comment = comment
- self.anchor = None
-
- def __repr__(self):
- value = self.value
- # if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- # else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__,
- self.tag, value)
-
- def dump(self, indent=0):
- if isinstance(self.value, basestring):
- print('{0}{1}(tag={2!r}, value={3!r})'.format(
- ' ' * indent, self.__class__.__name__, self.tag, self.value))
- if self.comment:
- print(' {0}comment: {1}'.format(
- ' ' * indent, self.comment))
- return
- print('{0}{1}(tag={2!r})'.format(
- ' ' * indent, self.__class__.__name__, self.tag))
- if self.comment:
- print(' {0}comment: {1}'.format(
- ' ' * indent, self.comment))
- for v in self.value:
- if isinstance(v, tuple):
- for v1 in v:
- v1.dump(indent+1)
- elif isinstance(v, Node):
- v.dump(indent+1)
- else:
- print('Node value type?', type(v))
-
-
-class ScalarNode(Node):
- """
- styles:
- ? -> set() ? key, no value
- " -> double quoted
- ' -> single quoted
- | -> literal style
- > ->
- """
- id = 'scalar'
-
- def __init__(self, tag, value, start_mark=None, end_mark=None, style=None,
- comment=None):
- Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
- self.style = style
-
-
-class CollectionNode(Node):
- def __init__(self, tag, value, start_mark=None, end_mark=None,
- flow_style=None, comment=None, anchor=None):
- Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
- self.flow_style = flow_style
- self.anchor = anchor
-
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-
-class MappingNode(CollectionNode):
- id = 'mapping'
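
These node classes are plain records: the composer produces them from the event stream and the serializer consumes them again. A small sketch building the tree for `{a: [1, 2]}` by hand (marks are left at their None defaults, since they only matter for error reporting):

    from ruamel.yaml.nodes import ScalarNode, SequenceNode, MappingNode

    items = SequenceNode(u'tag:yaml.org,2002:seq',
                         [ScalarNode(u'tag:yaml.org,2002:int', u'1'),
                          ScalarNode(u'tag:yaml.org,2002:int', u'2')])
    root = MappingNode(u'tag:yaml.org,2002:map',
                       [(ScalarNode(u'tag:yaml.org,2002:str', u'a'), items)])
    root.dump()  # prints an indented outline of the node tree
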
diff --git a/lib/spack/external/ruamel/yaml/parser.py b/lib/spack/external/ruamel/yaml/parser.py
deleted file mode 100644
index 543cca9b43..0000000000
--- a/lib/spack/external/ruamel/yaml/parser.py
+++ /dev/null
@@ -1,675 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document*
-# STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content |
-# indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
-# BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
-# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
-# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
-# FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
-# FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
-# FLOW-MAPPING-START KEY }
-
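
Each production above corresponds to a run of events on the parser's output. A quick way to see the mapping is the package's top-level `parse()` helper (event reprs abbreviated here):

    import ruamel.yaml as yaml

    for event in yaml.parse(u'a: [b, c]\n'):
        print(event)
    # StreamStartEvent(), DocumentStartEvent()     -> stream, implicit_document
    # MappingStartEvent()                          -> block_mapping
    #   ScalarEvent(u'a')                          -> KEY
    #   SequenceStartEvent() .. SequenceEndEvent() -> flow_sequence as VALUE
    # MappingEndEvent(), DocumentEndEvent(), StreamEndEvent()
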
-__all__ = ['Parser', 'RoundTripParser', 'ParserError']
-
-# The full module path is needed: pkg_resources tries to load parser.py
-# from __init__.py without using the package afterwards, and Jython
-# needs the absolute import as well.
-from ruamel.yaml.error import MarkedYAMLError # NOQA
-from ruamel.yaml.tokens import * # NOQA
-from ruamel.yaml.events import * # NOQA
-from ruamel.yaml.scanner import * # NOQA
-from ruamel.yaml.compat import utf8 # NOQA
-
-
-class ParserError(MarkedYAMLError):
- pass
-
-
-class Parser(object):
- # Since writing a recursive descent parser is a straightforward task, we
- # do not add many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document*
- # STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- token.move_comment(self.peek_token())
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(
- start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(
- DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(
- None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(
- None, None,
- "found incompatible YAML document (version 1.* is "
- "required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % utf8(handle),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def transform_tag(self, handle, suffix):
- return self.tag_handles[handle] + suffix
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError(
- "while parsing a node", start_mark,
- "found undefined tag handle %r" % utf8(handle),
- tag_mark)
- tag = self.transform_tag(handle, suffix)
- else:
- tag = suffix
- # if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
- # and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(
- anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style,
- comment=token.comment
- )
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(
- anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(
- anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- # should inserting the comment be dependent on the
- # indentation?
- pt = self.peek_token()
- comment = pt.comment
- # print('pt0', type(pt))
- if comment is None or comment[1] is None:
- comment = pt.split_comment()
- # print('pt1', comment)
- event = SequenceStartEvent(
- anchor, tag, implicit, start_mark, end_mark,
- flow_style=False,
- comment=comment,
- )
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- comment = self.peek_token().comment
- event = MappingStartEvent(
- anchor, tag, implicit, start_mark, end_mark,
- flow_style=False, comment=comment)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError(
- "while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
- # BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- # move any comment from start token
- # token.move_comment(self.peek_token())
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- token.move_comment(self.peek_token())
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError(
- "while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" %
- token.id, token.start_mark)
- token = self.get_token() # BlockEndToken
- event = SequenceEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- # indentless_sequence?
- # sequence:
- # - entry
- # - nested
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- token.move_comment(self.peek_token())
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark,
- comment=token.comment)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- token.move_comment(self.peek_token())
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError(
- "while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id,
- token.start_mark)
- token = self.get_token()
- token.move_comment(self.peek_token())
- event = MappingEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- # the value token may carry a trailing comment; move it on (e.g. to the block)
- token.move_comment(self.peek_token())
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while the production rules for flow_sequence_entry and
- # flow_mapping_entry are identical, their interpretations differ.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
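
Concretely, a KEY/VALUE pair inside a flow sequence loads as a single-pair mapping entry rather than as two sequence items:

    import ruamel.yaml as yaml

    print(yaml.safe_load(u'[a, b: c]'))  # -> ['a', {'b': 'c'}]
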
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError(
- "while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id,
- token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError(
- "while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id,
- token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
-
-class RoundTripParser(Parser):
- """roundtrip is a safe loader, that wants to see the unmangled tag"""
- def transform_tag(self, handle, suffix):
- # return self.tag_handles[handle]+suffix
- if handle == '!!' and suffix in (u'null', u'bool', u'int', u'float', u'binary',
- u'timestamp', u'omap', u'pairs', u'set', u'str',
- u'seq', u'map'):
- return Parser.transform_tag(self, handle, suffix)
- return handle+suffix
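
The difference is easy to see in isolation: standard `!!` suffixes still resolve to the full yaml.org tag, while anything else keeps its handle so the emitter can write it back verbatim. A sketch driving the method directly (the `Point` suffix is hypothetical):

    from ruamel.yaml.parser import Parser, RoundTripParser

    p = RoundTripParser()                    # parser state only; no stream needed
    p.tag_handles = dict(Parser.DEFAULT_TAGS)
    print(p.transform_tag(u'!!', u'str'))    # -> tag:yaml.org,2002:str
    print(p.transform_tag(u'!!', u'Point'))  # -> !!Point (kept unmangled)
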
diff --git a/lib/spack/external/ruamel/yaml/reader.py b/lib/spack/external/ruamel/yaml/reader.py
deleted file mode 100644
index 376c6de8c6..0000000000
--- a/lib/spack/external/ruamel/yaml/reader.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
- # reader.forward(length=1) - move the current position `length`
- # characters forward.
- # reader.index - the index of the current character.
- # reader.line, reader.column - the line and the column of the current
- # character.
-
-import codecs
-import re
-
-try:
- from .error import YAMLError, Mark
- from .compat import text_type, binary_type, PY3
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import YAMLError, Mark
- from ruamel.yaml.compat import text_type, binary_type, PY3
-
-__all__ = ['Reader', 'ReaderError']
-
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, binary_type):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to a unicode string,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object (PY2) / a `bytes` object (PY3),
- # - a `unicode` object (PY2) / a `str` object (PY3),
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, text_type):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, binary_type):
- self.name = "<byte string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = None
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and (self.raw_buffer is None or
- len(self.raw_buffer) < 2):
- self.update_raw()
- if isinstance(self.raw_buffer, binary_type):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
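
The decoder choice can be observed by handing the reader a byte string with a BOM; UTF-8 is assumed whenever no UTF-16 BOM is present:

    import codecs
    from ruamel.yaml.reader import Reader

    r = Reader(codecs.BOM_UTF16_LE + u'a: 1'.encode('utf-16-le'))
    print(r.encoding)   # -> 'utf-16-le'
    r.forward()         # step over the BOM, which stays in the buffer
    print(r.prefix(4))  # -> u'a: 1'
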
- NON_PRINTABLE = re.compile(
- u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
-
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError as exc:
- if PY3:
- character = self.raw_buffer[exc.start]
- else:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer - \
- len(self.raw_buffer) + exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=None):
- if size is None:
- size = 4096 if PY3 else 1024
- data = self.stream.read(size)
- if self.raw_buffer is None:
- self.raw_buffer = data
- else:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- if not data:
- self.eof = True
-
-# try:
-# import psyco
-# psyco.bind(Reader)
-# except ImportError:
-# pass
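
A short sketch of the reader's bookkeeping: `peek` never advances, `forward` maintains index/line/column, and `get_mark` snapshots the position for error messages:

    from ruamel.yaml.reader import Reader

    r = Reader(u'a: 1\nb: 2\n')
    print(r.peek())          # -> u'a' (position unchanged)
    r.forward(5)             # consume 'a: 1\n'
    print(r.line, r.column)  # -> 1 0
    print(r.get_mark())      # a Mark pointing at line 2, column 1
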
diff --git a/lib/spack/external/ruamel/yaml/representer.py b/lib/spack/external/ruamel/yaml/representer.py
deleted file mode 100644
index b4625bfae9..0000000000
--- a/lib/spack/external/ruamel/yaml/representer.py
+++ /dev/null
@@ -1,888 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-try:
- from .error import * # NOQA
- from .nodes import * # NOQA
- from .compat import text_type, binary_type, to_unicode, PY2, PY3, ordereddict
- from .scalarstring import * # NOQA
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import * # NOQA
- from ruamel.yaml.nodes import * # NOQA
- from ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3, ordereddict
- from ruamel.yaml.scalarstring import * # NOQA
-
-
-import datetime
-import sys
-import types
-if PY3:
- import copyreg
- import base64
-else:
- import copy_reg as copyreg
-
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError', 'RoundTripRepresenter']
-
-
-class RepresenterError(YAMLError):
- pass
-
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- if PY2:
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- # if node is None:
- # raise RepresenterError(
- # "recursive objects are not allowed: %r" % data)
- return node
- # self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if PY2:
- # if type(data) is types.InstanceType:
- if isinstance(data, types.InstanceType):
- data_types = self.get_classobj_bases(data.__class__) + \
- list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, text_type(data))
- # if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def represent_key(self, data):
- """
- David Fraser: Extract a method to represent keys in mappings, so that
- a subclass can choose not to quote them (for example).
- Used in represent_mapping.
- https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
- """
- return self.represent_data(data)
-
- @classmethod
- def add_representer(cls, data_type, representer):
- if 'yaml_representers' not in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
-
- @classmethod
- def add_multi_representer(cls, data_type, representer):
- if 'yaml_multi_representers' not in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
-
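
The `cls.__dict__` check makes registration copy-on-write per class: the first `add_representer` call on a subclass copies the inherited table, so the base class is never mutated. A sketch (the `Widget` type is hypothetical):

    from ruamel.yaml.representer import SafeRepresenter

    class MyRepresenter(SafeRepresenter):
        pass

    class Widget(object):
        pass

    MyRepresenter.add_representer(
        Widget, lambda rep, data: rep.represent_scalar(u'!widget', u'...'))

    print(Widget in MyRepresenter.yaml_representers)    # -> True
    print(Widget in SafeRepresenter.yaml_representers)  # -> False
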
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_omap(self, tag, omap, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item_key in omap:
- item_val = omap[item_key]
- node_item = self.represent_data({item_key: item_val})
- # if not (isinstance(node_item, ScalarNode) \
- # and not node_item.style):
- # best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = list(mapping.items())
- try:
- mapping = sorted(mapping)
- except TypeError:
- pass
- for item_key, item_value in mapping:
- node_key = self.represent_key(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
- # "i.e. two occurrences of the empty tuple may or may not yield the same object"
- # so "data is ()" should not be used
- if data is None or data == ():
- return True
- if isinstance(data, (binary_type, text_type, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- if PY3:
- def represent_str(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_binary(self, data):
- if hasattr(base64, 'encodebytes'):
- data = base64.encodebytes(data).decode('ascii')
- else:
- data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar(u'tag:yaml.org,2002:binary', data,
- style='|')
- else:
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
-
- if PY2:
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int',
- text_type(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = to_unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
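
The `.0e` insertion matters because some reprs drop the decimal part, which the `!!float` resolver would not recognize. The same transformation in isolation:

    value = repr(1e17).lower()  # e.g. '1e+17': no decimal point
    if u'.' not in value and u'e' in value:
        value = value.replace(u'e', u'.0e', 1)
    print(value)                # -> '1.0e+17', a valid YAML !!float
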
- def represent_list(self, data):
- # pairs = (len(data) > 0 and isinstance(data, list))
- # if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- # if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- # value = []
- # for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_ordereddict(self, data):
- return self.represent_omap(u'tag:yaml.org,2002:omap', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = to_unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = to_unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-if PY2:
- SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-else:
- SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-if PY2:
- SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(ordereddict,
- SafeRepresenter.represent_ordereddict)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-
-class Representer(SafeRepresenter):
- if PY2:
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, to_unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:' +
- name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- if PY2:
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed
- # by calling cls(**args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never
- # be called and the class instance is created by instantiating a
- # trivial class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary,
- # we produce a !!python/object node. Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:' +
- class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
- # We use the __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # To reconstruct, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copyreg.dispatch_table:
- reduce = copyreg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-if PY2:
- Representer.add_representer(str,
- Representer.represent_str)
-
- Representer.add_representer(unicode,
- Representer.represent_unicode)
-
- Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-if PY2:
- Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-if PY2:
- Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
-
-try:
- from .comments import CommentedMap, CommentedOrderedMap, CommentedSeq, \
- CommentedSet, comment_attrib, merge_attrib
-except ImportError: # for Jython
- from ruamel.yaml.comments import CommentedMap, CommentedOrderedMap, \
- CommentedSeq, CommentedSet, comment_attrib, merge_attrib
-
-
-class RoundTripRepresenter(SafeRepresenter):
- # need to add type here and write out the .comment
- # in serializer and emitter
-
- def __init__(self, default_style=None, default_flow_style=None):
- if default_flow_style is None:
- default_flow_style = False
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'')
-
- def represent_preserved_scalarstring(self, data):
- tag = None
- style = '|'
- if PY2 and not isinstance(data, unicode):
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_single_quoted_scalarstring(self, data):
- tag = None
- style = "'"
- if PY2 and not isinstance(data, unicode):
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_double_quoted_scalarstring(self, data):
- tag = None
- style = '"'
- if PY2 and not isinstance(data, unicode):
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- # If flow_style is None, the flow style attached to the object itself
- # is used; if that is also None, the default flow style applies.
- try:
- flow_style = sequence.fa.flow_style(flow_style)
- except AttributeError:
- pass
- try:
- anchor = sequence.yaml_anchor()
- except AttributeError:
- anchor = None
- node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- try:
- comment = getattr(sequence, comment_attrib)
- item_comments = comment.items
- node.comment = comment.comment
- try:
- node.comment.append(comment.end)
- except AttributeError:
- pass
- except AttributeError:
- item_comments = {}
- for idx, item in enumerate(sequence):
- node_item = self.represent_data(item)
- node_item.comment = item_comments.get(idx)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- try:
- flow_style = mapping.fa.flow_style(flow_style)
- except AttributeError:
- pass
- try:
- anchor = mapping.yaml_anchor()
- except AttributeError:
- anchor = None
- node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- # no sorting! !!
- try:
- comment = getattr(mapping, comment_attrib)
- node.comment = comment.comment
- if node.comment and node.comment[1]:
- for ct in node.comment[1]:
- ct.reset()
- item_comments = comment.items
- for v in item_comments.values():
- if v and v[1]:
- for ct in v[1]:
- ct.reset()
- try:
- node.comment.append(comment.end)
- except AttributeError:
- pass
- except AttributeError:
- item_comments = {}
- for item_key, item_value in mapping.items():
- node_key = self.represent_key(item_key)
- node_value = self.represent_data(item_value)
- item_comment = item_comments.get(item_key)
- if item_comment:
- assert getattr(node_key, 'comment', None) is None
- node_key.comment = item_comment[:2]
- nvc = getattr(node_value, 'comment', None)
- if nvc is not None: # end comment already there
- nvc[0] = item_comment[2]
- nvc[1] = item_comment[3]
- else:
- node_value.comment = item_comment[2:]
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
- if merge_list:
- # because of the call to represent_data here, the anchors
- # are marked as being used and thereby created
- if len(merge_list) == 1:
- arg = self.represent_data(merge_list[0])
- else:
- arg = self.represent_data(merge_list)
- arg.flow_style = True
- value.insert(0,
- (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
- return node
-
- def represent_omap(self, tag, omap, flow_style=None):
- value = []
- try:
- flow_style = omap.fa.flow_style(flow_style)
- except AttributeError:
- pass
- try:
- anchor = omap.yaml_anchor()
- except AttributeError:
- anchor = None
- node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- try:
- comment = getattr(omap, comment_attrib)
- node.comment = comment.comment
- if node.comment and node.comment[1]:
- for ct in node.comment[1]:
- ct.reset()
- item_comments = comment.items
- for v in item_comments.values():
- if v and v[1]:
- for ct in v[1]:
- ct.reset()
- try:
- node.comment.append(comment.end)
- except AttributeError:
- pass
- except AttributeError:
- item_comments = {}
- for item_key in omap:
- item_val = omap[item_key]
- node_item = self.represent_data({item_key: item_val})
- # node item has two scalars in value: node_key and node_value
- item_comment = item_comments.get(item_key)
- if item_comment:
- if item_comment[1]:
- node_item.comment = [None, item_comment[1]]
- assert getattr(node_item.value[0][0], 'comment', None) is None
- node_item.value[0][0].comment = [item_comment[0], None]
- nvc = getattr(node_item.value[0][1], 'comment', None)
- if nvc is not None: # end comment already there
- nvc[0] = item_comment[2]
- nvc[1] = item_comment[3]
- else:
- node_item.value[0][1].comment = item_comment[2:]
- # if not (isinstance(node_item, ScalarNode) \
- # and not node_item.style):
- # best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_set(self, setting):
- flow_style = False
- tag = u'tag:yaml.org,2002:set'
- # return self.represent_mapping(tag, value)
- value = []
- flow_style = setting.fa.flow_style(flow_style)
- try:
- anchor = setting.yaml_anchor()
- except AttributeError:
- anchor = None
- node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- # no sorting! !!
- try:
- comment = getattr(setting, comment_attrib)
- node.comment = comment.comment
- if node.comment and node.comment[1]:
- for ct in node.comment[1]:
- ct.reset()
- item_comments = comment.items
- for v in item_comments.values():
- if v and v[1]:
- for ct in v[1]:
- ct.reset()
- try:
- node.comment.append(comment.end)
- except AttributeError:
- pass
- except AttributeError:
- item_comments = {}
- for item_key in setting.odict:
- node_key = self.represent_key(item_key)
- node_value = self.represent_data(None)
- item_comment = item_comments.get(item_key)
- if item_comment:
- assert getattr(node_key, 'comment', None) is None
- node_key.comment = item_comment[:2]
- node_key.style = node_value.style = "?"
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
- best_style = False
- value.append((node_key, node_value))
- return node
-
- def represent_dict(self, data):
- """write out tag if saved on loading"""
- try:
- t = data.tag.value
- except AttributeError:
- t = None
- if t:
- while t and t[0] == '!':
- t = t[1:]
- tag = 'tag:yaml.org,2002:' + t
- else:
- tag = u'tag:yaml.org,2002:map'
- return self.represent_mapping(tag, data)
-
-
-RoundTripRepresenter.add_representer(type(None),
- RoundTripRepresenter.represent_none)
-
-RoundTripRepresenter.add_representer(
- PreservedScalarString,
- RoundTripRepresenter.represent_preserved_scalarstring)
-
-RoundTripRepresenter.add_representer(
- SingleQuotedScalarString,
- RoundTripRepresenter.represent_single_quoted_scalarstring)
-
-RoundTripRepresenter.add_representer(
- DoubleQuotedScalarString,
- RoundTripRepresenter.represent_double_quoted_scalarstring)
-
-RoundTripRepresenter.add_representer(CommentedSeq,
- RoundTripRepresenter.represent_list)
-
-RoundTripRepresenter.add_representer(CommentedMap,
- RoundTripRepresenter.represent_dict)
-
-RoundTripRepresenter.add_representer(CommentedOrderedMap,
- RoundTripRepresenter.represent_ordereddict)
-
-if sys.version_info >= (2, 7):
- import collections
- RoundTripRepresenter.add_representer(collections.OrderedDict,
- RoundTripRepresenter.represent_ordereddict)
-
-RoundTripRepresenter.add_representer(CommentedSet,
- RoundTripRepresenter.represent_set)
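
Taken together, these registrations are what make round-tripping style-preserving. A sketch dumping a scalar-string subclass through the round-trip dumper:

    import ruamel.yaml as yaml
    from ruamel.yaml.scalarstring import SingleQuotedScalarString

    data = {'name': SingleQuotedScalarString(u'spack')}
    print(yaml.dump(data, Dumper=yaml.RoundTripDumper))
    # name: 'spack'
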
diff --git a/lib/spack/external/ruamel/yaml/scalarstring.py b/lib/spack/external/ruamel/yaml/scalarstring.py
deleted file mode 100644
index d3abaff4db..0000000000
--- a/lib/spack/external/ruamel/yaml/scalarstring.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-__all__ = ["ScalarString", "PreservedScalarString", "SingleQuotedScalarString",
- "DoubleQuotedScalarString"]
-
-try:
- from .compat import text_type
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.compat import text_type
-
-
-class ScalarString(text_type):
- def __new__(cls, *args, **kw):
- return text_type.__new__(cls, *args, **kw)
-
-
-class PreservedScalarString(ScalarString):
- def __new__(cls, value):
- return ScalarString.__new__(cls, value)
-
-
-class SingleQuotedScalarString(ScalarString):
- def __new__(cls, value):
- return ScalarString.__new__(cls, value)
-
-
-class DoubleQuotedScalarString(ScalarString):
- def __new__(cls, value):
- return ScalarString.__new__(cls, value)
-
-
-def preserve_literal(s):
- return PreservedScalarString(s.replace('\r\n', '\n').replace('\r', '\n'))
-
-
-def walk_tree(base):
- """
- the routine here walks over a simple yaml tree (recursing in
- dict values and list items) and converts strings that
- have multiple lines to literal scalars
- """
- from ruamel.yaml.compat import string_types
-
- if isinstance(base, dict):
- for k in base:
- v = base[k]
- if isinstance(v, string_types) and '\n' in v:
- base[k] = preserve_literal(v)
- else:
- walk_tree(v)
- elif isinstance(base, list):
- for idx, elem in enumerate(base):
-            if isinstance(elem, string_types) and '\n' in elem:
-                base[idx] = preserve_literal(elem)
- else:
- walk_tree(elem)
diff --git a/lib/spack/external/ruamel/yaml/scanner.py b/lib/spack/external/ruamel/yaml/scanner.py
deleted file mode 100644
index 61feb34043..0000000000
--- a/lib/spack/external/ruamel/yaml/scanner.py
+++ /dev/null
@@ -1,1661 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# RoundTripScanner
-# COMMENT(value)
-#
-# Read comments in the Scanner code for more details.
-#
-
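-# As an illustration, a document such as
-#     key: [a, b]
-# is scanned into roughly this token stream:
-#     STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR(key), VALUE,
-#     FLOW-SEQUENCE-START, SCALAR(a), FLOW-ENTRY, SCALAR(b),
-#     FLOW-SEQUENCE-END, BLOCK-END, STREAM-END
-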
-__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
-
-try:
- from .error import MarkedYAMLError
- from .tokens import * # NOQA
- from .compat import utf8, unichr, PY3
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import MarkedYAMLError
- from ruamel.yaml.tokens import * # NOQA
- from ruamel.yaml.compat import utf8, unichr, PY3
-
-
-class ScannerError(MarkedYAMLError):
- pass
-
-
-class SimpleKey(object):
- # See below simple keys treatment.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
-        # Reader does the dirty work of checking for BOM and converting
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer
-
-        # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
-        # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
-        # Eat whitespace and comments until we reach the next token.
- comment = self.scan_to_next_token()
-
- if comment is not None: # never happens for base scanner
- return self.fetch_comment(comment)
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- # if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % utf8(ch), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in list(self.possible_simple_keys):
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError(
- "while scanning a simple key", key.mark,
- "could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
-        # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(
- token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError(
- "while scanning a simple key", key.mark,
- "could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- # In flow context, tokens should respect indentation.
- # Actually the condition should be `self.indent >= column` according to
- # the spec. But this condition will prohibit intuitively correct
- # constructions such as
- # key : {
- # }
- # ####
- # if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
-        #     "invalid indentation or unclosed '[' or '{'",
- # self.get_mark())
-
-        # In the flow context, indentation is ignored. We make the scanner
-        # less restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
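-
-    # Illustrative sketch of the two methods above: while scanning
-    #     a:
-    #       b: 1
-    # add_indent() grows the indent stack -1 -> 0 -> 2; at stream end,
-    # unwind_indent(-1) pops both levels and emits one BLOCK-END token
-    # per popped level.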
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
- def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there could not be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(
- key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
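-
-    # Rough walk-through of fetch_value() for the input 'a: 1': the scanner
-    # first emits SCALAR(a) while remembering it as a possible simple key;
-    # when ':' arrives, KEY and BLOCK-MAPPING-START are inserted *before*
-    # that scalar via the saved token_number, yielding
-    #     BLOCK-MAPPING-START, KEY, SCALAR(a), VALUE, SCALAR(1), ...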
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' or \
- (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' and
- (ch == u'-' or (not self.flow_level and ch in u'?:')))
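-
-    # A few illustrative cases for check_plain(): '-foo' starts a plain
-    # scalar ('-' followed by a non-space), while '- foo' is a block entry;
-    # '?x' and ':x' start plain scalars in block context only, since the
-    # second rule above is limited to '-' in the flow context.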
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_:.':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % utf8(self.peek()),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % utf8(self.peek()),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit, but found %r" % utf8(ch),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % utf8(ch), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
-        # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError(
- "while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError(
- "while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError(
- "while parsing a tag", start_mark,
- "expected '>', but found %r" % utf8(self.peek()),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- # if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- # else:
- # chunks.append(line_break)
- else:
- break
-
- # Process trailing line breaks. The 'chomping' setting determines
- # whether they are included in the value.
- comment = []
- if chomping in [None, True]:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
- elif chomping in [None, False]:
- comment.extend(breaks)
-
- # We are done.
- token = ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
- if len(comment) > 0:
- # Keep track of the trailing whitespace as a comment token, if
-            # it isn't all included in the actual value.
- comment_end_mark = self.get_mark()
- comment = CommentToken(''.join(comment), end_mark,
- comment_end_mark)
- token.add_post_comment(comment)
- return token
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, "
- "but found 0", self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, "
- "but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % utf8(ch), self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % utf8(ch), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
-        # Note that we relax indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark their beginning and end. Therefore we are less
-        # restrictive than the specification requires. We only need to check
-        # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'/': u'/', # as per http://www.json.org/
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
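-
-    # For example, in a double-quoted scalar '\x41' decodes to u'A'
-    # (two hex digits, per ESCAPE_CODES[u'x']), '\u2028' to the LINE
-    # SEPARATOR character, and '\n' to a newline via
-    # ESCAPE_REPLACEMENTS[u'n'].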
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError(
- "while scanning a double-quoted scalar",
- start_mark,
-                                "expected escape sequence of %d hexadecimal "
-                                "digits, but found %r" %
- (length, utf8(self.peek(k))), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(unichr(code))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(
- double, start_mark))
- else:
- raise ScannerError(
- "while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % utf8(ch),
- self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError(
- "while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar",
- start_mark,
- "found unexpected document separator",
- self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are relaxed for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- # if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in u',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':' and
- self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError(
- "while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check "
- "http://pyyaml.org/wiki/YAMLColonInFlowContext "
- "for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
-
- token = ScalarToken(u''.join(chunks), True, start_mark, end_mark)
- if spaces and spaces[0] == '\n':
- # Create a comment token to preserve the trailing line breaks.
- comment = CommentToken(''.join(spaces) + '\n', start_mark, end_mark)
- token.add_post_comment(comment)
- return token
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % utf8(ch),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' \
- or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % utf8(ch),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % utf8(ch),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- code_bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError(
- "while scanning a %s" % name, start_mark,
-                        "expected URI escape sequence of 2 hexadecimal digits,"
- " but found %r"
- % utf8(self.peek(k)), self.get_mark())
- if PY3:
- code_bytes.append(int(self.prefix(2), 16))
- else:
- code_bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- if PY3:
- value = bytes(code_bytes).decode('utf-8')
- else:
- value = unicode(''.join(code_bytes), 'utf-8')
- except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark,
- str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
-        #   '\u2029'    :   '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
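-
-    # For example, given the input '\r\nrest' this consumes both break
-    # characters and returns u'\n'; given '\u2028rest' it consumes one
-    # character and returns u'\u2028' unchanged.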
-
-
-class RoundTripScanner(Scanner):
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- self._gather_comments()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- self._gather_comments()
- if self.tokens:
- return self.tokens[0]
-
- def _gather_comments(self):
- """combine multiple comment lines"""
- comments = []
- if not self.tokens:
- return comments
- if isinstance(self.tokens[0], CommentToken):
- comment = self.tokens.pop(0)
- self.tokens_taken += 1
- # print('################ dropping', comment)
- comments.append(comment)
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if not self.tokens:
- return comments
- if isinstance(self.tokens[0], CommentToken):
- self.tokens_taken += 1
- comment = self.tokens.pop(0)
- # print 'dropping2', comment
- comments.append(comment)
- if len(comments) >= 1:
- # print(' len', len(comments), comments)
- # print(' com', comments[0], comments[0].start_mark.line)
- # print(' tok', self.tokens[0].end_mark.line)
- self.tokens[0].add_pre_comments(comments)
- # pull in post comment on e.g. ':'
- if not self.done and len(self.tokens) < 2:
- self.fetch_more_tokens()
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- self._gather_comments()
- if self.tokens:
-            # Only add a post comment to single-line tokens (scalar, value,
-            # and flow sequence/mapping end tokens); otherwise hidden stream
-            # tokens could pick it up (left alone, such comments become pre
-            # comments for the next map/seq).
- if len(self.tokens) > 1 and \
- isinstance(self.tokens[0], (
- ScalarToken,
- ValueToken,
- FlowSequenceEndToken,
- FlowMappingEndToken,
- )) and \
- isinstance(self.tokens[1], CommentToken) and \
- self.tokens[0].end_mark.line == self.tokens[1].start_mark.line:
- self.tokens_taken += 1
- self.tokens[0].add_post_comment(self.tokens.pop(1))
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- def fetch_comment(self, comment): # XXXX
- value, start_mark, end_mark = comment
- self.tokens.append(CommentToken(value, start_mark, end_mark))
-
- # scanner
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- ch = self.peek()
- if ch == u'#':
- start_mark = self.get_mark()
- comment = ch
- self.forward()
- while ch not in u'\0\r\n\x85\u2028\u2029':
- ch = self.peek()
- if ch == u'\0': # don't gobble the end-of-stream character
- break
- comment += ch
- self.forward()
- # gather any blank lines following the comment too
- ch = self.scan_line_break()
- while len(ch) > 0:
- comment += ch
- ch = self.scan_line_break()
- end_mark = self.get_mark()
- if not self.flow_level:
- self.allow_simple_key = True
- return comment, start_mark, end_mark
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
-# try:
-# import psyco
-# psyco.bind(Scanner)
-# except ImportError:
-# pass
diff --git a/lib/spack/external/ruamel/yaml/serializer.py b/lib/spack/external/ruamel/yaml/serializer.py
deleted file mode 100644
index 60dd5170ca..0000000000
--- a/lib/spack/external/ruamel/yaml/serializer.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# coding: utf-8
-
-from __future__ import absolute_import
-
-import re
-
-try:
- from .error import YAMLError
- from .compat import nprint, DBG_NODE, dbg, string_types
-except (ImportError, ValueError): # for Jython
- from ruamel.yaml.error import YAMLError
- from ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types
-
-from ruamel.yaml.events import (
- StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
- SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
- DocumentStartEvent, DocumentEndEvent,
-)
-from ruamel.yaml.nodes import (
- MappingNode, ScalarNode, SequenceNode,
-)
-
-__all__ = ['Serializer', 'SerializerError']
-
-
-class SerializerError(YAMLError):
- pass
-
-
-class Serializer(object):
-
-    # 'id' followed by 3 or more digits, but not 'id000'
- ANCHOR_TEMPLATE = u'id%03d'
- ANCHOR_RE = re.compile(u'id(?!000$)\\d{3,}')
-
- def __init__(self, encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- if isinstance(version, string_types):
- self.use_version = tuple(map(int, version.split('.')))
- else:
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
- self._templated_id = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- # def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if dbg(DBG_NODE):
- nprint('Serializing nodes')
- node.dump()
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version,
- tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- anchor = None
- try:
- if node.anchor.always_dump:
- anchor = node.anchor.value
-            except AttributeError:  # no usable anchor on this node
- pass
- self.anchors[node] = anchor
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- try:
- anchor = node.anchor.value
-        except AttributeError:  # node has no anchor attribute
- anchor = None
- if anchor is None:
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
- return anchor
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
-                # check whether node.tag equals the tag that would result
-                # from parsing the value; if they differ, quoting is
-                # necessary for strings
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style, comment=node.comment))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag == self.resolve(SequenceNode, node.value, True))
- comment = node.comment
- # print('comment >>>>>>>>>>>>>.', comment, node.flow_style)
- end_comment = None
- seq_comment = None
- if node.flow_style is True:
- if comment: # eol comment on flow style sequence
- seq_comment = comment[0]
- # comment[0] = None
- if comment and len(comment) > 2:
- end_comment = comment[2]
- else:
- end_comment = None
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style,
- comment=node.comment))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
- elif isinstance(node, MappingNode):
- implicit = (node.tag == self.resolve(MappingNode, node.value, True))
- comment = node.comment
- end_comment = None
- map_comment = None
- if node.flow_style is True:
-                if comment: # eol comment on flow style mapping
- map_comment = comment[0]
- # comment[0] = None
- if comment and len(comment) > 2:
- end_comment = comment[2]
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style,
- comment=node.comment))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent(comment=[map_comment, end_comment]))
- self.ascend_resolver()
-
-
-def templated_id(s):
- return Serializer.ANCHOR_RE.match(s)
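-
-# For example, templated_id(u'id001') matches while templated_id(u'id000')
-# and templated_id(u'foo') do not, letting callers tell auto-generated
-# anchors apart from hand-written ones.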
diff --git a/lib/spack/external/ruamel/yaml/setup.cfg b/lib/spack/external/ruamel/yaml/setup.cfg
deleted file mode 100644
index 861a9f5542..0000000000
--- a/lib/spack/external/ruamel/yaml/setup.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/lib/spack/external/ruamel/yaml/tokens.py b/lib/spack/external/ruamel/yaml/tokens.py
deleted file mode 100644
index bd97785b82..0000000000
--- a/lib/spack/external/ruamel/yaml/tokens.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# coding: utf-8
-
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
-
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
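-
-    # For instance (Python 3 repr shown), ScalarToken(u'abc', True, None,
-    # None) reprs as "ScalarToken(plain=True, style=None, value='abc')";
-    # the *_mark attributes are deliberately omitted to keep debug output
-    # readable.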
-
- def add_post_comment(self, comment):
- if not hasattr(self, '_comment'):
- self._comment = [None, None]
- self._comment[0] = comment
-
- def add_pre_comments(self, comments):
- if not hasattr(self, '_comment'):
- self._comment = [None, None]
- assert self._comment[1] is None
- self._comment[1] = comments
-
- def get_comment(self):
- return getattr(self, '_comment', None)
-
- @property
- def comment(self):
- return getattr(self, '_comment', None)
-
- def move_comment(self, target):
- """move a comment from this token to target (normally next token)
- used to combine e.g. comments before a BlockEntryToken to the
- ScalarToken that follows it
- """
- c = self.comment
- if c is None:
- return
- # don't push beyond last element
- if isinstance(target, StreamEndToken):
- return
- delattr(self, '_comment')
- tc = target.comment
-        if not tc: # no target comment yet, just insert
- target._comment = c
- return self
- if c[0] and tc[0] or c[1] and tc[1]:
-            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
- if c[0]:
- tc[0] = c[0]
- if c[1]:
- tc[1] = c[1]
- return self
-
- def split_comment(self):
-        """ split off the post part of a comment and return it as a
-        comment to be added; delete the attribute if [None, None] remains
- abc: # this goes to sequence
- # this goes to first element
- - first element
- """
- comment = self.comment
- if comment is None or comment[0] is None:
- return None # nothing to do
- ret_val = [comment[0], None]
- if comment[1] is None:
- delattr(self, '_comment')
- return ret_val
-
-
-# class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
-
- def __init__(self, name, value, start_mark, end_mark):
- Token.__init__(self, start_mark, end_mark)
- self.name = name
- self.value = value
-
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-
-class StreamStartToken(Token):
- id = '<stream start>'
-
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- Token.__init__(self, start_mark, end_mark)
- self.encoding = encoding
-
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-
-class KeyToken(Token):
- id = '?'
-
-
-class ValueToken(Token):
- id = ':'
-
-
-class BlockEntryToken(Token):
- id = '-'
-
-
-class FlowEntryToken(Token):
- id = ','
-
-
-class AliasToken(Token):
- id = '<alias>'
-
- def __init__(self, value, start_mark, end_mark):
- Token.__init__(self, start_mark, end_mark)
- self.value = value
-
-
-class AnchorToken(Token):
- id = '<anchor>'
-
- def __init__(self, value, start_mark, end_mark):
- Token.__init__(self, start_mark, end_mark)
- self.value = value
-
-
-class TagToken(Token):
- id = '<tag>'
-
- def __init__(self, value, start_mark, end_mark):
- Token.__init__(self, start_mark, end_mark)
- self.value = value
-
-
-class ScalarToken(Token):
- id = '<scalar>'
-
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- Token.__init__(self, start_mark, end_mark)
- self.value = value
- self.plain = plain
- self.style = style
-
-
-class CommentToken(Token):
- id = '<comment>'
-
- def __init__(self, value, start_mark, end_mark):
- Token.__init__(self, start_mark, end_mark)
- self.value = value
-
- def reset(self):
- if hasattr(self, 'pre_done'):
- delattr(self, 'pre_done')
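
For context, the comment plumbing on these deleted token classes worked roughly as below. This is a minimal sketch, not Spack code; it assumes the pre-0.17 vendored module above is importable as ruamel.yaml.tokens, and the marks are stubbed with None.

    # Hypothetical usage of the old Token comment helpers shown above.
    from ruamel.yaml.tokens import BlockEntryToken, ScalarToken

    dash = BlockEntryToken(None, None)          # marks stubbed out
    dash.add_post_comment('# goes to the scalar')
    scalar = ScalarToken('first element', True, None, None)

    # move_comment() pushes a comment onto the following token, e.g. from a
    # BlockEntryToken ('-') to the ScalarToken after it.
    dash.move_comment(scalar)
    assert scalar.comment[0] == '# goes to the scalar'
    assert dash.comment is None
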
diff --git a/lib/spack/external/ruamel/yaml/util.py b/lib/spack/external/ruamel/yaml/util.py
deleted file mode 100644
index afc46fb12a..0000000000
--- a/lib/spack/external/ruamel/yaml/util.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# coding: utf-8
-
-"""
-some helper functions that might be generally useful
-"""
-
-from __future__ import print_function
-from __future__ import absolute_import
-
-from .compat import text_type, binary_type
-from .main import round_trip_load
-
-
-# originally as comment
-# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
-# if you use this in your code, I suggest adding a test in your test suite
-# that check this routines output against a known piece of your YAML
-# before upgrades to this code break your round-tripped YAML
-def load_yaml_guess_indent(stream, **kw):
- """guess the indent and block sequence indent of yaml stream/string
-
- returns round_trip_loaded stream, indent level, block sequence indent
- - block sequence indent is the number of spaces before a dash relative to previous indent
- - if there are no block sequences, indent is taken from nested mappings, block sequence
- indent is unset (None) in that case
- """
- # load a yaml file guess the indentation, if you use TABs ...
- def leading_spaces(l):
- idx = 0
- while idx < len(l) and l[idx] == ' ':
- idx += 1
- return idx
-
- if isinstance(stream, text_type):
- yaml_str = stream
- elif isinstance(stream, binary_type):
- yaml_str = stream.decode('utf-8') # most likely, but the Reader checks BOM for this
- else:
- yaml_str = stream.read()
- map_indent = None
- indent = None # default if not found for some reason
- block_seq_indent = None
- prev_line_key_only = None
- key_indent = 0
- for line in yaml_str.splitlines():
- rline = line.rstrip()
- lline = rline.lstrip()
- if lline.startswith('- '):
- l_s = leading_spaces(line)
- block_seq_indent = l_s - key_indent
- idx = l_s + 1
- while line[idx] == ' ': # this will end as we rstripped
- idx += 1
- if line[idx] == '#': # comment after -
- continue
- indent = idx - key_indent
- break
- if map_indent is None and prev_line_key_only is not None and rline:
- idx = 0
- while line[idx] in ' -':
- idx += 1
- if idx > prev_line_key_only:
- map_indent = idx - prev_line_key_only
- if rline.endswith(':'):
- key_indent = leading_spaces(line)
- idx = 0
- while line[idx] == ' ': # this will end on ':'
- idx += 1
- prev_line_key_only = idx
- continue
- prev_line_key_only = None
- if indent is None and map_indent is not None:
- indent = map_indent
- return round_trip_load(yaml_str, **kw), indent, block_seq_indent
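
A typical call of the helper above looks like the following minimal sketch. The YAML string and the resulting indent values are illustrative; the function also ships, in newer form, with upstream ruamel.yaml as ruamel.yaml.util.load_yaml_guess_indent.

    # Hypothetical round trip with the removed helper.
    from ruamel.yaml.util import load_yaml_guess_indent

    config, indent, block_seq_indent = load_yaml_guess_indent(
        "spack:\n    specs:\n      - zlib\n"
    )
    # For this input: indent == 4, block_seq_indent == 2.
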
-
-
-def configobj_walker(cfg):
- """
- walks over a ConfigObj (INI file with comments) generating
-    corresponding YAML output (including comments)
- """
- from configobj import ConfigObj
- assert isinstance(cfg, ConfigObj)
- for c in cfg.initial_comment:
- if c.strip():
- yield c
- for s in _walk_section(cfg):
- if s.strip():
- yield s
- for c in cfg.final_comment:
- if c.strip():
- yield c
-
-
-def _walk_section(s, level=0):
- from configobj import Section
- assert isinstance(s, Section)
- indent = u' ' * level
- for name in s.scalars:
- for c in s.comments[name]:
- yield indent + c.strip()
- x = s[name]
- if u'\n' in x:
- i = indent + u' '
- x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i)
- elif ':' in x:
- x = u"'" + x.replace(u"'", u"''") + u"'"
- line = u'{0}{1}: {2}'.format(indent, name, x)
- c = s.inline_comments[name]
- if c:
- line += u' ' + c
- yield line
- for name in s.sections:
- for c in s.comments[name]:
- yield indent + c.strip()
- line = u'{0}{1}:'.format(indent, name)
- c = s.inline_comments[name]
- if c:
- line += u' ' + c
- yield line
- for val in _walk_section(s[name], level=level+1):
- yield val
-
-# def config_obj_2_rt_yaml(cfg):
-# from .comments import CommentedMap, CommentedSeq
-# from configobj import ConfigObj
-# assert isinstance(cfg, ConfigObj)
-# #for c in cfg.initial_comment:
-# # if c.strip():
-# # pass
-# cm = CommentedMap()
-# for name in s.sections:
-# cm[name] = d = CommentedMap()
-#
-#
-# #for c in cfg.final_comment:
-# # if c.strip():
-# # yield c
-# return cm
diff --git a/lib/spack/external/vendor.txt b/lib/spack/external/vendor.txt
index 3080ef110e..e198ede9f3 100644
--- a/lib/spack/external/vendor.txt
+++ b/lib/spack/external/vendor.txt
@@ -7,3 +7,4 @@ jinja2==3.0.3
six==1.16.0
macholib==1.16.2
altgraph==0.17.3
+ruamel.yaml==0.17.21
diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py
index f486238037..1b89e8bf44 100644
--- a/lib/spack/spack/binary_distribution.py
+++ b/lib/spack/spack/binary_distribution.py
@@ -27,8 +27,6 @@ from gzip import GzipFile
from typing import List, NamedTuple, Optional, Union
from urllib.error import HTTPError, URLError
-import ruamel.yaml as yaml
-
import llnl.util.filesystem as fsys
import llnl.util.lang
import llnl.util.tty as tty
@@ -616,7 +614,7 @@ def read_buildinfo_file(prefix):
filename = buildinfo_file_name(prefix)
with open(filename, "r") as inputfile:
content = inputfile.read()
- buildinfo = yaml.load(content)
+ buildinfo = syaml.load(content)
return buildinfo
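
The switch to syaml.load means call sites like this one no longer import ruamel directly. A minimal sketch of the replacement call; the buildinfo keys below are illustrative only:

    # Sketch: spack's YAML wrapper instead of a direct ruamel import.
    import spack.util.spack_yaml as syaml

    buildinfo = syaml.load("buildpath: /tmp/stage\nrelative_prefix: opt/spack\n")
    assert buildinfo["buildpath"] == "/tmp/stage"
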
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index f0dbc4d43e..2dae43d41b 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -13,9 +13,6 @@ import sys
from textwrap import dedent
from typing import List, Match, Tuple
-import ruamel.yaml as yaml
-from ruamel.yaml.error import MarkedYAMLError
-
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
from llnl.util.lang import attr_setdefault, index_by
@@ -33,6 +30,7 @@ import spack.store
import spack.traverse as traverse
import spack.user_environment as uenv
import spack.util.spack_json as sjson
+import spack.util.spack_yaml as syaml
import spack.util.string
# cmd has a submodule called "list" so preserve the python list module
@@ -537,9 +535,9 @@ def is_git_repo(path):
# we might be in a git worktree
try:
with open(dotgit_path, "rb") as f:
- dotgit_content = yaml.load(f)
+ dotgit_content = syaml.load(f)
return os.path.isdir(dotgit_content.get("gitdir", dotgit_path))
- except MarkedYAMLError:
+ except syaml.SpackYAMLError:
pass
return False
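
In a git worktree, .git is a file containing a single "gitdir: <path>" line, which happens to be valid YAML; that is why syaml.load works here. A sketch with a hypothetical path:

    # Sketch: a worktree's .git file parses as a one-key YAML mapping.
    import spack.util.spack_yaml as syaml

    dotgit_content = syaml.load("gitdir: /repos/spack/.git/worktrees/wt1\n")
    assert dotgit_content["gitdir"] == "/repos/spack/.git/worktrees/wt1"
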
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index 101fca2ff7..2153f45589 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -38,10 +38,6 @@ import sys
from contextlib import contextmanager
from typing import Dict, List, Optional, Union
-import ruamel.yaml as yaml
-from ruamel.yaml.comments import Comment
-from ruamel.yaml.error import MarkedYAMLError
-
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, rename
@@ -163,8 +159,8 @@ class ConfigScope(object):
mkdirp(self.path)
with open(filename, "w") as f:
syaml.dump_config(data, stream=f, default_flow_style=False)
- except (yaml.YAMLError, IOError) as e:
- raise ConfigFileError("Error writing to config file: '%s'" % str(e))
+ except (syaml.SpackYAMLError, IOError) as e:
+ raise ConfigFileError(f"cannot write to '{filename}'") from e
def clear(self):
"""Empty cached config information."""
@@ -293,8 +289,8 @@ class SingleFileScope(ConfigScope):
syaml.dump_config(data_to_write, stream=f, default_flow_style=False)
rename(tmp, self.path)
- except (yaml.YAMLError, IOError) as e:
- raise ConfigFileError("Error writing to config file: '%s'" % str(e))
+ except (syaml.SpackYAMLError, IOError) as e:
+ raise ConfigFileError(f"cannot write to config file {str(e)}") from e
def __repr__(self):
return "<SingleFileScope: %s: %s>" % (self.name, self.path)
@@ -546,12 +542,12 @@ class Configuration(object):
# manually preserve comments
need_comment_copy = section in scope.sections and scope.sections[section]
if need_comment_copy:
- comments = getattr(scope.sections[section][section], Comment.attrib, None)
+ comments = syaml.extract_comments(scope.sections[section][section])
# read only the requested section's data.
scope.sections[section] = syaml.syaml_dict({section: update_data})
if need_comment_copy and comments:
- setattr(scope.sections[section][section], Comment.attrib, comments)
+ syaml.set_comments(scope.sections[section][section], data_comments=comments)
scope._write_section(section)
@@ -704,8 +700,8 @@ class Configuration(object):
data = syaml.syaml_dict()
data[section] = self.get_config(section)
syaml.dump_config(data, stream=sys.stdout, default_flow_style=False, blame=blame)
- except (yaml.YAMLError, IOError):
- raise ConfigError("Error reading configuration: %s" % section)
+ except (syaml.SpackYAMLError, IOError) as e:
+ raise ConfigError(f"cannot read '{section}' configuration") from e
@contextmanager
@@ -959,19 +955,9 @@ def validate(data, schema, filename=None):
"""
import jsonschema
- # validate a copy to avoid adding defaults
+ # Validate a copy to avoid adding defaults
# This allows us to round-trip data without adding to it.
- test_data = copy.deepcopy(data)
-
- if isinstance(test_data, yaml.comments.CommentedMap):
- # HACK to fully copy ruamel CommentedMap that doesn't provide copy
- # method. Especially necessary for environments
- setattr(
- test_data,
- yaml.comments.Comment.attrib,
- getattr(data, yaml.comments.Comment.attrib, yaml.comments.Comment()),
- )
-
+ test_data = syaml.deepcopy(data)
try:
spack.schema.Validator(schema).validate(test_data)
except jsonschema.ValidationError as e:
@@ -1019,21 +1005,13 @@ def read_config_file(filename, schema=None):
return data
except StopIteration:
- raise ConfigFileError("Config file is empty or is not a valid YAML dict: %s" % filename)
+ raise ConfigFileError(f"Config file is empty or is not a valid YAML dict: {filename}")
- except MarkedYAMLError as e:
- msg = "Error parsing yaml"
- mark = e.context_mark if e.context_mark else e.problem_mark
- if mark:
- line, column = mark.line, mark.column
- msg += ": near %s, %s, %s" % (mark.name, str(line), str(column))
- else:
- msg += ": %s" % (filename)
- msg += ": %s" % (e.problem)
- raise ConfigFileError(msg)
+ except syaml.SpackYAMLError as e:
+ raise ConfigFileError(str(e)) from e
except IOError as e:
- raise ConfigFileError("Error reading configuration file %s: %s" % (filename, str(e)))
+ raise ConfigFileError(f"Error reading configuration file {filename}: {str(e)}") from e
def _override(string):
@@ -1089,8 +1067,8 @@ def _mark_internal(data, name):
d = syaml.syaml_type(data)
if syaml.markable(d):
- d._start_mark = yaml.Mark(name, None, None, None, None, None)
- d._end_mark = yaml.Mark(name, None, None, None, None, None)
+ d._start_mark = syaml.name_mark(name)
+ d._end_mark = syaml.name_mark(name)
return d
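
The CommentedMap copy hack removed above now lives behind syaml.deepcopy (added later in this diff). A minimal sketch of the guarantee it provides, assuming the new helpers; the YAML snippet is illustrative:

    # Sketch: deepcopy keeps ruamel comments, so dumps of copy and original match.
    import spack.util.spack_yaml as syaml

    text = "spack:\n  # pinned for reproducibility\n  specs:\n  - zlib\n"
    data = syaml.load_config(text)
    copied = syaml.deepcopy(data)
    assert syaml.dump_config(copied) == syaml.dump_config(data)
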
diff --git a/lib/spack/spack/environment/environment.py b/lib/spack/spack/environment/environment.py
index e24fb65a64..56b4775b9f 100644
--- a/lib/spack/spack/environment/environment.py
+++ b/lib/spack/spack/environment/environment.py
@@ -18,8 +18,6 @@ import urllib.request
import warnings
from typing import Any, Dict, List, Optional, Union
-import ruamel.yaml as yaml
-
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import llnl.util.tty.color as clr
@@ -526,11 +524,6 @@ class ViewDescriptor:
def to_dict(self):
ret = syaml.syaml_dict([("root", self.raw_root)])
if self.projections:
- # projections guaranteed to be ordered dict if true-ish
- # for python2.6, may be syaml or ruamel.yaml implementation
- # so we have to check for both
- types = (collections.OrderedDict, syaml.syaml_dict, yaml.comments.CommentedMap)
- assert isinstance(self.projections, types)
ret["projections"] = self.projections
if self.select:
ret["select"] = self.select
diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py
index c74b68a43b..f81cf6fd88 100644
--- a/lib/spack/spack/mirror.py
+++ b/lib/spack/spack/mirror.py
@@ -19,8 +19,6 @@ import sys
import traceback
import urllib.parse
-import ruamel.yaml.error as yaml_error
-
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
@@ -89,11 +87,8 @@ class Mirror(object):
@staticmethod
def from_yaml(stream, name=None):
- try:
- data = syaml.load(stream)
- return Mirror.from_dict(data, name)
- except yaml_error.MarkedYAMLError as e:
- raise syaml.SpackYAMLError("error parsing YAML mirror:", str(e)) from e
+ data = syaml.load(stream)
+ return Mirror.from_dict(data, name)
@staticmethod
def from_json(stream, name=None):
@@ -288,11 +283,8 @@ class MirrorCollection(collections.abc.Mapping):
# TODO: this isn't called anywhere
@staticmethod
def from_yaml(stream, name=None):
- try:
- data = syaml.load(stream)
- return MirrorCollection(data)
- except yaml_error.MarkedYAMLError as e:
- raise syaml.SpackYAMLError("error parsing YAML mirror collection:", str(e)) from e
+ data = syaml.load(stream)
+ return MirrorCollection(data)
@staticmethod
def from_json(stream, name=None):
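
Both try/except blocks could be dropped because syaml.load itself now raises SpackYAMLError with parse context attached. A sketch, reusing the invalid input from the test suite below:

    # Sketch: parse errors surface as SpackYAMLError straight from syaml.load.
    import spack.util.spack_yaml as syaml

    try:
        syaml.load("playing_playlist: {{ action }} playlist {{ playlist_name }}")
    except syaml.SpackYAMLError as e:
        assert "error parsing YAML" in str(e)
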
diff --git a/lib/spack/spack/repo.py b/lib/spack/spack/repo.py
index 1d62e83fb6..f9bf3a8926 100644
--- a/lib/spack/spack/repo.py
+++ b/lib/spack/spack/repo.py
@@ -26,8 +26,6 @@ import types
import uuid
from typing import Dict, Union
-import ruamel.yaml as yaml
-
import llnl.util.filesystem as fs
import llnl.util.lang
import llnl.util.tty as tty
@@ -44,6 +42,7 @@ import spack.util.file_cache
import spack.util.git
import spack.util.naming as nm
import spack.util.path
+import spack.util.spack_yaml as syaml
#: Package modules are imported as spack.pkg.<repo-namespace>.<pkg-name>
ROOT_PYTHON_NAMESPACE = "spack.pkg"
@@ -1008,7 +1007,7 @@ class Repo(object):
"""Check for a YAML config file in this db's root directory."""
try:
with open(self.config_file) as reponame_file:
- yaml_data = yaml.load(reponame_file)
+ yaml_data = syaml.load(reponame_file)
if (
not yaml_data
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 415be6699b..87f07e88eb 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -57,8 +57,6 @@ import re
import warnings
from typing import Tuple
-import ruamel.yaml as yaml
-
import llnl.util.filesystem as fs
import llnl.util.lang as lang
import llnl.util.tty as tty
@@ -2305,11 +2303,8 @@ class Spec(object):
Args:
stream: string or file object to read from.
"""
- try:
- data = yaml.load(stream)
- return Spec.from_dict(data)
- except yaml.error.MarkedYAMLError as e:
- raise syaml.SpackYAMLError("error parsing YAML spec:", str(e)) from e
+ data = syaml.load(stream)
+ return Spec.from_dict(data)
@staticmethod
def from_json(stream):
diff --git a/lib/spack/spack/test/cmd/config.py b/lib/spack/spack/test/cmd/config.py
index a7793d65c7..39cd9fcb7d 100644
--- a/lib/spack/spack/test/cmd/config.py
+++ b/lib/spack/spack/test/cmd/config.py
@@ -483,7 +483,7 @@ def test_config_add_to_env_preserve_comments(mutable_empty_config, mutable_mock_
spack: # comment
# comment
specs: # comment
- - foo # comment
+ - foo # comment
# comment
view: true # comment
packages: # comment
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index 42580e8e99..697d056177 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -1414,5 +1414,5 @@ def test_config_file_read_invalid_yaml(tmpdir, mutable_empty_config):
with open(filename, "w") as f:
f.write("spack:\nview")
- with pytest.raises(spack.config.ConfigFileError, match="parsing yaml"):
+ with pytest.raises(spack.config.ConfigFileError, match="parsing YAML"):
spack.config.read_config_file(filename)
diff --git a/lib/spack/spack/test/env.py b/lib/spack/spack/test/env.py
index 5c3a8caae5..fc0ef4cba8 100644
--- a/lib/spack/spack/test/env.py
+++ b/lib/spack/spack/test/env.py
@@ -385,3 +385,37 @@ spack:
assert len(env.user_specs) == 1
assert env.manifest.pristine_yaml_content["spack"]["specs"] == ["a"]
+
+
+@pytest.mark.parametrize(
+ "original_yaml,new_spec,expected_yaml",
+ [
+ (
+ """spack:
+ specs:
+ # baz
+ - zlib
+""",
+ "libpng",
+ """spack:
+ specs:
+ # baz
+ - zlib
+ - libpng
+""",
+ )
+ ],
+)
+def test_preserving_comments_when_adding_specs(
+ original_yaml, new_spec, expected_yaml, config, tmp_path
+):
+    """Ensure that comments in spack.yaml are preserved when specs are added."""
+ spack_yaml = tmp_path / "spack.yaml"
+ spack_yaml.write_text(original_yaml)
+
+ e = ev.Environment(str(tmp_path))
+ e.add(new_spec)
+ e.write()
+
+ content = spack_yaml.read_text()
+ assert content == expected_yaml
diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py
index 8fa32c4893..80a6b8690c 100644
--- a/lib/spack/spack/test/mirror.py
+++ b/lib/spack/spack/test/mirror.py
@@ -145,11 +145,9 @@ def test_roundtrip_mirror(mirror):
"invalid_yaml", ["playing_playlist: {{ action }} playlist {{ playlist_name }}"]
)
def test_invalid_yaml_mirror(invalid_yaml):
- with pytest.raises(SpackYAMLError) as e:
+ with pytest.raises(SpackYAMLError, match="error parsing YAML") as e:
spack.mirror.Mirror.from_yaml(invalid_yaml)
- exc_msg = str(e.value)
- assert exc_msg.startswith("error parsing YAML mirror:")
- assert invalid_yaml in exc_msg
+ assert invalid_yaml in str(e.value)
@pytest.mark.parametrize("invalid_json, error_message", [("{13:", "Expecting property name")])
@@ -184,11 +182,9 @@ def test_roundtrip_mirror_collection(mirror_collection):
"invalid_yaml", ["playing_playlist: {{ action }} playlist {{ playlist_name }}"]
)
def test_invalid_yaml_mirror_collection(invalid_yaml):
- with pytest.raises(SpackYAMLError) as e:
+ with pytest.raises(SpackYAMLError, match="error parsing YAML") as e:
spack.mirror.MirrorCollection.from_yaml(invalid_yaml)
- exc_msg = str(e.value)
- assert exc_msg.startswith("error parsing YAML mirror collection:")
- assert invalid_yaml in exc_msg
+ assert invalid_yaml in str(e.value)
@pytest.mark.parametrize("invalid_json, error_message", [("{13:", "Expecting property name")])
diff --git a/lib/spack/spack/test/spack_yaml.py b/lib/spack/spack/test/spack_yaml.py
index de5e6814d9..168a556323 100644
--- a/lib/spack/spack/test/spack_yaml.py
+++ b/lib/spack/spack/test/spack_yaml.py
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Test Spack's custom YAML format."""
+import io
+import sys
import pytest
@@ -88,7 +90,53 @@ def test_yaml_aliases():
"e": aliased_list_2,
"f": aliased_list_2,
}
- string = syaml.dump(dict_with_aliases)
+ stringio = io.StringIO()
+ syaml.dump(dict_with_aliases, stream=stringio)
# ensure no YAML aliases appear in syaml dumps.
- assert "*id" not in string
+ assert "*id" not in stringio.getvalue()
+
+
+@pytest.mark.parametrize(
+ "initial_content,expected_final_content",
+ [
+        # Lists are dumped with the same indentation as the outer attribute
+ (
+ """spack:
+ #foo
+ specs:
+ # bar
+ - zlib
+""",
+ None,
+ ),
+ (
+ """spack:
+ #foo
+ specs:
+ # bar
+ - zlib
+""",
+ """spack:
+ #foo
+ specs:
+ # bar
+ - zlib
+""",
+ ),
+ ],
+)
+@pytest.mark.xfail(sys.platform == "win32", reason="fails on Windows")
+def test_round_trip_configuration(initial_content, expected_final_content, tmp_path):
+ """Test that configuration can be loaded and dumped without too many changes"""
+ file = tmp_path / "test.yaml"
+ file.write_text(initial_content)
+ final_content = io.StringIO()
+
+ data = syaml.load_config(file)
+ syaml.dump_config(data, stream=final_content)
+
+ if expected_final_content is None:
+ expected_final_content = initial_content
+
+ assert final_content.getvalue() == expected_final_content
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
index ff614c0d68..a9d850a890 100644
--- a/lib/spack/spack/test/spec_yaml.py
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -81,11 +81,9 @@ def test_normal_spec(mock_packages):
"invalid_yaml", ["playing_playlist: {{ action }} playlist {{ playlist_name }}"]
)
def test_invalid_yaml_spec(invalid_yaml):
- with pytest.raises(SpackYAMLError) as e:
+ with pytest.raises(SpackYAMLError, match="error parsing YAML") as e:
Spec.from_yaml(invalid_yaml)
- exc_msg = str(e.value)
- assert exc_msg.startswith("error parsing YAML spec:")
- assert invalid_yaml in exc_msg
+ assert invalid_yaml in str(e)
@pytest.mark.parametrize("invalid_json, error_message", [("{13:", "Expecting property name")])
diff --git a/lib/spack/spack/test/util/spack_yaml.py b/lib/spack/spack/test/util/spack_yaml.py
index 3c8c48a2bb..9beccdec71 100644
--- a/lib/spack/spack/test/util/spack_yaml.py
+++ b/lib/spack/spack/test/util/spack_yaml.py
@@ -92,5 +92,5 @@ def test_config_blame_defaults():
val = val.lower()
lines = get_file_lines(filename)
- assert key in lines[line]
+ assert key in lines[line], filename
assert val in lines[line]
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index 84e22bee07..f0cb05ab04 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -14,13 +14,16 @@
"""
import collections
import collections.abc
+import copy
import ctypes
+import enum
+import functools
import io
import re
-from typing import List
+from typing import IO, List, Optional
-import ruamel.yaml as yaml
-from ruamel.yaml import RoundTripDumper, RoundTripLoader
+import ruamel.yaml
+from ruamel.yaml import comments, constructor, emitter, error, representer
from llnl.util.tty.color import cextra, clen, colorize
@@ -34,7 +37,7 @@ __all__ = ["load", "dump", "SpackYAMLError"]
# Also, use OrderedDict instead of just dict.
class syaml_dict(collections.OrderedDict):
def __repr__(self):
- mappings = ("%r: %r" % (k, v) for k, v in self.items())
+ mappings = (f"{k!r}: {v!r}" for k, v in self.items())
return "{%s}" % ", ".join(mappings)
@@ -54,7 +57,7 @@ class syaml_int(int):
syaml_types = {syaml_str: str, syaml_int: int, syaml_dict: dict, syaml_list: list}
-markable_types = set(syaml_types) | set([yaml.comments.CommentedSeq, yaml.comments.CommentedMap])
+markable_types = set(syaml_types) | {comments.CommentedSeq, comments.CommentedMap}
def syaml_type(obj):
@@ -96,7 +99,7 @@ def marked(obj):
)
-class OrderedLineLoader(RoundTripLoader):
+class OrderedLineConstructor(constructor.RoundTripConstructor):
"""YAML loader specifically intended for reading Spack configuration
files. It preserves order and line numbers. It also has special-purpose
logic for handling dictionary keys that indicate a Spack config
@@ -120,7 +123,7 @@ class OrderedLineLoader(RoundTripLoader):
#
def construct_yaml_str(self, node):
- value = super(OrderedLineLoader, self).construct_yaml_str(node)
+ value = super().construct_yaml_str(node)
# There is no specific marker to indicate that we are parsing a key,
# so this assumes we are talking about a Spack config override key if
# it ends with a ':' and does not contain a '@' (which can appear
@@ -134,7 +137,7 @@ class OrderedLineLoader(RoundTripLoader):
return value
def construct_yaml_seq(self, node):
- gen = super(OrderedLineLoader, self).construct_yaml_seq(node)
+ gen = super().construct_yaml_seq(node)
data = next(gen)
if markable(data):
mark(data, node)
@@ -143,7 +146,7 @@ class OrderedLineLoader(RoundTripLoader):
pass
def construct_yaml_map(self, node):
- gen = super(OrderedLineLoader, self).construct_yaml_map(node)
+ gen = super().construct_yaml_map(node)
data = next(gen)
if markable(data):
mark(data, node)
@@ -153,19 +156,24 @@ class OrderedLineLoader(RoundTripLoader):
# register above new constructors
-OrderedLineLoader.add_constructor("tag:yaml.org,2002:map", OrderedLineLoader.construct_yaml_map)
-OrderedLineLoader.add_constructor("tag:yaml.org,2002:seq", OrderedLineLoader.construct_yaml_seq)
-OrderedLineLoader.add_constructor("tag:yaml.org,2002:str", OrderedLineLoader.construct_yaml_str)
+OrderedLineConstructor.add_constructor(
+ "tag:yaml.org,2002:map", OrderedLineConstructor.construct_yaml_map
+)
+OrderedLineConstructor.add_constructor(
+ "tag:yaml.org,2002:seq", OrderedLineConstructor.construct_yaml_seq
+)
+OrderedLineConstructor.add_constructor(
+ "tag:yaml.org,2002:str", OrderedLineConstructor.construct_yaml_str
+)
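
The registration pattern above binds YAML tags to construction methods at the class level. A standalone illustration against plain ruamel.yaml; the constructor class below is hypothetical:

    # Minimal illustration of the add_constructor pattern.
    import ruamel.yaml
    from ruamel.yaml import constructor

    class ShoutingConstructor(constructor.RoundTripConstructor):
        def construct_yaml_str(self, node):
            # Post-process every plain string scalar after construction.
            return super().construct_yaml_str(node).upper()

    ShoutingConstructor.add_constructor(
        "tag:yaml.org,2002:str", ShoutingConstructor.construct_yaml_str
    )

    y = ruamel.yaml.YAML(typ="rt", pure=True)
    y.Constructor = ShoutingConstructor
    assert y.load("name: spack")["name"] == "SPACK"
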
-class OrderedLineDumper(RoundTripDumper):
- """Dumper that preserves ordering and formats ``syaml_*`` objects.
+class OrderedLineRepresenter(representer.RoundTripRepresenter):
+ """Representer that preserves ordering and formats ``syaml_*`` objects.
- This dumper preserves insertion ordering ``syaml_dict`` objects
+ This representer preserves insertion ordering ``syaml_dict`` objects
when they're written out. It also has some custom formatters
for ``syaml_*`` objects so that they are formatted like their
regular Python equivalents, instead of ugly YAML pyobjects.
-
"""
def ignore_aliases(self, _data):
@@ -173,7 +181,7 @@ class OrderedLineDumper(RoundTripDumper):
return True
def represent_data(self, data):
- result = super(OrderedLineDumper, self).represent_data(data)
+ result = super().represent_data(data)
if data is None:
result.value = syaml_str("null")
return result
@@ -181,31 +189,53 @@ class OrderedLineDumper(RoundTripDumper):
def represent_str(self, data):
if hasattr(data, "override") and data.override:
data = data + ":"
- return super(OrderedLineDumper, self).represent_str(data)
+ return super().represent_str(data)
-class SafeDumper(RoundTripDumper):
+class SafeRepresenter(representer.RoundTripRepresenter):
def ignore_aliases(self, _data):
"""Make the dumper NEVER print YAML aliases."""
return True
# Make our special objects look like normal YAML ones.
-RoundTripDumper.add_representer(syaml_dict, RoundTripDumper.represent_dict)
-RoundTripDumper.add_representer(syaml_list, RoundTripDumper.represent_list)
-RoundTripDumper.add_representer(syaml_int, RoundTripDumper.represent_int)
-RoundTripDumper.add_representer(syaml_str, RoundTripDumper.represent_str)
-OrderedLineDumper.add_representer(syaml_str, OrderedLineDumper.represent_str)
+representer.RoundTripRepresenter.add_representer(
+ syaml_dict, representer.RoundTripRepresenter.represent_dict
+)
+representer.RoundTripRepresenter.add_representer(
+ syaml_list, representer.RoundTripRepresenter.represent_list
+)
+representer.RoundTripRepresenter.add_representer(
+ syaml_int, representer.RoundTripRepresenter.represent_int
+)
+representer.RoundTripRepresenter.add_representer(
+ syaml_str, representer.RoundTripRepresenter.represent_str
+)
+OrderedLineRepresenter.add_representer(syaml_str, OrderedLineRepresenter.represent_str)
#: Max integer helps avoid passing too large a value to cyaml.
maxint = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
-def dump(obj, default_flow_style=False, stream=None):
- return yaml.dump(
- obj, default_flow_style=default_flow_style, width=maxint, Dumper=SafeDumper, stream=stream
- )
+def return_string_when_no_stream(func):
+ @functools.wraps(func)
+ def wrapper(data, stream=None, **kwargs):
+ if stream:
+ return func(data, stream=stream, **kwargs)
+ stream = io.StringIO()
+ func(data, stream=stream, **kwargs)
+ return stream.getvalue()
+
+ return wrapper
+
+
+@return_string_when_no_stream
+def dump(data, stream=None, default_flow_style=False):
+ handler = ConfigYAML(yaml_type=YAMLType.GENERIC_YAML)
+    handler.yaml.default_flow_style = default_flow_style
+    handler.yaml.width = maxint
+ return handler.dump(data, stream=stream)
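
return_string_when_no_stream makes the stream argument optional: with a stream the wrapped function writes to it, without one it returns the rendered string. A minimal sketch:

    # Sketch: dump() returns a string when no stream is supplied.
    import sys
    import spack.util.spack_yaml as syaml

    text = syaml.dump({"a": [1, 2]})              # returns "a:\n- 1\n- 2\n"
    syaml.dump({"a": [1, 2]}, stream=sys.stdout)  # writes to the stream instead
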
def file_line(mark):
@@ -220,11 +250,11 @@ def file_line(mark):
#: This is nasty but YAML doesn't give us many ways to pass arguments --
#: yaml.dump() takes a class (not an instance) and instantiates the dumper
#: itself, so we can't just pass an instance
-_annotations: List[str] = []
+_ANNOTATIONS: List[str] = []
-class LineAnnotationDumper(OrderedLineDumper):
- """Dumper that generates per-line annotations.
+class LineAnnotationRepresenter(OrderedLineRepresenter):
+ """Representer that generates per-line annotations.
Annotations are stored in the ``_annotations`` global. After one
dump pass, the strings in ``_annotations`` will correspond one-to-one
@@ -240,22 +270,9 @@ class LineAnnotationDumper(OrderedLineDumper):
annotations.
"""
- saved = None
-
- def __init__(self, *args, **kwargs):
- super(LineAnnotationDumper, self).__init__(*args, **kwargs)
- del _annotations[:]
- self.colors = "KgrbmcyGRBMCY"
- self.filename_colors = {}
-
- def process_scalar(self):
- super(LineAnnotationDumper, self).process_scalar()
- if marked(self.event.value):
- self.saved = self.event.value
-
def represent_data(self, data):
"""Force syaml_str to be passed through with marks."""
- result = super(LineAnnotationDumper, self).represent_data(data)
+ result = super().represent_data(data)
if data is None:
result.value = syaml_str("null")
elif isinstance(result.value, str):
@@ -264,10 +281,25 @@ class LineAnnotationDumper(OrderedLineDumper):
mark(result.value, data)
return result
+
+class LineAnnotationEmitter(emitter.Emitter):
+ saved = None
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ del _ANNOTATIONS[:]
+ self.colors = "KgrbmcyGRBMCY"
+ self.filename_colors = {}
+
+ def process_scalar(self):
+ super().process_scalar()
+ if marked(self.event.value):
+ self.saved = self.event.value
+
def write_line_break(self):
- super(LineAnnotationDumper, self).write_line_break()
+ super().write_line_break()
if self.saved is None:
- _annotations.append(colorize("@K{---}"))
+ _ANNOTATIONS.append(colorize("@K{---}"))
return
# append annotations at the end of each line
@@ -284,37 +316,131 @@ class LineAnnotationDumper(OrderedLineDumper):
ann = fmt % mark.name
if mark.line is not None:
ann += ":@c{%s}" % (mark.line + 1)
- _annotations.append(colorize(ann))
+ _ANNOTATIONS.append(colorize(ann))
+ else:
+ _ANNOTATIONS.append("")
+
+ def write_comment(self, comment, pre=False):
+ pass
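
The emitter appends one entry to _ANNOTATIONS per emitted line; dump_config(..., blame=True) later zips those annotations with the output lines. A rough usage sketch with a hypothetical file path (marks carry file and line information only when the data came through load_config):

    # Sketch: blame output prefixes each line with its file:line origin.
    import spack.util.spack_yaml as syaml

    with open("spack.yaml") as f:   # hypothetical path
        data = syaml.load_config(f)
    print(syaml.dump_config(data, blame=True))
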
+
+
+class YAMLType(enum.Enum):
+ """YAML configurations handled by Spack"""
+
+ #: Generic YAML configuration
+ GENERIC_YAML = enum.auto()
+ #: A Spack config file with overrides
+ SPACK_CONFIG_FILE = enum.auto()
+ #: A Spack config file with line annotations
+ ANNOTATED_SPACK_CONFIG_FILE = enum.auto()
+
+
+class ConfigYAML:
+ """Handles the loading and dumping of Spack's YAML files."""
+
+ def __init__(self, yaml_type: YAMLType) -> None:
+ self.yaml = ruamel.yaml.YAML(typ="rt", pure=True)
+ if yaml_type == YAMLType.GENERIC_YAML:
+ self.yaml.Representer = SafeRepresenter
+ elif yaml_type == YAMLType.ANNOTATED_SPACK_CONFIG_FILE:
+ self.yaml.Representer = LineAnnotationRepresenter
+ self.yaml.Emitter = LineAnnotationEmitter
+ self.yaml.Constructor = OrderedLineConstructor
else:
- _annotations.append("")
+ self.yaml.Representer = OrderedLineRepresenter
+ self.yaml.Constructor = OrderedLineConstructor
+
+ def load(self, stream: IO):
+ """Loads the YAML data from a stream and returns it.
+
+ Args:
+ stream: stream to load from.
+
+ Raises:
+ SpackYAMLError: if anything goes wrong while loading
+ """
+ try:
+ return self.yaml.load(stream)
+
+ except error.MarkedYAMLError as e:
+ msg = "error parsing YAML"
+ error_mark = e.context_mark if e.context_mark else e.problem_mark
+ if error_mark:
+ line, column = error_mark.line, error_mark.column
+ msg += f": near {error_mark.name}, {str(line)}, {str(column)}"
+ else:
+ msg += f": {stream.name}"
+ msg += f": {e.problem}"
+ raise SpackYAMLError(msg, e) from e
+
+ except Exception as e:
+ msg = "cannot load Spack YAML configuration"
+ raise SpackYAMLError(msg, e) from e
+
+ def dump(self, data, stream: Optional[IO] = None, *, transform=None) -> None:
+ """Dumps the YAML data to a stream.
+
+ Args:
+ data: data to be dumped
+ stream: stream to dump the data into.
+
+ Raises:
+ SpackYAMLError: if anything goes wrong while dumping
+ """
+ try:
+ return self.yaml.dump(data, stream=stream, transform=transform)
+ except Exception as e:
+ msg = "cannot dump Spack YAML configuration"
+ raise SpackYAMLError(msg, str(e)) from e
+
+ def as_string(self, data) -> str:
+ """Returns a string representing the YAML data passed as input."""
+ result = io.StringIO()
+ self.dump(data, stream=result)
+ return result.getvalue()
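
ConfigYAML hides the choice of constructor, representer, and emitter behind YAMLType. Direct use stays internal to this module, but a minimal sketch looks like:

    # Sketch: driving the handler directly (module-internal API).
    import io
    from spack.util.spack_yaml import ConfigYAML, YAMLType

    handler = ConfigYAML(yaml_type=YAMLType.SPACK_CONFIG_FILE)
    data = handler.load(io.StringIO("spack:\n  specs: [zlib]\n"))
    print(handler.as_string(data))  # round-trips with formatting preserved
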
+
+
+def deepcopy(data):
+ """Returns a deepcopy of the input YAML data."""
+ result = copy.deepcopy(data)
+
+ if isinstance(result, comments.CommentedMap):
+ # HACK to fully copy ruamel CommentedMap that doesn't provide copy
+ # method. Especially necessary for environments
+ extracted_comments = extract_comments(data)
+ if extracted_comments:
+ set_comments(result, data_comments=extracted_comments)
+
+ return result
-def load_config(*args, **kwargs):
+def load_config(str_or_file):
"""Load but modify the loader instance so that it will add __line__
attributes to the returned object."""
- kwargs["Loader"] = OrderedLineLoader
- return yaml.load(*args, **kwargs)
+ handler = ConfigYAML(yaml_type=YAMLType.SPACK_CONFIG_FILE)
+ return handler.load(str_or_file)
def load(*args, **kwargs):
- return yaml.load(*args, **kwargs)
-
+ handler = ConfigYAML(yaml_type=YAMLType.GENERIC_YAML)
+ return handler.load(*args, **kwargs)
-def dump_config(*args, **kwargs):
- blame = kwargs.pop("blame", False)
+@return_string_when_no_stream
+def dump_config(data, stream, *, default_flow_style=False, blame=False):
if blame:
- return dump_annotated(*args, **kwargs)
- else:
- kwargs["Dumper"] = OrderedLineDumper
- return yaml.dump(*args, **kwargs)
+ handler = ConfigYAML(yaml_type=YAMLType.ANNOTATED_SPACK_CONFIG_FILE)
+ handler.yaml.default_flow_style = default_flow_style
+ return _dump_annotated(handler, data, stream)
+ handler = ConfigYAML(yaml_type=YAMLType.SPACK_CONFIG_FILE)
+ handler.yaml.default_flow_style = default_flow_style
+ return handler.dump(data, stream)
-def dump_annotated(data, stream=None, *args, **kwargs):
- kwargs["Dumper"] = LineAnnotationDumper
+def _dump_annotated(handler, data, stream=None):
sio = io.StringIO()
- yaml.dump(data, sio, *args, **kwargs)
+ handler.dump(data, sio)
# write_line_break() is not called by YAML for empty lines, so we
# skip empty lines here with \n+.
@@ -326,10 +452,10 @@ def dump_annotated(data, stream=None, *args, **kwargs):
getvalue = stream.getvalue
# write out annotations and lines, accounting for color
- width = max(clen(a) for a in _annotations)
- formats = ["%%-%ds %%s\n" % (width + cextra(a)) for a in _annotations]
+ width = max(clen(a) for a in _ANNOTATIONS)
+ formats = ["%%-%ds %%s\n" % (width + cextra(a)) for a in _ANNOTATIONS]
- for f, a, l in zip(formats, _annotations, lines):
+ for f, a, l in zip(formats, _ANNOTATIONS, lines):
stream.write(f % (a, l))
if getvalue:
@@ -352,8 +478,23 @@ def sorted_dict(dict_like):
return result
+def extract_comments(data):
+ """Extract and returns comments from some YAML data"""
+ return getattr(data, comments.Comment.attrib, None)
+
+
+def set_comments(data, *, data_comments):
+ """Set comments on some YAML data"""
+ return setattr(data, comments.Comment.attrib, data_comments)
+
+
+def name_mark(name):
+ """Returns a mark with just a name"""
+ return error.StringMark(name, None, None, None, None, None)
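
extract_comments and set_comments wrap ruamel's Comment.attrib attribute so call sites (such as Configuration.update_config earlier in this diff) never touch ruamel directly. A minimal sketch with illustrative YAML:

    # Sketch: copying ruamel comments from one loaded mapping to another.
    import spack.util.spack_yaml as syaml

    a = syaml.load_config("packages:\n  # pinned\n  zlib:\n    version: [1.2.13]\n")
    b = syaml.load_config("packages:\n  zlib:\n    version: [1.2.13]\n")

    cmts = syaml.extract_comments(a["packages"])
    if cmts:
        syaml.set_comments(b["packages"], data_comments=cmts)
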
+
+
class SpackYAMLError(spack.error.SpackError):
"""Raised when there are issues with YAML parsing."""
def __init__(self, msg, yaml_error):
- super(SpackYAMLError, self).__init__(msg, str(yaml_error))
+ super().__init__(msg, str(yaml_error))
diff --git a/pyproject.toml b/pyproject.toml
index c469a16be4..a89a206666 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -222,6 +222,9 @@ drop = [
'^pygments/lexers/(?!python|__init__|_mapping).*\.py$',
# trim rich's markdown support
"rich/markdown.py",
+  # ruamel.yaml installs unneeded files
+ "ruamel.*.pth",
+ "pvectorc.*.so"
]
[tool.vendoring.typing-stubs]