author    Massimiliano Culpo <massimiliano.culpo@gmail.com>  2022-11-16 20:43:44 +0100
committer Todd Gamblin <tgamblin@llnl.gov>  2023-01-04 09:43:04 -0800
commit    033cb86fd69abe34e04f0ed0e08719d5a8b4f34d (patch)
tree      38c15aa1ab47c86de3b4280b975ceabf69c80c5b
parent    51751894122ff02f96a3df8fcdb37884deebf385 (diff)
Add vendored packages back
-rw-r--r-- lib/spack/external/_vendoring/_pyrsistent_version.py 1
-rw-r--r-- lib/spack/external/_vendoring/_pyrsistent_version.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/altgraph.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/altgraph/Dot.py 321
-rw-r--r-- lib/spack/external/_vendoring/altgraph/Graph.py 682
-rw-r--r-- lib/spack/external/_vendoring/altgraph/GraphAlgo.py 171
-rw-r--r-- lib/spack/external/_vendoring/altgraph/GraphStat.py 73
-rw-r--r-- lib/spack/external/_vendoring/altgraph/GraphUtil.py 139
-rw-r--r-- lib/spack/external/_vendoring/altgraph/LICENSE 18
-rw-r--r-- lib/spack/external/_vendoring/altgraph/ObjectGraph.py 212
-rw-r--r-- lib/spack/external/_vendoring/altgraph/__init__.py 148
-rw-r--r-- lib/spack/external/_vendoring/attr/__init__.py 79
-rw-r--r-- lib/spack/external/_vendoring/attr/__init__.pyi 486
-rw-r--r-- lib/spack/external/_vendoring/attr/_cmp.py 155
-rw-r--r-- lib/spack/external/_vendoring/attr/_cmp.pyi 13
-rw-r--r-- lib/spack/external/_vendoring/attr/_compat.py 185
-rw-r--r-- lib/spack/external/_vendoring/attr/_config.py 31
-rw-r--r-- lib/spack/external/_vendoring/attr/_funcs.py 420
-rw-r--r-- lib/spack/external/_vendoring/attr/_make.py 3006
-rw-r--r-- lib/spack/external/_vendoring/attr/_next_gen.py 220
-rw-r--r-- lib/spack/external/_vendoring/attr/_version_info.py 86
-rw-r--r-- lib/spack/external/_vendoring/attr/_version_info.pyi 9
-rw-r--r-- lib/spack/external/_vendoring/attr/converters.py 144
-rw-r--r-- lib/spack/external/_vendoring/attr/converters.pyi 13
-rw-r--r-- lib/spack/external/_vendoring/attr/exceptions.py 92
-rw-r--r-- lib/spack/external/_vendoring/attr/exceptions.pyi 17
-rw-r--r-- lib/spack/external/_vendoring/attr/filters.py 51
-rw-r--r-- lib/spack/external/_vendoring/attr/filters.pyi 6
-rw-r--r-- lib/spack/external/_vendoring/attr/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/attr/setters.py 73
-rw-r--r-- lib/spack/external/_vendoring/attr/setters.pyi 19
-rw-r--r-- lib/spack/external/_vendoring/attr/validators.py 594
-rw-r--r-- lib/spack/external/_vendoring/attr/validators.pyi 80
-rw-r--r-- lib/spack/external/_vendoring/attrs/LICENSE 21
-rw-r--r-- lib/spack/external/_vendoring/attrs/__init__.py 70
-rw-r--r-- lib/spack/external/_vendoring/attrs/__init__.pyi 66
-rw-r--r-- lib/spack/external/_vendoring/attrs/converters.py 3
-rw-r--r-- lib/spack/external/_vendoring/attrs/exceptions.py 3
-rw-r--r-- lib/spack/external/_vendoring/attrs/filters.py 3
-rw-r--r-- lib/spack/external/_vendoring/attrs/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/attrs/setters.py 3
-rw-r--r-- lib/spack/external/_vendoring/attrs/validators.py 3
-rw-r--r-- lib/spack/external/_vendoring/distro/LICENSE 202
-rw-r--r-- lib/spack/external/_vendoring/distro/__init__.py 54
-rw-r--r-- lib/spack/external/_vendoring/distro/__main__.py 4
-rw-r--r-- lib/spack/external/_vendoring/distro/distro.py 1399
-rw-r--r-- lib/spack/external/_vendoring/distro/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/jinja2/LICENSE.rst 28
-rw-r--r-- lib/spack/external/_vendoring/jinja2/__init__.py 45
-rw-r--r-- lib/spack/external/_vendoring/jinja2/_identifier.py 6
-rw-r--r-- lib/spack/external/_vendoring/jinja2/async_utils.py 75
-rw-r--r-- lib/spack/external/_vendoring/jinja2/bccache.py 364
-rw-r--r-- lib/spack/external/_vendoring/jinja2/compiler.py 1957
-rw-r--r-- lib/spack/external/_vendoring/jinja2/constants.py 20
-rw-r--r-- lib/spack/external/_vendoring/jinja2/debug.py 259
-rw-r--r-- lib/spack/external/_vendoring/jinja2/defaults.py 48
-rw-r--r-- lib/spack/external/_vendoring/jinja2/environment.py 1661
-rw-r--r-- lib/spack/external/_vendoring/jinja2/exceptions.py 166
-rw-r--r-- lib/spack/external/_vendoring/jinja2/ext.py 879
-rw-r--r-- lib/spack/external/_vendoring/jinja2/filters.py 1824
-rw-r--r-- lib/spack/external/_vendoring/jinja2/idtracking.py 318
-rw-r--r-- lib/spack/external/_vendoring/jinja2/lexer.py 869
-rw-r--r-- lib/spack/external/_vendoring/jinja2/loaders.py 652
-rw-r--r-- lib/spack/external/_vendoring/jinja2/meta.py 111
-rw-r--r-- lib/spack/external/_vendoring/jinja2/nativetypes.py 124
-rw-r--r-- lib/spack/external/_vendoring/jinja2/nodes.py 1204
-rw-r--r-- lib/spack/external/_vendoring/jinja2/optimizer.py 47
-rw-r--r-- lib/spack/external/_vendoring/jinja2/parser.py 1040
-rw-r--r-- lib/spack/external/_vendoring/jinja2/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/jinja2/runtime.py 1104
-rw-r--r-- lib/spack/external/_vendoring/jinja2/sandbox.py 428
-rw-r--r-- lib/spack/external/_vendoring/jinja2/tests.py 255
-rw-r--r-- lib/spack/external/_vendoring/jinja2/utils.py 854
-rw-r--r-- lib/spack/external/_vendoring/jinja2/visitor.py 92
-rw-r--r-- lib/spack/external/_vendoring/jsonschema.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/COPYING 19
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/__init__.py 31
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/__main__.py 2
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_format.py 425
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_legacy_validators.py 141
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_reflect.py 155
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_types.py 188
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_utils.py 212
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/_validators.py 373
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/benchmarks/__init__.py 5
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/benchmarks/issue232.py 26
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/benchmarks/json_schema_test_suite.py 14
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/cli.py 90
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/compat.py 55
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/exceptions.py 374
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/schemas/draft3.json 199
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/schemas/draft4.json 222
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/schemas/draft6.json 153
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/schemas/draft7.json 166
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/__init__.py 0
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/_helpers.py 5
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/_suite.py 239
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_cli.py 151
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_exceptions.py 462
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_format.py 89
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_jsonschema_test_suite.py 277
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_types.py 190
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/tests/test_validators.py 1762
-rw-r--r-- lib/spack/external/_vendoring/jsonschema/validators.py 970
-rw-r--r-- lib/spack/external/_vendoring/macholib.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/macholib/LICENSE 21
-rw-r--r-- lib/spack/external/_vendoring/macholib/MachO.py 501
-rw-r--r-- lib/spack/external/_vendoring/macholib/MachOGraph.py 141
-rw-r--r-- lib/spack/external/_vendoring/macholib/MachOStandalone.py 173
-rw-r--r-- lib/spack/external/_vendoring/macholib/SymbolTable.py 104
-rw-r--r-- lib/spack/external/_vendoring/macholib/__init__.py 8
-rw-r--r-- lib/spack/external/_vendoring/macholib/__main__.py 80
-rw-r--r-- lib/spack/external/_vendoring/macholib/_cmdline.py 49
-rw-r--r-- lib/spack/external/_vendoring/macholib/dyld.py 228
-rw-r--r-- lib/spack/external/_vendoring/macholib/dylib.py 45
-rw-r--r-- lib/spack/external/_vendoring/macholib/framework.py 45
-rw-r--r-- lib/spack/external/_vendoring/macholib/itergraphreport.py 73
-rw-r--r-- lib/spack/external/_vendoring/macholib/mach_o.py 1641
-rw-r--r-- lib/spack/external/_vendoring/macholib/macho_dump.py 57
-rw-r--r-- lib/spack/external/_vendoring/macholib/macho_find.py 22
-rw-r--r-- lib/spack/external/_vendoring/macholib/macho_standalone.py 30
-rw-r--r-- lib/spack/external/_vendoring/macholib/ptypes.py 334
-rw-r--r-- lib/spack/external/_vendoring/macholib/util.py 262
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/LICENSE.rst 28
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/__init__.py 288
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/_native.py 75
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/_speedups.c 339
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/_speedups.pyi 9
-rw-r--r-- lib/spack/external/_vendoring/markupsafe/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/LICENSE.mit 22
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/__init__.py 47
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/__init__.pyi 213
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_checked_types.py 542
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_field_common.py 329
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_helpers.py 97
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_immutable.py 103
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pbag.py 267
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pclass.py 262
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pdeque.py 376
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_plist.py 313
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pmap.py 461
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_precord.py 167
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pset.py 227
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_pvector.py 711
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_toolz.py 83
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/_transformations.py 139
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/py.typed 0
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/typing.py 80
-rw-r--r-- lib/spack/external/_vendoring/pyrsistent/typing.pyi 292
-rw-r--r-- lib/spack/external/_vendoring/six.LICENSE 18
-rw-r--r-- lib/spack/external/_vendoring/six.py 998
-rw-r--r-- lib/spack/external/_vendoring/six/__init__.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/six/moves/__init__.pyi 1
-rw-r--r-- lib/spack/external/_vendoring/six/moves/configparser.pyi 1
154 files changed, 41107 insertions, 0 deletions
diff --git a/lib/spack/external/_vendoring/_pyrsistent_version.py b/lib/spack/external/_vendoring/_pyrsistent_version.py
new file mode 100644
index 0000000000..5ec52a922c
--- /dev/null
+++ b/lib/spack/external/_vendoring/_pyrsistent_version.py
@@ -0,0 +1 @@
+__version__ = '0.18.0'
diff --git a/lib/spack/external/_vendoring/_pyrsistent_version.pyi b/lib/spack/external/_vendoring/_pyrsistent_version.pyi
new file mode 100644
index 0000000000..873cc62efd
--- /dev/null
+++ b/lib/spack/external/_vendoring/_pyrsistent_version.pyi
@@ -0,0 +1 @@
+from _pyrsistent_version import * \ No newline at end of file
diff --git a/lib/spack/external/_vendoring/altgraph.pyi b/lib/spack/external/_vendoring/altgraph.pyi
new file mode 100644
index 0000000000..0d67a24e01
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph.pyi
@@ -0,0 +1 @@
+from altgraph import * \ No newline at end of file
diff --git a/lib/spack/external/_vendoring/altgraph/Dot.py b/lib/spack/external/_vendoring/altgraph/Dot.py
new file mode 100644
index 0000000000..f265a7121c
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/Dot.py
@@ -0,0 +1,321 @@
+"""
+altgraph.Dot - Interface to the dot language
+============================================
+
+The :py:mod:`~altgraph.Dot` module provides a simple interface to the
+file format used in the
+`graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+program. The module is intended to offload the most tedious part of the process
+(the **dot** file generation) while transparently exposing most of its
+features.
+
+To display the graphs or to generate image files the
+`graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+package needs to be installed on the system; moreover, the :command:`dot` and
+:command:`dotty` programs must be accessible in the program path so that they
+can be run from processes spawned within the module.
+
+Example usage
+-------------
+
+Here is a typical usage::
+
+ from altgraph import Graph, Dot
+
+ # create a graph
+ edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
+ graph = Graph.Graph(edges)
+
+ # create a dot representation of the graph
+ dot = Dot.Dot(graph)
+
+ # display the graph
+ dot.display()
+
+ # save the dot representation into the mydot.dot file
+ dot.save_dot(file_name='mydot.dot')
+
+ # save dot file as gif image into the graph.gif file
+ dot.save_img(file_name='graph', file_type='gif')
+
+Directed graph and non-directed graph
+-------------------------------------
+
+The Dot class can be used for both directed and non-directed graphs
+by passing the ``graphtype`` parameter.
+
+Example::
+
+    # create a directed graph (default)
+ dot = Dot.Dot(graph, graphtype="digraph")
+
+ # create non-directed graph
+ dot = Dot.Dot(graph, graphtype="graph")
+
+Customizing the output
+----------------------
+
+The graph drawing process may be customized by passing
+valid :command:`dot` parameters for the nodes and edges. For a list of all
+parameters see the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+documentation.
+
+Example::
+
+ # customizing the way the overall graph is drawn
+ dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
+
+ # customizing node drawing
+ dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
+ dot.node_style(2, style='filled', fillcolor='red')
+
+ # customizing edge drawing
+ dot.edge_style(1, 2, style='dotted')
+ dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
+ dot.edge_style(4, 5, arrowsize=2, style='bold')
+
+
+.. note::
+
+ dotty (invoked via :py:func:`~altgraph.Dot.display`) may not be able to
+ display all graphics styles. To verify the output save it to an image file
+ and look at it that way.
+
+Valid attributes
+----------------
+
+ - dot styles, passed via the :py:meth:`Dot.style` method::
+
+ rankdir = 'LR' (draws the graph horizontally, left to right)
+ ranksep = number (rank separation in inches)
+
+ - node attributes, passed via the :py:meth:`Dot.node_style` method::
+
+ style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
+ shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
+
+ - edge attributes, passed via the :py:meth:`Dot.edge_style` method::
+
+ style = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
+ arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none'
+ | 'tee' | 'vee'
+ weight = number (the larger the number the closer the nodes will be)
+
+ - valid `graphviz colors
+ <http://www.research.att.com/~erg/graphviz/info/colors.html>`_
+
+ - for more details on how to control the graph drawing process see the
+ `graphviz reference
+ <http://www.research.att.com/sw/tools/graphviz/refs.html>`_.
+"""
+import os
+import warnings
+
+from altgraph import GraphError
+
+
+class Dot(object):
+ """
+ A class providing a **graphviz** (dot language) representation
+ allowing a fine grained control over how the graph is being
+ displayed.
+
+    If the :command:`dot` and :command:`dotty` programs are not in the current
+    system path, their location needs to be specified in the constructor.
+ """
+
+ def __init__(
+ self,
+ graph=None,
+ nodes=None,
+ edgefn=None,
+ nodevisitor=None,
+ edgevisitor=None,
+ name="G",
+ dot="dot",
+ dotty="dotty",
+ neato="neato",
+ graphtype="digraph",
+ ):
+ """
+ Initialization.
+ """
+ self.name, self.attr = name, {}
+
+ assert graphtype in ["graph", "digraph"]
+ self.type = graphtype
+
+ self.temp_dot = "tmp_dot.dot"
+ self.temp_neo = "tmp_neo.dot"
+
+ self.dot, self.dotty, self.neato = dot, dotty, neato
+
+ # self.nodes: node styles
+ # self.edges: edge styles
+ self.nodes, self.edges = {}, {}
+
+ if graph is not None and nodes is None:
+ nodes = graph
+ if graph is not None and edgefn is None:
+
+ def edgefn(node, graph=graph):
+ return graph.out_nbrs(node)
+
+ if nodes is None:
+ nodes = ()
+
+ seen = set()
+ for node in nodes:
+ if nodevisitor is None:
+ style = {}
+ else:
+ style = nodevisitor(node)
+ if style is not None:
+ self.nodes[node] = {}
+ self.node_style(node, **style)
+ seen.add(node)
+ if edgefn is not None:
+ for head in seen:
+ for tail in (n for n in edgefn(head) if n in seen):
+ if edgevisitor is None:
+ edgestyle = {}
+ else:
+ edgestyle = edgevisitor(head, tail)
+ if edgestyle is not None:
+ if head not in self.edges:
+ self.edges[head] = {}
+ self.edges[head][tail] = {}
+ self.edge_style(head, tail, **edgestyle)
+
+ def style(self, **attr):
+ """
+ Changes the overall style
+ """
+ self.attr = attr
+
+ def display(self, mode="dot"):
+ """
+ Displays the current graph via dotty
+ """
+
+ if mode == "neato":
+ self.save_dot(self.temp_neo)
+ neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
+ os.system(neato_cmd)
+ else:
+ self.save_dot(self.temp_dot)
+
+ plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
+ os.system(plot_cmd)
+
+ def node_style(self, node, **kwargs):
+ """
+        Modifies the style of a node in the dot representation.
+ """
+ if node not in self.edges:
+ self.edges[node] = {}
+ self.nodes[node] = kwargs
+
+ def all_node_style(self, **kwargs):
+ """
+ Modifies all node styles
+ """
+ for node in self.nodes:
+ self.node_style(node, **kwargs)
+
+ def edge_style(self, head, tail, **kwargs):
+ """
+        Modifies the style of an edge in the dot representation.
+ """
+ if tail not in self.nodes:
+ raise GraphError("invalid node %s" % (tail,))
+
+ try:
+ if tail not in self.edges[head]:
+ self.edges[head][tail] = {}
+ self.edges[head][tail] = kwargs
+ except KeyError:
+ raise GraphError("invalid edge %s -> %s " % (head, tail))
+
+ def iterdot(self):
+ # write graph title
+ if self.type == "digraph":
+ yield "digraph %s {\n" % (self.name,)
+ elif self.type == "graph":
+ yield "graph %s {\n" % (self.name,)
+
+ else:
+ raise GraphError("unsupported graphtype %s" % (self.type,))
+
+ # write overall graph attributes
+ for attr_name, attr_value in sorted(self.attr.items()):
+ yield '%s="%s";' % (attr_name, attr_value)
+ yield "\n"
+
+ # some reusable patterns
+ cpatt = '%s="%s",' # to separate attributes
+ epatt = "];\n" # to end attributes
+
+ # write node attributes
+ for node_name, node_attr in sorted(self.nodes.items()):
+ yield '\t"%s" [' % (node_name,)
+ for attr_name, attr_value in sorted(node_attr.items()):
+ yield cpatt % (attr_name, attr_value)
+ yield epatt
+
+ # write edge attributes
+ for head in sorted(self.edges):
+ for tail in sorted(self.edges[head]):
+ if self.type == "digraph":
+ yield '\t"%s" -> "%s" [' % (head, tail)
+ else:
+ yield '\t"%s" -- "%s" [' % (head, tail)
+ for attr_name, attr_value in sorted(self.edges[head][tail].items()):
+ yield cpatt % (attr_name, attr_value)
+ yield epatt
+
+ # finish file
+ yield "}\n"
+
+ def __iter__(self):
+ return self.iterdot()
+
+ def save_dot(self, file_name=None):
+ """
+ Saves the current graph representation into a file
+ """
+
+ if not file_name:
+            warnings.warn("always pass a file_name", DeprecationWarning)
+ file_name = self.temp_dot
+
+ with open(file_name, "w") as fp:
+ for chunk in self.iterdot():
+ fp.write(chunk)
+
+ def save_img(self, file_name=None, file_type="gif", mode="dot"):
+ """
+ Saves the dot file as an image file
+ """
+
+ if not file_name:
+            warnings.warn("always pass a file_name", DeprecationWarning)
+ file_name = "out"
+
+ if mode == "neato":
+ self.save_dot(self.temp_neo)
+ neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
+ os.system(neato_cmd)
+ plot_cmd = self.dot
+ else:
+ self.save_dot(self.temp_dot)
+ plot_cmd = self.dot
+
+ file_name = "%s.%s" % (file_name, file_type)
+ create_cmd = "%s -T%s %s -o %s" % (
+ plot_cmd,
+ file_type,
+ self.temp_dot,
+ file_name,
+ )
+ os.system(create_cmd)
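
For reference, a minimal sketch of driving the Dot class added above; the toy
graph and the style values are illustrative, not taken from this commit::

    from altgraph import Graph, Dot

    graph = Graph.Graph([(1, 2), (1, 3), (3, 4)])
    dot = Dot.Dot(graph, graphtype="digraph")
    dot.node_style(1, shape="box", color="blue")
    dot.edge_style(1, 2, style="dotted")

    # assemble the dot-language source without invoking dot/dotty
    dot_source = "".join(dot.iterdot())
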
diff --git a/lib/spack/external/_vendoring/altgraph/Graph.py b/lib/spack/external/_vendoring/altgraph/Graph.py
new file mode 100644
index 0000000000..8088007abd
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/Graph.py
@@ -0,0 +1,682 @@
+"""
+altgraph.Graph - Base Graph class
+=================================
+
+..
+ #--Version 2.1
+ #--Bob Ippolito October, 2004
+
+ #--Version 2.0
+ #--Istvan Albert June, 2004
+
+ #--Version 1.0
+ #--Nathan Denny, May 27, 1999
+"""
+
+from collections import deque
+
+from altgraph import GraphError
+
+
+class Graph(object):
+ """
+ The Graph class represents a directed graph with *N* nodes and *E* edges.
+
+ Naming conventions:
+
+ - the prefixes such as *out*, *inc* and *all* will refer to methods
+ that operate on the outgoing, incoming or all edges of that node.
+
+ For example: :py:meth:`inc_degree` will refer to the degree of the node
+ computed over the incoming edges (the number of neighbours linking to
+ the node).
+
+ - the prefixes such as *forw* and *back* will refer to the
+ orientation of the edges used in the method with respect to the node.
+
+ For example: :py:meth:`forw_bfs` will start at the node then use the
+ outgoing edges to traverse the graph (goes forward).
+ """
+
+ def __init__(self, edges=None):
+ """
+ Initialization
+ """
+
+ self.next_edge = 0
+ self.nodes, self.edges = {}, {}
+ self.hidden_edges, self.hidden_nodes = {}, {}
+
+ if edges is not None:
+ for item in edges:
+ if len(item) == 2:
+ head, tail = item
+ self.add_edge(head, tail)
+ elif len(item) == 3:
+ head, tail, data = item
+ self.add_edge(head, tail, data)
+ else:
+ raise GraphError("Cannot create edge from %s" % (item,))
+
+ def __repr__(self):
+ return "<Graph: %d nodes, %d edges>" % (
+ self.number_of_nodes(),
+ self.number_of_edges(),
+ )
+
+ def add_node(self, node, node_data=None):
+ """
+ Adds a new node to the graph. Arbitrary data can be attached to the
+ node via the node_data parameter. Adding the same node twice will be
+ silently ignored.
+
+ The node must be a hashable value.
+ """
+ #
+ # the nodes will contain tuples that will store incoming edges,
+ # outgoing edges and data
+ #
+ # index 0 -> incoming edges
+ # index 1 -> outgoing edges
+
+ if node in self.hidden_nodes:
+ # Node is present, but hidden
+ return
+
+ if node not in self.nodes:
+ self.nodes[node] = ([], [], node_data)
+
+ def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):
+ """
+ Adds a directed edge going from head_id to tail_id.
+ Arbitrary data can be attached to the edge via edge_data.
+        If *create_nodes* is true (the default), endpoint nodes that do not
+        yet exist are created automatically.
+
+ :param head_id: head node
+ :param tail_id: tail node
+ :param edge_data: (optional) data attached to the edge
+ :param create_nodes: (optional) creates the head_id or tail_id
+ node in case they did not exist
+ """
+        # shortcut
+ edge = self.next_edge
+
+ # add nodes if on automatic node creation
+ if create_nodes:
+ self.add_node(head_id)
+ self.add_node(tail_id)
+
+ # update the corresponding incoming and outgoing lists in the nodes
+ # index 0 -> incoming edges
+ # index 1 -> outgoing edges
+
+ try:
+ self.nodes[tail_id][0].append(edge)
+ self.nodes[head_id][1].append(edge)
+ except KeyError:
+ raise GraphError("Invalid nodes %s -> %s" % (head_id, tail_id))
+
+ # store edge information
+ self.edges[edge] = (head_id, tail_id, edge_data)
+
+ self.next_edge += 1
+
+ def hide_edge(self, edge):
+ """
+ Hides an edge from the graph. The edge may be unhidden at some later
+ time.
+ """
+ try:
+ head_id, tail_id, edge_data = self.hidden_edges[edge] = self.edges[edge]
+ self.nodes[tail_id][0].remove(edge)
+ self.nodes[head_id][1].remove(edge)
+ del self.edges[edge]
+ except KeyError:
+ raise GraphError("Invalid edge %s" % edge)
+
+ def hide_node(self, node):
+ """
+ Hides a node from the graph. The incoming and outgoing edges of the
+ node will also be hidden. The node may be unhidden at some later time.
+ """
+ try:
+ all_edges = self.all_edges(node)
+ self.hidden_nodes[node] = (self.nodes[node], all_edges)
+ for edge in all_edges:
+ self.hide_edge(edge)
+ del self.nodes[node]
+ except KeyError:
+ raise GraphError("Invalid node %s" % node)
+
+ def restore_node(self, node):
+ """
+ Restores a previously hidden node back into the graph and restores
+ all of its incoming and outgoing edges.
+ """
+ try:
+ self.nodes[node], all_edges = self.hidden_nodes[node]
+ for edge in all_edges:
+ self.restore_edge(edge)
+ del self.hidden_nodes[node]
+ except KeyError:
+ raise GraphError("Invalid node %s" % node)
+
+ def restore_edge(self, edge):
+ """
+ Restores a previously hidden edge back into the graph.
+ """
+ try:
+ head_id, tail_id, data = self.hidden_edges[edge]
+ self.nodes[tail_id][0].append(edge)
+ self.nodes[head_id][1].append(edge)
+ self.edges[edge] = head_id, tail_id, data
+ del self.hidden_edges[edge]
+ except KeyError:
+ raise GraphError("Invalid edge %s" % edge)
+
+ def restore_all_edges(self):
+ """
+ Restores all hidden edges.
+ """
+ for edge in list(self.hidden_edges.keys()):
+ try:
+ self.restore_edge(edge)
+ except GraphError:
+ pass
+
+ def restore_all_nodes(self):
+ """
+ Restores all hidden nodes.
+ """
+ for node in list(self.hidden_nodes.keys()):
+ self.restore_node(node)
+
+ def __contains__(self, node):
+ """
+ Test whether a node is in the graph
+ """
+ return node in self.nodes
+
+ def edge_by_id(self, edge):
+ """
+        Returns the (head, tail) node pair for the given edge id
+ """
+ try:
+ head, tail, data = self.edges[edge]
+ except KeyError:
+ head, tail = None, None
+ raise GraphError("Invalid edge %s" % edge)
+
+ return (head, tail)
+
+ def edge_by_node(self, head, tail):
+ """
+ Returns the edge that connects the head_id and tail_id nodes
+ """
+ for edge in self.out_edges(head):
+ if self.tail(edge) == tail:
+ return edge
+ return None
+
+ def number_of_nodes(self):
+ """
+ Returns the number of nodes
+ """
+ return len(self.nodes)
+
+ def number_of_edges(self):
+ """
+ Returns the number of edges
+ """
+ return len(self.edges)
+
+ def __iter__(self):
+ """
+ Iterates over all nodes in the graph
+ """
+ return iter(self.nodes)
+
+ def node_list(self):
+ """
+ Return a list of the node ids for all visible nodes in the graph.
+ """
+ return list(self.nodes.keys())
+
+ def edge_list(self):
+ """
+        Returns a list of the edge ids for all visible edges in the graph.
+ """
+ return list(self.edges.keys())
+
+ def number_of_hidden_edges(self):
+ """
+ Returns the number of hidden edges
+ """
+ return len(self.hidden_edges)
+
+ def number_of_hidden_nodes(self):
+ """
+ Returns the number of hidden nodes
+ """
+ return len(self.hidden_nodes)
+
+ def hidden_node_list(self):
+ """
+ Returns the list with the hidden nodes
+ """
+ return list(self.hidden_nodes.keys())
+
+ def hidden_edge_list(self):
+ """
+ Returns a list with the hidden edges
+ """
+ return list(self.hidden_edges.keys())
+
+ def describe_node(self, node):
+ """
+ return node, node data, outgoing edges, incoming edges for node
+ """
+ incoming, outgoing, data = self.nodes[node]
+ return node, data, outgoing, incoming
+
+ def describe_edge(self, edge):
+ """
+ return edge, edge data, head, tail for edge
+ """
+ head, tail, data = self.edges[edge]
+ return edge, data, head, tail
+
+ def node_data(self, node):
+ """
+ Returns the data associated with a node
+ """
+ return self.nodes[node][2]
+
+ def edge_data(self, edge):
+ """
+ Returns the data associated with an edge
+ """
+ return self.edges[edge][2]
+
+ def update_edge_data(self, edge, edge_data):
+ """
+ Replace the edge data for a specific edge
+ """
+ self.edges[edge] = self.edges[edge][0:2] + (edge_data,)
+
+ def head(self, edge):
+ """
+ Returns the node of the head of the edge.
+ """
+ return self.edges[edge][0]
+
+ def tail(self, edge):
+ """
+        Returns the node at the tail of the edge.
+ """
+ return self.edges[edge][1]
+
+ def out_nbrs(self, node):
+ """
+ List of nodes connected by outgoing edges
+ """
+ return [self.tail(n) for n in self.out_edges(node)]
+
+ def inc_nbrs(self, node):
+ """
+ List of nodes connected by incoming edges
+ """
+ return [self.head(n) for n in self.inc_edges(node)]
+
+ def all_nbrs(self, node):
+ """
+ List of nodes connected by incoming and outgoing edges
+ """
+ return list(dict.fromkeys(self.inc_nbrs(node) + self.out_nbrs(node)))
+
+ def out_edges(self, node):
+ """
+ Returns a list of the outgoing edges
+ """
+ try:
+ return list(self.nodes[node][1])
+ except KeyError:
+ raise GraphError("Invalid node %s" % node)
+
+ def inc_edges(self, node):
+ """
+ Returns a list of the incoming edges
+ """
+ try:
+ return list(self.nodes[node][0])
+ except KeyError:
+ raise GraphError("Invalid node %s" % node)
+
+ def all_edges(self, node):
+ """
+        Returns a set of the incoming and outgoing edges.
+ """
+ return set(self.inc_edges(node) + self.out_edges(node))
+
+ def out_degree(self, node):
+ """
+ Returns the number of outgoing edges
+ """
+ return len(self.out_edges(node))
+
+ def inc_degree(self, node):
+ """
+ Returns the number of incoming edges
+ """
+ return len(self.inc_edges(node))
+
+ def all_degree(self, node):
+ """
+ The total degree of a node
+ """
+ return self.inc_degree(node) + self.out_degree(node)
+
+ def _topo_sort(self, forward=True):
+ """
+ Topological sort.
+
+ Returns a list of nodes where the successors (based on outgoing and
+ incoming edges selected by the forward parameter) of any given node
+ appear in the sequence after that node.
+ """
+ topo_list = []
+ queue = deque()
+ indeg = {}
+
+ # select the operation that will be performed
+ if forward:
+ get_edges = self.out_edges
+ get_degree = self.inc_degree
+ get_next = self.tail
+ else:
+ get_edges = self.inc_edges
+ get_degree = self.out_degree
+ get_next = self.head
+
+ for node in self.node_list():
+ degree = get_degree(node)
+ if degree:
+ indeg[node] = degree
+ else:
+ queue.append(node)
+
+ while queue:
+ curr_node = queue.popleft()
+ topo_list.append(curr_node)
+ for edge in get_edges(curr_node):
+ tail_id = get_next(edge)
+ if tail_id in indeg:
+ indeg[tail_id] -= 1
+ if indeg[tail_id] == 0:
+ queue.append(tail_id)
+
+ if len(topo_list) == len(self.node_list()):
+ valid = True
+ else:
+ # the graph has cycles, invalid topological sort
+ valid = False
+
+ return (valid, topo_list)
+
+ def forw_topo_sort(self):
+ """
+ Topological sort.
+
+ Returns a list of nodes where the successors (based on outgoing edges)
+ of any given node appear in the sequence after that node.
+ """
+ return self._topo_sort(forward=True)
+
+ def back_topo_sort(self):
+ """
+ Reverse topological sort.
+
+ Returns a list of nodes where the successors (based on incoming edges)
+ of any given node appear in the sequence after that node.
+ """
+ return self._topo_sort(forward=False)
+
+ def _bfs_subgraph(self, start_id, forward=True):
+ """
+        Private method that creates a subgraph in BFS order.
+
+ The forward parameter specifies whether it is a forward or backward
+ traversal.
+ """
+ if forward:
+ get_bfs = self.forw_bfs
+ get_nbrs = self.out_nbrs
+ else:
+ get_bfs = self.back_bfs
+ get_nbrs = self.inc_nbrs
+
+ g = Graph()
+ bfs_list = get_bfs(start_id)
+ for node in bfs_list:
+ g.add_node(node)
+
+ for node in bfs_list:
+ for nbr_id in get_nbrs(node):
+ if forward:
+ g.add_edge(node, nbr_id)
+ else:
+ g.add_edge(nbr_id, node)
+
+ return g
+
+ def forw_bfs_subgraph(self, start_id):
+ """
+ Creates and returns a subgraph consisting of the breadth first
+ reachable nodes based on their outgoing edges.
+ """
+ return self._bfs_subgraph(start_id, forward=True)
+
+ def back_bfs_subgraph(self, start_id):
+ """
+ Creates and returns a subgraph consisting of the breadth first
+ reachable nodes based on the incoming edges.
+ """
+ return self._bfs_subgraph(start_id, forward=False)
+
+ def iterdfs(self, start, end=None, forward=True):
+ """
+        Yields nodes in a depth-first traversal.
+
+ The forward parameter specifies whether it is a forward or backward
+ traversal.
+ """
+ visited, stack = {start}, deque([start])
+
+ if forward:
+ get_edges = self.out_edges
+ get_next = self.tail
+ else:
+ get_edges = self.inc_edges
+ get_next = self.head
+
+ while stack:
+ curr_node = stack.pop()
+ yield curr_node
+ if curr_node == end:
+ break
+ for edge in sorted(get_edges(curr_node)):
+ tail = get_next(edge)
+ if tail not in visited:
+ visited.add(tail)
+ stack.append(tail)
+
+ def iterdata(self, start, end=None, forward=True, condition=None):
+ """
+ Perform a depth-first walk of the graph (as ``iterdfs``)
+ and yield the item data of every node where condition matches. The
+ condition callback is only called when node_data is not None.
+ """
+
+ visited, stack = {start}, deque([start])
+
+ if forward:
+ get_edges = self.out_edges
+ get_next = self.tail
+ else:
+ get_edges = self.inc_edges
+ get_next = self.head
+
+ get_data = self.node_data
+
+ while stack:
+ curr_node = stack.pop()
+ curr_data = get_data(curr_node)
+ if curr_data is not None:
+ if condition is not None and not condition(curr_data):
+ continue
+ yield curr_data
+ if curr_node == end:
+ break
+ for edge in get_edges(curr_node):
+ tail = get_next(edge)
+ if tail not in visited:
+ visited.add(tail)
+ stack.append(tail)
+
+ def _iterbfs(self, start, end=None, forward=True):
+ """
+ The forward parameter specifies whether it is a forward or backward
+        traversal. Yields tuples where the first value is the node id and the
+        second value is its hop distance from *start*.
+ """
+ queue, visited = deque([(start, 0)]), {start}
+
+ # the direction of the bfs depends on the edges that are sampled
+ if forward:
+ get_edges = self.out_edges
+ get_next = self.tail
+ else:
+ get_edges = self.inc_edges
+ get_next = self.head
+
+ while queue:
+ curr_node, curr_step = queue.popleft()
+ yield (curr_node, curr_step)
+ if curr_node == end:
+ break
+ for edge in get_edges(curr_node):
+ tail = get_next(edge)
+ if tail not in visited:
+ visited.add(tail)
+ queue.append((tail, curr_step + 1))
+
+ def forw_bfs(self, start, end=None):
+ """
+ Returns a list of nodes in some forward BFS order.
+
+ Starting from the start node the breadth first search proceeds along
+ outgoing edges.
+ """
+ return [node for node, step in self._iterbfs(start, end, forward=True)]
+
+ def back_bfs(self, start, end=None):
+ """
+ Returns a list of nodes in some backward BFS order.
+
+ Starting from the start node the breadth first search proceeds along
+ incoming edges.
+ """
+ return [node for node, _ in self._iterbfs(start, end, forward=False)]
+
+ def forw_dfs(self, start, end=None):
+ """
+ Returns a list of nodes in some forward DFS order.
+
+ Starting with the start node the depth first search proceeds along
+ outgoing edges.
+ """
+ return list(self.iterdfs(start, end, forward=True))
+
+ def back_dfs(self, start, end=None):
+ """
+ Returns a list of nodes in some backward DFS order.
+
+ Starting from the start node the depth first search proceeds along
+ incoming edges.
+ """
+ return list(self.iterdfs(start, end, forward=False))
+
+ def connected(self):
+ """
+        Returns :py:data:`True` if every node in the graph can be reached from
+        every other node.
+ """
+ node_list = self.node_list()
+ for node in node_list:
+ bfs_list = self.forw_bfs(node)
+ if len(bfs_list) != len(node_list):
+ return False
+ return True
+
+ def clust_coef(self, node):
+ """
+ Computes and returns the local clustering coefficient of node.
+
+        The local clustering coefficient is the ratio of the actual number of
+        edges between neighbours of *node* to the maximum possible number of
+        edges between those neighbours.
+
+ See "Local Clustering Coefficient" on
+ <http://en.wikipedia.org/wiki/Clustering_coefficient>
+ for a formal definition.
+ """
+ num = 0
+ nbr_set = set(self.out_nbrs(node))
+
+ if node in nbr_set:
+ nbr_set.remove(node) # loop defense
+
+ for nbr in nbr_set:
+ sec_set = set(self.out_nbrs(nbr))
+ if nbr in sec_set:
+ sec_set.remove(nbr) # loop defense
+ num += len(nbr_set & sec_set)
+
+ nbr_num = len(nbr_set)
+ if nbr_num:
+ clust_coef = float(num) / (nbr_num * (nbr_num - 1))
+ else:
+ clust_coef = 0.0
+ return clust_coef
+
+ def get_hops(self, start, end=None, forward=True):
+ """
+ Computes the hop distance to all nodes centered around a node.
+
+        First order neighbours are at hop 1, their neighbours are at hop 2, etc.
+ Uses :py:meth:`forw_bfs` or :py:meth:`back_bfs` depending on the value
+ of the forward parameter. If the distance between all neighbouring
+ nodes is 1 the hop number corresponds to the shortest distance between
+ the nodes.
+
+ :param start: the starting node
+ :param end: ending node (optional). When not specified will search the
+ whole graph.
+ :param forward: directionality parameter (optional).
+ If C{True} (default) it uses L{forw_bfs} otherwise L{back_bfs}.
+ :return: returns a list of tuples where each tuple contains the
+ node and the hop.
+
+ Typical usage::
+
+ >>> print (graph.get_hops(1, 8))
+ >>> [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
+ # node 1 is at 0 hops
+ # node 2 is at 1 hop
+ # ...
+ # node 8 is at 5 hops
+ """
+ if forward:
+ return list(self._iterbfs(start=start, end=end, forward=True))
+ else:
+ return list(self._iterbfs(start=start, end=end, forward=False))
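
A short sketch of the Graph API added above, following the *forw*/*back* and
*out*/*inc* naming conventions from the class docstring; the example graph is
arbitrary::

    from altgraph.Graph import Graph

    g = Graph([(1, 2), (1, 3), (3, 4), (4, 5)])
    print(g.forw_bfs(1))               # breadth-first from node 1: [1, 2, 3, 4, 5]
    print(g.inc_degree(4))             # number of incoming edges of node 4: 1
    valid, order = g.forw_topo_sort()  # valid is False if the graph has cycles
    g.hide_node(3)                     # reversibly removes node 3 and its edges
    g.restore_node(3)
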
diff --git a/lib/spack/external/_vendoring/altgraph/GraphAlgo.py b/lib/spack/external/_vendoring/altgraph/GraphAlgo.py
new file mode 100644
index 0000000000..f93e73dcda
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/GraphAlgo.py
@@ -0,0 +1,171 @@
+"""
+altgraph.GraphAlgo - Graph algorithms
+=====================================
+"""
+from altgraph import GraphError
+
+
+def dijkstra(graph, start, end=None):
+ """
+ Dijkstra's algorithm for shortest paths
+
+ `David Eppstein, UC Irvine, 4 April 2002
+ <http://www.ics.uci.edu/~eppstein/161/python/>`_
+
+ `Python Cookbook Recipe
+ <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466>`_
+
+ Find shortest paths from the start node to all nodes nearer than or
+ equal to the end node.
+
+ Dijkstra's algorithm is only guaranteed to work correctly when all edge
+ lengths are positive. This code does not verify this property for all
+ edges (only the edges examined until the end vertex is reached), but will
+ correctly compute shortest paths even for some graphs with negative edges,
+ and will raise an exception if it discovers that a negative edge has
+ caused it to make a mistake.
+
+ Adapted to altgraph by Istvan Albert, Pennsylvania State University -
+ June, 9 2004
+ """
+ D = {} # dictionary of final distances
+ P = {} # dictionary of predecessors
+ Q = _priorityDictionary() # estimated distances of non-final vertices
+ Q[start] = 0
+
+ for v in Q:
+ D[v] = Q[v]
+ if v == end:
+ break
+
+ for w in graph.out_nbrs(v):
+ edge_id = graph.edge_by_node(v, w)
+ vwLength = D[v] + graph.edge_data(edge_id)
+ if w in D:
+ if vwLength < D[w]:
+ raise GraphError(
+ "Dijkstra: found better path to already-final vertex"
+ )
+ elif w not in Q or vwLength < Q[w]:
+ Q[w] = vwLength
+ P[w] = v
+
+ return (D, P)
+
+
+def shortest_path(graph, start, end):
+ """
+ Find a single shortest path from the *start* node to the *end* node.
+ The input has the same conventions as dijkstra(). The output is a list of
+ the nodes in order along the shortest path.
+
+ **Note that the distances must be stored in the edge data as numeric data**
+ """
+
+ D, P = dijkstra(graph, start, end)
+ Path = []
+ while 1:
+ Path.append(end)
+ if end == start:
+ break
+ end = P[end]
+ Path.reverse()
+ return Path
+
+
+#
+# Utility classes and functions
+#
+class _priorityDictionary(dict):
+ """
+ Priority dictionary using binary heaps (internal use only)
+
+ David Eppstein, UC Irvine, 8 Mar 2002
+
+ Implements a data structure that acts almost like a dictionary, with
+ two modifications:
+
+ 1. D.smallest() returns the value x minimizing D[x]. For this to
+ work correctly, all values D[x] stored in the dictionary must be
+ comparable.
+
+ 2. iterating "for x in D" finds and removes the items from D in sorted
+ order. Each item is not removed until the next item is requested,
+ so D[x] will still return a useful value until the next iteration
+ of the for-loop. Each operation takes logarithmic amortized time.
+ """
+
+ def __init__(self):
+ """
+ Initialize priorityDictionary by creating binary heap of pairs
+ (value,key). Note that changing or removing a dict entry will not
+ remove the old pair from the heap until it is found by smallest()
+ or until the heap is rebuilt.
+ """
+ self.__heap = []
+ dict.__init__(self)
+
+ def smallest(self):
+ """
+ Find smallest item after removing deleted items from front of heap.
+ """
+ if len(self) == 0:
+ raise IndexError("smallest of empty priorityDictionary")
+ heap = self.__heap
+ while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:
+ lastItem = heap.pop()
+ insertionPoint = 0
+ while 1:
+ smallChild = 2 * insertionPoint + 1
+ if (
+ smallChild + 1 < len(heap)
+ and heap[smallChild] > heap[smallChild + 1]
+ ):
+ smallChild += 1
+ if smallChild >= len(heap) or lastItem <= heap[smallChild]:
+ heap[insertionPoint] = lastItem
+ break
+ heap[insertionPoint] = heap[smallChild]
+ insertionPoint = smallChild
+ return heap[0][1]
+
+ def __iter__(self):
+ """
+ Create destructive sorted iterator of priorityDictionary.
+ """
+
+ def iterfn():
+ while len(self) > 0:
+ x = self.smallest()
+ yield x
+ del self[x]
+
+ return iterfn()
+
+ def __setitem__(self, key, val):
+ """
+ Change value stored in dictionary and add corresponding pair to heap.
+ Rebuilds the heap if the number of deleted items gets large, to avoid
+ memory leakage.
+ """
+ dict.__setitem__(self, key, val)
+ heap = self.__heap
+ if len(heap) > 2 * len(self):
+ self.__heap = [(v, k) for k, v in self.items()]
+ self.__heap.sort()
+ else:
+ newPair = (val, key)
+ insertionPoint = len(heap)
+ heap.append(None)
+ while insertionPoint > 0 and newPair < heap[(insertionPoint - 1) // 2]:
+ heap[insertionPoint] = heap[(insertionPoint - 1) // 2]
+ insertionPoint = (insertionPoint - 1) // 2
+ heap[insertionPoint] = newPair
+
+ def setdefault(self, key, val):
+ """
+ Reimplement setdefault to pass through our customized __setitem__.
+ """
+ if key not in self:
+ self[key] = val
+ return self[key]
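
A small usage sketch for the algorithms above: ``dijkstra()`` and
``shortest_path()`` read numeric distances from the edge data, so the weights
below are supplied via *edge_data* (the graph itself is made up)::

    from altgraph import Graph, GraphAlgo

    g = Graph.Graph()
    for head, tail, dist in [(1, 2, 4), (2, 3, 1), (1, 3, 7)]:
        g.add_edge(head, tail, edge_data=dist)

    print(GraphAlgo.shortest_path(g, 1, 3))  # [1, 2, 3], total length 5 beats 7
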
diff --git a/lib/spack/external/_vendoring/altgraph/GraphStat.py b/lib/spack/external/_vendoring/altgraph/GraphStat.py
new file mode 100644
index 0000000000..577464b41e
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/GraphStat.py
@@ -0,0 +1,73 @@
+"""
+altgraph.GraphStat - Functions providing various graph statistics
+=================================================================
+"""
+
+
+def degree_dist(graph, limits=(0, 0), bin_num=10, mode="out"):
+ """
+ Computes the degree distribution for a graph.
+
+ Returns a list of tuples where the first element of the tuple is the
+ center of the bin representing a range of degrees and the second element
+ of the tuple are the number of nodes with the degree falling in the range.
+
+ Example::
+
+ ....
+ """
+
+ deg = []
+ if mode == "inc":
+ get_deg = graph.inc_degree
+ else:
+ get_deg = graph.out_degree
+
+ for node in graph:
+ deg.append(get_deg(node))
+
+ if not deg:
+ return []
+
+ results = _binning(values=deg, limits=limits, bin_num=bin_num)
+
+ return results
+
+
+_EPS = 1.0 / (2.0 ** 32)
+
+
+def _binning(values, limits=(0, 0), bin_num=10):
+ """
+ Bins data that falls between certain limits, if the limits are (0, 0) the
+ minimum and maximum values are used.
+
+ Returns a list of tuples where the first element of the tuple is the
+ center of the bin and the second element of the tuple are the counts.
+ """
+ if limits == (0, 0):
+ min_val, max_val = min(values) - _EPS, max(values) + _EPS
+ else:
+ min_val, max_val = limits
+
+ # get bin size
+ bin_size = (max_val - min_val) / float(bin_num)
+ bins = [0] * (bin_num)
+
+ # will ignore these outliers for now
+ for value in values:
+ try:
+ if (value - min_val) >= 0:
+ index = int((value - min_val) / float(bin_size))
+ bins[index] += 1
+ except IndexError:
+ pass
+
+ # make it ready for an x,y plot
+ result = []
+ center = (bin_size / 2) + min_val
+ for i, y in enumerate(bins):
+ x = center + bin_size * i
+ result.append((x, y))
+
+ return result
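
To illustrate the binning functions above (the example graph is arbitrary),
the degree distribution of a graph can be computed like this::

    from altgraph import Graph, GraphStat

    g = Graph.Graph([(1, 2), (1, 3), (1, 4), (2, 3)])

    # ten (bin_center, node_count) tuples spanning the out-degree range
    print(GraphStat.degree_dist(g, bin_num=10, mode="out"))
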
diff --git a/lib/spack/external/_vendoring/altgraph/GraphUtil.py b/lib/spack/external/_vendoring/altgraph/GraphUtil.py
new file mode 100644
index 0000000000..cfd6a34f3c
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/GraphUtil.py
@@ -0,0 +1,139 @@
+"""
+altgraph.GraphUtil - Utility classes and functions
+==================================================
+"""
+
+import random
+from collections import deque
+
+from altgraph import Graph, GraphError
+
+
+def generate_random_graph(node_num, edge_num, self_loops=False, multi_edges=False):
+ """
+ Generates and returns a :py:class:`~altgraph.Graph.Graph` instance with
+ *node_num* nodes randomly connected by *edge_num* edges.
+ """
+ g = Graph.Graph()
+
+ if not multi_edges:
+ if self_loops:
+ max_edges = node_num * node_num
+ else:
+ max_edges = node_num * (node_num - 1)
+
+ if edge_num > max_edges:
+ raise GraphError("inconsistent arguments to 'generate_random_graph'")
+
+ nodes = range(node_num)
+
+ for node in nodes:
+ g.add_node(node)
+
+ while 1:
+ head = random.choice(nodes)
+ tail = random.choice(nodes)
+
+ # loop defense
+ if head == tail and not self_loops:
+ continue
+
+ # multiple edge defense
+ if g.edge_by_node(head, tail) is not None and not multi_edges:
+ continue
+
+ # add the edge
+ g.add_edge(head, tail)
+ if g.number_of_edges() >= edge_num:
+ break
+
+ return g
+
+
+def generate_scale_free_graph(steps, growth_num, self_loops=False, multi_edges=False):
+ """
+ Generates and returns a :py:class:`~altgraph.Graph.Graph` instance that
+ will have *steps* \\* *growth_num* nodes and a scale free (powerlaw)
+ connectivity. Starting with a fully connected graph with *growth_num*
+ nodes at every step *growth_num* nodes are added to the graph and are
+ connected to existing nodes with a probability proportional to the degree
+ of these existing nodes.
+ """
+ # The code doesn't seem to do what the documentation claims.
+ graph = Graph.Graph()
+
+ # initialize the graph
+ store = []
+ for i in range(growth_num):
+ for j in range(i + 1, growth_num):
+ store.append(i)
+ store.append(j)
+ graph.add_edge(i, j)
+
+ # generate
+ for node in range(growth_num, steps * growth_num):
+ graph.add_node(node)
+ while graph.out_degree(node) < growth_num:
+ nbr = random.choice(store)
+
+ # loop defense
+ if node == nbr and not self_loops:
+ continue
+
+ # multi edge defense
+ if graph.edge_by_node(node, nbr) and not multi_edges:
+ continue
+
+ graph.add_edge(node, nbr)
+
+ for nbr in graph.out_nbrs(node):
+ store.append(node)
+ store.append(nbr)
+
+ return graph
+
+
+def filter_stack(graph, head, filters):
+ """
+ Perform a walk in a depth-first order starting
+ at *head*.
+
+ Returns (visited, removes, orphans).
+
+ * visited: the set of visited nodes
+    * removes: the set of nodes whose node
+      data does not satisfy all *filters*
+ * orphans: tuples of (last_good, node),
+ where node is not in removes, is directly
+ reachable from a node in *removes* and
+ *last_good* is the closest upstream node that is not
+ in *removes*.
+ """
+
+ visited, removes, orphans = {head}, set(), set()
+ stack = deque([(head, head)])
+ get_data = graph.node_data
+ get_edges = graph.out_edges
+ get_tail = graph.tail
+
+ while stack:
+ last_good, node = stack.pop()
+ data = get_data(node)
+ if data is not None:
+ for filtfunc in filters:
+ if not filtfunc(data):
+ removes.add(node)
+ break
+ else:
+ last_good = node
+ for edge in get_edges(node):
+ tail = get_tail(edge)
+ if last_good is not node:
+ orphans.add((last_good, tail))
+ if tail not in visited:
+ visited.add(tail)
+ stack.append((last_good, tail))
+
+ orphans = [(lg, tl) for (lg, tl) in orphans if tl not in removes]
+
+ return visited, removes, orphans
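
A quick sketch of the generators above; the sizes are arbitrary. With the
default ``multi_edges=False`` the random generator keeps drawing node pairs
until exactly *edge_num* distinct edges exist::

    from altgraph import GraphUtil

    g = GraphUtil.generate_random_graph(10, 20)
    assert g.number_of_nodes() == 10
    assert g.number_of_edges() == 20

    sf = GraphUtil.generate_scale_free_graph(steps=5, growth_num=4)
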
diff --git a/lib/spack/external/_vendoring/altgraph/LICENSE b/lib/spack/external/_vendoring/altgraph/LICENSE
new file mode 100644
index 0000000000..6013a212b8
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2004 Istvan Albert unless otherwise noted.
+Copyright (c) 2006-2010 Bob Ippolito
+Copyright (2) 2010-2020 Ronald Oussoren, et. al.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/lib/spack/external/_vendoring/altgraph/ObjectGraph.py b/lib/spack/external/_vendoring/altgraph/ObjectGraph.py
new file mode 100644
index 0000000000..379b05b129
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/ObjectGraph.py
@@ -0,0 +1,212 @@
+"""
+altgraph.ObjectGraph - Graph of objects with an identifier
+==========================================================
+
+A graph of objects that have a "graphident" attribute.
+graphident is the key for the object in the graph
+"""
+
+from altgraph import GraphError
+from altgraph.Graph import Graph
+from altgraph.GraphUtil import filter_stack
+
+
+class ObjectGraph(object):
+ """
+ A graph of objects that have a "graphident" attribute.
+ graphident is the key for the object in the graph
+ """
+
+ def __init__(self, graph=None, debug=0):
+ if graph is None:
+ graph = Graph()
+ self.graphident = self
+ self.graph = graph
+ self.debug = debug
+ self.indent = 0
+ graph.add_node(self, None)
+
+ def __repr__(self):
+ return "<%s>" % (type(self).__name__,)
+
+ def flatten(self, condition=None, start=None):
+ """
+ Iterate over the subgraph that is entirely reachable by condition
+ starting from the given start node or the ObjectGraph root
+ """
+ if start is None:
+ start = self
+ start = self.getRawIdent(start)
+ return self.graph.iterdata(start=start, condition=condition)
+
+ def nodes(self):
+ for ident in self.graph:
+ node = self.graph.node_data(ident)
+ if node is not None:
+ yield self.graph.node_data(ident)
+
+ def get_edges(self, node):
+ if node is None:
+ node = self
+ start = self.getRawIdent(node)
+ _, _, outraw, incraw = self.graph.describe_node(start)
+
+ def iter_edges(lst, n):
+ seen = set()
+ for tpl in (self.graph.describe_edge(e) for e in lst):
+ ident = tpl[n]
+ if ident not in seen:
+ yield self.findNode(ident)
+ seen.add(ident)
+
+ return iter_edges(outraw, 3), iter_edges(incraw, 2)
+
+ def edgeData(self, fromNode, toNode):
+ if fromNode is None:
+ fromNode = self
+ start = self.getRawIdent(fromNode)
+ stop = self.getRawIdent(toNode)
+ edge = self.graph.edge_by_node(start, stop)
+ return self.graph.edge_data(edge)
+
+ def updateEdgeData(self, fromNode, toNode, edgeData):
+ if fromNode is None:
+ fromNode = self
+ start = self.getRawIdent(fromNode)
+ stop = self.getRawIdent(toNode)
+ edge = self.graph.edge_by_node(start, stop)
+ self.graph.update_edge_data(edge, edgeData)
+
+ def filterStack(self, filters):
+ """
+ Filter the ObjectGraph in-place by removing all edges to nodes that
+ do not match every filter in the given filter list
+
+ Returns a tuple containing the number of:
+ (nodes_visited, nodes_removed, nodes_orphaned)
+ """
+ visited, removes, orphans = filter_stack(self.graph, self, filters)
+
+ for last_good, tail in orphans:
+ self.graph.add_edge(last_good, tail, edge_data="orphan")
+
+ for node in removes:
+ self.graph.hide_node(node)
+
+ return len(visited) - 1, len(removes), len(orphans)
+
+ def removeNode(self, node):
+ """
+ Remove the given node from the graph if it exists
+ """
+ ident = self.getIdent(node)
+ if ident is not None:
+ self.graph.hide_node(ident)
+
+ def removeReference(self, fromnode, tonode):
+ """
+ Remove all edges from fromnode to tonode
+ """
+ if fromnode is None:
+ fromnode = self
+ fromident = self.getIdent(fromnode)
+ toident = self.getIdent(tonode)
+ if fromident is not None and toident is not None:
+ while True:
+ edge = self.graph.edge_by_node(fromident, toident)
+ if edge is None:
+ break
+ self.graph.hide_edge(edge)
+
+ def getIdent(self, node):
+ """
+ Get the graph identifier for a node
+ """
+ ident = self.getRawIdent(node)
+ if ident is not None:
+ return ident
+ node = self.findNode(node)
+ if node is None:
+ return None
+ return node.graphident
+
+ def getRawIdent(self, node):
+ """
+ Get the identifier for a node object
+ """
+ if node is self:
+ return node
+ ident = getattr(node, "graphident", None)
+ return ident
+
+ def __contains__(self, node):
+ return self.findNode(node) is not None
+
+ def findNode(self, node):
+ """
+ Find the node on the graph
+ """
+ ident = self.getRawIdent(node)
+ if ident is None:
+ ident = node
+ try:
+ return self.graph.node_data(ident)
+ except KeyError:
+ return None
+
+ def addNode(self, node):
+ """
+ Add a node to the graph referenced by the root
+ """
+ self.msg(4, "addNode", node)
+
+ try:
+ self.graph.restore_node(node.graphident)
+ except GraphError:
+ self.graph.add_node(node.graphident, node)
+
+ def createReference(self, fromnode, tonode, edge_data=None):
+ """
+ Create a reference from fromnode to tonode
+ """
+ if fromnode is None:
+ fromnode = self
+ fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
+ if fromident is None or toident is None:
+ return
+ self.msg(4, "createReference", fromnode, tonode, edge_data)
+ self.graph.add_edge(fromident, toident, edge_data=edge_data)
+
+ def createNode(self, cls, name, *args, **kw):
+ """
+        Add a node of type *cls* to the graph, unless a node with the
+        given name already exists
+ """
+ m = self.findNode(name)
+ if m is None:
+ m = cls(name, *args, **kw)
+ self.addNode(m)
+ return m
+
+ def msg(self, level, s, *args):
+ """
+ Print a debug message with the given level
+ """
+ if s and level <= self.debug:
+ print("%s%s %s" % (" " * self.indent, s, " ".join(map(repr, args))))
+
+ def msgin(self, level, s, *args):
+ """
+ Print a debug message and indent
+ """
+ if level <= self.debug:
+ self.msg(level, s, *args)
+ self.indent = self.indent + 1
+
+ def msgout(self, level, s, *args):
+ """
+ Dedent and print a debug message
+ """
+ if level <= self.debug:
+ self.indent = self.indent - 1
+ self.msg(level, s, *args)
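
A minimal sketch of the ObjectGraph protocol above: any object carrying a
``graphident`` attribute can be tracked (the ``Node`` class here is a made-up
stand-in)::

    from altgraph.ObjectGraph import ObjectGraph

    class Node:
        def __init__(self, graphident):
            self.graphident = graphident

    og = ObjectGraph()
    a = og.createNode(Node, "a")
    b = og.createNode(Node, "b")
    og.createReference(None, a)   # None means: reference from the graph root
    og.createReference(a, b)
    print([n.graphident for n in og.flatten()])  # ['a', 'b']
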
diff --git a/lib/spack/external/_vendoring/altgraph/__init__.py b/lib/spack/external/_vendoring/altgraph/__init__.py
new file mode 100644
index 0000000000..a56342438b
--- /dev/null
+++ b/lib/spack/external/_vendoring/altgraph/__init__.py
@@ -0,0 +1,148 @@
+"""
+altgraph - a python graph library
+=================================
+
+altgraph is a fork of `graphlib <http://pygraphlib.sourceforge.net>`_ tailored
+to use newer Python 2.3+ features, including additional support used by the
+py2app suite (modulegraph and macholib, specifically).
+
+altgraph is a python based graph (network) representation and manipulation
+package. It has started out as an extension to the
+`graph_lib module
+<http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html>`_
+written by Nathan Denny; it has been significantly optimized and expanded.
+
+The :class:`altgraph.Graph.Graph` class is loosely modeled after the
+`LEDA <http://www.algorithmic-solutions.com/enleda.htm>`_
+(Library of Efficient Datatypes) representation. The library
+includes methods for constructing graphs, BFS and DFS traversals,
+topological sort, finding connected components, shortest paths as well as a
+number of graph statistics functions. The library can also visualize graphs
+via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_.
+
+The package contains the following modules:
+
+ - the :py:mod:`altgraph.Graph` module contains the
+ :class:`~altgraph.Graph.Graph` class that stores the graph data
+
+ - the :py:mod:`altgraph.GraphAlgo` module implements graph algorithms
+   operating on graphs (:py:class:`~altgraph.Graph.Graph` instances)
+
+ - the :py:mod:`altgraph.GraphStat` module contains functions for
+ computing statistical measures on graphs
+
+ - the :py:mod:`altgraph.GraphUtil` module contains functions for
+ generating, reading and saving graphs
+
+ - the :py:mod:`altgraph.Dot` module contains functions for displaying
+ graphs via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
+
+ - the :py:mod:`altgraph.ObjectGraph` module implements a graph of
+ objects with a unique identifier
+
+Installation
+------------
+
+Download and unpack the archive, then type::
+
+ python setup.py install
+
+This will install the library in the default location. For instructions on
+how to customize the install procedure read the output of::
+
+ python setup.py --help install
+
+To verify that the code works run the test suite::
+
+ python setup.py test
+
+Example usage
+-------------
+
+Let's assume that we want to analyze the graph below (links to the full
+picture) GRAPH_IMG. Our script might then look as follows::
+
+ from altgraph import Graph, GraphAlgo, Dot
+
+ # these are the edges
+ edges = [ (1,2), (2,4), (1,3), (2,4), (3,4), (4,5), (6,5),
+ (6,14), (14,15), (6, 15), (5,7), (7, 8), (7,13), (12,8),
+ (8,13), (11,12), (11,9), (13,11), (9,13), (13,10) ]
+
+ # creates the graph
+ graph = Graph.Graph()
+ for head, tail in edges:
+ graph.add_edge(head, tail)
+
+ # do a forward BFS starting from node 1
+ print(graph.forw_bfs(1))
+
+This will print the nodes in some breadth first order::
+
+ [1, 2, 3, 4, 5, 7, 8, 13, 11, 10, 12, 9]
+
+If we wanted to get the hop-distance from node 1 to node 8
+we could write::
+
+ print(graph.get_hops(1, 8))
+
+This will print the following::
+
+ [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
+
+Node 1 is at 0 hops since it is the starting node, nodes 2 and 3 are 1 hop
+away ... node 8 is 5 hops away. To find the shortest path between two nodes
+you can use::
+
+ print(GraphAlgo.shortest_path(graph, 1, 12))
+
+It will print the nodes on one of the shortest paths (there may be more)::
+
+ [1, 2, 4, 5, 7, 13, 11, 12]
+
+To display the graph we can use the GraphViz backend::
+
+ dot = Dot.Dot(graph)
+
+ # display the graph on the monitor
+ dot.display()
+
+ # save it in an image file
+ dot.save_img(file_name='graph', file_type='gif')
+
+
+
+..
+ @author: U{Istvan Albert<http://www.personal.psu.edu/staff/i/u/iua1/>}
+
+ @license: MIT License
+
+ Copyright (c) 2004 Istvan Albert unless otherwise noted.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal in the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ IN THE SOFTWARE.
+ @requires: Python 2.3 or higher
+
+ @newfield contributor: Contributors:
+ @contributor: U{Reka Albert <http://www.phys.psu.edu/~ralbert/>}
+
+"""
+import pkg_resources
+
+__version__ = pkg_resources.require("altgraph")[0].version
+
+
+class GraphError(ValueError):
+ pass
diff --git a/lib/spack/external/_vendoring/attr/__init__.py b/lib/spack/external/_vendoring/attr/__init__.py
new file mode 100644
index 0000000000..386305d628
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/__init__.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: MIT
+
+
+import sys
+
+from functools import partial
+
+from . import converters, exceptions, filters, setters, validators
+from ._cmp import cmp_using
+from ._config import get_run_validators, set_run_validators
+from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
+from ._make import (
+ NOTHING,
+ Attribute,
+ Factory,
+ attrib,
+ attrs,
+ fields,
+ fields_dict,
+ make_class,
+ validate,
+)
+from ._version_info import VersionInfo
+
+
+__version__ = "22.1.0"
+__version_info__ = VersionInfo._from_version_string(__version__)
+
+__title__ = "attrs"
+__description__ = "Classes Without Boilerplate"
+__url__ = "https://www.attrs.org/"
+__uri__ = __url__
+__doc__ = __description__ + " <" + __uri__ + ">"
+
+__author__ = "Hynek Schlawack"
+__email__ = "hs@ox.cx"
+
+__license__ = "MIT"
+__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
+
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
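+
+# Illustrative sketch (not part of the vendored file): the aliases above
+# expose the classic API under several spellings, e.g.
+#
+# @attr.s
+# class Point:
+# x = attr.ib(default=0)
+# y = attr.ib(default=0)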
+
+__all__ = [
+ "Attribute",
+ "Factory",
+ "NOTHING",
+ "asdict",
+ "assoc",
+ "astuple",
+ "attr",
+ "attrib",
+ "attributes",
+ "attrs",
+ "cmp_using",
+ "converters",
+ "evolve",
+ "exceptions",
+ "fields",
+ "fields_dict",
+ "filters",
+ "get_run_validators",
+ "has",
+ "ib",
+ "make_class",
+ "resolve_types",
+ "s",
+ "set_run_validators",
+ "setters",
+ "validate",
+ "validators",
+]
+
+if sys.version_info[:2] >= (3, 6):
+ from ._next_gen import define, field, frozen, mutable # noqa: F401
+
+ __all__.extend(("define", "field", "frozen", "mutable"))
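+
+# Illustrative sketch: on Python >= 3.6 the next-generation API is also
+# exported, e.g.
+#
+# from attr import define, field
+#
+# @define
+# class Point:
+# x: int = field(default=0)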
diff --git a/lib/spack/external/_vendoring/attr/__init__.pyi b/lib/spack/external/_vendoring/attr/__init__.pyi
new file mode 100644
index 0000000000..03cc4c82d2
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/__init__.pyi
@@ -0,0 +1,486 @@
+import sys
+
+from typing import (
+ Any,
+ Callable,
+ ClassVar,
+ Dict,
+ Generic,
+ List,
+ Mapping,
+ Optional,
+ Protocol,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+# `import X as X` is required to make these public
+from . import converters as converters
+from . import exceptions as exceptions
+from . import filters as filters
+from . import setters as setters
+from . import validators as validators
+from ._cmp import cmp_using as cmp_using
+from ._version_info import VersionInfo
+
+__version__: str
+__version_info__: VersionInfo
+__title__: str
+__description__: str
+__url__: str
+__uri__: str
+__author__: str
+__email__: str
+__license__: str
+__copyright__: str
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=type)
+
+_EqOrderType = Union[bool, Callable[[Any], Any]]
+_ValidatorType = Callable[[Any, Attribute[_T], _T], Any]
+_ConverterType = Callable[[Any], Any]
+_FilterType = Callable[[Attribute[_T], _T], bool]
+_ReprType = Callable[[Any], str]
+_ReprArgType = Union[bool, _ReprType]
+_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any]
+_OnSetAttrArgType = Union[
+ _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
+]
+_FieldTransformer = Callable[
+ [type, List[Attribute[Any]]], List[Attribute[Any]]
+]
+# FIXME: in reality, if multiple validators are passed they must be in a list
+# or tuple, but those are invariant and so would prevent subtypes of
+# _ValidatorType from working when passed in a list or tuple.
+_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
+
+# A protocol to be able to statically accept an attrs class.
+class AttrsInstance(Protocol):
+ __attrs_attrs__: ClassVar[Any]
+
+# _make --
+
+NOTHING: object
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+if sys.version_info >= (3, 8):
+ from typing import Literal
+ @overload
+ def Factory(factory: Callable[[], _T]) -> _T: ...
+ @overload
+ def Factory(
+ factory: Callable[[Any], _T],
+ takes_self: Literal[True],
+ ) -> _T: ...
+ @overload
+ def Factory(
+ factory: Callable[[], _T],
+ takes_self: Literal[False],
+ ) -> _T: ...
+
+else:
+ @overload
+ def Factory(factory: Callable[[], _T]) -> _T: ...
+ @overload
+ def Factory(
+ factory: Union[Callable[[Any], _T], Callable[[], _T]],
+ takes_self: bool = ...,
+ ) -> _T: ...
+
+# Static type inference support via __dataclass_transform__ implemented as per:
+# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
+# This annotation must be applied to all overloads of "define" and "attrs"
+#
+# NOTE: This is a typing construct and does not exist at runtime. Extensions
+# wrapping attrs decorators should declare a separate __dataclass_transform__
+# signature in the extension module using the specification linked above to
+# provide pyright support.
+def __dataclass_transform__(
+ *,
+ eq_default: bool = True,
+ order_default: bool = False,
+ kw_only_default: bool = False,
+ field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (),
+) -> Callable[[_T], _T]: ...
+
+class Attribute(Generic[_T]):
+ name: str
+ default: Optional[_T]
+ validator: Optional[_ValidatorType[_T]]
+ repr: _ReprArgType
+ cmp: _EqOrderType
+ eq: _EqOrderType
+ order: _EqOrderType
+ hash: Optional[bool]
+ init: bool
+ converter: Optional[_ConverterType]
+ metadata: Dict[Any, Any]
+ type: Optional[Type[_T]]
+ kw_only: bool
+ on_setattr: _OnSetAttrType
+ def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+# - Pros: Handles simple cases correctly
+# - Cons: Might produce less informative errors in the case of conflicting
+# TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+# - Pros: Better error messages than #1 for conflicting TypeVars
+# - Cons: Terrible error messages for validator checks.
+# e.g. attr.ib(type=int, validator=validate_str)
+# -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+# - Pros: Simple here, and we could customize the plugin with our own errors.
+# - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+# attr() -> Any
+# attr(8) -> int
+# attr(validator=<some callable>) -> Whatever the callable expects.
+# This makes these types of assignments possible:
+# x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
+@overload
+def attrib(
+ default: None = ...,
+ validator: None = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: None = ...,
+ converter: None = ...,
+ factory: None = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def attrib(
+ default: None = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: Optional[Type[_T]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def attrib(
+ default: _T,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: Optional[Type[_T]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def attrib(
+ default: Optional[_T] = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: object = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+def field(
+ *,
+ default: None = ...,
+ validator: None = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: None = ...,
+ factory: None = ...,
+ kw_only: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def field(
+ *,
+ default: None = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def field(
+ *,
+ default: _T,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def field(
+ *,
+ default: Optional[_T] = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+ maybe_cls: _C,
+ these: Optional[Dict[str, Any]] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ auto_detect: bool = ...,
+ collect_by_mro: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+ maybe_cls: None = ...,
+ these: Optional[Dict[str, Any]] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ auto_detect: bool = ...,
+ collect_by_mro: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+ maybe_cls: _C,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+ maybe_cls: None = ...,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+
+mutable = define
+frozen = define # they differ only in their defaults
+
+def fields(cls: Type[AttrsInstance]) -> Any: ...
+def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
+def validate(inst: AttrsInstance) -> None: ...
+def resolve_types(
+ cls: _C,
+ globalns: Optional[Dict[str, Any]] = ...,
+ localns: Optional[Dict[str, Any]] = ...,
+ attribs: Optional[List[Attribute[Any]]] = ...,
+) -> _C: ...
+
+# TODO: add support for returning a proper attrs class from the mypy plugin
+# we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
+# [attr.ib()])` is valid
+def make_class(
+ name: str,
+ attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]],
+ bases: Tuple[type, ...] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ collect_by_mro: bool = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+) -> type: ...
+
+# _funcs --
+
+# TODO: add support for returning TypedDict from the mypy plugin
+# FIXME: asdict/astuple do not honor their factory args. Waiting on one of
+# these:
+# https://github.com/python/mypy/issues/4236
+# https://github.com/python/typing/issues/253
+# XXX: remember to fix attrs.asdict/astuple too!
+def asdict(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ dict_factory: Type[Mapping[Any, Any]] = ...,
+ retain_collection_types: bool = ...,
+ value_serializer: Optional[
+ Callable[[type, Attribute[Any], Any], Any]
+ ] = ...,
+ tuple_keys: Optional[bool] = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ tuple_factory: Type[Sequence[Any]] = ...,
+ retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
+def has(cls: type) -> bool: ...
+def assoc(inst: _T, **changes: Any) -> _T: ...
+def evolve(inst: _T, **changes: Any) -> _T: ...
+
+# _config --
+
+def set_run_validators(run: bool) -> None: ...
+def get_run_validators() -> bool: ...
+
+# aliases --
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;)
diff --git a/lib/spack/external/_vendoring/attr/_cmp.py b/lib/spack/external/_vendoring/attr/_cmp.py
new file mode 100644
index 0000000000..81b99e4c33
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_cmp.py
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+ eq=None,
+ lt=None,
+ le=None,
+ gt=None,
+ ge=None,
+ require_same_type=True,
+ class_name="Comparable",
+):
+ """
+ Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
+ ``cmp`` arguments to customize field comparison.
+
+ The resulting class will have a full set of ordering methods if
+ ``eq`` and at least one of ``{lt, le, gt, ge}`` are provided.
+
+ :param Optional[callable] eq: `callable` used to evaluate equality
+ of two objects.
+ :param Optional[callable] lt: `callable` used to evaluate whether
+ one object is less than another object.
+ :param Optional[callable] le: `callable` used to evaluate whether
+ one object is less than or equal to another object.
+ :param Optional[callable] gt: `callable` used to evaluate whether
+ one object is greater than another object.
+ :param Optional[callable] ge: `callable` used to evaluate whether
+ one object is greater than or equal to another object.
+
+ :param bool require_same_type: When `True`, equality and ordering methods
+ will return `NotImplemented` if objects are not of the same type.
+
+ :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+ See `comparison` for more details.
+
+ .. versionadded:: 21.1.0
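+
+ For example, a float field compared with a tolerance (an illustrative
+ sketch, not upstream documentation)::
+
+ import attr
+
+ @attr.s
+ class C:
+ x = attr.ib(eq=attr.cmp_using(eq=lambda a, b: abs(a - b) < 1e-9))
+
+ assert C(1.0) == C(1.0 + 1e-12)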
+ """
+
+ body = {
+ "__slots__": ["value"],
+ "__init__": _make_init(),
+ "_requirements": [],
+ "_is_comparable_to": _is_comparable_to,
+ }
+
+ # Add operations.
+ num_order_functions = 0
+ has_eq_function = False
+
+ if eq is not None:
+ has_eq_function = True
+ body["__eq__"] = _make_operator("eq", eq)
+ body["__ne__"] = _make_ne()
+
+ if lt is not None:
+ num_order_functions += 1
+ body["__lt__"] = _make_operator("lt", lt)
+
+ if le is not None:
+ num_order_functions += 1
+ body["__le__"] = _make_operator("le", le)
+
+ if gt is not None:
+ num_order_functions += 1
+ body["__gt__"] = _make_operator("gt", gt)
+
+ if ge is not None:
+ num_order_functions += 1
+ body["__ge__"] = _make_operator("ge", ge)
+
+ type_ = types.new_class(
+ class_name, (object,), {}, lambda ns: ns.update(body)
+ )
+
+ # Add same type requirement.
+ if require_same_type:
+ type_._requirements.append(_check_same_type)
+
+ # Add total ordering if at least one operation was defined.
+ if 0 < num_order_functions < 4:
+ if not has_eq_function:
+ # functools.total_ordering requires __eq__ to be defined,
+ # so raise an early error here to keep a nice stack trace.
+ raise ValueError(
+ "eq must be defined in order to complete ordering from "
+ "lt, le, gt, ge."
+ )
+ type_ = functools.total_ordering(type_)
+
+ return type_
+
+
+def _make_init():
+ """
+ Create __init__ method.
+ """
+
+ def __init__(self, value):
+ """
+ Initialize object with *value*.
+ """
+ self.value = value
+
+ return __init__
+
+
+def _make_operator(name, func):
+ """
+ Create operator method.
+ """
+
+ def method(self, other):
+ if not self._is_comparable_to(other):
+ return NotImplemented
+
+ result = func(self.value, other.value)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return result
+
+ method.__name__ = "__%s__" % (name,)
+ method.__doc__ = "Return a %s b. Computed by attrs." % (
+ _operation_names[name],
+ )
+
+ return method
+
+
+def _is_comparable_to(self, other):
+ """
+ Check whether `other` is comparable to `self`.
+ """
+ for func in self._requirements:
+ if not func(self, other):
+ return False
+ return True
+
+
+def _check_same_type(self, other):
+ """
+ Return True if *self* and *other* are of the same type, False otherwise.
+ """
+ return other.value.__class__ is self.value.__class__
diff --git a/lib/spack/external/_vendoring/attr/_cmp.pyi b/lib/spack/external/_vendoring/attr/_cmp.pyi
new file mode 100644
index 0000000000..35437eff62
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_cmp.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+ eq: Optional[_CompareWithType],
+ lt: Optional[_CompareWithType],
+ le: Optional[_CompareWithType],
+ gt: Optional[_CompareWithType],
+ ge: Optional[_CompareWithType],
+ require_same_type: bool,
+ class_name: str,
+) -> Type: ...
diff --git a/lib/spack/external/_vendoring/attr/_compat.py b/lib/spack/external/_vendoring/attr/_compat.py
new file mode 100644
index 0000000000..5826493257
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_compat.py
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: MIT
+
+
+import inspect
+import platform
+import sys
+import threading
+import types
+import warnings
+
+from collections.abc import Mapping, Sequence # noqa
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY36 = sys.version_info[:2] >= (3, 6)
+HAS_F_STRINGS = PY36
+PY310 = sys.version_info[:2] >= (3, 10)
+
+
+if PYPY or PY36:
+ ordered_dict = dict
+else:
+ from collections import OrderedDict
+
+ ordered_dict = OrderedDict
+
+
+def just_warn(*args, **kw):
+ warnings.warn(
+ "Running interpreter doesn't sufficiently support code object "
+ "introspection. Some features like bare super() or accessing "
+ "__class__ will not work with slotted classes.",
+ RuntimeWarning,
+ stacklevel=2,
+ )
+
+
+class _AnnotationExtractor:
+ """
+ Extract type annotations from a callable, returning None whenever there
+ is none.
+ """
+
+ __slots__ = ["sig"]
+
+ def __init__(self, callable):
+ try:
+ self.sig = inspect.signature(callable)
+ except (ValueError, TypeError): # inspect failed
+ self.sig = None
+
+ def get_first_param_type(self):
+ """
+ Return the type annotation of the first argument if it's not empty.
+ """
+ if not self.sig:
+ return None
+
+ params = list(self.sig.parameters.values())
+ if params and params[0].annotation is not inspect.Parameter.empty:
+ return params[0].annotation
+
+ return None
+
+ def get_return_type(self):
+ """
+ Return the return type if it's not empty.
+ """
+ if (
+ self.sig
+ and self.sig.return_annotation is not inspect.Signature.empty
+ ):
+ return self.sig.return_annotation
+
+ return None
+
+
+def make_set_closure_cell():
+ """Return a function of two arguments (cell, value) which sets
+ the value stored in the closure cell `cell` to `value`.
+ """
+ # pypy makes this easy. (It also supports the logic below, but
+ # why not do the easy/fast thing?)
+ if PYPY:
+
+ def set_closure_cell(cell, value):
+ cell.__setstate__((value,))
+
+ return set_closure_cell
+
+ # Otherwise gotta do it the hard way.
+
+ # Create a function that will set its first cellvar to `value`.
+ def set_first_cellvar_to(value):
+ x = value
+ return
+
+ # This function will be eliminated as dead code, but
+ # not before its reference to `x` forces `x` to be
+ # represented as a closure cell rather than a local.
+ def force_x_to_be_a_cell(): # pragma: no cover
+ return x
+
+ try:
+ # Extract the code object and make sure our assumptions about
+ # the closure behavior are correct.
+ co = set_first_cellvar_to.__code__
+ if co.co_cellvars != ("x",) or co.co_freevars != ():
+ raise AssertionError # pragma: no cover
+
+ # Convert this code object to a code object that sets the
+ # function's first _freevar_ (not cellvar) to the argument.
+ if sys.version_info >= (3, 8):
+
+ def set_closure_cell(cell, value):
+ cell.cell_contents = value
+
+ else:
+ args = [co.co_argcount]
+ args.append(co.co_kwonlyargcount)
+ args.extend(
+ [
+ co.co_nlocals,
+ co.co_stacksize,
+ co.co_flags,
+ co.co_code,
+ co.co_consts,
+ co.co_names,
+ co.co_varnames,
+ co.co_filename,
+ co.co_name,
+ co.co_firstlineno,
+ co.co_lnotab,
+ # These two arguments are reversed:
+ co.co_cellvars,
+ co.co_freevars,
+ ]
+ )
+ set_first_freevar_code = types.CodeType(*args)
+
+ def set_closure_cell(cell, value):
+ # Create a function using the set_first_freevar_code,
+ # whose first closure cell is `cell`. Calling it will
+ # change the value of that cell.
+ setter = types.FunctionType(
+ set_first_freevar_code, {}, "setter", (), (cell,)
+ )
+ # And call it to set the cell.
+ setter(value)
+
+ # Make sure it works on this interpreter:
+ def make_func_with_cell():
+ x = None
+
+ def func():
+ return x # pragma: no cover
+
+ return func
+
+ cell = make_func_with_cell().__closure__[0]
+ set_closure_cell(cell, 100)
+ if cell.cell_contents != 100:
+ raise AssertionError # pragma: no cover
+
+ except Exception:
+ return just_warn
+ else:
+ return set_closure_cell
+
+
+set_closure_cell = make_set_closure_cell()
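+
+# Illustrative sketch (not part of the vendored file): set_closure_cell
+# rewrites the value captured by a closure, e.g.
+#
+# def outer():
+# x = 1
+# def inner():
+# return x
+# return inner
+#
+# f = outer()
+# set_closure_cell(f.__closure__[0], 2)
+# assert f() == 2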
+
+# Thread-local global to track attrs instances which are already being repr'd.
+# This is needed because there is no other (thread-safe) way to pass info
+# about the instances that are already being repr'd through the call stack
+# in order to ensure we don't perform infinite recursion.
+#
+# For instance, if an instance contains a dict which contains that instance,
+# we need to know that we're already repr'ing the outside instance from within
+# the dict's repr() call.
+#
+# This lives here rather than in _make.py so that the functions in _make.py
+# don't have a direct reference to the thread-local in their globals dict.
+# If they have such a reference, it breaks cloudpickle.
+repr_context = threading.local()
diff --git a/lib/spack/external/_vendoring/attr/_config.py b/lib/spack/external/_vendoring/attr/_config.py
new file mode 100644
index 0000000000..96d4200773
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_config.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MIT
+
+
+__all__ = ["set_run_validators", "get_run_validators"]
+
+_run_validators = True
+
+
+def set_run_validators(run):
+ """
+ Set whether or not validators are run. By default, they are run.
+
+ .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+ moved to the new ``attrs`` namespace. Use `attrs.validators.set_disabled()`
+ instead.
+ """
+ if not isinstance(run, bool):
+ raise TypeError("'run' must be bool.")
+ global _run_validators
+ _run_validators = run
+
+
+def get_run_validators():
+ """
+ Return whether or not validators are run.
+
+ .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+ moved to the new ``attrs`` namespace. Use `attrs.validators.get_disabled()`
+ instead.
+ """
+ return _run_validators
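+
+# Illustrative sketch (via the public ``attr`` namespace):
+#
+# import attr
+#
+# attr.set_run_validators(False)
+# assert attr.get_run_validators() is False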
diff --git a/lib/spack/external/_vendoring/attr/_funcs.py b/lib/spack/external/_vendoring/attr/_funcs.py
new file mode 100644
index 0000000000..a982d7cb56
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_funcs.py
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: MIT
+
+
+import copy
+
+from ._make import NOTHING, _obj_setattr, fields
+from .exceptions import AttrsAttributeNotFoundError
+
+
+def asdict(
+ inst,
+ recurse=True,
+ filter=None,
+ dict_factory=dict,
+ retain_collection_types=False,
+ value_serializer=None,
+):
+ """
+ Return the ``attrs`` attribute values of *inst* as a dict.
+
+ Optionally recurse into other ``attrs``-decorated classes.
+
+ :param inst: Instance of an ``attrs``-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ ``attrs``-decorated.
+ :param callable filter: A callable whose return value determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable dict_factory: A callable to produce dictionaries from. For
+ example, to produce ordered dictionaries instead of normal Python
+ dictionaries, pass in ``collections.OrderedDict``.
+ :param bool retain_collection_types: Do not convert to ``list`` when
+ encountering an attribute whose type is ``tuple`` or ``set``. Only
+ meaningful if ``recurse`` is ``True``.
+ :param Optional[callable] value_serializer: A hook that is called for every
+ attribute or dict key/value. It receives the current instance, field
+ and value and must return the (updated) value. The hook is run *after*
+ the optional *filter* has been applied.
+
+ :rtype: return type of *dict_factory*
+
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ .. versionadded:: 16.0.0 *dict_factory*
+ .. versionadded:: 16.1.0 *retain_collection_types*
+ .. versionadded:: 20.3.0 *value_serializer*
+ .. versionadded:: 21.3.0 If a dict has a collection for a key, it is
+ serialized as a tuple.
+ """
+ attrs = fields(inst.__class__)
+ rv = dict_factory()
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+
+ if value_serializer is not None:
+ v = value_serializer(inst, a, v)
+
+ if recurse is True:
+ if has(v.__class__):
+ rv[a.name] = asdict(
+ v,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain_collection_types is True else list
+ rv[a.name] = cf(
+ [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in v
+ ]
+ )
+ elif isinstance(v, dict):
+ df = dict_factory
+ rv[a.name] = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in v.items()
+ )
+ else:
+ rv[a.name] = v
+ else:
+ rv[a.name] = v
+ return rv
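+
+# Illustrative sketch (hypothetical ``Point`` class, not part of this
+# file):
+#
+# >>> import attr
+# >>> @attr.s
+# ... class Point:
+# ... x = attr.ib()
+# ... y = attr.ib()
+# >>> attr.asdict(Point(1, 2))
+# {'x': 1, 'y': 2}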
+
+
+def _asdict_anything(
+ val,
+ is_key,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+):
+ """
+ ``asdict`` only works on attrs instances; this works on anything.
+ """
+ if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+ # Attrs class.
+ rv = asdict(
+ val,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(val, (tuple, list, set, frozenset)):
+ if retain_collection_types is True:
+ cf = val.__class__
+ elif is_key:
+ cf = tuple
+ else:
+ cf = list
+
+ rv = cf(
+ [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in val
+ ]
+ )
+ elif isinstance(val, dict):
+ df = dict_factory
+ rv = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in val.items()
+ )
+ else:
+ rv = val
+ if value_serializer is not None:
+ rv = value_serializer(None, None, rv)
+
+ return rv
+
+
+def astuple(
+ inst,
+ recurse=True,
+ filter=None,
+ tuple_factory=tuple,
+ retain_collection_types=False,
+):
+ """
+ Return the ``attrs`` attribute values of *inst* as a tuple.
+
+ Optionally recurse into other ``attrs``-decorated classes.
+
+ :param inst: Instance of an ``attrs``-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ ``attrs``-decorated.
+ :param callable filter: A callable whose return value determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable tuple_factory: A callable to produce tuples from. For
+ example, pass ``list`` to produce lists instead of tuples.
+ :param bool retain_collection_types: Do not convert to ``list``
+ or ``dict`` when encountering an attribute whose type is
+ ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
+ ``True``.
+
+ :rtype: return type of *tuple_factory*
+
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ .. versionadded:: 16.2.0
+ """
+ attrs = fields(inst.__class__)
+ rv = []
+ retain = retain_collection_types # Very long. :/
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+ if recurse is True:
+ if has(v.__class__):
+ rv.append(
+ astuple(
+ v,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain is True else list
+ rv.append(
+ cf(
+ [
+ astuple(
+ j,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(j.__class__)
+ else j
+ for j in v
+ ]
+ )
+ )
+ elif isinstance(v, dict):
+ df = v.__class__ if retain is True else dict
+ rv.append(
+ df(
+ (
+ astuple(
+ kk,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(kk.__class__)
+ else kk,
+ astuple(
+ vv,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(vv.__class__)
+ else vv,
+ )
+ for kk, vv in v.items()
+ )
+ )
+ else:
+ rv.append(v)
+ else:
+ rv.append(v)
+
+ return rv if tuple_factory is list else tuple_factory(rv)
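+
+# Illustrative sketch, continuing the hypothetical ``Point`` example:
+#
+# >>> attr.astuple(Point(1, 2))
+# (1, 2)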
+
+
+def has(cls):
+ """
+ Check whether *cls* is a class with ``attrs`` attributes.
+
+ :param type cls: Class to introspect.
+ :raise TypeError: If *cls* is not a class.
+
+ :rtype: bool
+ """
+ return getattr(cls, "__attrs_attrs__", None) is not None
+
+
+def assoc(inst, **changes):
+ """
+ Copy *inst* and apply *changes*.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
+ be found on *cls*.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ .. deprecated:: 17.1.0
+ Use `attrs.evolve` instead if you can.
+ This function will not be removed due to the slightly different approach
+ compared to `attrs.evolve`.
+ """
+ import warnings
+
+ warnings.warn(
+ "assoc is deprecated and will be removed after 2018/01.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ new = copy.copy(inst)
+ attrs = fields(inst.__class__)
+ for k, v in changes.items():
+ a = getattr(attrs, k, NOTHING)
+ if a is NOTHING:
+ raise AttrsAttributeNotFoundError(
+ "{k} is not an attrs attribute on {cl}.".format(
+ k=k, cl=new.__class__
+ )
+ )
+ _obj_setattr(new, k, v)
+ return new
+
+
+def evolve(inst, **changes):
+ """
+ Create a new instance, based on *inst* with *changes* applied.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise TypeError: If *attr_name* couldn't be found in the class
+ ``__init__``.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ .. versionadded:: 17.1.0
+ """
+ cls = inst.__class__
+ attrs = fields(cls)
+ for a in attrs:
+ if not a.init:
+ continue
+ attr_name = a.name # To deal with private attributes.
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
+ if init_name not in changes:
+ changes[init_name] = getattr(inst, attr_name)
+
+ return cls(**changes)
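+
+# Illustrative sketch, again with the hypothetical ``Point`` class:
+#
+# >>> attr.evolve(Point(1, 2), y=3)
+# Point(x=1, y=3)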
+
+
+def resolve_types(cls, globalns=None, localns=None, attribs=None):
+ """
+ Resolve any strings and forward annotations in type annotations.
+
+ This is only required if you need concrete types in `Attribute`'s *type*
+ field. In other words, you don't need to resolve your types if you only
+ use them for static type checking.
+
+ With no arguments, names will be looked up in the module in which the class
+ was created. If this is not what you want, e.g. if the name only exists
+ inside a method, you may pass *globalns* or *localns* to specify other
+ dictionaries in which to look up these names. See the docs of
+ `typing.get_type_hints` for more details.
+
+ :param type cls: Class to resolve.
+ :param Optional[dict] globalns: Dictionary containing global variables.
+ :param Optional[dict] localns: Dictionary containing local variables.
+ :param Optional[list] attribs: List of attribs for the given class.
+ This is necessary when calling from inside a ``field_transformer``
+ since *cls* is not an ``attrs`` class yet.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class and you didn't pass any attribs.
+ :raise NameError: If types cannot be resolved because of missing variables.
+
+ :returns: *cls* so you can use this function also as a class decorator.
+ Please note that you have to apply it **after** `attrs.define`. That
+ means the decorator has to come in the line **before** `attrs.define`.
+
+ .. versionadded:: 20.1.0
+ .. versionadded:: 21.1.0 *attribs*
+
+ """
+ # Since calling get_type_hints is expensive we cache whether we've
+ # done it already.
+ if getattr(cls, "__attrs_types_resolved__", None) != cls:
+ import typing
+
+ hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
+ for field in fields(cls) if attribs is None else attribs:
+ if field.name in hints:
+ # Since fields have been frozen we must work around it.
+ _obj_setattr(field, "type", hints[field.name])
+ # We store the class we resolved so that subclasses know they haven't
+ # been resolved.
+ cls.__attrs_types_resolved__ = cls
+
+ # Return the class so you can use it as a decorator too.
+ return cls
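+
+# Illustrative sketch (hypothetical classes): resolve_types sits on top of
+# the class decorator, e.g.
+#
+# import attr
+#
+# @attr.s(auto_attribs=True)
+# class B:
+# x: int = 0
+#
+# @attr.resolve_types
+# @attr.s(auto_attribs=True)
+# class A:
+# b: "B"
+#
+# assert attr.fields(A).b.type is B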
diff --git a/lib/spack/external/_vendoring/attr/_make.py b/lib/spack/external/_vendoring/attr/_make.py
new file mode 100644
index 0000000000..4d1afe3fc8
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_make.py
@@ -0,0 +1,3006 @@
+# SPDX-License-Identifier: MIT
+
+import copy
+import linecache
+import sys
+import types
+import typing
+
+from operator import itemgetter
+
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+ HAS_F_STRINGS,
+ PY310,
+ PYPY,
+ _AnnotationExtractor,
+ ordered_dict,
+ set_closure_cell,
+)
+from .exceptions import (
+ DefaultAlreadySetError,
+ FrozenInstanceError,
+ NotAnAttrsClassError,
+ UnannotatedAttributeError,
+)
+
+
+# This is used at least twice, so cache it here.
+_obj_setattr = object.__setattr__
+_init_converter_pat = "__attr_converter_%s"
+_init_factory_pat = "__attr_factory_{}"
+_tuple_property_pat = (
+ " {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
+)
+_classvar_prefixes = (
+ "typing.ClassVar",
+ "t.ClassVar",
+ "ClassVar",
+ "typing_extensions.ClassVar",
+)
+# we don't use a double-underscore prefix because that triggers
+# name mangling when trying to create a slot for the field
+# (when slots=True)
+_hash_cache_field = "_attrs_cached_hash"
+
+_empty_metadata_singleton = types.MappingProxyType({})
+
+# Unique object for unequivocal getattr() defaults.
+_sentinel = object()
+
+_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate)
+
+
+class _Nothing:
+ """
+ Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
+
+ ``_Nothing`` is a singleton. There is only ever one of it.
+
+ .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
+ """
+
+ _singleton = None
+
+ def __new__(cls):
+ if _Nothing._singleton is None:
+ _Nothing._singleton = super().__new__(cls)
+ return _Nothing._singleton
+
+ def __repr__(self):
+ return "NOTHING"
+
+ def __bool__(self):
+ return False
+
+
+NOTHING = _Nothing()
+"""
+Sentinel to indicate the lack of a value when ``None`` is ambiguous.
+"""
+
+
+class _CacheHashWrapper(int):
+ """
+ An integer subclass that pickles / copies as None
+
+ This is used for non-slots classes with ``cache_hash=True``, to avoid
+ serializing a potentially (even likely) invalid hash value. Since ``None``
+ is the default value for uncalculated hashes, whenever this is copied,
+ the copy's value for the hash should automatically reset.
+
+ See GH #613 for more details.
+ """
+
+ def __reduce__(self, _none_constructor=type(None), _args=()):
+ return _none_constructor, _args
+
+
+def attrib(
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=None,
+ init=True,
+ metadata=None,
+ type=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+):
+ """
+ Create a new attribute on a class.
+
+ .. warning::
+
+ Does *not* do anything unless the class is also decorated with
+ `attr.s`!
+
+ :param default: A value that is used if an ``attrs``-generated ``__init__``
+ is used and no value is passed while instantiating or the attribute is
+ excluded using ``init=False``.
+
+ If the value is an instance of `attrs.Factory`, its callable will be
+ used to construct a new value (useful for mutable data types like lists
+ or dicts).
+
+ If a default is not set (or set manually to `attrs.NOTHING`), a value
+ *must* be supplied when instantiating; otherwise a `TypeError`
+ will be raised.
+
+ The default can also be set using decorator notation as shown below.
+
+ :type default: Any value
+
+ :param callable factory: Syntactic sugar for
+ ``default=attr.Factory(factory)``.
+
+ :param validator: `callable` that is called by ``attrs``-generated
+ ``__init__`` methods after the instance has been initialized. They
+ receive the initialized instance, the :func:`~attrs.Attribute`, and the
+ passed value.
+
+ The return value is *not* inspected so the validator has to throw an
+ exception itself.
+
+ If a `list` is passed, its items are treated as validators and must
+ all pass.
+
+ Validators can be globally disabled and re-enabled using
+ `get_run_validators`.
+
+ The validator can also be set using decorator notation as shown below.
+
+ :type validator: `callable` or a `list` of `callable`\\ s.
+
+ :param repr: Include this attribute in the generated ``__repr__``
+ method. If ``True``, include the attribute; if ``False``, omit it. By
+ default, the built-in ``repr()`` function is used. To override how the
+ attribute value is formatted, pass a ``callable`` that takes a single
+ value and returns a string. Note that the resulting string is used
+ as-is, i.e. it will be used directly *instead* of calling ``repr()``
+ (the default).
+ :type repr: a `bool` or a `callable` to use a custom function.
+
+ :param eq: If ``True`` (default), include this attribute in the
+ generated ``__eq__`` and ``__ne__`` methods that check two instances
+ for equality. To override how the attribute value is compared,
+ pass a ``callable`` that takes a single value and returns the value
+ to be compared.
+ :type eq: a `bool` or a `callable`.
+
+ :param order: If ``True`` (default), include this attribute in the
+ generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+ To override how the attribute value is ordered,
+ pass a ``callable`` that takes a single value and returns the value
+ to be ordered.
+ :type order: a `bool` or a `callable`.
+
+ :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+ same value. Must not be mixed with *eq* or *order*.
+ :type cmp: a `bool` or a `callable`.
+
+ :param Optional[bool] hash: Include this attribute in the generated
+ ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
+ is the correct behavior according to the Python spec. Setting this
+ value to anything other than ``None`` is *discouraged*.
+ :param bool init: Include this attribute in the generated ``__init__``
+ method. It is possible to set this to ``False`` and set a default
+ value. In that case this attribute is unconditionally initialized
+ with the specified default value or factory.
+ :param callable converter: `callable` that is called by
+ ``attrs``-generated ``__init__`` methods to convert attribute's value
+ to the desired format. It is given the passed-in value, and the
+ returned value will be used as the new value of the attribute. The
+ value is converted before being passed to the validator, if any.
+ :param metadata: An arbitrary mapping, to be used by third-party
+ components. See `extending_metadata`.
+ :param type: The type of the attribute. In Python 3.6 or greater, the
+ preferred method to specify the type is using a variable annotation
+ (see :pep:`526`).
+ This argument is provided for backward compatibility.
+ Regardless of the approach used, the type will be stored on
+ ``Attribute.type``.
+
+ Please note that ``attrs`` doesn't do anything with this metadata by
+ itself. You can use it as part of your own code or for
+ `static type checking <types>`.
+ :param kw_only: Make this attribute keyword-only (Python 3+)
+ in the generated ``__init__`` (if ``init`` is ``False``, this
+ parameter is ignored).
+ :param on_setattr: Allows overwriting the *on_setattr* setting from
+ `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
+ Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
+ attribute -- regardless of the setting in `attr.s`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attrs.setters.NO_OP`
+
+ .. versionadded:: 15.2.0 *convert*
+ .. versionadded:: 16.3.0 *metadata*
+ .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+ .. versionchanged:: 17.1.0
+ *hash* is ``None`` and therefore mirrors *eq* by default.
+ .. versionadded:: 17.3.0 *type*
+ .. deprecated:: 17.4.0 *convert*
+ .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+ *convert* to achieve consistency with other noun-based arguments.
+ .. versionadded:: 18.1.0
+ ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+ .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+ .. versionchanged:: 21.1.0
+ *eq*, *order*, and *cmp* also accept a custom callable
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ """
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq, order, True
+ )
+
+ if hash is not None and hash is not True and hash is not False:
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+
+ if factory is not None:
+ if default is not NOTHING:
+ raise ValueError(
+ "The `default` and `factory` arguments are mutually "
+ "exclusive."
+ )
+ if not callable(factory):
+ raise ValueError("The `factory` argument must be a callable.")
+ default = Factory(factory)
+
+ if metadata is None:
+ metadata = {}
+
+ # Apply syntactic sugar by auto-wrapping.
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ if validator and isinstance(validator, (list, tuple)):
+ validator = and_(*validator)
+
+ if converter and isinstance(converter, (list, tuple)):
+ converter = pipe(*converter)
+
+ return _CountingAttr(
+ default=default,
+ validator=validator,
+ repr=repr,
+ cmp=None,
+ hash=hash,
+ init=init,
+ converter=converter,
+ metadata=metadata,
+ type=type,
+ kw_only=kw_only,
+ eq=eq,
+ eq_key=eq_key,
+ order=order,
+ order_key=order_key,
+ on_setattr=on_setattr,
+ )
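+
+# Illustrative sketch (hypothetical class; attrib() only takes effect
+# together with the `attr.s` decorator):
+#
+# @attr.s
+# class C:
+# x = attr.ib(default=42)
+# y = attr.ib(factory=list)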
+
+
+def _compile_and_eval(script, globs, locs=None, filename=""):
+ """
+ "Exec" the script with the given global (globs) and local (locs) variables.
+ """
+ bytecode = compile(script, filename, "exec")
+ eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs):
+ """
+ Create the method with the script given and return the method object.
+ """
+ locs = {}
+
+ # In order for debuggers like PDB to be able to step through the code,
+ # we add a fake linecache entry.
+ count = 1
+ base_filename = filename
+ while True:
+ linecache_tuple = (
+ len(script),
+ None,
+ script.splitlines(True),
+ filename,
+ )
+ old_val = linecache.cache.setdefault(filename, linecache_tuple)
+ if old_val == linecache_tuple:
+ break
+ else:
+ filename = "{}-{}>".format(base_filename[:-1], count)
+ count += 1
+
+ _compile_and_eval(script, globs, locs, filename)
+
+ return locs[name]
+
+
+def _make_attr_tuple_class(cls_name, attr_names):
+ """
+ Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+ The subclass is a bare tuple with properties for names.
+
+ class MyClassAttributes(tuple):
+ __slots__ = ()
+ x = property(itemgetter(0))
+ """
+ attr_class_name = "{}Attributes".format(cls_name)
+ attr_class_template = [
+ "class {}(tuple):".format(attr_class_name),
+ " __slots__ = ()",
+ ]
+ if attr_names:
+ for i, attr_name in enumerate(attr_names):
+ attr_class_template.append(
+ _tuple_property_pat.format(index=i, attr_name=attr_name)
+ )
+ else:
+ attr_class_template.append(" pass")
+ globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
+ _compile_and_eval("\n".join(attr_class_template), globs)
+ return globs[attr_class_name]
+
+
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+_Attributes = _make_attr_tuple_class(
+ "_Attributes",
+ [
+ # all attributes to build dunder methods for
+ "attrs",
+ # attributes that have been inherited
+ "base_attrs",
+ # map inherited attributes to their originating classes
+ "base_attrs_map",
+ ],
+)
+
+
+def _is_class_var(annot):
+ """
+ Check whether *annot* is a typing.ClassVar.
+
+ The string comparison hack is used to avoid evaluating all string
+ annotations which would put attrs-based classes at a performance
+ disadvantage compared to plain old classes.
+ """
+ annot = str(annot)
+
+ # Annotation can be quoted.
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
+ annot = annot[1:-1]
+
+ return annot.startswith(_classvar_prefixes)
+
+
+def _has_own_attribute(cls, attrib_name):
+ """
+ Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
+
+ Requires Python 3.
+ """
+ attr = getattr(cls, attrib_name, _sentinel)
+ if attr is _sentinel:
+ return False
+
+ for base_cls in cls.__mro__[1:]:
+ a = getattr(base_cls, attrib_name, None)
+ if attr is a:
+ return False
+
+ return True
+
+
+def _get_annotations(cls):
+ """
+ Get annotations for *cls*.
+ """
+ if _has_own_attribute(cls, "__annotations__"):
+ return cls.__annotations__
+
+ return {}
+
+
+def _counter_getter(e):
+ """
+ Key function for sorting to avoid re-creating a lambda for every class.
+ """
+ return e[1].counter
+
+
+def _collect_base_attrs(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in reversed(cls.__mro__[1:-1]):
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.inherited or a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ # For each name, only keep the freshest definition, i.e. the one furthest at the
+ # back. base_attr_map is fine because it gets overwritten with every new
+ # instance.
+ filtered = []
+ seen = set()
+ for a in reversed(base_attrs):
+ if a.name in seen:
+ continue
+ filtered.insert(0, a)
+ seen.add(a.name)
+
+ return filtered, base_attr_map
+
+
+def _collect_base_attrs_broken(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+
+ N.B. *taken_attr_names* will be mutated.
+
+ Adhere to the old incorrect behavior.
+
+ Notably, it collects from the front and considers inherited attributes, which
+ leads to the buggy behavior reported in #428.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in cls.__mro__[1:-1]:
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ taken_attr_names.add(a.name)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+ cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+ """
+ Transform all `_CountingAttr`s on a class into `Attribute`s.
+
+ If *these* is passed, use that and don't look for them on the class.
+
+ If *collect_by_mro* is True, collect them in the correct MRO order; otherwise
+ use the old -- incorrect -- order. See #428.
+
+ Return an `_Attributes`.
+ """
+ cd = cls.__dict__
+ anns = _get_annotations(cls)
+
+ if these is not None:
+ ca_list = [(name, ca) for name, ca in these.items()]
+
+ if not isinstance(these, ordered_dict):
+ ca_list.sort(key=_counter_getter)
+ elif auto_attribs is True:
+ ca_names = {
+ name
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ }
+ ca_list = []
+ annot_names = set()
+ for attr_name, type in anns.items():
+ if _is_class_var(type):
+ continue
+ annot_names.add(attr_name)
+ a = cd.get(attr_name, NOTHING)
+
+ if not isinstance(a, _CountingAttr):
+ if a is NOTHING:
+ a = attrib()
+ else:
+ a = attrib(default=a)
+ ca_list.append((attr_name, a))
+
+ unannotated = ca_names - annot_names
+ if len(unannotated) > 0:
+ raise UnannotatedAttributeError(
+ "The following `attr.ib`s lack a type annotation: "
+ + ", ".join(
+ sorted(unannotated, key=lambda n: cd.get(n).counter)
+ )
+ + "."
+ )
+ else:
+ ca_list = sorted(
+ (
+ (name, attr)
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ ),
+ key=lambda e: e[1].counter,
+ )
+
+ own_attrs = [
+ Attribute.from_counting_attr(
+ name=attr_name, ca=ca, type=anns.get(attr_name)
+ )
+ for attr_name, ca in ca_list
+ ]
+
+ if collect_by_mro:
+ base_attrs, base_attr_map = _collect_base_attrs(
+ cls, {a.name for a in own_attrs}
+ )
+ else:
+ base_attrs, base_attr_map = _collect_base_attrs_broken(
+ cls, {a.name for a in own_attrs}
+ )
+
+ if kw_only:
+ own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
+ base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
+
+ attrs = base_attrs + own_attrs
+
+ # Mandatory vs non-mandatory attr order only matters when they are part of
+ # the __init__ signature and when they aren't kw_only (which are moved to
+ # the end and can be mandatory or non-mandatory in any order, as they will
+ # be specified as keyword args anyway). Check the order of those attrs:
+ had_default = False
+ for a in (a for a in attrs if a.init is not False and a.kw_only is False):
+ if had_default is True and a.default is NOTHING:
+ raise ValueError(
+ "No mandatory attributes allowed after an attribute with a "
+ "default value or factory. Attribute in question: %r" % (a,)
+ )
+
+ if had_default is False and a.default is not NOTHING:
+ had_default = True
+
+ if field_transformer is not None:
+ attrs = field_transformer(cls, attrs)
+
+ # Create AttrsClass *after* applying the field_transformer since it may
+ # add or remove attributes!
+ attr_names = [a.name for a in attrs]
+ AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
+
+ return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map))
+
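+# Illustrative sketch (not part of the vendored attrs source) of the
+# default/mandatory ordering check above:
+#
+#   @attr.s
+#   class C:
+#       x = attr.ib(default=1)
+#       y = attr.ib()  # ValueError: no mandatory attributes allowed
+#                      # after an attribute with a default value
+#
+# kw_only attributes are exempt, so `y = attr.ib(kw_only=True)` is fine.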
+
+if PYPY:
+
+ def _frozen_setattrs(self, name, value):
+ """
+ Attached to frozen classes as __setattr__.
+ """
+ if isinstance(self, BaseException) and name in (
+ "__cause__",
+ "__context__",
+ ):
+ BaseException.__setattr__(self, name, value)
+ return
+
+ raise FrozenInstanceError()
+
+else:
+
+ def _frozen_setattrs(self, name, value):
+ """
+ Attached to frozen classes as __setattr__.
+ """
+ raise FrozenInstanceError()
+
+
+def _frozen_delattrs(self, name):
+ """
+ Attached to frozen classes as __delattr__.
+ """
+ raise FrozenInstanceError()
+
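+# Illustrative sketch (not part of the vendored attrs source). Once
+# installed, the frozen dunders turn any mutation into an error:
+#
+#   @attr.s(frozen=True)
+#   class Point:
+#       x = attr.ib()
+#
+#   p = Point(1)
+#   p.x = 2   # raises FrozenInstanceError via _frozen_setattrs
+#   del p.x   # raises FrozenInstanceError via _frozen_delattrs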
+
+class _ClassBuilder:
+ """
+ Iteratively build *one* class.
+ """
+
+ __slots__ = (
+ "_attr_names",
+ "_attrs",
+ "_base_attr_map",
+ "_base_names",
+ "_cache_hash",
+ "_cls",
+ "_cls_dict",
+ "_delete_attribs",
+ "_frozen",
+ "_has_pre_init",
+ "_has_post_init",
+ "_is_exc",
+ "_on_setattr",
+ "_slots",
+ "_weakref_slot",
+ "_wrote_own_setattr",
+ "_has_custom_setattr",
+ )
+
+ def __init__(
+ self,
+ cls,
+ these,
+ slots,
+ frozen,
+ weakref_slot,
+ getstate_setstate,
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_custom_setattr,
+ field_transformer,
+ ):
+ attrs, base_attrs, base_map = _transform_attrs(
+ cls,
+ these,
+ auto_attribs,
+ kw_only,
+ collect_by_mro,
+ field_transformer,
+ )
+
+ self._cls = cls
+ self._cls_dict = dict(cls.__dict__) if slots else {}
+ self._attrs = attrs
+ self._base_names = {a.name for a in base_attrs}
+ self._base_attr_map = base_map
+ self._attr_names = tuple(a.name for a in attrs)
+ self._slots = slots
+ self._frozen = frozen
+ self._weakref_slot = weakref_slot
+ self._cache_hash = cache_hash
+ self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
+ self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
+ self._delete_attribs = not bool(these)
+ self._is_exc = is_exc
+ self._on_setattr = on_setattr
+
+ self._has_custom_setattr = has_custom_setattr
+ self._wrote_own_setattr = False
+
+ self._cls_dict["__attrs_attrs__"] = self._attrs
+
+ if frozen:
+ self._cls_dict["__setattr__"] = _frozen_setattrs
+ self._cls_dict["__delattr__"] = _frozen_delattrs
+
+ self._wrote_own_setattr = True
+ elif on_setattr in (
+ _ng_default_on_setattr,
+ setters.validate,
+ setters.convert,
+ ):
+ has_validator = has_converter = False
+ for a in attrs:
+ if a.validator is not None:
+ has_validator = True
+ if a.converter is not None:
+ has_converter = True
+
+ if has_validator and has_converter:
+ break
+ if (
+ (
+ on_setattr == _ng_default_on_setattr
+ and not (has_validator or has_converter)
+ )
+ or (on_setattr == setters.validate and not has_validator)
+ or (on_setattr == setters.convert and not has_converter)
+ ):
+ # If class-level on_setattr is set to convert + validate, but
+ # there's no field to convert or validate, pretend like there's
+ # no on_setattr.
+ self._on_setattr = None
+
+ if getstate_setstate:
+ (
+ self._cls_dict["__getstate__"],
+ self._cls_dict["__setstate__"],
+ ) = self._make_getstate_setstate()
+
+ def __repr__(self):
+ return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
+
+ def build_class(self):
+ """
+ Finalize class based on the accumulated configuration.
+
+ Builder cannot be used after calling this method.
+ """
+ if self._slots is True:
+ return self._create_slots_class()
+ else:
+ return self._patch_original_class()
+
+ def _patch_original_class(self):
+ """
+ Apply accumulated methods and return the class.
+ """
+ cls = self._cls
+ base_names = self._base_names
+
+ # Clean class of attribute definitions (`attr.ib()`s).
+ if self._delete_attribs:
+ for name in self._attr_names:
+ if (
+ name not in base_names
+ and getattr(cls, name, _sentinel) is not _sentinel
+ ):
+ try:
+ delattr(cls, name)
+ except AttributeError:
+ # This can happen if a base class defines a class
+ # variable and we want to set an attribute with the
+ # same name by using only a type annotation.
+ pass
+
+ # Attach our dunder methods.
+ for name, value in self._cls_dict.items():
+ setattr(cls, name, value)
+
+ # If we've inherited an attrs __setattr__ and don't write our own,
+ # reset it to object's.
+ if not self._wrote_own_setattr and getattr(
+ cls, "__attrs_own_setattr__", False
+ ):
+ cls.__attrs_own_setattr__ = False
+
+ if not self._has_custom_setattr:
+ cls.__setattr__ = _obj_setattr
+
+ return cls
+
+ def _create_slots_class(self):
+ """
+ Build and return a new class with a `__slots__` attribute.
+ """
+ cd = {
+ k: v
+ for k, v in self._cls_dict.items()
+ if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
+ }
+
+ # If our class doesn't have its own implementation of __setattr__
+ # (either from the user or by us), check the bases: if one of them has
+ # an attrs-made __setattr__, it needs to be reset. We don't walk the
+ # MRO because we only care about our immediate base classes.
+ # XXX: This can be confused by subclassing a slotted attrs class with
+ # XXX: a non-attrs class and subclassing the resulting class with an
+ # XXX: attrs class. See `test_slotted_confused` for details. For now
+ # XXX: that's OK with us.
+ if not self._wrote_own_setattr:
+ cd["__attrs_own_setattr__"] = False
+
+ if not self._has_custom_setattr:
+ for base_cls in self._cls.__bases__:
+ if base_cls.__dict__.get("__attrs_own_setattr__", False):
+ cd["__setattr__"] = _obj_setattr
+ break
+
+ # Traverse the MRO to collect existing slots
+ # and check for an existing __weakref__.
+ existing_slots = dict()
+ weakref_inherited = False
+ for base_cls in self._cls.__mro__[1:-1]:
+ if base_cls.__dict__.get("__weakref__", None) is not None:
+ weakref_inherited = True
+ existing_slots.update(
+ {
+ name: getattr(base_cls, name)
+ for name in getattr(base_cls, "__slots__", [])
+ }
+ )
+
+ base_names = set(self._base_names)
+
+ names = self._attr_names
+ if (
+ self._weakref_slot
+ and "__weakref__" not in getattr(self._cls, "__slots__", ())
+ and "__weakref__" not in names
+ and not weakref_inherited
+ ):
+ names += ("__weakref__",)
+
+ # We only add the names of attributes that aren't inherited.
+ # Setting __slots__ to inherited attributes wastes memory.
+ slot_names = [name for name in names if name not in base_names]
+ # There can be slots for attributes from the current class
+ # that are already defined in parent classes.
+ # As their descriptors may be overridden by a child class,
+ # we collect them here and update the class dict.
+ reused_slots = {
+ slot: slot_descriptor
+ for slot, slot_descriptor in existing_slots.items()
+ if slot in slot_names
+ }
+ slot_names = [name for name in slot_names if name not in reused_slots]
+ cd.update(reused_slots)
+ if self._cache_hash:
+ slot_names.append(_hash_cache_field)
+ cd["__slots__"] = tuple(slot_names)
+
+ cd["__qualname__"] = self._cls.__qualname__
+
+ # Create new class based on old class and our methods.
+ cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+ # The following is a fix for
+ # <https://github.com/python-attrs/attrs/issues/102>. On Python 3,
+ # if a method mentions `__class__` or uses the no-arg super(), the
+ # compiler will bake a reference to the class in the method itself
+ # as `method.__closure__`. Since we replace the class with a
+ # clone, we rewrite these references so it keeps working.
+ for item in cls.__dict__.values():
+ if isinstance(item, (classmethod, staticmethod)):
+ # Class- and staticmethods hide their functions inside.
+ # These might need to be rewritten as well.
+ closure_cells = getattr(item.__func__, "__closure__", None)
+ elif isinstance(item, property):
+ # Workaround for property `super()` shortcut (PY3-only).
+ # There is no universal way for other descriptors.
+ closure_cells = getattr(item.fget, "__closure__", None)
+ else:
+ closure_cells = getattr(item, "__closure__", None)
+
+ if not closure_cells: # Catch None or the empty list.
+ continue
+ for cell in closure_cells:
+ try:
+ match = cell.cell_contents is self._cls
+ except ValueError: # ValueError: Cell is empty
+ pass
+ else:
+ if match:
+ set_closure_cell(cell, cls)
+
+ return cls
+
+ def add_repr(self, ns):
+ self._cls_dict["__repr__"] = self._add_method_dunders(
+ _make_repr(self._attrs, ns, self._cls)
+ )
+ return self
+
+ def add_str(self):
+ repr = self._cls_dict.get("__repr__")
+ if repr is None:
+ raise ValueError(
+ "__str__ can only be generated if a __repr__ exists."
+ )
+
+ def __str__(self):
+ return self.__repr__()
+
+ self._cls_dict["__str__"] = self._add_method_dunders(__str__)
+ return self
+
+ def _make_getstate_setstate(self):
+ """
+ Create custom __setstate__ and __getstate__ methods.
+ """
+ # __weakref__ is not writable.
+ state_attr_names = tuple(
+ an for an in self._attr_names if an != "__weakref__"
+ )
+
+ def slots_getstate(self):
+ """
+ Automatically created by attrs.
+ """
+ return tuple(getattr(self, name) for name in state_attr_names)
+
+ hash_caching_enabled = self._cache_hash
+
+ def slots_setstate(self, state):
+ """
+ Automatically created by attrs.
+ """
+ __bound_setattr = _obj_setattr.__get__(self, Attribute)
+ for name, value in zip(state_attr_names, state):
+ __bound_setattr(name, value)
+
+ # The hash code cache is not included when the object is
+ # serialized, but it still needs to be initialized to None to
+ # indicate that the first call to __hash__ should be a cache
+ # miss.
+ if hash_caching_enabled:
+ __bound_setattr(_hash_cache_field, None)
+
+ return slots_getstate, slots_setstate
+
+ def make_unhashable(self):
+ self._cls_dict["__hash__"] = None
+ return self
+
+ def add_hash(self):
+ self._cls_dict["__hash__"] = self._add_method_dunders(
+ _make_hash(
+ self._cls,
+ self._attrs,
+ frozen=self._frozen,
+ cache_hash=self._cache_hash,
+ )
+ )
+
+ return self
+
+ def add_init(self):
+ self._cls_dict["__init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr,
+ attrs_init=False,
+ )
+ )
+
+ return self
+
+ def add_match_args(self):
+ self._cls_dict["__match_args__"] = tuple(
+ field.name
+ for field in self._attrs
+ if field.init and not field.kw_only
+ )
+
+ def add_attrs_init(self):
+ self._cls_dict["__attrs_init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr,
+ attrs_init=True,
+ )
+ )
+
+ return self
+
+ def add_eq(self):
+ cd = self._cls_dict
+
+ cd["__eq__"] = self._add_method_dunders(
+ _make_eq(self._cls, self._attrs)
+ )
+ cd["__ne__"] = self._add_method_dunders(_make_ne())
+
+ return self
+
+ def add_order(self):
+ cd = self._cls_dict
+
+ cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
+ self._add_method_dunders(meth)
+ for meth in _make_order(self._cls, self._attrs)
+ )
+
+ return self
+
+ def add_setattr(self):
+ if self._frozen:
+ return self
+
+ sa_attrs = {}
+ for a in self._attrs:
+ on_setattr = a.on_setattr or self._on_setattr
+ if on_setattr and on_setattr is not setters.NO_OP:
+ sa_attrs[a.name] = a, on_setattr
+
+ if not sa_attrs:
+ return self
+
+ if self._has_custom_setattr:
+ # We need to write a __setattr__ but there already is one!
+ raise ValueError(
+ "Can't combine custom __setattr__ with on_setattr hooks."
+ )
+
+ # docstring comes from _add_method_dunders
+ def __setattr__(self, name, val):
+ try:
+ a, hook = sa_attrs[name]
+ except KeyError:
+ nval = val
+ else:
+ nval = hook(self, a, val)
+
+ _obj_setattr(self, name, nval)
+
+ self._cls_dict["__attrs_own_setattr__"] = True
+ self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+ self._wrote_own_setattr = True
+
+ return self
+
+ def _add_method_dunders(self, method):
+ """
+ Add __module__ and __qualname__ to a *method* if possible.
+ """
+ try:
+ method.__module__ = self._cls.__module__
+ except AttributeError:
+ pass
+
+ try:
+ method.__qualname__ = ".".join(
+ (self._cls.__qualname__, method.__name__)
+ )
+ except AttributeError:
+ pass
+
+ try:
+ method.__doc__ = "Method generated by attrs for class %s." % (
+ self._cls.__qualname__,
+ )
+ except AttributeError:
+ pass
+
+ return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ return cmp, cmp
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq = default_eq
+
+ if order is None:
+ order = eq
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, order
+
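+# Illustrative sketch (not part of the vendored attrs source) of the
+# class-level derivation with default_eq=True:
+#
+#   _determine_attrs_eq_order(None, None, None, True)   -> (True, True)
+#   _determine_attrs_eq_order(None, False, None, True)  -> (False, False)
+#   _determine_attrs_eq_order(None, False, True, True)  -> ValueError
+#   _determine_attrs_eq_order(True, None, None, True)   -> (True, True)
+#   _determine_attrs_eq_order(True, True, None, True)   -> ValueError (cmp mixed in)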
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+ def decide_callable_or_boolean(value):
+ """
+ Decide whether a key function is used.
+ """
+ if callable(value):
+ value, key = True, value
+ else:
+ key = None
+ return value, key
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ cmp, cmp_key = decide_callable_or_boolean(cmp)
+ return cmp, cmp_key, cmp, cmp_key
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq, eq_key = default_eq, None
+ else:
+ eq, eq_key = decide_callable_or_boolean(eq)
+
+ if order is None:
+ order, order_key = eq, eq_key
+ else:
+ order, order_key = decide_callable_or_boolean(order)
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+ cls, flag, auto_detect, dunders, default=True
+):
+ """
+ Check whether we should implement a set of methods for *cls*.
+
+ *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
+ the same as passed into @attr.s, and *dunders* is a tuple of attribute
+ names whose presence signals that the user has implemented the method
+ themselves.
+
+ Return *default* if no reason for or against is found.
+ """
+ if flag is True or flag is False:
+ return flag
+
+ if flag is None and auto_detect is False:
+ return default
+
+ # Logically, flag is None and auto_detect is True here.
+ for dunder in dunders:
+ if _has_own_attribute(cls, dunder):
+ return False
+
+ return default
+
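+# Illustrative sketch (not part of the vendored attrs source). With
+# auto_detect enabled, a user-supplied dunder suppresses the generated one,
+# while an explicit flag always wins:
+#
+#   class C:
+#       def __repr__(self):
+#           return "custom"
+#
+#   _determine_whether_to_implement(C, None, True, ("__repr__",))  -> False
+#   _determine_whether_to_implement(C, True, True, ("__repr__",))  -> True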
+
+def attrs(
+ maybe_cls=None,
+ these=None,
+ repr_ns=None,
+ repr=None,
+ cmp=None,
+ hash=None,
+ init=None,
+ slots=False,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=False,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=False,
+ eq=None,
+ order=None,
+ auto_detect=False,
+ collect_by_mro=False,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+ match_args=True,
+):
+ r"""
+ A class decorator that adds `dunder
+ <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
+ specified attributes using `attr.ib` or the *these* argument.
+
+ :param these: A dictionary of name to `attr.ib` mappings. This is
+ useful to avoid the definition of your attributes within the class body
+ because you can't (e.g. if you want to add ``__repr__`` methods to
+ Django models) or don't want to.
+
+ If *these* is not ``None``, ``attrs`` will *not* search the class body
+ for attributes and will *not* remove any attributes from it.
+
+ If *these* is an ordered dict (`dict` on Python 3.6+,
+ `collections.OrderedDict` otherwise), the order is deduced from
+ the order of the attributes inside *these*. Otherwise the order
+ of the definition of the attributes is used.
+
+ :type these: `dict` of `str` to `attr.ib`
+
+ :param str repr_ns: When using nested classes, there's no way in Python 2
+ to automatically detect that. Therefore it's possible to set the
+ namespace explicitly for a more meaningful ``repr`` output.
+ :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+ *order*, and *hash* arguments explicitly, assume they are set to
+ ``True`` **unless any** of the involved methods for one of the
+ arguments is implemented in the *current* class (i.e. it is *not*
+ inherited from some base class).
+
+ So for example by implementing ``__eq__`` on a class yourself,
+ ``attrs`` will deduce ``eq=False`` and will create *neither*
+ ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+ ``__ne__`` by default, so it *should* be enough to only implement
+ ``__eq__`` in most cases).
+
+ .. warning::
+
+ If you prevent ``attrs`` from creating the ordering methods for you
+ (``order=False``, e.g. by implementing ``__le__``), it becomes
+ *your* responsibility to make sure its ordering is sound. The best
+ way is to use the `functools.total_ordering` decorator.
+
+
+ Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+ *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+ *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
+ an `attrs.exceptions.PythonTooOldError`.
+
+ :param bool repr: Create a ``__repr__`` method with a human-readable
+ representation of ``attrs`` attributes.
+ :param bool str: Create a ``__str__`` method that is identical to
+ ``__repr__``. This is usually not necessary except for
+ `Exception`\ s.
+ :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+ and ``__ne__`` methods that check two instances for equality.
+
+ They compare the instances as if they were tuples of their ``attrs``
+ attributes if and only if the types of both classes are *identical*!
+ :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+ ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+ allow instances to be ordered. If ``None`` (default) mirror value of
+ *eq*.
+ :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+ and *order* to the same value. Must not be mixed with *eq* or *order*.
+ :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
+ is generated according how *eq* and *frozen* are set.
+
+ 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
+ 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+ None, marking it unhashable (which it is).
+ 3. If *eq* is False, ``__hash__`` will be left untouched meaning the
+ ``__hash__`` method of the base class will be used (if base class is
+ ``object``, this means it will fall back to id-based hashing.).
+
+ Although not recommended, you can decide for yourself and force
+ ``attrs`` to create one (e.g. if the class is immutable even though you
+ didn't freeze it programmatically) by passing ``True``, or prevent one
+ from being created by passing ``False``. Both of these cases are rather
+ special and should be used carefully.
+
+ See our documentation on `hashing`, Python's documentation on
+ `object.__hash__`, and the `GitHub issue that led to the default \
+ behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
+ details.
+ :param bool init: Create a ``__init__`` method that initializes the
+ ``attrs`` attributes. Leading underscores are stripped for the argument
+ name. If a ``__attrs_pre_init__`` method exists on the class, it will
+ be called before the class is initialized. If a ``__attrs_post_init__``
+ method exists on the class, it will be called after the class is fully
+ initialized.
+
+ If ``init`` is ``False``, an ``__attrs_init__`` method will be
+ injected instead. This allows you to define a custom ``__init__``
+ method that can do pre-init work such as ``super().__init__()``,
+ and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+ :param bool slots: Create a `slotted class <slotted classes>` that's more
+ memory-efficient. Slotted classes are generally superior to the default
+ dict classes, but have some gotchas you should know about, so we
+ encourage you to read the `glossary entry <slotted classes>`.
+ :param bool frozen: Make instances immutable after initialization. If
+ someone attempts to modify a frozen instance,
+ `attr.exceptions.FrozenInstanceError` is raised.
+
+ .. note::
+
+ 1. This is achieved by installing a custom ``__setattr__`` method
+ on your class, so you can't implement your own.
+
+ 2. True immutability is impossible in Python.
+
+ 3. This *does* have a minor a runtime performance `impact
+ <how-frozen>` when initializing new instances. In other words:
+ ``__init__`` is slightly slower with ``frozen=True``.
+
+ 4. If a class is frozen, you cannot modify ``self`` in
+ ``__attrs_post_init__`` or a self-written ``__init__``. You can
+ circumvent that limitation by using
+ ``object.__setattr__(self, "attribute_name", value)``.
+
+ 5. Subclasses of a frozen class are frozen too.
+
+ :param bool weakref_slot: Make instances weak-referenceable. This has no
+ effect unless ``slots`` is also enabled.
+ :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+ attributes (Python 3.6 and later only) from the class body.
+
+ In this case, you **must** annotate every field. If ``attrs``
+ encounters a field that is set to an `attr.ib` but lacks a type
+ annotation, an `attr.exceptions.UnannotatedAttributeError` is
+ raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+ want to set a type.
+
+ If you assign a value to those attributes (e.g. ``x: int = 42``), that
+ value becomes the default value like if it were passed using
+ ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
+ works as expected in most cases (see warning below).
+
+ Attributes annotated as `typing.ClassVar`, and attributes that are
+ neither annotated nor set to an `attr.ib` are **ignored**.
+
+ .. warning::
+ For features that use the attribute name to create decorators (e.g.
+ `validators <validators>`), you still *must* assign `attr.ib` to
+ them. Otherwise Python will either not find the name or try to use
+ the default value to call e.g. ``validator`` on it.
+
+ These errors can be quite confusing and probably the most common bug
+ report on our bug tracker.
+
+ :param bool kw_only: Make all attributes keyword-only (Python 3+)
+ in the generated ``__init__`` (if ``init`` is ``False``, this
+ parameter is ignored).
+ :param bool cache_hash: Ensure that the object's hash code is computed
+ only once and stored on the object. If this is set to ``True``,
+ hashing must be either explicitly or implicitly enabled for this
+ class. If the hash code is cached, avoid any reassignments of
+ fields involved in hash code computation or mutations of the objects
+ those fields point to after object creation. If such changes occur,
+ the behavior of the object's hash code is undefined.
+ :param bool auto_exc: If the class subclasses `BaseException`
+ (which implicitly includes any subclass of any exception), the
+ following happens so that it behaves like a well-behaved Python
+ exception class:
+
+ - the values for *eq*, *order*, and *hash* are ignored and the
+ instances compare and hash by the instance's ids (N.B. ``attrs`` will
+ *not* remove existing implementations of ``__hash__`` or the equality
+ methods. It just won't add its own.),
+ - all attributes that are either passed into ``__init__`` or have a
+ default value are additionally available as a tuple in the ``args``
+ attribute,
+ - the value of *str* is ignored leaving ``__str__`` to base classes.
+ :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
+ collects attributes from base classes. The default behavior is
+ incorrect in certain cases of multiple inheritance. It should be on by
+ default but is kept off for backward-compatibility.
+
+ See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
+ more details.
+
+ :param Optional[bool] getstate_setstate:
+ .. note::
+ This is usually only interesting for slotted classes and you should
+ probably just set *auto_detect* to `True`.
+
+ If `True`, ``__getstate__`` and
+ ``__setstate__`` are generated and attached to the class. This is
+ necessary for slotted classes to be pickleable. If left `None`, it's
+ `True` by default for slotted classes and ``False`` for dict classes.
+
+ If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+ and **either** ``__getstate__`` or ``__setstate__`` is detected directly
+ on the class (i.e. not inherited), it is set to `False` (this is usually
+ what you want).
+
+ :param on_setattr: A callable that is run whenever the user attempts to set
+ an attribute (either by assignment like ``i.x = 42`` or by using
+ `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
+ as validators: the instance, the attribute that is being modified, and
+ the new value.
+
+ If no exception is raised, the attribute is set to the return value of
+ the callable.
+
+ If a list of callables is passed, they're automatically wrapped in an
+ `attrs.setters.pipe`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attrs.setters.NO_OP`
+
+ :param Optional[callable] field_transformer:
+ A function that is called with the original class object and all
+ fields right before ``attrs`` finalizes the class. You can use
+ this, e.g., to automatically add converters or validators to
+ fields based on their types. See `transform-fields` for more details.
+
+ :param bool match_args:
+ If `True` (default), set ``__match_args__`` on the class to support
+ :pep:`634` (Structural Pattern Matching). It is a tuple of all
+ non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
+ Ignored on older Python versions.
+
+ .. versionadded:: 16.0.0 *slots*
+ .. versionadded:: 16.1.0 *frozen*
+ .. versionadded:: 16.3.0 *str*
+ .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+ .. versionchanged:: 17.1.0
+ *hash* supports ``None`` as value which is also the default now.
+ .. versionadded:: 17.3.0 *auto_attribs*
+ .. versionchanged:: 18.1.0
+ If *these* is passed, no attributes are deleted from the class body.
+ .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+ .. versionadded:: 18.2.0 *weakref_slot*
+ .. deprecated:: 18.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+ `DeprecationWarning` if the classes compared are subclasses of
+ each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
+ to each other.
+ .. versionchanged:: 19.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+ subclasses comparable anymore.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionadded:: 18.2.0 *cache_hash*
+ .. versionadded:: 19.1.0 *auto_exc*
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *auto_detect*
+ .. versionadded:: 20.1.0 *collect_by_mro*
+ .. versionadded:: 20.1.0 *getstate_setstate*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionadded:: 20.3.0 *field_transformer*
+ .. versionchanged:: 21.1.0
+ ``init=False`` injects ``__attrs_init__``
+ .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ .. versionadded:: 21.3.0 *match_args*
+ """
+ eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+ hash_ = hash # work around the lack of nonlocal
+
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ def wrap(cls):
+ is_frozen = frozen or _has_frozen_base_class(cls)
+ is_exc = auto_exc is True and issubclass(cls, BaseException)
+ has_own_setattr = auto_detect and _has_own_attribute(
+ cls, "__setattr__"
+ )
+
+ if has_own_setattr and is_frozen:
+ raise ValueError("Can't freeze a class with a custom __setattr__.")
+
+ builder = _ClassBuilder(
+ cls,
+ these,
+ slots,
+ is_frozen,
+ weakref_slot,
+ _determine_whether_to_implement(
+ cls,
+ getstate_setstate,
+ auto_detect,
+ ("__getstate__", "__setstate__"),
+ default=slots,
+ ),
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_own_setattr,
+ field_transformer,
+ )
+ if _determine_whether_to_implement(
+ cls, repr, auto_detect, ("__repr__",)
+ ):
+ builder.add_repr(repr_ns)
+ if str is True:
+ builder.add_str()
+
+ eq = _determine_whether_to_implement(
+ cls, eq_, auto_detect, ("__eq__", "__ne__")
+ )
+ if not is_exc and eq is True:
+ builder.add_eq()
+ if not is_exc and _determine_whether_to_implement(
+ cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
+ ):
+ builder.add_order()
+
+ builder.add_setattr()
+
+ if (
+ hash_ is None
+ and auto_detect is True
+ and _has_own_attribute(cls, "__hash__")
+ ):
+ hash = False
+ else:
+ hash = hash_
+ if hash is not True and hash is not False and hash is not None:
+ # Can't use `hash in (True, False, None)` because 1 == True, for example.
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+ elif hash is False or (hash is None and eq is False) or is_exc:
+ # Don't do anything. Should fall back to object's __hash__,
+ # which is by id.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ elif hash is True or (
+ hash is None and eq is True and is_frozen is True
+ ):
+ # Build a __hash__ if told so, or if it's safe.
+ builder.add_hash()
+ else:
+ # Raise TypeError on attempts to hash.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ builder.make_unhashable()
+
+ if _determine_whether_to_implement(
+ cls, init, auto_detect, ("__init__",)
+ ):
+ builder.add_init()
+ else:
+ builder.add_attrs_init()
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " init must be True."
+ )
+
+ if (
+ PY310
+ and match_args
+ and not _has_own_attribute(cls, "__match_args__")
+ ):
+ builder.add_match_args()
+
+ return builder.build_class()
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+ # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
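+# Illustrative usage sketch (not part of the vendored attrs source). Both
+# decorator spellings funnel through `wrap` above:
+#
+#   @attr.s              # maybe_cls is the class -> wrap(cls)
+#   class C:
+#       x = attr.ib()
+#
+#   @attr.s(slots=True)  # maybe_cls is None -> returns wrap as decorator
+#   class D:
+#       x = attr.ib()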
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+ """
+ Check whether *cls* has a frozen ancestor by looking at its
+ __setattr__.
+ """
+ return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+ """
+ Create a "filename" suitable for a function being generated.
+ """
+ unique_filename = "<attrs generated {} {}.{}>".format(
+ func_name,
+ cls.__module__,
+ getattr(cls, "__qualname__", cls.__name__),
+ )
+ return unique_filename
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+ attrs = tuple(
+ a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+ )
+
+ tab = " "
+
+ unique_filename = _generate_unique_filename(cls, "hash")
+ type_hash = hash(unique_filename)
+ # If eq is custom generated, we need to include the functions in globs
+ globs = {}
+
+ hash_def = "def __hash__(self"
+ hash_func = "hash(("
+ closing_braces = "))"
+ if not cache_hash:
+ hash_def += "):"
+ else:
+ hash_def += ", *"
+
+ hash_def += (
+ ", _cache_wrapper="
+ + "__import__('attr._make')._make._CacheHashWrapper):"
+ )
+ hash_func = "_cache_wrapper(" + hash_func
+ closing_braces += ")"
+
+ method_lines = [hash_def]
+
+ def append_hash_computation_lines(prefix, indent):
+ """
+ Generate the code for actually computing the hash code.
+ Below, this is either returned directly or used to compute a value
+ which is then cached, depending on the value of cache_hash.
+ """
+
+ method_lines.extend(
+ [
+ indent + prefix + hash_func,
+ indent + " %d," % (type_hash,),
+ ]
+ )
+
+ for a in attrs:
+ if a.eq_key:
+ cmp_name = "_%s_key" % (a.name,)
+ globs[cmp_name] = a.eq_key
+ method_lines.append(
+ indent + " %s(self.%s)," % (cmp_name, a.name)
+ )
+ else:
+ method_lines.append(indent + " self.%s," % a.name)
+
+ method_lines.append(indent + " " + closing_braces)
+
+ if cache_hash:
+ method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
+ if frozen:
+ append_hash_computation_lines(
+ "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
+ )
+ method_lines.append(tab * 2 + ")") # close __setattr__
+ else:
+ append_hash_computation_lines(
+ "self.%s = " % _hash_cache_field, tab * 2
+ )
+ method_lines.append(tab + "return self.%s" % _hash_cache_field)
+ else:
+ append_hash_computation_lines("return ", tab)
+
+ script = "\n".join(method_lines)
+ return _make_method("__hash__", script, unique_filename, globs)
+
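+# Illustrative sketch (not part of the vendored attrs source) of the
+# approximate script assembled above for a single attribute x without
+# cache_hash (the first tuple element is the per-class type_hash, i.e.
+# the hash of the unique filename):
+#
+#   def __hash__(self):
+#       return hash((
+#           42,  # stands in for type_hash
+#           self.x,
+#           ))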
+
+def _add_hash(cls, attrs):
+ """
+ Add a hash method to *cls*.
+ """
+ cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+ return cls
+
+
+def _make_ne():
+ """
+ Create __ne__ method.
+ """
+
+ def __ne__(self, other):
+ """
+ Check equality and either forward a NotImplemented or
+ return the result negated.
+ """
+ result = self.__eq__(other)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return not result
+
+ return __ne__
+
+
+def _make_eq(cls, attrs):
+ """
+ Create __eq__ method for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.eq]
+
+ unique_filename = _generate_unique_filename(cls, "eq")
+ lines = [
+ "def __eq__(self, other):",
+ " if other.__class__ is not self.__class__:",
+ " return NotImplemented",
+ ]
+
+ # We can't just do a big `self.x == other.x and ...` clause due to
+ # irregularities like nan == nan is false but (nan,) == (nan,) is true.
+ globs = {}
+ if attrs:
+ lines.append(" return (")
+ others = [" ) == ("]
+ for a in attrs:
+ if a.eq_key:
+ cmp_name = "_%s_key" % (a.name,)
+ # Add the key function to the global namespace
+ # of the evaluated function.
+ globs[cmp_name] = a.eq_key
+ lines.append(
+ " %s(self.%s),"
+ % (
+ cmp_name,
+ a.name,
+ )
+ )
+ others.append(
+ " %s(other.%s),"
+ % (
+ cmp_name,
+ a.name,
+ )
+ )
+ else:
+ lines.append(" self.%s," % (a.name,))
+ others.append(" other.%s," % (a.name,))
+
+ lines += others + [" )"]
+ else:
+ lines.append(" return True")
+
+ script = "\n".join(lines)
+
+ return _make_method("__eq__", script, unique_filename, globs)
+
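+# Illustrative sketch (not part of the vendored attrs source) of the
+# approximate script assembled above for plain attributes x and y
+# (no eq_key):
+#
+#   def __eq__(self, other):
+#       if other.__class__ is not self.__class__:
+#           return NotImplemented
+#       return (
+#           self.x,
+#           self.y,
+#       ) == (
+#           other.x,
+#           other.y,
+#       )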
+
+def _make_order(cls, attrs):
+ """
+ Create ordering methods for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.order]
+
+ def attrs_to_tuple(obj):
+ """
+ Save us some typing.
+ """
+ return tuple(
+ key(value) if key else value
+ for value, key in (
+ (getattr(obj, a.name), a.order_key) for a in attrs
+ )
+ )
+
+ def __lt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) < attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __le__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) <= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __gt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) > attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __ge__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) >= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ return __lt__, __le__, __gt__, __ge__
+
+
+def _add_eq(cls, attrs=None):
+ """
+ Add equality methods to *cls* with *attrs*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__eq__ = _make_eq(cls, attrs)
+ cls.__ne__ = _make_ne()
+
+ return cls
+
+
+if HAS_F_STRINGS:
+
+ def _make_repr(attrs, ns, cls):
+ unique_filename = _generate_unique_filename(cls, "repr")
+ # Figure out which attributes to include, and which function to use to
+ # format them. The a.repr value can be either bool or a custom
+ # callable.
+ attr_names_with_reprs = tuple(
+ (a.name, (repr if a.repr is True else a.repr), a.init)
+ for a in attrs
+ if a.repr is not False
+ )
+ globs = {
+ name + "_repr": r
+ for name, r, _ in attr_names_with_reprs
+ if r != repr
+ }
+ globs["_compat"] = _compat
+ globs["AttributeError"] = AttributeError
+ globs["NOTHING"] = NOTHING
+ attribute_fragments = []
+ for name, r, i in attr_names_with_reprs:
+ accessor = (
+ "self." + name
+ if i
+ else 'getattr(self, "' + name + '", NOTHING)'
+ )
+ fragment = (
+ "%s={%s!r}" % (name, accessor)
+ if r == repr
+ else "%s={%s_repr(%s)}" % (name, name, accessor)
+ )
+ attribute_fragments.append(fragment)
+ repr_fragment = ", ".join(attribute_fragments)
+
+ if ns is None:
+ cls_name_fragment = (
+ '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
+ )
+ else:
+ cls_name_fragment = ns + ".{self.__class__.__name__}"
+
+ lines = [
+ "def __repr__(self):",
+ " try:",
+ " already_repring = _compat.repr_context.already_repring",
+ " except AttributeError:",
+ " already_repring = {id(self),}",
+ " _compat.repr_context.already_repring = already_repring",
+ " else:",
+ " if id(self) in already_repring:",
+ " return '...'",
+ " else:",
+ " already_repring.add(id(self))",
+ " try:",
+ " return f'%s(%s)'" % (cls_name_fragment, repr_fragment),
+ " finally:",
+ " already_repring.remove(id(self))",
+ ]
+
+ return _make_method(
+ "__repr__", "\n".join(lines), unique_filename, globs=globs
+ )
+
+else:
+
+ def _make_repr(attrs, ns, _):
+ """
+ Make a repr method that includes relevant *attrs*, adding *ns* to the
+ full name.
+ """
+
+ # Figure out which attributes to include, and which function to use to
+ # format them. The a.repr value can be either bool or a custom
+ # callable.
+ attr_names_with_reprs = tuple(
+ (a.name, repr if a.repr is True else a.repr)
+ for a in attrs
+ if a.repr is not False
+ )
+
+ def __repr__(self):
+ """
+ Automatically created by attrs.
+ """
+ try:
+ already_repring = _compat.repr_context.already_repring
+ except AttributeError:
+ already_repring = set()
+ _compat.repr_context.already_repring = already_repring
+
+ if id(self) in already_repring:
+ return "..."
+ real_cls = self.__class__
+ if ns is None:
+ class_name = real_cls.__qualname__.rsplit(">.", 1)[-1]
+ else:
+ class_name = ns + "." + real_cls.__name__
+
+ # Since 'self' remains on the stack (i.e.: strongly referenced)
+ # for the duration of this call, it's safe to depend on id(...)
+ # stability, and not need to track the instance and therefore
+ # worry about properties like weakref- or hash-ability.
+ already_repring.add(id(self))
+ try:
+ result = [class_name, "("]
+ first = True
+ for name, attr_repr in attr_names_with_reprs:
+ if first:
+ first = False
+ else:
+ result.append(", ")
+ result.extend(
+ (name, "=", attr_repr(getattr(self, name, NOTHING)))
+ )
+ return "".join(result) + ")"
+ finally:
+ already_repring.remove(id(self))
+
+ return __repr__
+
+
+def _add_repr(cls, ns=None, attrs=None):
+ """
+ Add a repr method to *cls*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__repr__ = _make_repr(attrs, ns, cls)
+ return cls
+
+
+def fields(cls):
+ """
+ Return the tuple of ``attrs`` attributes for a class.
+
+ The tuple also allows accessing the fields by their names (see below for
+ examples).
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ :rtype: tuple (with name accessors) of `attrs.Attribute`
+
+ .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
+ by name.
+ """
+ if not isinstance(cls, type):
+ raise TypeError("Passed object must be a class.")
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is None:
+ raise NotAnAttrsClassError(
+ "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+ )
+ return attrs
+
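+# Illustrative usage sketch (not part of the vendored attrs source):
+#
+#   @attr.s
+#   class C:
+#       x = attr.ib()
+#
+#   fields(C)      # (Attribute(name='x', ...),)
+#   fields(C).x    # the same Attribute, accessed by name
+#   fields(C())    # TypeError: Passed object must be a class.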
+
+def fields_dict(cls):
+ """
+ Return an ordered dictionary of ``attrs`` attributes for a class, whose
+ keys are the attribute names.
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ :rtype: an ordered dict where keys are attribute names and values are
+ `attrs.Attribute`\\ s. This will be a `dict` if it's
+ naturally ordered like on Python 3.6+ or an
+ :class:`~collections.OrderedDict` otherwise.
+
+ .. versionadded:: 18.1.0
+ """
+ if not isinstance(cls, type):
+ raise TypeError("Passed object must be a class.")
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is None:
+ raise NotAnAttrsClassError(
+ "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+ )
+ return ordered_dict((a.name, a) for a in attrs)
+
+
+def validate(inst):
+ """
+ Validate all attributes on *inst* that have a validator.
+
+ Lets all exceptions through.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
+ """
+ if _config._run_validators is False:
+ return
+
+ for a in fields(inst.__class__):
+ v = a.validator
+ if v is not None:
+ v(inst, a, getattr(inst, a.name))
+
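+# Illustrative usage sketch (not part of the vendored attrs source):
+#
+#   @attr.s
+#   class C:
+#       x = attr.ib(validator=attr.validators.instance_of(int))
+#
+#   c = C(1)
+#   c.x = "nope"  # plain attr.s classes don't validate on assignment
+#   validate(c)   # raises TypeError from the instance_of validator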
+
+def _is_slot_cls(cls):
+ return "__slots__" in cls.__dict__
+
+
+def _is_slot_attr(a_name, base_attr_map):
+ """
+ Check if the attribute name comes from a slot class.
+ """
+ return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
+
+
+def _make_init(
+ cls,
+ attrs,
+ pre_init,
+ post_init,
+ frozen,
+ slots,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ cls_on_setattr,
+ attrs_init,
+):
+ has_cls_on_setattr = (
+ cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
+ )
+
+ if frozen and has_cls_on_setattr:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = cache_hash or frozen
+ filtered_attrs = []
+ attr_dict = {}
+ for a in attrs:
+ if not a.init and a.default is NOTHING:
+ continue
+
+ filtered_attrs.append(a)
+ attr_dict[a.name] = a
+
+ if a.on_setattr is not None:
+ if frozen is True:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = True
+ elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP:
+ needs_cached_setattr = True
+
+ unique_filename = _generate_unique_filename(cls, "init")
+
+ script, globs, annotations = _attrs_to_init_script(
+ filtered_attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ has_cls_on_setattr,
+ attrs_init,
+ )
+ if cls.__module__ in sys.modules:
+ # This makes typing.get_type_hints(CLS.__init__) resolve string types.
+ globs.update(sys.modules[cls.__module__].__dict__)
+
+ globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
+
+ if needs_cached_setattr:
+ # Save the lookup overhead in __init__ if we need to circumvent
+ # setattr hooks.
+ globs["_setattr"] = _obj_setattr
+
+ init = _make_method(
+ "__attrs_init__" if attrs_init else "__init__",
+ script,
+ unique_filename,
+ globs,
+ )
+ init.__annotations__ = annotations
+
+ return init
+
+
+def _setattr(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*.
+ """
+ return "_setattr(self, '%s', %s)" % (attr_name, value_var)
+
+
+def _setattr_with_converter(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*, but run
+ its converter first.
+ """
+ return "_setattr(self, '%s', %s(%s))" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+def _assign(attr_name, value, has_on_setattr):
+ """
+ Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
+ delegate to _setattr.
+ """
+ if has_on_setattr:
+ return _setattr(attr_name, value, True)
+
+ return "self.%s = %s" % (attr_name, value)
+
+
+def _assign_with_converter(attr_name, value_var, has_on_setattr):
+ """
+ Unless *attr_name* has an on_setattr hook, use normal assignment after
+ conversion. Otherwise delegate to _setattr_with_converter.
+ """
+ if has_on_setattr:
+ return _setattr_with_converter(attr_name, value_var, True)
+
+ return "self.%s = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+def _attrs_to_init_script(
+ attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ has_cls_on_setattr,
+ attrs_init,
+):
+ """
+ Return a script of an initializer for *attrs* and a dict of globals.
+
+ The globals are expected by the generated script.
+
+ If *frozen* is True, we cannot set the attributes directly so we use
+ a cached ``object.__setattr__``.
+ """
+ lines = []
+ if pre_init:
+ lines.append("self.__attrs_pre_init__()")
+
+ if frozen is True:
+ if slots is True:
+ fmt_setter = _setattr
+ fmt_setter_with_converter = _setattr_with_converter
+ else:
+ # Dict frozen classes assign directly to __dict__.
+ # But only if the attribute doesn't come from an ancestor slot
+ # class.
+ # Note _inst_dict will be used again below if cache_hash is True
+ lines.append("_inst_dict = self.__dict__")
+
+ def fmt_setter(attr_name, value_var, has_on_setattr):
+ if _is_slot_attr(attr_name, base_attr_map):
+ return _setattr(attr_name, value_var, has_on_setattr)
+
+ return "_inst_dict['%s'] = %s" % (attr_name, value_var)
+
+ def fmt_setter_with_converter(
+ attr_name, value_var, has_on_setattr
+ ):
+ if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
+ return _setattr_with_converter(
+ attr_name, value_var, has_on_setattr
+ )
+
+ return "_inst_dict['%s'] = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+ else:
+ # Not frozen.
+ fmt_setter = _assign
+ fmt_setter_with_converter = _assign_with_converter
+
+ args = []
+ kw_only_args = []
+ attrs_to_validate = []
+
+ # This is a dictionary of names to validator and converter callables.
+ # Injecting this into __init__ globals lets us avoid lookups.
+ names_for_globals = {}
+ annotations = {"return": None}
+
+ for a in attrs:
+ if a.validator:
+ attrs_to_validate.append(a)
+
+ attr_name = a.name
+ has_on_setattr = a.on_setattr is not None or (
+ a.on_setattr is not setters.NO_OP and has_cls_on_setattr
+ )
+ arg_name = a.name.lstrip("_")
+
+ has_factory = isinstance(a.default, Factory)
+ if has_factory and a.default.takes_self:
+ maybe_self = "self"
+ else:
+ maybe_self = ""
+
+ if a.init is False:
+ if has_factory:
+ init_factory_name = _init_factory_pat.format(a.name)
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + "(%s)" % (maybe_self,),
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ init_factory_name + "(%s)" % (maybe_self,),
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ "attr_dict['%s'].default" % (attr_name,),
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ "attr_dict['%s'].default" % (attr_name,),
+ has_on_setattr,
+ )
+ )
+ elif a.default is not NOTHING and not has_factory:
+ arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ elif has_factory:
+ arg = "%s=NOTHING" % (arg_name,)
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+ lines.append("if %s is not NOTHING:" % (arg_name,))
+
+ init_factory_name = _init_factory_pat.format(a.name)
+ if a.converter is not None:
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(
+ " " + fmt_setter(attr_name, arg_name, has_on_setattr)
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.kw_only:
+ kw_only_args.append(arg_name)
+ else:
+ args.append(arg_name)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ if a.init is True:
+ if a.type is not None and a.converter is None:
+ annotations[arg_name] = a.type
+ elif a.converter is not None:
+ # Try to get the type from the converter.
+ t = _AnnotationExtractor(a.converter).get_first_param_type()
+ if t:
+ annotations[arg_name] = t
+
+ if attrs_to_validate: # we can skip this if there are no validators.
+ names_for_globals["_config"] = _config
+ lines.append("if _config._run_validators is True:")
+ for a in attrs_to_validate:
+ val_name = "__attr_validator_" + a.name
+ attr_name = "__attr_" + a.name
+ lines.append(
+ " %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
+ )
+ names_for_globals[val_name] = a.validator
+ names_for_globals[attr_name] = a
+
+ if post_init:
+ lines.append("self.__attrs_post_init__()")
+
+ # Because this is set only after __attrs_post_init__ is called, a crash
+ # will result if post-init tries to access the hash code. This seemed
+ # preferable to setting this beforehand, in which case alteration to
+ # field values during post-init combined with post-init accessing the
+ # hash code would result in silent bugs.
+ if cache_hash:
+ if frozen:
+ if slots:
+ # if frozen and slots, then _setattr defined above
+ init_hash_cache = "_setattr(self, '%s', %s)"
+ else:
+ # if frozen and not slots, then _inst_dict defined above
+ init_hash_cache = "_inst_dict['%s'] = %s"
+ else:
+ init_hash_cache = "self.%s = %s"
+ lines.append(init_hash_cache % (_hash_cache_field, "None"))
+
+ # For exceptions we rely on BaseException.__init__ for proper
+ # initialization.
+ if is_exc:
+ vals = ",".join("self." + a.name for a in attrs if a.init)
+
+ lines.append("BaseException.__init__(self, %s)" % (vals,))
+
+ args = ", ".join(args)
+ if kw_only_args:
+ args += "%s*, %s" % (
+ ", " if args else "", # leading comma
+ ", ".join(kw_only_args), # kw_only args
+ )
+ return (
+ """\
+def {init_name}(self, {args}):
+ {lines}
+""".format(
+ init_name=("__attrs_init__" if attrs_init else "__init__"),
+ args=args,
+ lines="\n ".join(lines) if lines else "pass",
+ ),
+ names_for_globals,
+ annotations,
+ )
+
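+# Illustrative sketch (not part of the vendored attrs source) of the
+# approximate script returned above for `x = attr.ib()` and
+# `y = attr.ib(default=1)` on a plain (non-frozen, non-slots) class:
+#
+#   def __init__(self, x, y=attr_dict['y'].default):
+#       self.x = x
+#       self.y = y
+#
+# `attr_dict` is injected into the function's globals by _make_init.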
+
+class Attribute:
+ """
+ *Read-only* representation of an attribute.
+
+ The class has *all* arguments of `attr.ib` (except for ``factory``,
+ which is only syntactic sugar for ``default=Factory(...)``) plus the
+ following:
+
+ - ``name`` (`str`): The name of the attribute.
+ - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+ from a base class.
+ - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables
+ that are used for comparing and ordering objects by this attribute,
+ respectively. These are set by passing a callable to `attr.ib`'s ``eq``,
+ ``order``, or ``cmp`` arguments. See also :ref:`comparison customization
+ <custom-comparison>`.
+
+ Instances of this class are frequently used for introspection purposes
+ like:
+
+ - `fields` returns a tuple of them.
+ - Validators get them passed as the first argument.
+ - The :ref:`field transformer <transform-fields>` hook receives a list of
+ them.
+
+ .. versionadded:: 20.1.0 *inherited*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+ equality checks and hashing anymore.
+ .. versionadded:: 21.1.0 *eq_key* and *order_key*
+
+ For the full version history of the fields, see `attr.ib`.
+ """
+
+ __slots__ = (
+ "name",
+ "default",
+ "validator",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "type",
+ "converter",
+ "kw_only",
+ "inherited",
+ "on_setattr",
+ )
+
+ def __init__(
+ self,
+ name,
+ default,
+ validator,
+ repr,
+ cmp, # XXX: unused, remove along with other cmp code.
+ hash,
+ init,
+ inherited,
+ metadata=None,
+ type=None,
+ converter=None,
+ kw_only=False,
+ eq=None,
+ eq_key=None,
+ order=None,
+ order_key=None,
+ on_setattr=None,
+ ):
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq_key or eq, order_key or order, True
+ )
+
+ # Cache this descriptor here to speed things up later.
+ bound_setattr = _obj_setattr.__get__(self, Attribute)
+
+ # Despite the big red warning, people *do* instantiate `Attribute`
+ # themselves.
+ bound_setattr("name", name)
+ bound_setattr("default", default)
+ bound_setattr("validator", validator)
+ bound_setattr("repr", repr)
+ bound_setattr("eq", eq)
+ bound_setattr("eq_key", eq_key)
+ bound_setattr("order", order)
+ bound_setattr("order_key", order_key)
+ bound_setattr("hash", hash)
+ bound_setattr("init", init)
+ bound_setattr("converter", converter)
+ bound_setattr(
+ "metadata",
+ (
+ types.MappingProxyType(dict(metadata)) # Shallow copy
+ if metadata
+ else _empty_metadata_singleton
+ ),
+ )
+ bound_setattr("type", type)
+ bound_setattr("kw_only", kw_only)
+ bound_setattr("inherited", inherited)
+ bound_setattr("on_setattr", on_setattr)
+
+ def __setattr__(self, name, value):
+ raise FrozenInstanceError()
+
+ @classmethod
+ def from_counting_attr(cls, name, ca, type=None):
+ # type holds the annotated value. deal with conflicts:
+ if type is None:
+ type = ca.type
+ elif ca.type is not None:
+ raise ValueError(
+ "Type annotation and type argument cannot both be present"
+ )
+ inst_dict = {
+ k: getattr(ca, k)
+ for k in Attribute.__slots__
+ if k
+ not in (
+ "name",
+ "validator",
+ "default",
+ "type",
+ "inherited",
+ ) # exclude methods and deprecated alias
+ }
+ return cls(
+ name=name,
+ validator=ca._validator,
+ default=ca._default,
+ type=type,
+ cmp=None,
+ inherited=False,
+ **inst_dict
+ )
+
+ # Don't use attr.evolve since fields(Attribute) doesn't work
+ def evolve(self, **changes):
+ """
+ Copy *self* and apply *changes*.
+
+ This works similarly to `attr.evolve` but that function does not work
+ with ``Attribute``.
+
+ It is mainly meant to be used for `transform-fields`.
+
+ .. versionadded:: 20.3.0
+ """
+ new = copy.copy(self)
+
+ new._setattrs(changes.items())
+
+ return new
+
+ # Don't use _add_pickle since fields(Attribute) doesn't work
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(
+ getattr(self, name) if name != "metadata" else dict(self.metadata)
+ for name in self.__slots__
+ )
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ self._setattrs(zip(self.__slots__, state))
+
+ def _setattrs(self, name_values_pairs):
+ bound_setattr = _obj_setattr.__get__(self, Attribute)
+ for name, value in name_values_pairs:
+ if name != "metadata":
+ bound_setattr(name, value)
+ else:
+ bound_setattr(
+ name,
+ types.MappingProxyType(dict(value))
+ if value
+ else _empty_metadata_singleton,
+ )
+
+
+_a = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=(name != "metadata"),
+ init=True,
+ inherited=False,
+ )
+ for name in Attribute.__slots__
+]
+
+Attribute = _add_hash(
+ _add_eq(
+ _add_repr(Attribute, attrs=_a),
+ attrs=[a for a in _a if a.name != "inherited"],
+ ),
+ attrs=[a for a in _a if a.hash and a.name != "inherited"],
+)
+
+
+class _CountingAttr:
+ """
+ Intermediate representation of attributes that uses a counter to preserve
+ the order in which the attributes have been defined.
+
+    *Internal* data structure of the attrs library. Running into it is most
+    likely the result of a bug, such as a forgotten `@attr.s` decorator.
+ """
+
+ __slots__ = (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "_validator",
+ "converter",
+ "type",
+ "kw_only",
+ "on_setattr",
+ )
+ __attrs_attrs__ = tuple(
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=True,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ )
+ for name in (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "order",
+ "hash",
+ "init",
+ "on_setattr",
+ )
+ ) + (
+ Attribute(
+ name="metadata",
+ default=None,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=False,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ ),
+ )
+ cls_counter = 0
+
+ def __init__(
+ self,
+ default,
+ validator,
+ repr,
+ cmp,
+ hash,
+ init,
+ converter,
+ metadata,
+ type,
+ kw_only,
+ eq,
+ eq_key,
+ order,
+ order_key,
+ on_setattr,
+ ):
+ _CountingAttr.cls_counter += 1
+ self.counter = _CountingAttr.cls_counter
+ self._default = default
+ self._validator = validator
+ self.converter = converter
+ self.repr = repr
+ self.eq = eq
+ self.eq_key = eq_key
+ self.order = order
+ self.order_key = order_key
+ self.hash = hash
+ self.init = init
+ self.metadata = metadata
+ self.type = type
+ self.kw_only = kw_only
+ self.on_setattr = on_setattr
+
+ def validator(self, meth):
+ """
+ Decorator that adds *meth* to the list of validators.
+
+ Returns *meth* unchanged.
+
+ .. versionadded:: 17.1.0
+ """
+ if self._validator is None:
+ self._validator = meth
+ else:
+ self._validator = and_(self._validator, meth)
+ return meth
+
+ def default(self, meth):
+ """
+        Decorator that allows setting the default for an attribute.
+
+ Returns *meth* unchanged.
+
+ :raises DefaultAlreadySetError: If default has been set before.
+
+ .. versionadded:: 17.1.0
+ """
+ if self._default is not NOTHING:
+ raise DefaultAlreadySetError()
+
+ self._default = Factory(meth, takes_self=True)
+
+ return meth
+
+
+_CountingAttr = _add_eq(_add_repr(_CountingAttr))
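+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows the `@x.validator` / `@x.default` decorators defined on
+# `_CountingAttr` above; the helper name `_demo_counting_attr_decorators`
+# is hypothetical and never called.
+def _demo_counting_attr_decorators():
+    @attrs
+    class C:
+        x = attrib()
+        y = attrib()
+
+        @x.validator
+        def _check_x(self, attribute, value):
+            # Runs during __init__ (and on setattr if so configured).
+            if value < 0:
+                raise ValueError("x must be non-negative")
+
+        @y.default
+        def _default_y(self):
+            # Defaults set via the decorator receive the partially
+            # initialized instance (takes_self=True).
+            return self.x * 2
+
+    assert C(x=3).y == 6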
+
+
+class Factory:
+ """
+ Stores a factory callable.
+
+ If passed as the default value to `attrs.field`, the factory is used to
+ generate a new value.
+
+    :param callable factory: A callable that takes either no arguments or
+        exactly one mandatory positional argument, depending on *takes_self*.
+ :param bool takes_self: Pass the partially initialized instance that is
+ being initialized as a positional argument.
+
+ .. versionadded:: 17.1.0 *takes_self*
+ """
+
+ __slots__ = ("factory", "takes_self")
+
+ def __init__(self, factory, takes_self=False):
+ """
+ `Factory` is part of the default machinery so if we want a default
+ value here, we have to implement it ourselves.
+ """
+ self.factory = factory
+ self.takes_self = takes_self
+
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(getattr(self, name) for name in self.__slots__)
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ for name, value in zip(self.__slots__, state):
+ setattr(self, name, value)
+
+
+_f = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=True,
+ init=True,
+ inherited=False,
+ )
+ for name in Factory.__slots__
+]
+
+Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
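+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `Factory` producing a fresh default per instance; the helper
+# name `_demo_factory` is hypothetical and never called.
+def _demo_factory():
+    @attrs
+    class Bag:
+        items = attrib(default=Factory(list))
+
+    a, b = Bag(), Bag()
+    a.items.append(1)
+    # Each instance received its own list, unlike a shared mutable default.
+    assert a.items == [1] and b.items == []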
+
+
+def make_class(name, attrs, bases=(object,), **attributes_arguments):
+ """
+ A quick way to create a new class called *name* with *attrs*.
+
+ :param str name: The name for the new class.
+
+ :param attrs: A list of names or a dictionary of mappings of names to
+ attributes.
+
+ If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
+ `collections.OrderedDict` otherwise), the order is deduced from
+ the order of the names or attributes inside *attrs*. Otherwise the
+ order of the definition of the attributes is used.
+ :type attrs: `list` or `dict`
+
+ :param tuple bases: Classes that the new class will subclass.
+
+ :param attributes_arguments: Passed unmodified to `attr.s`.
+
+ :return: A new class with *attrs*.
+ :rtype: type
+
+ .. versionadded:: 17.1.0 *bases*
+ .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
+ """
+ if isinstance(attrs, dict):
+ cls_dict = attrs
+ elif isinstance(attrs, (list, tuple)):
+ cls_dict = {a: attrib() for a in attrs}
+ else:
+ raise TypeError("attrs argument must be a dict or a list.")
+
+ pre_init = cls_dict.pop("__attrs_pre_init__", None)
+ post_init = cls_dict.pop("__attrs_post_init__", None)
+ user_init = cls_dict.pop("__init__", None)
+
+ body = {}
+ if pre_init is not None:
+ body["__attrs_pre_init__"] = pre_init
+ if post_init is not None:
+ body["__attrs_post_init__"] = post_init
+ if user_init is not None:
+ body["__init__"] = user_init
+
+ type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
+
+ # For pickling to work, the __module__ variable needs to be set to the
+ # frame where the class is created. Bypass this step in environments where
+ # sys._getframe is not defined (Jython for example) or sys._getframe is not
+ # defined for arguments greater than 0 (IronPython).
+ try:
+ type_.__module__ = sys._getframe(1).f_globals.get(
+ "__name__", "__main__"
+ )
+ except (AttributeError, ValueError):
+ pass
+
+ # We do it here for proper warnings with meaningful stacklevel.
+ cmp = attributes_arguments.pop("cmp", None)
+ (
+ attributes_arguments["eq"],
+ attributes_arguments["order"],
+ ) = _determine_attrs_eq_order(
+ cmp,
+ attributes_arguments.get("eq"),
+ attributes_arguments.get("order"),
+ True,
+ )
+
+ return _attrs(these=cls_dict, **attributes_arguments)(type_)
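+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `make_class` building an attrs class from attribute names; the
+# helper name `_demo_make_class` is hypothetical and never called.
+def _demo_make_class():
+    Point = make_class("Point", ["x", "y"])
+    p = Point(1, 2)
+    assert (p.x, p.y) == (1, 2)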
+
+
+# These are required within this module, so we define them here and merely
+# import them into .validators / .converters.
+
+
+@attrs(slots=True, hash=True)
+class _AndValidator:
+ """
+    Compose many validators into a single one.
+ """
+
+ _validators = attrib()
+
+ def __call__(self, inst, attr, value):
+ for v in self._validators:
+ v(inst, attr, value)
+
+
+def and_(*validators):
+ """
+ A validator that composes multiple validators into one.
+
+ When called on a value, it runs all wrapped validators.
+
+ :param callables validators: Arbitrary number of validators.
+
+ .. versionadded:: 17.1.0
+ """
+ vals = []
+ for validator in validators:
+ vals.extend(
+ validator._validators
+ if isinstance(validator, _AndValidator)
+ else [validator]
+ )
+
+ return _AndValidator(tuple(vals))
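+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `and_` composing validators (a deferred import avoids the
+# circular dependency with .validators); `_demo_and_` is hypothetical and
+# never called.
+def _demo_and_():
+    from .validators import instance_of
+
+    def positive(inst, attr, value):
+        if value <= 0:
+            raise ValueError("%s must be positive" % (attr.name,))
+
+    @attrs
+    class C:
+        x = attrib(validator=and_(instance_of(int), positive))
+
+    C(3)  # passes both validators
+    try:
+        C(-3)  # passes instance_of but fails positive
+    except ValueError:
+        pass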
+
+
+def pipe(*converters):
+ """
+ A converter that composes multiple converters into one.
+
+ When called on a value, it runs all wrapped converters, returning the
+ *last* value.
+
+    Type annotations will be inferred from the wrapped converters, if
+    they have any.
+
+ :param callables converters: Arbitrary number of converters.
+
+ .. versionadded:: 20.1.0
+ """
+
+ def pipe_converter(val):
+ for converter in converters:
+ val = converter(val)
+
+ return val
+
+ if not converters:
+ # If the converter list is empty, pipe_converter is the identity.
+ A = typing.TypeVar("A")
+ pipe_converter.__annotations__ = {"val": A, "return": A}
+ else:
+ # Get parameter type from first converter.
+ t = _AnnotationExtractor(converters[0]).get_first_param_type()
+ if t:
+ pipe_converter.__annotations__["val"] = t
+
+ # Get return type from last converter.
+ rt = _AnnotationExtractor(converters[-1]).get_return_type()
+ if rt:
+ pipe_converter.__annotations__["return"] = rt
+
+ return pipe_converter
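+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It chains two plain callables with `pipe`; `_demo_pipe` is hypothetical
+# and never called.
+def _demo_pipe():
+    clean = pipe(str.strip, str.lower)
+    assert clean("  MIT  ") == "mit"  # strip first, then lower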
diff --git a/lib/spack/external/_vendoring/attr/_next_gen.py b/lib/spack/external/_vendoring/attr/_next_gen.py
new file mode 100644
index 0000000000..5a06a74385
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_next_gen.py
@@ -0,0 +1,220 @@
+# SPDX-License-Identifier: MIT
+
+"""
+These are Python 3.6+-only and keyword-only APIs that call `attr.s` and
+`attr.ib` with different default values.
+"""
+
+
+from functools import partial
+
+from . import setters
+from ._funcs import asdict as _asdict
+from ._funcs import astuple as _astuple
+from ._make import (
+ NOTHING,
+ _frozen_setattrs,
+ _ng_default_on_setattr,
+ attrib,
+ attrs,
+)
+from .exceptions import UnannotatedAttributeError
+
+
+def define(
+ maybe_cls=None,
+ *,
+ these=None,
+ repr=None,
+ hash=None,
+ init=None,
+ slots=True,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=None,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=True,
+ eq=None,
+ order=False,
+ auto_detect=True,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+ match_args=True,
+):
+ r"""
+ Define an ``attrs`` class.
+
+    Differences from the classic `attr.s` that it uses underneath:
+
+    - Automatically detect whether or not *auto_attribs* should be `True`
+      (cf. the *auto_attribs* parameter).
+ - If *frozen* is `False`, run converters and validators when setting an
+ attribute by default.
+ - *slots=True*
+
+ .. caution::
+
+ Usually this has only upsides and few visible effects in everyday
+      programming. But it *can* lead to some surprising behaviors, so please
+ make sure to read :term:`slotted classes`.
+ - *auto_exc=True*
+ - *auto_detect=True*
+ - *order=False*
+ - Some options that were only relevant on Python 2 or were kept around for
+ backwards-compatibility have been removed.
+
+ Please note that these are all defaults and you can change them as you
+ wish.
+
+ :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
+ exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
+
+ 1. If any attributes are annotated and no unannotated `attrs.fields`\ s
+ are found, it assumes *auto_attribs=True*.
+ 2. Otherwise it assumes *auto_attribs=False* and tries to collect
+ `attrs.fields`\ s.
+
+ For now, please refer to `attr.s` for the rest of the parameters.
+
+ .. versionadded:: 20.1.0
+ .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
+ """
+
+ def do_it(cls, auto_attribs):
+ return attrs(
+ maybe_cls=cls,
+ these=these,
+ repr=repr,
+ hash=hash,
+ init=init,
+ slots=slots,
+ frozen=frozen,
+ weakref_slot=weakref_slot,
+ str=str,
+ auto_attribs=auto_attribs,
+ kw_only=kw_only,
+ cache_hash=cache_hash,
+ auto_exc=auto_exc,
+ eq=eq,
+ order=order,
+ auto_detect=auto_detect,
+ collect_by_mro=True,
+ getstate_setstate=getstate_setstate,
+ on_setattr=on_setattr,
+ field_transformer=field_transformer,
+ match_args=match_args,
+ )
+
+ def wrap(cls):
+ """
+ Making this a wrapper ensures this code runs during class creation.
+
+ We also ensure that frozen-ness of classes is inherited.
+ """
+ nonlocal frozen, on_setattr
+
+ had_on_setattr = on_setattr not in (None, setters.NO_OP)
+
+ # By default, mutable classes convert & validate on setattr.
+ if frozen is False and on_setattr is None:
+ on_setattr = _ng_default_on_setattr
+
+ # However, if we subclass a frozen class, we inherit the immutability
+ # and disable on_setattr.
+ for base_cls in cls.__bases__:
+ if base_cls.__setattr__ is _frozen_setattrs:
+ if had_on_setattr:
+ raise ValueError(
+ "Frozen classes can't use on_setattr "
+ "(frozen-ness was inherited)."
+ )
+
+ on_setattr = setters.NO_OP
+ break
+
+ if auto_attribs is not None:
+ return do_it(cls, auto_attribs)
+
+ try:
+ return do_it(cls, True)
+ except UnannotatedAttributeError:
+ return do_it(cls, False)
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+ # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
+
+mutable = define
+frozen = partial(define, frozen=True, on_setattr=None)
+
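+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `define` guessing *auto_attribs* from annotations and `frozen`
+# rejecting mutation; `_demo_define_frozen` is hypothetical and never called.
+def _demo_define_frozen():
+    from .exceptions import FrozenInstanceError
+
+    @define
+    class Point:
+        x: int
+        y: int = 0
+
+    assert (Point(1).x, Point(1).y) == (1, 0)
+
+    @frozen
+    class Locked:
+        value: int
+
+    try:
+        Locked(1).value = 2  # frozen classes reject assignment
+    except FrozenInstanceError:
+        pass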
+
+def field(
+ *,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ hash=None,
+ init=True,
+ metadata=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+):
+ """
+ Identical to `attr.ib`, except keyword-only and with some arguments
+ removed.
+
+ .. versionadded:: 20.1.0
+ """
+ return attrib(
+ default=default,
+ validator=validator,
+ repr=repr,
+ hash=hash,
+ init=init,
+ metadata=metadata,
+ converter=converter,
+ factory=factory,
+ kw_only=kw_only,
+ eq=eq,
+ order=order,
+ on_setattr=on_setattr,
+ )
+
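+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `field` with a converter under `define`; `_demo_field` is
+# hypothetical and never called.
+def _demo_field():
+    @define
+    class Server:
+        host: str = field(converter=str.strip)
+        port: int = field(default=8080)
+
+    s = Server("  example.org  ")
+    assert s.host == "example.org" and s.port == 8080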
+
+def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
+ """
+ Same as `attr.asdict`, except that collections types are always retained
+ and dict is always used as *dict_factory*.
+
+ .. versionadded:: 21.3.0
+ """
+ return _asdict(
+ inst=inst,
+ recurse=recurse,
+ filter=filter,
+ value_serializer=value_serializer,
+ retain_collection_types=True,
+ )
+
+
+def astuple(inst, *, recurse=True, filter=None):
+ """
+ Same as `attr.astuple`, except that collections types are always retained
+ and `tuple` is always used as the *tuple_factory*.
+
+ .. versionadded:: 21.3.0
+ """
+ return _astuple(
+ inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
+ )
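+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows the keyword-only `asdict`/`astuple` wrappers defined above;
+# `_demo_serialization` is hypothetical and never called.
+def _demo_serialization():
+    @define
+    class Point:
+        x: int
+        y: int
+
+    assert asdict(Point(1, 2)) == {"x": 1, "y": 2}
+    assert astuple(Point(1, 2)) == (1, 2)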
diff --git a/lib/spack/external/_vendoring/attr/_version_info.py b/lib/spack/external/_vendoring/attr/_version_info.py
new file mode 100644
index 0000000000..51a1312f97
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_version_info.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+
+from functools import total_ordering
+
+from ._funcs import astuple
+from ._make import attrib, attrs
+
+
+@total_ordering
+@attrs(eq=False, order=False, slots=True, frozen=True)
+class VersionInfo:
+ """
+    A version object that can be compared to tuples of length 1--4:
+
+ >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
+ True
+ >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
+ True
+ >>> vi = attr.VersionInfo(19, 2, 0, "final")
+ >>> vi < (19, 1, 1)
+ False
+ >>> vi < (19,)
+ False
+ >>> vi == (19, 2,)
+ True
+ >>> vi == (19, 2, 1)
+ False
+
+ .. versionadded:: 19.2
+ """
+
+ year = attrib(type=int)
+ minor = attrib(type=int)
+ micro = attrib(type=int)
+ releaselevel = attrib(type=str)
+
+ @classmethod
+ def _from_version_string(cls, s):
+ """
+        Parse *s* and return a `VersionInfo`.
+ """
+ v = s.split(".")
+ if len(v) == 3:
+ v.append("final")
+
+ return cls(
+ year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
+ )
+
+ def _ensure_tuple(self, other):
+ """
+ Ensure *other* is a tuple of a valid length.
+
+ Returns a possibly transformed *other* and ourselves as a tuple of
+ the same length as *other*.
+ """
+
+ if self.__class__ is other.__class__:
+ other = astuple(other)
+
+ if not isinstance(other, tuple):
+ raise NotImplementedError
+
+ if not (1 <= len(other) <= 4):
+ raise NotImplementedError
+
+ return astuple(self)[: len(other)], other
+
+ def __eq__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ return us == them
+
+ def __lt__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+ # have to do anything special with releaselevel for now.
+ return us < them
diff --git a/lib/spack/external/_vendoring/attr/_version_info.pyi b/lib/spack/external/_vendoring/attr/_version_info.pyi
new file mode 100644
index 0000000000..45ced08633
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/_version_info.pyi
@@ -0,0 +1,9 @@
+class VersionInfo:
+ @property
+ def year(self) -> int: ...
+ @property
+ def minor(self) -> int: ...
+ @property
+ def micro(self) -> int: ...
+ @property
+ def releaselevel(self) -> str: ...
diff --git a/lib/spack/external/_vendoring/attr/converters.py b/lib/spack/external/_vendoring/attr/converters.py
new file mode 100644
index 0000000000..a73626c26d
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/converters.py
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful converters.
+"""
+
+
+import typing
+
+from ._compat import _AnnotationExtractor
+from ._make import NOTHING, Factory, pipe
+
+
+__all__ = [
+ "default_if_none",
+ "optional",
+ "pipe",
+ "to_bool",
+]
+
+
+def optional(converter):
+ """
+ A converter that allows an attribute to be optional. An optional attribute
+ is one which can be set to ``None``.
+
+    Type annotations will be inferred from the wrapped converter, if it
+    has any.
+
+ :param callable converter: the converter that is used for non-``None``
+ values.
+
+ .. versionadded:: 17.1.0
+ """
+
+ def optional_converter(val):
+ if val is None:
+ return None
+ return converter(val)
+
+ xtr = _AnnotationExtractor(converter)
+
+ t = xtr.get_first_param_type()
+ if t:
+ optional_converter.__annotations__["val"] = typing.Optional[t]
+
+ rt = xtr.get_return_type()
+ if rt:
+ optional_converter.__annotations__["return"] = typing.Optional[rt]
+
+ return optional_converter
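+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It wraps `int` with `optional` so ``None`` passes through untouched;
+# `_demo_optional` is hypothetical and never called.
+def _demo_optional():
+    to_int_or_none = optional(int)
+    assert to_int_or_none("3") == 3
+    assert to_int_or_none(None) is None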
+
+
+def default_if_none(default=NOTHING, factory=None):
+ """
+    A converter that replaces ``None`` values with *default* or the
+    result of *factory*.
+
+ :param default: Value to be used if ``None`` is passed. Passing an instance
+ of `attrs.Factory` is supported, however the ``takes_self`` option
+ is *not*.
+ :param callable factory: A callable that takes no parameters whose result
+ is used if ``None`` is passed.
+
+    :raises TypeError: If **neither** *default* nor *factory* is passed.
+ :raises TypeError: If **both** *default* and *factory* are passed.
+ :raises ValueError: If an instance of `attrs.Factory` is passed with
+ ``takes_self=True``.
+
+ .. versionadded:: 18.2.0
+ """
+ if default is NOTHING and factory is None:
+ raise TypeError("Must pass either `default` or `factory`.")
+
+ if default is not NOTHING and factory is not None:
+ raise TypeError(
+ "Must pass either `default` or `factory` but not both."
+ )
+
+ if factory is not None:
+ default = Factory(factory)
+
+ if isinstance(default, Factory):
+ if default.takes_self:
+ raise ValueError(
+ "`takes_self` is not supported by default_if_none."
+ )
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default.factory()
+
+ else:
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default
+
+ return default_if_none_converter
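+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It replaces ``None`` with a fresh list via *factory*;
+# `_demo_default_if_none` is hypothetical and never called.
+def _demo_default_if_none():
+    conv = default_if_none(factory=list)
+    assert conv(None) == []
+    assert conv([1, 2]) == [1, 2]  # non-None values pass through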
+
+
+def to_bool(val):
+ """
+    Convert "boolean" strings (e.g., from environment variables) to real booleans.
+
+ Values mapping to :code:`True`:
+
+ - :code:`True`
+ - :code:`"true"` / :code:`"t"`
+ - :code:`"yes"` / :code:`"y"`
+ - :code:`"on"`
+ - :code:`"1"`
+ - :code:`1`
+
+ Values mapping to :code:`False`:
+
+ - :code:`False`
+ - :code:`"false"` / :code:`"f"`
+ - :code:`"no"` / :code:`"n"`
+ - :code:`"off"`
+ - :code:`"0"`
+ - :code:`0`
+
+ :raises ValueError: for any other value.
+
+ .. versionadded:: 21.3.0
+ """
+ if isinstance(val, str):
+ val = val.lower()
+ truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
+ falsy = {False, "false", "f", "no", "n", "off", "0", 0}
+ try:
+ if val in truthy:
+ return True
+ if val in falsy:
+ return False
+ except TypeError:
+ # Raised when "val" is not hashable (e.g., lists)
+ pass
+ raise ValueError("Cannot convert value to bool: {}".format(val))
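+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It exercises `to_bool` with the documented inputs; `_demo_to_bool` is
+# hypothetical and never called.
+def _demo_to_bool():
+    assert to_bool("YES") is True  # matching is case-insensitive
+    assert to_bool(0) is False
+    try:
+        to_bool("maybe")  # unrecognized values raise ValueError
+    except ValueError:
+        pass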
diff --git a/lib/spack/external/_vendoring/attr/converters.pyi b/lib/spack/external/_vendoring/attr/converters.pyi
new file mode 100644
index 0000000000..0f58088a37
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/converters.pyi
@@ -0,0 +1,13 @@
+from typing import Callable, Optional, TypeVar, overload
+
+from . import _ConverterType
+
+_T = TypeVar("_T")
+
+def pipe(*validators: _ConverterType) -> _ConverterType: ...
+def optional(converter: _ConverterType) -> _ConverterType: ...
+@overload
+def default_if_none(default: _T) -> _ConverterType: ...
+@overload
+def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ...
+def to_bool(val: str) -> bool: ...
diff --git a/lib/spack/external/_vendoring/attr/exceptions.py b/lib/spack/external/_vendoring/attr/exceptions.py
new file mode 100644
index 0000000000..5dc51e0a82
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/exceptions.py
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: MIT
+
+
+class FrozenError(AttributeError):
+ """
+    An attempt has been made to modify a frozen/immutable instance or
+    attribute.
+
+ It mirrors the behavior of ``namedtuples`` by using the same error message
+ and subclassing `AttributeError`.
+
+ .. versionadded:: 20.1.0
+ """
+
+ msg = "can't set attribute"
+ args = [msg]
+
+
+class FrozenInstanceError(FrozenError):
+ """
+    An attempt has been made to modify a frozen instance.
+
+ .. versionadded:: 16.1.0
+ """
+
+
+class FrozenAttributeError(FrozenError):
+ """
+    An attempt has been made to modify a frozen attribute.
+
+ .. versionadded:: 20.1.0
+ """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+ """
+ An ``attrs`` function couldn't find an attribute that the user asked for.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class NotAnAttrsClassError(ValueError):
+ """
+ A non-``attrs`` class has been passed into an ``attrs`` function.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class DefaultAlreadySetError(RuntimeError):
+ """
+    A default has been set using ``attr.ib()`` and an attempt is made to
+    reset it using the decorator.
+
+ .. versionadded:: 17.1.0
+ """
+
+
+class UnannotatedAttributeError(RuntimeError):
+ """
+ A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
+ annotation.
+
+ .. versionadded:: 17.3.0
+ """
+
+
+class PythonTooOldError(RuntimeError):
+ """
+    An attempt was made to use an ``attrs`` feature that requires a newer
+    Python version.
+
+ .. versionadded:: 18.2.0
+ """
+
+
+class NotCallableError(TypeError):
+ """
+    An ``attr.ib()`` requiring a callable has been set with a value
+ that is not callable.
+
+ .. versionadded:: 19.2.0
+ """
+
+ def __init__(self, msg, value):
+ super(TypeError, self).__init__(msg, value)
+ self.msg = msg
+ self.value = value
+
+ def __str__(self):
+ return str(self.msg)
diff --git a/lib/spack/external/_vendoring/attr/exceptions.pyi b/lib/spack/external/_vendoring/attr/exceptions.pyi
new file mode 100644
index 0000000000..f2680118b4
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/exceptions.pyi
@@ -0,0 +1,17 @@
+from typing import Any
+
+class FrozenError(AttributeError):
+ msg: str = ...
+
+class FrozenInstanceError(FrozenError): ...
+class FrozenAttributeError(FrozenError): ...
+class AttrsAttributeNotFoundError(ValueError): ...
+class NotAnAttrsClassError(ValueError): ...
+class DefaultAlreadySetError(RuntimeError): ...
+class UnannotatedAttributeError(RuntimeError): ...
+class PythonTooOldError(RuntimeError): ...
+
+class NotCallableError(TypeError):
+ msg: str = ...
+ value: Any = ...
+ def __init__(self, msg: str, value: Any) -> None: ...
diff --git a/lib/spack/external/_vendoring/attr/filters.py b/lib/spack/external/_vendoring/attr/filters.py
new file mode 100644
index 0000000000..baa25e9465
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/filters.py
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful filters for `attr.asdict`.
+"""
+
+from ._make import Attribute
+
+
+def _split_what(what):
+ """
+ Returns a tuple of `frozenset`s of classes and attributes.
+ """
+ return (
+ frozenset(cls for cls in what if isinstance(cls, type)),
+ frozenset(cls for cls in what if isinstance(cls, Attribute)),
+ )
+
+
+def include(*what):
+ """
+ Include *what*.
+
+ :param what: What to include.
+ :type what: `list` of `type` or `attrs.Attribute`\\ s
+
+ :rtype: `callable`
+ """
+ cls, attrs = _split_what(what)
+
+ def include_(attribute, value):
+ return value.__class__ in cls or attribute in attrs
+
+ return include_
+
+
+def exclude(*what):
+ """
+ Exclude *what*.
+
+ :param what: What to exclude.
+ :type what: `list` of classes or `attrs.Attribute`\\ s.
+
+ :rtype: `callable`
+ """
+ cls, attrs = _split_what(what)
+
+ def exclude_(attribute, value):
+ return value.__class__ not in cls and attribute not in attrs
+
+ return exclude_
diff --git a/lib/spack/external/_vendoring/attr/filters.pyi b/lib/spack/external/_vendoring/attr/filters.pyi
new file mode 100644
index 0000000000..993866865e
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/filters.pyi
@@ -0,0 +1,6 @@
+from typing import Any, Union
+
+from . import Attribute, _FilterType
+
+def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
+def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
diff --git a/lib/spack/external/_vendoring/attr/py.typed b/lib/spack/external/_vendoring/attr/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/py.typed
diff --git a/lib/spack/external/_vendoring/attr/setters.py b/lib/spack/external/_vendoring/attr/setters.py
new file mode 100644
index 0000000000..12ed6750df
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/setters.py
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly used hooks for on_setattr.
+"""
+
+
+from . import _config
+from .exceptions import FrozenAttributeError
+
+
+def pipe(*setters):
+ """
+ Run all *setters* and return the return value of the last one.
+
+ .. versionadded:: 20.1.0
+ """
+
+ def wrapped_pipe(instance, attrib, new_value):
+ rv = new_value
+
+ for setter in setters:
+ rv = setter(instance, attrib, rv)
+
+ return rv
+
+ return wrapped_pipe
+
+
+def frozen(_, __, ___):
+ """
+    Prevent an attribute from being modified.
+
+ .. versionadded:: 20.1.0
+ """
+ raise FrozenAttributeError()
+
+
+def validate(instance, attrib, new_value):
+ """
+ Run *attrib*'s validator on *new_value* if it has one.
+
+ .. versionadded:: 20.1.0
+ """
+ if _config._run_validators is False:
+ return new_value
+
+ v = attrib.validator
+ if not v:
+ return new_value
+
+ v(instance, attrib, new_value)
+
+ return new_value
+
+
+def convert(instance, attrib, new_value):
+ """
+ Run *attrib*'s converter -- if it has one -- on *new_value* and return the
+ result.
+
+ .. versionadded:: 20.1.0
+ """
+ c = attrib.converter
+ if c:
+ return c(new_value)
+
+ return new_value
+
+
+# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
+# Sphinx's autodata stopped working, so the docstring is inlined in the API docs.
+NO_OP = object()
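+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It installs `validate` as a class-wide *on_setattr* hook so assignments
+# re-run the attribute's validator; `_demo_validate_on_setattr` is
+# hypothetical and never called.
+def _demo_validate_on_setattr():
+    import attr
+
+    @attr.s(on_setattr=validate)
+    class C:
+        x = attr.ib(validator=attr.validators.instance_of(int))
+
+    c = C(1)
+    try:
+        c.x = "nope"  # instance_of(int) rejects the new value
+    except TypeError:
+        pass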
diff --git a/lib/spack/external/_vendoring/attr/setters.pyi b/lib/spack/external/_vendoring/attr/setters.pyi
new file mode 100644
index 0000000000..3f5603c2b0
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/setters.pyi
@@ -0,0 +1,19 @@
+from typing import Any, NewType, NoReturn, TypeVar, cast
+
+from . import Attribute, _OnSetAttrType
+
+_T = TypeVar("_T")
+
+def frozen(
+ instance: Any, attribute: Attribute[Any], new_value: Any
+) -> NoReturn: ...
+def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ...
+def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
+
+# convert is allowed to return Any, because they can be chained using pipe.
+def convert(
+ instance: Any, attribute: Attribute[Any], new_value: Any
+) -> Any: ...
+
+_NoOpType = NewType("_NoOpType", object)
+NO_OP: _NoOpType
diff --git a/lib/spack/external/_vendoring/attr/validators.py b/lib/spack/external/_vendoring/attr/validators.py
new file mode 100644
index 0000000000..eece517da8
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/validators.py
@@ -0,0 +1,594 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful validators.
+"""
+
+
+import operator
+import re
+
+from contextlib import contextmanager
+
+from ._config import get_run_validators, set_run_validators
+from ._make import _AndValidator, and_, attrib, attrs
+from .exceptions import NotCallableError
+
+
+try:
+ Pattern = re.Pattern
+except AttributeError: # Python <3.7 lacks a Pattern type.
+ Pattern = type(re.compile(""))
+
+
+__all__ = [
+ "and_",
+ "deep_iterable",
+ "deep_mapping",
+ "disabled",
+ "ge",
+ "get_disabled",
+ "gt",
+ "in_",
+ "instance_of",
+ "is_callable",
+ "le",
+ "lt",
+ "matches_re",
+ "max_len",
+ "min_len",
+ "optional",
+ "provides",
+ "set_disabled",
+]
+
+
+def set_disabled(disabled):
+ """
+ Globally disable or enable running validators.
+
+ By default, they are run.
+
+ :param disabled: If ``True``, disable running all validators.
+ :type disabled: bool
+
+ .. warning::
+
+ This function is not thread-safe!
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(not disabled)
+
+
+def get_disabled():
+ """
+ Return a bool indicating whether validators are currently disabled or not.
+
+ :return: ``True`` if validators are currently disabled.
+ :rtype: bool
+
+ .. versionadded:: 21.3.0
+ """
+ return not get_run_validators()
+
+
+@contextmanager
+def disabled():
+ """
+ Context manager that disables running validators within its context.
+
+ .. warning::
+
+ This context manager is not thread-safe!
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(False)
+ try:
+ yield
+ finally:
+ set_run_validators(True)
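+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It shows `disabled` suspending validation inside the context;
+# `_demo_disabled` is hypothetical and never called.
+def _demo_disabled():
+    @attrs
+    class C:
+        x = attrib(validator=instance_of(int))
+
+    with disabled():
+        C("not checked")  # validators are skipped inside the context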
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+ type = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not isinstance(value, self.type):
+ raise TypeError(
+ "'{name}' must be {type!r} (got {value!r} that is a "
+ "{actual!r}).".format(
+ name=attr.name,
+ type=self.type,
+ actual=value.__class__,
+ value=value,
+ ),
+ attr,
+ self.type,
+ value,
+ )
+
+ def __repr__(self):
+ return "<instance_of validator for type {type!r}>".format(
+ type=self.type
+ )
+
+
+def instance_of(type):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed
+    using `isinstance`, therefore it's also valid to pass a tuple of types).
+
+ :param type: The type to check for.
+ :type type: type or tuple of types
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected type, and the value it
+ got.
+ """
+ return _InstanceOfValidator(type)
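+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources;
+# `_demo_instance_of` is hypothetical and never called.
+def _demo_instance_of():
+    @attrs
+    class C:
+        x = attrib(validator=instance_of(int))
+
+    C(42)
+    try:
+        C("42")  # wrong type raises TypeError
+    except TypeError:
+        pass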
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+ pattern = attrib()
+ match_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.match_func(value):
+ raise ValueError(
+ "'{name}' must match regex {pattern!r}"
+ " ({value!r} doesn't)".format(
+ name=attr.name, pattern=self.pattern.pattern, value=value
+ ),
+ attr,
+ self.pattern,
+ value,
+ )
+
+ def __repr__(self):
+ return "<matches_re validator for pattern {pattern!r}>".format(
+ pattern=self.pattern
+ )
+
+
+def matches_re(regex, flags=0, func=None):
+ r"""
+ A validator that raises `ValueError` if the initializer is called
+ with a string that doesn't match *regex*.
+
+ :param regex: a regex string or precompiled pattern to match against
+ :param int flags: flags that will be passed to the underlying re function
+ (default 0)
+ :param callable func: which underlying `re` function to call. Valid options
+ are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
+ means `re.fullmatch`. For performance reasons, the pattern is always
+ precompiled using `re.compile`.
+
+ .. versionadded:: 19.2.0
+ .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+ """
+ valid_funcs = (re.fullmatch, None, re.search, re.match)
+ if func not in valid_funcs:
+ raise ValueError(
+ "'func' must be one of {}.".format(
+ ", ".join(
+ sorted(
+ e and e.__name__ or "None" for e in set(valid_funcs)
+ )
+ )
+ )
+ )
+
+ if isinstance(regex, Pattern):
+ if flags:
+ raise TypeError(
+ "'flags' can only be used with a string pattern; "
+ "pass flags to re.compile() instead"
+ )
+ pattern = regex
+ else:
+ pattern = re.compile(regex, flags)
+
+ if func is re.match:
+ match_func = pattern.match
+ elif func is re.search:
+ match_func = pattern.search
+ else:
+ match_func = pattern.fullmatch
+
+ return _MatchesReValidator(pattern, match_func)
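+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources;
+# `_demo_matches_re` is hypothetical and never called.
+def _demo_matches_re():
+    @attrs
+    class User:
+        email = attrib(validator=matches_re(r"[^@]+@[^@]+"))
+
+    User("hynek@example.org")
+    try:
+        User("no-at-sign")  # re.fullmatch (the default) fails -> ValueError
+    except ValueError:
+        pass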
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator:
+ interface = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.interface.providedBy(value):
+ raise TypeError(
+ "'{name}' must provide {interface!r} which {value!r} "
+ "doesn't.".format(
+ name=attr.name, interface=self.interface, value=value
+ ),
+ attr,
+ self.interface,
+ value,
+ )
+
+ def __repr__(self):
+ return "<provides validator for interface {interface!r}>".format(
+ interface=self.interface
+ )
+
+
+def provides(interface):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+    with an object that does not provide the requested *interface* (checks
+    are performed using ``interface.providedBy(value)``; see `zope.interface
+    <https://zopeinterface.readthedocs.io/en/latest/>`_).
+
+ :param interface: The interface to check for.
+ :type interface: ``zope.interface.Interface``
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected interface, and the
+ value it got.
+ """
+ return _ProvidesValidator(interface)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator:
+ validator = attrib()
+
+ def __call__(self, inst, attr, value):
+ if value is None:
+ return
+
+ self.validator(inst, attr, value)
+
+ def __repr__(self):
+ return "<optional validator for {what} or None>".format(
+ what=repr(self.validator)
+ )
+
+
+def optional(validator):
+ """
+ A validator that makes an attribute optional. An optional attribute is one
+ which can be set to ``None`` in addition to satisfying the requirements of
+ the sub-validator.
+
+ :param validator: A validator (or a list of validators) that is used for
+ non-``None`` values.
+ :type validator: callable or `list` of callables.
+
+ .. versionadded:: 15.1.0
+ .. versionchanged:: 17.1.0 *validator* can be a list of validators.
+ """
+ if isinstance(validator, list):
+ return _OptionalValidator(_AndValidator(validator))
+ return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator:
+ options = attrib()
+
+ def __call__(self, inst, attr, value):
+ try:
+ in_options = value in self.options
+ except TypeError: # e.g. `1 in "abc"`
+ in_options = False
+
+ if not in_options:
+ raise ValueError(
+ "'{name}' must be in {options!r} (got {value!r})".format(
+ name=attr.name, options=self.options, value=value
+ ),
+ attr,
+ self.options,
+ value,
+ )
+
+ def __repr__(self):
+ return "<in_ validator with options {options!r}>".format(
+ options=self.options
+ )
+
+
+def in_(options):
+ """
+ A validator that raises a `ValueError` if the initializer is called
+ with a value that does not belong in the options provided. The check is
+ performed using ``value in options``.
+
+ :param options: Allowed options.
+ :type options: list, tuple, `enum.Enum`, ...
+
+ :raises ValueError: With a human readable error message, the attribute (of
+ type `attrs.Attribute`), the expected options, and the value it
+ got.
+
+ .. versionadded:: 17.1.0
+ .. versionchanged:: 22.1.0
+ The ValueError was incomplete until now and only contained the human
+ readable error message. Now it contains all the information that has
+ been promised since 17.1.0.
+ """
+ return _InValidator(options)
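+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources;
+# `_demo_in_` is hypothetical and never called.
+def _demo_in_():
+    @attrs
+    class Ticket:
+        state = attrib(validator=in_(("new", "open", "closed")))
+
+    Ticket("open")
+    try:
+        Ticket("unknown")  # not among the options -> ValueError
+    except ValueError:
+        pass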
+
+
+@attrs(repr=False, slots=False, hash=True)
+class _IsCallableValidator:
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not callable(value):
+ message = (
+ "'{name}' must be callable "
+ "(got {value!r} that is a {actual!r})."
+ )
+ raise NotCallableError(
+ msg=message.format(
+ name=attr.name, value=value, actual=value.__class__
+ ),
+ value=value,
+ )
+
+ def __repr__(self):
+ return "<is_callable validator>"
+
+
+def is_callable():
+ """
+    A validator that raises an `attr.exceptions.NotCallableError` if the
+ initializer is called with a value for this particular attribute
+ that is not callable.
+
+ .. versionadded:: 19.1.0
+
+ :raises `attr.exceptions.NotCallableError`: With a human readable error
+ message containing the attribute (`attrs.Attribute`) name,
+ and the value it got.
+ """
+ return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepIterable:
+ member_validator = attrib(validator=is_callable())
+ iterable_validator = attrib(
+ default=None, validator=optional(is_callable())
+ )
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.iterable_validator is not None:
+ self.iterable_validator(inst, attr, value)
+
+ for member in value:
+ self.member_validator(inst, attr, member)
+
+ def __repr__(self):
+ iterable_identifier = (
+ ""
+ if self.iterable_validator is None
+ else " {iterable!r}".format(iterable=self.iterable_validator)
+ )
+ return (
+ "<deep_iterable validator for{iterable_identifier}"
+ " iterables of {member!r}>"
+ ).format(
+ iterable_identifier=iterable_identifier,
+ member=self.member_validator,
+ )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+ """
+ A validator that performs deep validation of an iterable.
+
+ :param member_validator: Validator(s) to apply to iterable members
+ :param iterable_validator: Validator to apply to iterable itself
+ (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
+ """
+ if isinstance(member_validator, (list, tuple)):
+ member_validator = and_(*member_validator)
+ return _DeepIterable(member_validator, iterable_validator)
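+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources;
+# `_demo_deep_iterable` is hypothetical and never called.
+def _demo_deep_iterable():
+    @attrs
+    class C:
+        xs = attrib(
+            validator=deep_iterable(
+                member_validator=instance_of(int),
+                iterable_validator=instance_of(list),
+            )
+        )
+
+    C([1, 2, 3])
+    try:
+        C([1, "2"])  # the second member is not an int -> TypeError
+    except TypeError:
+        pass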
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping:
+ key_validator = attrib(validator=is_callable())
+ value_validator = attrib(validator=is_callable())
+ mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.mapping_validator is not None:
+ self.mapping_validator(inst, attr, value)
+
+ for key in value:
+ self.key_validator(inst, attr, key)
+ self.value_validator(inst, attr, value[key])
+
+ def __repr__(self):
+ return (
+ "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+ ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+ """
+ A validator that performs deep validation of a dictionary.
+
+ :param key_validator: Validator to apply to dictionary keys
+ :param value_validator: Validator to apply to dictionary values
+ :param mapping_validator: Validator to apply to top-level mapping
+ attribute (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
+ """
+ return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+ bound = attrib()
+ compare_op = attrib()
+ compare_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.compare_func(value, self.bound):
+ raise ValueError(
+ "'{name}' must be {op} {bound}: {value}".format(
+ name=attr.name,
+ op=self.compare_op,
+ bound=self.bound,
+ value=value,
+ )
+ )
+
+ def __repr__(self):
+ return "<Validator for x {op} {bound}>".format(
+ op=self.compare_op, bound=self.bound
+ )
+
+
+def lt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+    with a number larger than or equal to *val*.
+
+ :param val: Exclusive upper bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number greater than *val*.
+
+ :param val: Inclusive upper bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number smaller than *val*.
+
+ :param val: Inclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+    with a number smaller than or equal to *val*.
+
+ :param val: Exclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">", operator.gt)
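+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources.
+# It combines two bound validators via the list form of *validator*;
+# `_demo_bounds` is hypothetical and never called.
+def _demo_bounds():
+    @attrs
+    class Percentage:
+        value = attrib(validator=[ge(0), le(100)])
+
+    Percentage(50)
+    try:
+        Percentage(101)  # violates le(100) -> ValueError
+    except ValueError:
+        pass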
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+ max_length = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if len(value) > self.max_length:
+ raise ValueError(
+ "Length of '{name}' must be <= {max}: {len}".format(
+ name=attr.name, max=self.max_length, len=len(value)
+ )
+ )
+
+ def __repr__(self):
+ return "<max_len validator for {max}>".format(max=self.max_length)
+
+
+def max_len(length):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a string or iterable that is longer than *length*.
+
+ :param int length: Maximum length of the string or iterable
+
+ .. versionadded:: 21.3.0
+ """
+ return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+ min_length = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if len(value) < self.min_length:
+ raise ValueError(
+                "Length of '{name}' must be >= {min}: {len}".format(
+ name=attr.name, min=self.min_length, len=len(value)
+ )
+ )
+
+ def __repr__(self):
+ return "<min_len validator for {min}>".format(min=self.min_length)
+
+
+def min_len(length):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a string or iterable that is shorter than *length*.
+
+ :param int length: Minimum length of the string or iterable
+
+ .. versionadded:: 22.1.0
+ """
+ return _MinLengthValidator(length)
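+
+
+# Editor's note: an illustrative sketch, not part of the upstream sources;
+# `_demo_lengths` is hypothetical and never called.
+def _demo_lengths():
+    @attrs
+    class Handle:
+        name = attrib(validator=[min_len(1), max_len(16)])
+
+    Handle("spack")
+    try:
+        Handle("")  # shorter than min_len(1) -> ValueError
+    except ValueError:
+        pass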
diff --git a/lib/spack/external/_vendoring/attr/validators.pyi b/lib/spack/external/_vendoring/attr/validators.pyi
new file mode 100644
index 0000000000..54b9dba24e
--- /dev/null
+++ b/lib/spack/external/_vendoring/attr/validators.pyi
@@ -0,0 +1,80 @@
+from typing import (
+ Any,
+ AnyStr,
+ Callable,
+ Container,
+ ContextManager,
+ Iterable,
+ List,
+ Mapping,
+ Match,
+ Optional,
+ Pattern,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+from . import _ValidatorType
+from . import _ValidatorArgType
+
+_T = TypeVar("_T")
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_I = TypeVar("_I", bound=Iterable)
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+_M = TypeVar("_M", bound=Mapping)
+
+def set_disabled(run: bool) -> None: ...
+def get_disabled() -> bool: ...
+def disabled() -> ContextManager[None]: ...
+
+# To be more precise on instance_of use some overloads.
+# If there are more than 3 items in the tuple then we fall back to Any
+@overload
+def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(
+ type: Tuple[Type[_T1], Type[_T2]]
+) -> _ValidatorType[Union[_T1, _T2]]: ...
+@overload
+def instance_of(
+ type: Tuple[Type[_T1], Type[_T2], Type[_T3]]
+) -> _ValidatorType[Union[_T1, _T2, _T3]]: ...
+@overload
+def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ...
+def provides(interface: Any) -> _ValidatorType[Any]: ...
+def optional(
+ validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]]
+) -> _ValidatorType[Optional[_T]]: ...
+def in_(options: Container[_T]) -> _ValidatorType[_T]: ...
+def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
+def matches_re(
+ regex: Union[Pattern[AnyStr], AnyStr],
+ flags: int = ...,
+ func: Optional[
+ Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]]
+ ] = ...,
+) -> _ValidatorType[AnyStr]: ...
+def deep_iterable(
+ member_validator: _ValidatorArgType[_T],
+ iterable_validator: Optional[_ValidatorType[_I]] = ...,
+) -> _ValidatorType[_I]: ...
+def deep_mapping(
+ key_validator: _ValidatorType[_K],
+ value_validator: _ValidatorType[_V],
+ mapping_validator: Optional[_ValidatorType[_M]] = ...,
+) -> _ValidatorType[_M]: ...
+def is_callable() -> _ValidatorType[_T]: ...
+def lt(val: _T) -> _ValidatorType[_T]: ...
+def le(val: _T) -> _ValidatorType[_T]: ...
+def ge(val: _T) -> _ValidatorType[_T]: ...
+def gt(val: _T) -> _ValidatorType[_T]: ...
+def max_len(length: int) -> _ValidatorType[_T]: ...
+def min_len(length: int) -> _ValidatorType[_T]: ...
diff --git a/lib/spack/external/_vendoring/attrs/LICENSE b/lib/spack/external/_vendoring/attrs/LICENSE
new file mode 100644
index 0000000000..2bd6453d25
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Hynek Schlawack and the attrs contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/spack/external/_vendoring/attrs/__init__.py b/lib/spack/external/_vendoring/attrs/__init__.py
new file mode 100644
index 0000000000..a704b8b56b
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/__init__.py
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: MIT
+
+from attr import (
+ NOTHING,
+ Attribute,
+ Factory,
+ __author__,
+ __copyright__,
+ __description__,
+ __doc__,
+ __email__,
+ __license__,
+ __title__,
+ __url__,
+ __version__,
+ __version_info__,
+ assoc,
+ cmp_using,
+ define,
+ evolve,
+ field,
+ fields,
+ fields_dict,
+ frozen,
+ has,
+ make_class,
+ mutable,
+ resolve_types,
+ validate,
+)
+from attr._next_gen import asdict, astuple
+
+from . import converters, exceptions, filters, setters, validators
+
+
+__all__ = [
+ "__author__",
+ "__copyright__",
+ "__description__",
+ "__doc__",
+ "__email__",
+ "__license__",
+ "__title__",
+ "__url__",
+ "__version__",
+ "__version_info__",
+ "asdict",
+ "assoc",
+ "astuple",
+ "Attribute",
+ "cmp_using",
+ "converters",
+ "define",
+ "evolve",
+ "exceptions",
+ "Factory",
+ "field",
+ "fields_dict",
+ "fields",
+ "filters",
+ "frozen",
+ "has",
+ "make_class",
+ "mutable",
+ "NOTHING",
+ "resolve_types",
+ "setters",
+ "validate",
+ "validators",
+]
diff --git a/lib/spack/external/_vendoring/attrs/__init__.pyi b/lib/spack/external/_vendoring/attrs/__init__.pyi
new file mode 100644
index 0000000000..fc44de46a0
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/__init__.pyi
@@ -0,0 +1,66 @@
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+)
+
+# Because we need to type our own stuff, we have to make everything from
+# attr explicitly public too.
+from attr import __author__ as __author__
+from attr import __copyright__ as __copyright__
+from attr import __description__ as __description__
+from attr import __email__ as __email__
+from attr import __license__ as __license__
+from attr import __title__ as __title__
+from attr import __url__ as __url__
+from attr import __version__ as __version__
+from attr import __version_info__ as __version_info__
+from attr import _FilterType
+from attr import assoc as assoc
+from attr import Attribute as Attribute
+from attr import cmp_using as cmp_using
+from attr import converters as converters
+from attr import define as define
+from attr import evolve as evolve
+from attr import exceptions as exceptions
+from attr import Factory as Factory
+from attr import field as field
+from attr import fields as fields
+from attr import fields_dict as fields_dict
+from attr import filters as filters
+from attr import frozen as frozen
+from attr import has as has
+from attr import make_class as make_class
+from attr import mutable as mutable
+from attr import NOTHING as NOTHING
+from attr import resolve_types as resolve_types
+from attr import setters as setters
+from attr import validate as validate
+from attr import validators as validators
+
+# TODO: see definition of attr.asdict/astuple
+def asdict(
+ inst: Any,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ dict_factory: Type[Mapping[Any, Any]] = ...,
+ retain_collection_types: bool = ...,
+ value_serializer: Optional[
+ Callable[[type, Attribute[Any], Any], Any]
+ ] = ...,
+ tuple_keys: bool = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+ inst: Any,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ tuple_factory: Type[Sequence[Any]] = ...,
+ retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
diff --git a/lib/spack/external/_vendoring/attrs/converters.py b/lib/spack/external/_vendoring/attrs/converters.py
new file mode 100644
index 0000000000..edfa8d3c16
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/converters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.converters import * # noqa
diff --git a/lib/spack/external/_vendoring/attrs/exceptions.py b/lib/spack/external/_vendoring/attrs/exceptions.py
new file mode 100644
index 0000000000..bd9efed202
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/exceptions.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.exceptions import * # noqa
diff --git a/lib/spack/external/_vendoring/attrs/filters.py b/lib/spack/external/_vendoring/attrs/filters.py
new file mode 100644
index 0000000000..52959005b0
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/filters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.filters import * # noqa
diff --git a/lib/spack/external/_vendoring/attrs/py.typed b/lib/spack/external/_vendoring/attrs/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/py.typed
diff --git a/lib/spack/external/_vendoring/attrs/setters.py b/lib/spack/external/_vendoring/attrs/setters.py
new file mode 100644
index 0000000000..9b50770804
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/setters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.setters import * # noqa
diff --git a/lib/spack/external/_vendoring/attrs/validators.py b/lib/spack/external/_vendoring/attrs/validators.py
new file mode 100644
index 0000000000..ab2c9b3024
--- /dev/null
+++ b/lib/spack/external/_vendoring/attrs/validators.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.validators import * # noqa
diff --git a/lib/spack/external/_vendoring/distro/LICENSE b/lib/spack/external/_vendoring/distro/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/lib/spack/external/_vendoring/distro/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/lib/spack/external/_vendoring/distro/__init__.py b/lib/spack/external/_vendoring/distro/__init__.py
new file mode 100644
index 0000000000..7686fe85a7
--- /dev/null
+++ b/lib/spack/external/_vendoring/distro/__init__.py
@@ -0,0 +1,54 @@
+from .distro import (
+ NORMALIZED_DISTRO_ID,
+ NORMALIZED_LSB_ID,
+ NORMALIZED_OS_ID,
+ LinuxDistribution,
+ __version__,
+ build_number,
+ codename,
+ distro_release_attr,
+ distro_release_info,
+ id,
+ info,
+ like,
+ linux_distribution,
+ lsb_release_attr,
+ lsb_release_info,
+ major_version,
+ minor_version,
+ name,
+ os_release_attr,
+ os_release_info,
+ uname_attr,
+ uname_info,
+ version,
+ version_parts,
+)
+
+__all__ = [
+ "NORMALIZED_DISTRO_ID",
+ "NORMALIZED_LSB_ID",
+ "NORMALIZED_OS_ID",
+ "LinuxDistribution",
+ "build_number",
+ "codename",
+ "distro_release_attr",
+ "distro_release_info",
+ "id",
+ "info",
+ "like",
+ "linux_distribution",
+ "lsb_release_attr",
+ "lsb_release_info",
+ "major_version",
+ "minor_version",
+ "name",
+ "os_release_attr",
+ "os_release_info",
+ "uname_attr",
+ "uname_info",
+ "version",
+ "version_parts",
+]
+
+__version__ = __version__  # re-export the version imported from .distro
diff --git a/lib/spack/external/_vendoring/distro/__main__.py b/lib/spack/external/_vendoring/distro/__main__.py
new file mode 100644
index 0000000000..0c01d5b08b
--- /dev/null
+++ b/lib/spack/external/_vendoring/distro/__main__.py
@@ -0,0 +1,4 @@
+from .distro import main
+
+if __name__ == "__main__":
+ main()
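+
+# Illustrative invocation when run as a module (flags are defined in
+# distro.main()):
+#     python -m distro --json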
diff --git a/lib/spack/external/_vendoring/distro/distro.py b/lib/spack/external/_vendoring/distro/distro.py
new file mode 100644
index 0000000000..89e1868047
--- /dev/null
+++ b/lib/spack/external/_vendoring/distro/distro.py
@@ -0,0 +1,1399 @@
+#!/usr/bin/env python
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 removed it altogether. Its
+predecessor function :py:func:`platform.dist` had already been deprecated
+since Python 2.6 and was likewise removed in Python 3.8.
+access to OS distribution information is needed. See `Python issue 1322
+<https://bugs.python.org/issue1322>`_ for more information.
+"""
+
+import argparse
+import json
+import logging
+import os
+import re
+import shlex
+import subprocess
+import sys
+import warnings
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+)
+
+try:
+ from typing import TypedDict
+except ImportError:
+ # Python 3.7
+ TypedDict = dict
+
+__version__ = "1.8.0"
+
+
+class VersionDict(TypedDict):
+ major: str
+ minor: str
+ build_number: str
+
+
+class InfoDict(TypedDict):
+ id: str
+ version: str
+ version_parts: VersionDict
+ like: str
+ codename: str
+
+
+_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
+_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
+_OS_RELEASE_BASENAME = "os-release"
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#: with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+ "ol": "oracle", # Oracle Linux
+ "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#: case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+ "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4
+ "enterpriseenterpriseserver": "oracle", # Oracle Linux 5
+ "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation
+ "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server
+ "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#: translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+ "redhat": "rhel", # RHEL 6.x, 7.x
+}
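+
+# For example (illustrative): a release file named "redhat-release"
+# yields the derived ID "redhat", which the table above normalizes to
+# "rhel".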
+
+# Pattern for content of distro release file (reversed)
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+ r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
+)
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
+
+# Base file names to be looked up if _UNIXCONFDIR is not readable.
+_DISTRO_RELEASE_BASENAMES = [
+ "SuSE-release",
+ "arch-release",
+ "base-release",
+ "centos-release",
+ "fedora-release",
+ "gentoo-release",
+ "mageia-release",
+ "mandrake-release",
+ "mandriva-release",
+ "mandrivalinux-release",
+ "manjaro-release",
+ "oracle-release",
+ "redhat-release",
+ "rocky-release",
+ "sl-release",
+ "slackware-version",
+]
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+ "debian_version",
+ "lsb-release",
+ "oem-release",
+ _OS_RELEASE_BASENAME,
+ "system-release",
+ "plesk-release",
+ "iredmail-release",
+)
+
+
+def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:
+ """
+ .. deprecated:: 1.6.0
+
+ :func:`distro.linux_distribution()` is deprecated. It should only be
+ used as a compatibility shim with Python's
+ :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
+ :func:`distro.version` and :func:`distro.name` instead.
+
+ Return information about the current OS distribution as a tuple
+ ``(id_name, version, codename)`` with items as follows:
+
+ * ``id_name``: If *full_distribution_name* is false, the result of
+ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``codename``: The extra item (usually in parentheses) after the
+ os-release version number, or the result of :func:`distro.codename`.
+
+ The interface of this function is compatible with the original
+ :py:func:`platform.linux_distribution` function, supporting a subset of
+ its parameters.
+
+    The data it returns may not be exactly the same, because it uses more data
+    sources than the original function, and that may lead to different data if
+    the OS distribution is not consistent across the multiple data sources it
+    provides (there are indeed such distributions ...).
+
+ Another reason for differences is the fact that the :func:`distro.id`
+ method normalizes the distro ID string to a reliable machine-readable value
+ for a number of popular OS distributions.
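+
+    An illustrative sketch only (actual values depend on the host); a
+    CentOS 7 machine might report:
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.linux_distribution(full_distribution_name=False)
+        ('centos', '7.1.1503', 'Core')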
+ """
+ warnings.warn(
+ "distro.linux_distribution() is deprecated. It should only be used as a "
+ "compatibility shim with Python's platform.linux_distribution(). Please use "
+ "distro.id(), distro.version() and distro.name() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _distro.linux_distribution(full_distribution_name)
+
+
+def id() -> str:
+ """
+ Return the distro ID of the current distribution, as a
+ machine-readable string.
+
+ For a number of OS distributions, the returned distro ID value is
+ *reliable*, in the sense that it is documented and that it does not change
+ across releases of the distribution.
+
+ This package maintains the following reliable distro ID values:
+
+ ============== =========================================
+ Distro ID Distribution
+ ============== =========================================
+ "ubuntu" Ubuntu
+ "debian" Debian
+ "rhel" RedHat Enterprise Linux
+ "centos" CentOS
+ "fedora" Fedora
+ "sles" SUSE Linux Enterprise Server
+ "opensuse" openSUSE
+ "amzn" Amazon Linux
+ "arch" Arch Linux
+ "buildroot" Buildroot
+ "cloudlinux" CloudLinux OS
+ "exherbo" Exherbo Linux
+ "gentoo" GenToo Linux
+ "ibm_powerkvm" IBM PowerKVM
+ "kvmibm" KVM for IBM z Systems
+ "linuxmint" Linux Mint
+ "mageia" Mageia
+ "mandriva" Mandriva Linux
+ "parallels" Parallels
+ "pidora" Pidora
+ "raspbian" Raspbian
+ "oracle" Oracle Linux (and Oracle Enterprise Linux)
+ "scientific" Scientific Linux
+ "slackware" Slackware
+ "xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ "midnightbsd" MidnightBSD
+ "rocky" Rocky Linux
+ "aix" AIX
+ "guix" Guix System
+ ============== =========================================
+
+    If you need a distro with a reliable ID added to this set,
+ or if you find that the :func:`distro.id` function returns a different
+ distro ID for one of the listed distros, please create an issue in the
+ `distro issue tracker`_.
+
+ **Lookup hierarchy and transformations:**
+
+ First, the ID is obtained from the following sources, in the specified
+ order. The first available and non-empty value is used:
+
+ * the value of the "ID" attribute of the os-release file,
+
+ * the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ * the first part of the file name of the distro release file,
+
+    The ID value determined this way then passes through the following
+    transformations before it is returned by this method:
+
+ * it is translated to lower case,
+
+ * blanks (which should not be there anyway) are translated to underscores,
+
+ * a normalization of the ID is performed, based upon
+ `normalization tables`_. The purpose of this normalization is to ensure
+ that the ID is as reliable as possible, even across incompatible changes
+ in the OS distributions. A common reason for an incompatible change is
+ the addition of an os-release file, or the addition of the lsb_release
+ command, with ID values that differ from what was previously determined
+ from the distro release file name.
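+
+    As a hedged example, assuming an os-release file containing
+    ``ID=Ubuntu``, the lookup and transformations above would yield:
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.id()  # lower-cased, blanks replaced, then normalized
+        'ubuntu'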
+ """
+ return _distro.id()
+
+
+def name(pretty: bool = False) -> str:
+ """
+ Return the name of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the name is returned without version or codename.
+ (e.g. "CentOS Linux")
+
+ If *pretty* is true, the version and codename are appended.
+ (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+ **Lookup hierarchy:**
+
+ The name is obtained from the following sources, in the specified order.
+ The first available and non-empty value is used:
+
+ * If *pretty* is false:
+
+ - the value of the "NAME" attribute of the os-release file,
+
+ - the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file.
+
+ * If *pretty* is true:
+
+ - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+ - the value of the "Description" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file, appended
+ with the value of the pretty version ("<version_id>" and "<codename>"
+ fields) of the distro release file, if available.
+ """
+ return _distro.name(pretty)
+
+
+def version(pretty: bool = False, best: bool = False) -> str:
+ """
+ Return the version of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the version is returned without codename (e.g.
+ "7.0").
+
+ If *pretty* is true, the codename in parenthesis is appended, if the
+ codename is non-empty (e.g. "7.0 (Maipo)").
+
+ Some distributions provide version numbers with different precisions in
+ the different sources of distribution information. Examining the different
+ sources in a fixed priority order does not always yield the most precise
+ version (e.g. for Debian 8.2, or CentOS 7.1).
+
+    Some other distributions may not provide this kind of information. In
+    these cases, an empty string is returned. This behavior can be observed
+    with rolling release distributions (e.g. Arch Linux).
+
+ The *best* parameter can be used to control the approach for the returned
+ version:
+
+ If *best* is false, the first non-empty version number in priority order of
+ the examined sources is returned.
+
+ If *best* is true, the most precise version number out of all examined
+ sources is returned.
+
+ **Lookup hierarchy:**
+
+ In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order:
+
+ * the value of the "VERSION_ID" attribute of the os-release file,
+ * the value of the "Release" attribute returned by the lsb_release
+ command,
+ * the version number parsed from the "<version_id>" field of the first line
+ of the distro release file,
+ * the version number parsed from the "PRETTY_NAME" attribute of the
+ os-release file, if it follows the format of the distro release files.
+ * the version number parsed from the "Description" attribute returned by
+ the lsb_release command, if it follows the format of the distro release
+ files.
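+
+    For illustration (values are host-dependent), a RHEL 7 host might
+    report:
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.version()
+        '7.0'
+        >>> distro.version(pretty=True)
+        '7.0 (Maipo)'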
+ """
+ return _distro.version(pretty, best)
+
+
+def version_parts(best: bool = False) -> Tuple[str, str, str]:
+ """
+ Return the version of the current OS distribution as a tuple
+ ``(major, minor, build_number)`` with items as follows:
+
+ * ``major``: The result of :func:`distro.major_version`.
+
+ * ``minor``: The result of :func:`distro.minor_version`.
+
+ * ``build_number``: The result of :func:`distro.build_number`.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
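+
+    A sketch of a possible result (host-dependent), e.g. for a version
+    string "7.1.1503":
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.version_parts()
+        ('7', '1', '1503')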
+ """
+ return _distro.version_parts(best)
+
+
+def major_version(best: bool = False) -> str:
+ """
+ Return the major version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The major version is the first
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.major_version(best)
+
+
+def minor_version(best: bool = False) -> str:
+ """
+ Return the minor version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The minor version is the second
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.minor_version(best)
+
+
+def build_number(best: bool = False) -> str:
+ """
+ Return the build number of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The build number is the third part
+ of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.build_number(best)
+
+
+def like() -> str:
+ """
+ Return a space-separated list of distro IDs of distributions that are
+    closely related to the current OS distribution with regard to packaging
+    and programming interfaces, for example, distributions of which the
+    current distribution is a derivative.
+
+ **Lookup hierarchy:**
+
+ This information item is only provided by the os-release file.
+ For details, see the description of the "ID_LIKE" attribute in the
+ `os-release man page
+ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+ """
+ return _distro.like()
+
+
+def codename() -> str:
+ """
+ Return the codename for the release of the current OS distribution,
+ as a string.
+
+ If the distribution does not have a codename, an empty string is returned.
+
+ Note that the returned codename is not always really a codename. For
+ example, openSUSE returns "x86_64". This function does not handle such
+ cases in any special way and just returns the string it finds, if any.
+
+ **Lookup hierarchy:**
+
+ * the codename within the "VERSION" attribute of the os-release file, if
+ provided,
+
+ * the value of the "Codename" attribute returned by the lsb_release
+ command,
+
+ * the value of the "<codename>" field of the distro release file.
+ """
+ return _distro.codename()
+
+
+def info(pretty: bool = False, best: bool = False) -> InfoDict:
+ """
+ Return certain machine-readable information items about the current OS
+ distribution in a dictionary, as shown in the following example:
+
+ .. sourcecode:: python
+
+ {
+ 'id': 'rhel',
+ 'version': '7.0',
+ 'version_parts': {
+ 'major': '7',
+ 'minor': '0',
+ 'build_number': ''
+ },
+ 'like': 'fedora',
+ 'codename': 'Maipo'
+ }
+
+ The dictionary structure and keys are always the same, regardless of which
+ information items are available in the underlying data sources. The values
+ for the various keys are as follows:
+
+ * ``id``: The result of :func:`distro.id`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+ * ``version_parts -> build_number``: The result of
+ :func:`distro.build_number`.
+
+ * ``like``: The result of :func:`distro.like`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ For a description of the *pretty* and *best* parameters, see the
+ :func:`distro.version` method.
+ """
+ return _distro.info(pretty, best)
+
+
+def os_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the os-release file data source of the current OS distribution.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_info()
+
+
+def lsb_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the lsb_release command data source of the current OS distribution.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_info()
+
+
+def distro_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_info()
+
+
+def uname_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+    from the uname command data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
+def os_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the os-release file data source
+ of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the lsb_release command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute: str) -> str:
+ """
+    Return a single named information item from the uname command output
+    data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+try:
+ from functools import cached_property
+except ImportError:
+ # Python < 3.8
+ class cached_property: # type: ignore
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+
+ def __init__(self, f: Callable[[Any], Any]) -> None:
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj: Any, owner: Type[Any]) -> Any:
+ assert obj is not None, f"call {self._fname} on an instance"
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
+
+
+class LinuxDistribution:
+ """
+    Provides information about an OS distribution.
+
+ This package creates a private module-global instance of this class with
+ default initialization arguments, that is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current OS distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
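+
+    A minimal sketch of such a use (the path is illustrative only):
+
+    .. sourcecode:: python
+
+        ld = LinuxDistribution(include_lsb=False, os_release_file="/path/to/os-release")
+        info = ld.info()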
+ """
+
+ def __init__(
+ self,
+ include_lsb: Optional[bool] = None,
+ os_release_file: str = "",
+ distro_release_file: str = "",
+ include_uname: Optional[bool] = None,
+ root_dir: Optional[str] = None,
+ include_oslevel: Optional[bool] = None,
+ ) -> None:
+ """
+ The initialization method of this class gathers information from the
+ available data sources, and stores that in private instance attributes.
+ Subsequent access to the information items uses these private instance
+ attributes, so that the data sources are read only once.
+
+ Parameters:
+
+ * ``include_lsb`` (bool): Controls whether the
+ `lsb_release command output`_ is included as a data source.
+
+ If the lsb_release command is not available in the program execution
+ path, the data source for the lsb_release command will be empty.
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause the default path name to
+ be used (see `os-release file`_ for details).
+
+ If the specified or defaulted os-release file does not exist, the
+ data source for the os-release file will be empty.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause a default search algorithm
+ to be used (see `distro release file`_ for details).
+
+ If the specified distro release file does not exist, or if no default
+ distro release file can be found, the data source for the distro
+ release file will be empty.
+
+ * ``include_uname`` (bool): Controls whether uname command output is
+ included as a data source. If the uname command is not available in
+ the program execution path the data source for the uname command will
+ be empty.
+
+ * ``root_dir`` (string): The absolute path to the root directory to use
+ to find distro-related information files. Note that ``include_*``
+ parameters must not be enabled in combination with ``root_dir``.
+
+ * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command
+ output is included as a data source. If the oslevel command is not
+ available in the program execution path the data source will be
+ empty.
+
+ Public instance attributes:
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
+ * ``include_oslevel`` (bool): The result of the ``include_oslevel``
+ parameter. This controls whether (AIX) oslevel information will be
+ loaded.
+
+ * ``root_dir`` (string): The result of the ``root_dir`` parameter.
+ The absolute path to the root directory to use to find distro-related
+ information files.
+
+ Raises:
+
+ * :py:exc:`ValueError`: Initialization parameters combination is not
+ supported.
+
+ * :py:exc:`OSError`: Some I/O issue with an os-release file or distro
+ release file.
+
+ * :py:exc:`UnicodeError`: A data source has unexpected characters or
+ uses an unexpected encoding.
+ """
+ self.root_dir = root_dir
+ self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
+ self.usr_lib_dir = (
+ os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
+ )
+
+ if os_release_file:
+ self.os_release_file = os_release_file
+ else:
+ etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
+ usr_lib_os_release_file = os.path.join(
+ self.usr_lib_dir, _OS_RELEASE_BASENAME
+ )
+
+ # NOTE: The idea is to respect order **and** have it set
+ # at all times for API backwards compatibility.
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
+ usr_lib_os_release_file
+ ):
+ self.os_release_file = etc_dir_os_release_file
+ else:
+ self.os_release_file = usr_lib_os_release_file
+
+ self.distro_release_file = distro_release_file or "" # updated later
+
+ is_root_dir_defined = root_dir is not None
+ if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):
+ raise ValueError(
+ "Including subprocess data sources from specific root_dir is disallowed"
+ " to prevent false information"
+ )
+ self.include_lsb = (
+ include_lsb if include_lsb is not None else not is_root_dir_defined
+ )
+ self.include_uname = (
+ include_uname if include_uname is not None else not is_root_dir_defined
+ )
+ self.include_oslevel = (
+ include_oslevel if include_oslevel is not None else not is_root_dir_defined
+ )
+
+ def __repr__(self) -> str:
+ """Return repr of all info"""
+ return (
+ "LinuxDistribution("
+ "os_release_file={self.os_release_file!r}, "
+ "distro_release_file={self.distro_release_file!r}, "
+ "include_lsb={self.include_lsb!r}, "
+ "include_uname={self.include_uname!r}, "
+ "include_oslevel={self.include_oslevel!r}, "
+ "root_dir={self.root_dir!r}, "
+ "_os_release_info={self._os_release_info!r}, "
+ "_lsb_release_info={self._lsb_release_info!r}, "
+ "_distro_release_info={self._distro_release_info!r}, "
+ "_uname_info={self._uname_info!r}, "
+ "_oslevel_info={self._oslevel_info!r})".format(self=self)
+ )
+
+ def linux_distribution(
+ self, full_distribution_name: bool = True
+ ) -> Tuple[str, str, str]:
+ """
+ Return information about the OS distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self._os_release_info.get("release_codename") or self.codename(),
+ )
+
+ def id(self) -> str:
+ """Return the distro ID of the OS distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+
+ def normalize(distro_id: str, table: Dict[str, str]) -> str:
+ distro_id = distro_id.lower().replace(" ", "_")
+ return table.get(distro_id, distro_id)
+
+ distro_id = self.os_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_OS_ID)
+
+ distro_id = self.lsb_release_attr("distributor_id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_LSB_ID)
+
+ distro_id = self.distro_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ distro_id = self.uname_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ""
+
+ def name(self, pretty: bool = False) -> str:
+ """
+ Return the name of the OS distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = (
+ self.os_release_attr("name")
+ or self.lsb_release_attr("distributor_id")
+ or self.distro_release_attr("name")
+ or self.uname_attr("name")
+ )
+ if pretty:
+ name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
+ "description"
+ )
+ if not name:
+ name = self.distro_release_attr("name") or self.uname_attr("name")
+ version = self.version(pretty=True)
+ if version:
+ name = f"{name} {version}"
+ return name or ""
+
+ def version(self, pretty: bool = False, best: bool = False) -> str:
+ """
+ Return the version of the OS distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr("version_id"),
+ self.lsb_release_attr("release"),
+ self.distro_release_attr("version_id"),
+ self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
+ "version_id", ""
+ ),
+ self._parse_distro_release_content(
+ self.lsb_release_attr("description")
+ ).get("version_id", ""),
+ self.uname_attr("release"),
+ ]
+ if self.uname_attr("id").startswith("aix"):
+ # On AIX platforms, prefer oslevel command output.
+ versions.insert(0, self.oslevel_info())
+ elif self.id() == "debian" or "debian" in self.like().split():
+ # On Debian-like, add debian_version file content to candidates list.
+ versions.append(self._debian_version)
+ version = ""
+ if best:
+            # This algorithm keeps the first version in priority order that
+            # reaches the best precision (the most dot-separated parts). If
+            # the versions are not in conflict, that does not matter;
+            # otherwise, not simply using the first non-empty one might be
+            # considered a surprise.
+ for v in versions:
+ if v.count(".") > version.count(".") or version == "":
+ version = v
+ else:
+ for v in versions:
+ if v != "":
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = f"{version} ({self.codename()})"
+ return version
+
+ def version_parts(self, best: bool = False) -> Tuple[str, str, str]:
+ """
+ Return the version of the OS distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
+ matches = version_regex.match(version_str)
+ if matches:
+ major, minor, build_number = matches.groups()
+ return major, minor or "", build_number or ""
+ return "", "", ""
+
+ def major_version(self, best: bool = False) -> str:
+ """
+ Return the major version number of the current distribution.
+
+ For details, see :func:`distro.major_version`.
+ """
+ return self.version_parts(best)[0]
+
+ def minor_version(self, best: bool = False) -> str:
+ """
+ Return the minor version number of the current distribution.
+
+ For details, see :func:`distro.minor_version`.
+ """
+ return self.version_parts(best)[1]
+
+ def build_number(self, best: bool = False) -> str:
+ """
+ Return the build number of the current distribution.
+
+ For details, see :func:`distro.build_number`.
+ """
+ return self.version_parts(best)[2]
+
+ def like(self) -> str:
+ """
+ Return the IDs of distributions that are like the OS distribution.
+
+ For details, see :func:`distro.like`.
+ """
+ return self.os_release_attr("id_like") or ""
+
+ def codename(self) -> str:
+ """
+ Return the codename of the OS distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info["codename"]
+ except KeyError:
+ return (
+ self.lsb_release_attr("codename")
+ or self.distro_release_attr("codename")
+ or ""
+ )
+
+ def info(self, pretty: bool = False, best: bool = False) -> InfoDict:
+ """
+ Return certain machine-readable information about the OS
+ distribution.
+
+ For details, see :func:`distro.info`.
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best),
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
+ def os_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the os-release file data source of the OS distribution.
+
+ For details, see :func:`distro.os_release_info`.
+ """
+ return self._os_release_info
+
+ def lsb_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the lsb_release command data source of the OS
+ distribution.
+
+ For details, see :func:`distro.lsb_release_info`.
+ """
+ return self._lsb_release_info
+
+ def distro_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the distro release file data source of the OS
+ distribution.
+
+ For details, see :func:`distro.distro_release_info`.
+ """
+ return self._distro_release_info
+
+ def uname_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
+ def oslevel_info(self) -> str:
+ """
+        Return the AIX oslevel command output.
+ """
+ return self._oslevel_info
+
+ def os_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the os-release file data
+ source of the OS distribution.
+
+ For details, see :func:`distro.os_release_attr`.
+ """
+ return self._os_release_info.get(attribute, "")
+
+ def lsb_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the lsb_release command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.lsb_release_attr`.
+ """
+ return self._lsb_release_info.get(attribute, "")
+
+ def distro_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the distro release file
+ data source of the OS distribution.
+
+ For details, see :func:`distro.distro_release_attr`.
+ """
+ return self._distro_release_info.get(attribute, "")
+
+ def uname_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_attr`.
+ """
+ return self._uname_info.get(attribute, "")
+
+ @cached_property
+ def _os_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the specified os-release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if os.path.isfile(self.os_release_file):
+ with open(self.os_release_file, encoding="utf-8") as release_file:
+ return self._parse_os_release_content(release_file)
+ return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
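+
+        A doctest-style sketch with a made-up one-line input:
+
+        .. sourcecode:: python
+
+            >>> import io
+            >>> LinuxDistribution._parse_os_release_content(io.StringIO('ID=ubuntu'))
+            {'id': 'ubuntu'}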
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ # Ignore any tokens that are not variable assignments
+ if "=" in token:
+ k, v = token.split("=", 1)
+ props[k.lower()] = v
+
+ if "version" in props:
+ # extract release codename (if any) from version attribute
+ match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
+ if match:
+ release_codename = match.group(1) or match.group(2)
+ props["codename"] = props["release_codename"] = release_codename
+
+ if "version_codename" in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names. They should be setting
+            # version_codename=""
+ props["codename"] = props["version_codename"]
+ elif "ubuntu_codename" in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props["codename"] = props["ubuntu_codename"]
+
+ return props
+
+ @cached_property
+ def _lsb_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the lsb_release command output.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if not self.include_lsb:
+ return {}
+ try:
+ cmd = ("lsb_release", "-a")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ # Command not found or lsb_release returned error
+ except (OSError, subprocess.CalledProcessError):
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_lsb_release_content(content)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
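+
+        An illustrative sketch with made-up lsb_release-style lines:
+
+        .. sourcecode:: python
+
+            >>> LinuxDistribution._parse_lsb_release_content(
+            ...     ['Distributor ID: Ubuntu', 'Release: 20.04'])
+            {'distributor_id': 'Ubuntu', 'release': '20.04'}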
+ """
+ props = {}
+ for line in lines:
+ kv = line.strip("\n").split(":", 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(" ", "_").lower(): v.strip()})
+ return props
+
+ @cached_property
+ def _uname_info(self) -> Dict[str, str]:
+ if not self.include_uname:
+ return {}
+ try:
+ cmd = ("uname", "-rs")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ except OSError:
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_uname_content(content)
+
+ @cached_property
+ def _oslevel_info(self) -> str:
+ if not self.include_oslevel:
+ return ""
+ try:
+ stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)
+ except (OSError, subprocess.CalledProcessError):
+ return ""
+ return self._to_str(stdout).strip()
+
+ @cached_property
+ def _debian_version(self) -> str:
+ try:
+ with open(
+ os.path.join(self.etc_dir, "debian_version"), encoding="ascii"
+ ) as fp:
+ return fp.readline().rstrip()
+ except FileNotFoundError:
+ return ""
+
+ @staticmethod
+ def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:
+ if not lines:
+ return {}
+ props = {}
+ match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == "Linux":
+ return {}
+ props["id"] = name.lower()
+ props["name"] = name
+ props["release"] = version
+ return props
+
+ @staticmethod
+ def _to_str(bytestring: bytes) -> str:
+ encoding = sys.getfilesystemencoding()
+ return bytestring.decode(encoding)
+
+ @cached_property
+ def _distro_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the specified distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if self.distro_release_file:
+ # If it was specified, we use it and parse what we can, even if
+ # its file name or content does not match the expected pattern.
+ distro_info = self._parse_distro_release_file(self.distro_release_file)
+ basename = os.path.basename(self.distro_release_file)
+ # The file name pattern for user-specified distro release files
+ # is somewhat more tolerant (compared to when searching for the
+ # file), because we want to use what was specified as best as
+ # possible.
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ else:
+ try:
+ basenames = [
+ basename
+ for basename in os.listdir(self.etc_dir)
+ if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
+ and os.path.isfile(os.path.join(self.etc_dir, basename))
+ ]
+ # We sort for repeatability in cases where there are multiple
+                # distro-specific files; e.g. CentOS, Oracle, Enterprise all
+ # containing `redhat-release` on top of their own.
+ basenames.sort()
+ except OSError:
+ # This may occur when /etc is not readable but we can't be
+ # sure about the *-release files. Check common entries of
+ # /etc for information. If they turn out to not be there the
+ # error is handled in `_parse_distro_release_file()`.
+ basenames = _DISTRO_RELEASE_BASENAMES
+ for basename in basenames:
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if match is None:
+ continue
+ filepath = os.path.join(self.etc_dir, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ # The name is always present if the pattern matches.
+ if "name" not in distro_info:
+ continue
+ self.distro_release_file = filepath
+ break
+ else: # the loop didn't "break": no candidate.
+ return {}
+
+ if match is not None:
+ distro_info["id"] = match.group(1)
+
+ # CloudLinux < 7: manually enrich info with proper id.
+ if "cloudlinux" in distro_info.get("name", "").lower():
+ distro_info["id"] = "cloudlinux"
+
+ return distro_info
+
+ def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:
+ """
+ Parse a distro release file.
+
+ Parameters:
+
+ * filepath: Path name of the distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ try:
+ with open(filepath, encoding="utf-8") as fp:
+ # Only parse the first line. For instance, on SLES there
+ # are multiple lines. We don't want them...
+ return self._parse_distro_release_content(fp.readline())
+ except OSError:
+ # Ignore not being able to read a specific, seemingly version
+ # related file.
+ # See https://github.com/python-distro/distro/issues/162
+ return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line: str) -> Dict[str, str]:
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
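+
+        A doctest-style sketch with a typical release-file first line:
+
+        .. sourcecode:: python
+
+            >>> LinuxDistribution._parse_distro_release_content(
+            ...     'CentOS Linux release 7.1.1503 (Core)')
+            {'name': 'CentOS Linux', 'version_id': '7.1.1503', 'codename': 'Core'}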
+ """
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
+ distro_info = {}
+ if matches:
+ # regexp ensures non-None
+ distro_info["name"] = matches.group(3)[::-1]
+ if matches.group(2):
+ distro_info["version_id"] = matches.group(2)[::-1]
+ if matches.group(1):
+ distro_info["codename"] = matches.group(1)[::-1]
+ elif line:
+ distro_info["name"] = line.strip()
+ return distro_info
+
+
+_distro = LinuxDistribution()
+
+
+def main() -> None:
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ parser = argparse.ArgumentParser(description="OS distro info tool")
+ parser.add_argument(
+ "--json", "-j", help="Output in machine readable format", action="store_true"
+ )
+
+ parser.add_argument(
+ "--root-dir",
+ "-r",
+ type=str,
+ dest="root_dir",
+ help="Path to the root filesystem directory (defaults to /)",
+ )
+
+ args = parser.parse_args()
+
+ if args.root_dir:
+ dist = LinuxDistribution(
+ include_lsb=False,
+ include_uname=False,
+ include_oslevel=False,
+ root_dir=args.root_dir,
+ )
+ else:
+ dist = _distro
+
+ if args.json:
+ logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
+ else:
+ logger.info("Name: %s", dist.name(pretty=True))
+ distribution_version = dist.version(pretty=True)
+ logger.info("Version: %s", distribution_version)
+ distribution_codename = dist.codename()
+ logger.info("Codename: %s", distribution_codename)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/spack/external/_vendoring/distro/py.typed b/lib/spack/external/_vendoring/distro/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/distro/py.typed
diff --git a/lib/spack/external/_vendoring/jinja2/LICENSE.rst b/lib/spack/external/_vendoring/jinja2/LICENSE.rst
new file mode 100644
index 0000000000..c37cae49ec
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/spack/external/_vendoring/jinja2/__init__.py b/lib/spack/external/_vendoring/jinja2/__init__.py
new file mode 100644
index 0000000000..9dcd901a51
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/__init__.py
@@ -0,0 +1,45 @@
+"""Jinja is a template engine written in pure Python. It provides a
+non-XML syntax that supports inline expressions and an optional
+sandboxed environment.
+"""
+from .bccache import BytecodeCache as BytecodeCache
+from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache
+from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache
+from .environment import Environment as Environment
+from .environment import Template as Template
+from .exceptions import TemplateAssertionError as TemplateAssertionError
+from .exceptions import TemplateError as TemplateError
+from .exceptions import TemplateNotFound as TemplateNotFound
+from .exceptions import TemplateRuntimeError as TemplateRuntimeError
+from .exceptions import TemplatesNotFound as TemplatesNotFound
+from .exceptions import TemplateSyntaxError as TemplateSyntaxError
+from .exceptions import UndefinedError as UndefinedError
+from .filters import contextfilter
+from .filters import environmentfilter
+from .filters import evalcontextfilter
+from .loaders import BaseLoader as BaseLoader
+from .loaders import ChoiceLoader as ChoiceLoader
+from .loaders import DictLoader as DictLoader
+from .loaders import FileSystemLoader as FileSystemLoader
+from .loaders import FunctionLoader as FunctionLoader
+from .loaders import ModuleLoader as ModuleLoader
+from .loaders import PackageLoader as PackageLoader
+from .loaders import PrefixLoader as PrefixLoader
+from .runtime import ChainableUndefined as ChainableUndefined
+from .runtime import DebugUndefined as DebugUndefined
+from .runtime import make_logging_undefined as make_logging_undefined
+from .runtime import StrictUndefined as StrictUndefined
+from .runtime import Undefined as Undefined
+from .utils import clear_caches as clear_caches
+from .utils import contextfunction
+from .utils import environmentfunction
+from .utils import escape
+from .utils import evalcontextfunction
+from .utils import is_undefined as is_undefined
+from .utils import Markup
+from .utils import pass_context as pass_context
+from .utils import pass_environment as pass_environment
+from .utils import pass_eval_context as pass_eval_context
+from .utils import select_autoescape as select_autoescape
+
+__version__ = "3.0.3"
diff --git a/lib/spack/external/_vendoring/jinja2/_identifier.py b/lib/spack/external/_vendoring/jinja2/_identifier.py
new file mode 100644
index 0000000000..224d5449d1
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/_identifier.py
@@ -0,0 +1,6 @@
+import re
+
+# generated by scripts/generate_identifier_pattern.py
+pattern = re.compile(
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+)
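+
+# --- Illustrative sketch (editorial addition, not upstream code) ---
+# The character class above matches word characters plus Unicode combining
+# marks, mirroring what str.isidentifier() accepts beyond ASCII:
+if __name__ == "__main__":
+    assert pattern.fullmatch("café")  # plain word characters
+    assert pattern.fullmatch("a\u0300")  # combining grave accent is allowed
+    assert pattern.fullmatch("a-b") is None  # "-" is not an identifier char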
diff --git a/lib/spack/external/_vendoring/jinja2/async_utils.py b/lib/spack/external/_vendoring/jinja2/async_utils.py
new file mode 100644
index 0000000000..35e6cb1090
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/async_utils.py
@@ -0,0 +1,75 @@
+import inspect
+import typing as t
+from functools import wraps
+
+from .utils import _PassArg
+from .utils import pass_eval_context
+
+V = t.TypeVar("V")
+
+
+def async_variant(normal_func): # type: ignore
+ def decorator(async_func): # type: ignore
+ pass_arg = _PassArg.from_obj(normal_func)
+ need_eval_context = pass_arg is None
+
+ if pass_arg is _PassArg.environment:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].is_async)
+
+ else:
+
+ def is_async(args: t.Any) -> bool:
+ return t.cast(bool, args[0].environment.is_async)
+
+ @wraps(normal_func)
+ def wrapper(*args, **kwargs): # type: ignore
+ b = is_async(args)
+
+ if need_eval_context:
+ args = args[1:]
+
+ if b:
+ return async_func(*args, **kwargs)
+
+ return normal_func(*args, **kwargs)
+
+ if need_eval_context:
+ wrapper = pass_eval_context(wrapper)
+
+ wrapper.jinja_async_variant = True
+ return wrapper
+
+ return decorator
+
+
+_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)}
+
+
+async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
+ # Avoid a costly call to isawaitable
+ if type(value) in _common_primitives:
+ return t.cast("V", value)
+
+ if inspect.isawaitable(value):
+ return await t.cast("t.Awaitable[V]", value)
+
+ return t.cast("V", value)
+
+
+async def auto_aiter(
+ iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> "t.AsyncIterator[V]":
+ if hasattr(iterable, "__aiter__"):
+ async for item in t.cast("t.AsyncIterable[V]", iterable):
+ yield item
+ else:
+ for item in t.cast("t.Iterable[V]", iterable):
+ yield item
+
+
+async def auto_to_list(
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+) -> t.List["V"]:
+ return [x async for x in auto_aiter(value)]
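+
+
+# --- Illustrative sketch (editorial addition, not upstream code) ---
+# The auto_* helpers above let generated template code treat sync and async
+# iterables and awaitables uniformly; ``_agen`` and ``_demo`` are
+# hypothetical names used only for this example.
+if __name__ == "__main__":
+    import asyncio
+
+    async def _agen():
+        yield 1
+        yield 2
+
+    async def _demo() -> None:
+        print(await auto_to_list([1, 2]))  # sync iterable -> [1, 2]
+        print(await auto_to_list(_agen()))  # async iterable -> [1, 2]
+        print(await auto_await(42))  # plain values pass straight through
+
+    asyncio.run(_demo())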
diff --git a/lib/spack/external/_vendoring/jinja2/bccache.py b/lib/spack/external/_vendoring/jinja2/bccache.py
new file mode 100644
index 0000000000..3bb61b7c34
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/bccache.py
@@ -0,0 +1,364 @@
+"""The optional bytecode cache system. This is useful if you have very
+complex template situations and the compilation of all those templates
+slows down your application too much.
+
+Typical situations where this is useful are forking web applications that
+are initialized on the first request.
+"""
+import errno
+import fnmatch
+import marshal
+import os
+import pickle
+import stat
+import sys
+import tempfile
+import typing as t
+from hashlib import sha1
+from io import BytesIO
+from types import CodeType
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+ class _MemcachedClient(te.Protocol):
+ def get(self, key: str) -> t.Optional[bytes]:
+ ...
+
+ def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None:
+ ...
+
+
+bc_version = 5
+# Magic bytes to identify Jinja bytecode cache files. Contains the
+# Python major and minor version to avoid loading incompatible bytecode
+# if a project upgrades its Python version.
+bc_magic = (
+ b"j2"
+ + pickle.dumps(bc_version, 2)
+ + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
+)
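+# For example (editorial note): on CPython 3.10 the version stamp above is
+# (3 << 24) | 10 == 0x0300000A, so caches written by any other interpreter
+# version fail the magic comparison and the template is recompiled.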
+
+
+class Bucket:
+ """Buckets are used to store the bytecode for one template. It's created
+ and initialized by the bytecode cache and passed to the loading functions.
+
+ Each bucket is assigned an internal checksum by the cache and uses it
+ to automatically reject outdated cache material.  Individual bytecode
+ cache subclasses don't have to care about cache invalidation.
+ """
+
+ def __init__(self, environment: "Environment", key: str, checksum: str) -> None:
+ self.environment = environment
+ self.key = key
+ self.checksum = checksum
+ self.reset()
+
+ def reset(self) -> None:
+ """Resets the bucket (unloads the bytecode)."""
+ self.code: t.Optional[CodeType] = None
+
+ def load_bytecode(self, f: t.BinaryIO) -> None:
+ """Loads bytecode from a file or file like object."""
+ # make sure the magic header is correct
+ magic = f.read(len(bc_magic))
+ if magic != bc_magic:
+ self.reset()
+ return
+ # the source code of the file changed; the cached bytecode is stale
+ checksum = pickle.load(f)
+ if self.checksum != checksum:
+ self.reset()
+ return
+ # if marshal_load fails then we need to reload
+ try:
+ self.code = marshal.load(f)
+ except (EOFError, ValueError, TypeError):
+ self.reset()
+ return
+
+ def write_bytecode(self, f: t.BinaryIO) -> None:
+ """Dump the bytecode into the file or file like object passed."""
+ if self.code is None:
+ raise TypeError("can't write empty bucket")
+ f.write(bc_magic)
+ pickle.dump(self.checksum, f, 2)
+ marshal.dump(self.code, f)
+
+ def bytecode_from_string(self, string: bytes) -> None:
+ """Load bytecode from bytes."""
+ self.load_bytecode(BytesIO(string))
+
+ def bytecode_to_string(self) -> bytes:
+ """Return the bytecode as bytes."""
+ out = BytesIO()
+ self.write_bytecode(out)
+ return out.getvalue()
+
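+# --- Illustrative round-trip sketch (editorial addition, not upstream) ---
+# write_bytecode() emits bc_magic + pickled checksum + marshalled code, and
+# load_bytecode() resets the bucket when the magic or checksum mismatches.
+if __name__ == "__main__":
+    src = Bucket(None, "key", "sum")  # type: ignore[arg-type]
+    src.code = compile("x = 1", "<sketch>", "exec")
+    payload = src.bytecode_to_string()
+
+    ok = Bucket(None, "key", "sum")  # type: ignore[arg-type]
+    ok.bytecode_from_string(payload)
+    assert ok.code is not None  # magic and checksum match
+
+    stale = Bucket(None, "key", "other")  # type: ignore[arg-type]
+    stale.bytecode_from_string(payload)
+    assert stale.code is None  # checksum mismatch -> bucket reset
+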
+
+class BytecodeCache:
+ """To implement your own bytecode cache you have to subclass this class
+ and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
+ these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+ A very basic bytecode cache that saves the bytecode on the file system::
+
+ from os import path
+
+ class MyCache(BytecodeCache):
+
+ def __init__(self, directory):
+ self.directory = directory
+
+ def load_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ if path.exists(filename):
+ with open(filename, 'rb') as f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ with open(filename, 'wb') as f:
+ bucket.write_bytecode(f)
+
+ A more advanced version of a filesystem based bytecode cache is part of
+ Jinja.
+ """
+
+ def load_bytecode(self, bucket: Bucket) -> None:
+ """Subclasses have to override this method to load bytecode into a
+ bucket.  If no code can be found in the cache for the bucket, the
+ method must not do anything.
+ """
+ raise NotImplementedError()
+
+ def dump_bytecode(self, bucket: Bucket) -> None:
+ """Subclasses have to override this method to write the bytecode
+ from a bucket back to the cache.  If it is unable to do so, it must
+ not fail silently but raise an exception.
+ """
+ raise NotImplementedError()
+
+ def clear(self) -> None:
+ """Clears the cache. This method is not used by Jinja but should be
+ implemented to allow applications to clear the bytecode cache used
+ by a particular environment.
+ """
+
+ def get_cache_key(
+ self, name: str, filename: t.Optional[str] = None
+ ) -> str:
+ """Returns the unique hash key for this template name."""
+ hash = sha1(name.encode("utf-8"))
+
+ if filename is not None:
+ hash.update(f"|{filename}".encode())
+
+ return hash.hexdigest()
+
+ def get_source_checksum(self, source: str) -> str:
+ """Returns a checksum for the source."""
+ return sha1(source.encode("utf-8")).hexdigest()
+
+ def get_bucket(
+ self,
+ environment: "Environment",
+ name: str,
+ filename: t.Optional[str],
+ source: str,
+ ) -> Bucket:
+ """Return a cache bucket for the given template. All arguments are
+ mandatory but filename may be `None`.
+ """
+ key = self.get_cache_key(name, filename)
+ checksum = self.get_source_checksum(source)
+ bucket = Bucket(environment, key, checksum)
+ self.load_bytecode(bucket)
+ return bucket
+
+ def set_bucket(self, bucket: Bucket) -> None:
+ """Put the bucket into the cache."""
+ self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+ """A bytecode cache that stores bytecode on the filesystem. It accepts
+ two arguments: The directory where the cache items are stored and a
+ pattern string that is used to build the filename.
+
+ If no directory is specified a default cache directory is selected. On
+ Windows the user's temp directory is used, on UNIX systems a directory
+ is created for the user in the system temp directory.
+
+ The pattern can be used to have multiple separate caches operate on the
+ same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
+ is replaced with the cache key.
+
+ >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+ This bytecode cache supports clearing of the cache using the clear method.
+ """
+
+ def __init__(
+ self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache"
+ ) -> None:
+ if directory is None:
+ directory = self._get_default_cache_dir()
+ self.directory = directory
+ self.pattern = pattern
+
+ def _get_default_cache_dir(self) -> str:
+ def _unsafe_dir() -> "te.NoReturn":
+ raise RuntimeError(
+ "Cannot determine safe temp directory. You "
+ "need to explicitly provide one."
+ )
+
+ tmpdir = tempfile.gettempdir()
+
+ # On Windows the temporary directory is user-specific unless
+ # explicitly forced otherwise, so we can just use it.
+ if os.name == "nt":
+ return tmpdir
+ if not hasattr(os, "getuid"):
+ _unsafe_dir()
+
+ dirname = f"_jinja2-cache-{os.getuid()}"
+ actual_dir = os.path.join(tmpdir, dirname)
+
+ try:
+ os.mkdir(actual_dir, stat.S_IRWXU)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ try:
+ os.chmod(actual_dir, stat.S_IRWXU)
+ actual_dir_stat = os.lstat(actual_dir)
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
+ _unsafe_dir()
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ actual_dir_stat = os.lstat(actual_dir)
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
+ _unsafe_dir()
+
+ return actual_dir
+
+ def _get_cache_filename(self, bucket: Bucket) -> str:
+ return os.path.join(self.directory, self.pattern % (bucket.key,))
+
+ def load_bytecode(self, bucket: Bucket) -> None:
+ filename = self._get_cache_filename(bucket)
+
+ if os.path.exists(filename):
+ with open(filename, "rb") as f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket: Bucket) -> None:
+ with open(self._get_cache_filename(bucket), "wb") as f:
+ bucket.write_bytecode(f)
+
+ def clear(self) -> None:
+ # Imported lazily here because Google App Engine doesn't support
+ # write access on the file system, so the function normally does
+ # not exist there.
+ from os import remove
+
+ files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
+ for filename in files:
+ try:
+ remove(os.path.join(self.directory, filename))
+ except OSError:
+ pass
+
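+# --- Illustrative wiring sketch (editorial addition, not upstream) ---
+# ``bytecode_cache`` is a regular Environment() parameter; the cache
+# directory below is hypothetical and created on the fly.
+if __name__ == "__main__":
+    from jinja2 import DictLoader, Environment
+
+    os.makedirs("/tmp/jinja_cache", exist_ok=True)
+    env = Environment(
+        loader=DictLoader({"hello.txt": "Hello {{ name }}!"}),
+        bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
+    )
+    env.get_template("hello.txt").render(name="Spack")  # compiled once, cached
+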
+
+class MemcachedBytecodeCache(BytecodeCache):
+ """This class implements a bytecode cache that uses a memcache cache for
+ storing the information. It does not enforce a specific memcache library
+ (tummy's memcache or cmemcache) but will accept any class that provides
+ the minimal interface required.
+
+ Libraries compatible with this class:
+
+ - `cachelib <https://github.com/pallets/cachelib>`_
+ - `python-memcached <https://pypi.org/project/python-memcached/>`_
+
+ (Unfortunately the django cache interface is not compatible because it
+ does not support storing binary data, only text. You can however pass
+ the underlying cache client to the bytecode cache which is available
+ as `django.core.cache.cache._client`.)
+
+ The minimal interface for the client passed to the constructor is this:
+
+ .. class:: MinimalClientInterface
+
+ .. method:: set(key, value[, timeout])
+
+ Stores the bytecode in the cache. `value` is a string and
+ `timeout` the timeout of the key.  If `timeout` is not provided,
+ a default timeout or no timeout should be assumed; if it is
+ provided, it is an integer number of seconds the cache item
+ should exist.
+
+ .. method:: get(key)
+
+ Returns the value for the cache key. If the item does not
+ exist in the cache the return value must be `None`.
+
+ The other arguments to the constructor are the prefix for all keys that
+ is added before the actual cache key and the timeout for the bytecode in
+ the cache system. We recommend a high (or no) timeout.
+
+ This bytecode cache does not support clearing of used items in the cache.
+ The clear method is a no-operation function.
+
+ .. versionadded:: 2.7
+ Added support for ignoring memcache errors through the
+ `ignore_memcache_errors` parameter.
+ """
+
+ def __init__(
+ self,
+ client: "_MemcachedClient",
+ prefix: str = "jinja2/bytecode/",
+ timeout: t.Optional[int] = None,
+ ignore_memcache_errors: bool = True,
+ ):
+ self.client = client
+ self.prefix = prefix
+ self.timeout = timeout
+ self.ignore_memcache_errors = ignore_memcache_errors
+
+ def load_bytecode(self, bucket: Bucket) -> None:
+ try:
+ code = self.client.get(self.prefix + bucket.key)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
+ else:
+ # a cache miss returns None per the minimal client interface above
+ if code is not None:
+ bucket.bytecode_from_string(code)
+
+ def dump_bytecode(self, bucket: Bucket) -> None:
+ key = self.prefix + bucket.key
+ value = bucket.bytecode_to_string()
+
+ try:
+ if self.timeout is not None:
+ self.client.set(key, value, self.timeout)
+ else:
+ self.client.set(key, value)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
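+
+
+# --- Illustrative minimal-client sketch (editorial addition, not upstream) ---
+# Any object with get(key) -> bytes-or-None and set(key, value[, timeout])
+# satisfies the interface documented above; ``_DictClient`` is a
+# hypothetical in-memory stand-in.
+if __name__ == "__main__":
+
+    class _DictClient:
+        def __init__(self) -> None:
+            self._store: t.Dict[str, bytes] = {}
+
+        def get(self, key: str) -> t.Optional[bytes]:
+            return self._store.get(key)  # None signals a cache miss
+
+        def set(
+            self, key: str, value: bytes, timeout: t.Optional[int] = None
+        ) -> None:
+            self._store[key] = value
+
+    cache = MemcachedBytecodeCache(_DictClient(), prefix="jinja2/bytecode/")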
diff --git a/lib/spack/external/_vendoring/jinja2/compiler.py b/lib/spack/external/_vendoring/jinja2/compiler.py
new file mode 100644
index 0000000000..52fd5b83e2
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/compiler.py
@@ -0,0 +1,1957 @@
+"""Compiles nodes from the parser into Python code."""
+import typing as t
+from contextlib import contextmanager
+from functools import update_wrapper
+from io import StringIO
+from itertools import chain
+from keyword import iskeyword as is_python_keyword
+
+from markupsafe import escape
+from markupsafe import Markup
+
+from . import nodes
+from .exceptions import TemplateAssertionError
+from .idtracking import Symbols
+from .idtracking import VAR_LOAD_ALIAS
+from .idtracking import VAR_LOAD_PARAMETER
+from .idtracking import VAR_LOAD_RESOLVE
+from .idtracking import VAR_LOAD_UNDEFINED
+from .nodes import EvalContext
+from .optimizer import Optimizer
+from .utils import _PassArg
+from .utils import concat
+from .visitor import NodeVisitor
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+operators = {
+ "eq": "==",
+ "ne": "!=",
+ "gt": ">",
+ "gteq": ">=",
+ "lt": "<",
+ "lteq": "<=",
+ "in": "in",
+ "notin": "not in",
+}
+
+
+def optimizeconst(f: F) -> F:
+ def new_func(
+ self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any
+ ) -> t.Any:
+ # Only optimize if the frame is not volatile
+ if self.optimizer is not None and not frame.eval_ctx.volatile:
+ new_node = self.optimizer.visit(node, frame.eval_ctx)
+
+ if new_node != node:
+ return self.visit(new_node, frame)
+
+ return f(self, node, frame, **kwargs)
+
+ return update_wrapper(t.cast(F, new_func), f)
+
+
+def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]:
+ @optimizeconst
+ def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None:
+ if (
+ self.environment.sandboxed
+ and op in self.environment.intercepted_binops # type: ignore
+ ):
+ self.write(f"environment.call_binop(context, {op!r}, ")
+ self.visit(node.left, frame)
+ self.write(", ")
+ self.visit(node.right, frame)
+ else:
+ self.write("(")
+ self.visit(node.left, frame)
+ self.write(f" {op} ")
+ self.visit(node.right, frame)
+
+ self.write(")")
+
+ return visitor
+
+
+def _make_unop(
+ op: str,
+) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]:
+ @optimizeconst
+ def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None:
+ if (
+ self.environment.sandboxed
+ and op in self.environment.intercepted_unops # type: ignore
+ ):
+ self.write(f"environment.call_unop(context, {op!r}, ")
+ self.visit(node.node, frame)
+ else:
+ self.write("(" + op)
+ self.visit(node.node, frame)
+
+ self.write(")")
+
+ return visitor
+
+
+def generate(
+ node: nodes.Template,
+ environment: "Environment",
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ stream: t.Optional[t.TextIO] = None,
+ defer_init: bool = False,
+ optimized: bool = True,
+) -> t.Optional[str]:
+ """Generate the python source for a node tree."""
+ if not isinstance(node, nodes.Template):
+ raise TypeError("Can't compile non template nodes")
+
+ generator = environment.code_generator_class(
+ environment, name, filename, stream, defer_init, optimized
+ )
+ generator.visit(node)
+
+ if stream is None:
+ return generator.stream.getvalue() # type: ignore
+
+ return None
+
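+# --- Illustrative sketch (editorial addition, not upstream code) ---
+# generate() turns a parsed template into Python module source; the
+# ``hello`` name and filename are hypothetical.
+if __name__ == "__main__":
+    from jinja2 import Environment
+
+    _env = Environment()
+    _src = generate(_env.parse("Hello {{ name }}!"), _env, "hello", "hello.txt")
+    print(_src)  # generated module source, including the root() function
+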
+
+def has_safe_repr(value: t.Any) -> bool:
+ """Does the node have a safe representation?"""
+ if value is None or value is NotImplemented or value is Ellipsis:
+ return True
+
+ if type(value) in {bool, int, float, complex, range, str, Markup}:
+ return True
+
+ if type(value) in {tuple, list, set, frozenset}:
+ return all(has_safe_repr(v) for v in value)
+
+ if type(value) is dict:
+ return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items())
+
+ return False
+
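+# For example (editorial note): containers are safe only if every element
+# is, while arbitrary objects are not, since their repr() may not round-trip.
+if __name__ == "__main__":
+    assert has_safe_repr((1, "a", None))
+    assert has_safe_repr({"k": [1.5, Markup("x")]})
+    assert not has_safe_repr(object())
+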
+
+def find_undeclared(
+ nodes: t.Iterable[nodes.Node], names: t.Iterable[str]
+) -> t.Set[str]:
+ """Check if the names passed are accessed undeclared. The return value
+ is the subset of the passed names that are accessed undeclared.
+ """
+ visitor = UndeclaredNameVisitor(names)
+ try:
+ for node in nodes:
+ visitor.visit(node)
+ except VisitorExit:
+ pass
+ return visitor.undeclared
+
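+# --- Illustrative sketch (editorial addition, not upstream code) ---
+# find_undeclared() reports which of the given names a node sequence reads
+# without first assigning them:
+if __name__ == "__main__":
+    from jinja2 import Environment
+
+    _tmpl = Environment().parse("{% set x = 1 %}{{ x }}{{ caller() }}")
+    print(find_undeclared(_tmpl.body, ("x", "caller")))  # -> {'caller'}
+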
+
+class MacroRef:
+ def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None:
+ self.node = node
+ self.accesses_caller = False
+ self.accesses_kwargs = False
+ self.accesses_varargs = False
+
+
+class Frame:
+ """Holds compile time information for us."""
+
+ def __init__(
+ self,
+ eval_ctx: EvalContext,
+ parent: t.Optional["Frame"] = None,
+ level: t.Optional[int] = None,
+ ) -> None:
+ self.eval_ctx = eval_ctx
+
+ # the parent of this frame
+ self.parent = parent
+
+ if parent is None:
+ self.symbols = Symbols(level=level)
+
+ # in some dynamic inheritance situations the compiler needs to add
+ # write tests around output statements.
+ self.require_output_check = False
+
+ # inside some tags we are using a buffer rather than yield statements.
+ # this for example affects {% filter %} or {% macro %}. If a frame
+ # is buffered this variable points to the name of the list used as
+ # buffer.
+ self.buffer: t.Optional[str] = None
+
+ # the name of the block we're in, otherwise None.
+ self.block: t.Optional[str] = None
+
+ else:
+ self.symbols = Symbols(parent.symbols, level=level)
+ self.require_output_check = parent.require_output_check
+ self.buffer = parent.buffer
+ self.block = parent.block
+
+ # a toplevel frame is the root + soft frames such as if conditions.
+ self.toplevel = False
+
+ # the root frame is basically just the outermost frame, so no if
+ # conditions. This information is used to optimize inheritance
+ # situations.
+ self.rootlevel = False
+
+ # variables set inside of loops and blocks should not affect outer frames,
+ # but they still need to be tracked as part of the active context.
+ self.loop_frame = False
+ self.block_frame = False
+
+ # track whether the frame is being used in an if-statement or conditional
+ # expression as it determines which errors should be raised during runtime
+ # or compile time.
+ self.soft_frame = False
+
+ def copy(self) -> "Frame":
+ """Create a copy of the current one."""
+ rv = t.cast(Frame, object.__new__(self.__class__))
+ rv.__dict__.update(self.__dict__)
+ rv.symbols = self.symbols.copy()
+ return rv
+
+ def inner(self, isolated: bool = False) -> "Frame":
+ """Return an inner frame."""
+ if isolated:
+ return Frame(self.eval_ctx, level=self.symbols.level + 1)
+ return Frame(self.eval_ctx, self)
+
+ def soft(self) -> "Frame":
+ """Return a soft frame. A soft frame may not be modified as
+ standalone thing as it shares the resources with the frame it
+ was created of, but it's not a rootlevel frame any longer.
+
+ This is only used to implement if-statements and conditional
+ expressions.
+ """
+ rv = self.copy()
+ rv.rootlevel = False
+ rv.soft_frame = True
+ return rv
+
+ __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+ """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+ """A visitor that collects filter and test calls."""
+
+ def __init__(self) -> None:
+ self.filters: t.Set[str] = set()
+ self.tests: t.Set[str] = set()
+
+ def visit_Filter(self, node: nodes.Filter) -> None:
+ self.generic_visit(node)
+ self.filters.add(node.name)
+
+ def visit_Test(self, node: nodes.Test) -> None:
+ self.generic_visit(node)
+ self.tests.add(node.name)
+
+ def visit_Block(self, node: nodes.Block) -> None:
+ """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+ """A visitor that checks if a name is accessed without being
+ declared. This is different from the frame visitor as it will
+ not stop at closure frames.
+ """
+
+ def __init__(self, names: t.Iterable[str]) -> None:
+ self.names = set(names)
+ self.undeclared: t.Set[str] = set()
+
+ def visit_Name(self, node: nodes.Name) -> None:
+ if node.ctx == "load" and node.name in self.names:
+ self.undeclared.add(node.name)
+ if self.undeclared == self.names:
+ raise VisitorExit()
+ else:
+ self.names.discard(node.name)
+
+ def visit_Block(self, node: nodes.Block) -> None:
+ """Stop visiting a blocks."""
+
+
+class CompilerExit(Exception):
+ """Raised if the compiler encountered a situation where it just
+ doesn't make sense to further process the code. Any block that
+ raises such an exception is not further processed.
+ """
+
+
+class CodeGenerator(NodeVisitor):
+ def __init__(
+ self,
+ environment: "Environment",
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ stream: t.Optional[t.TextIO] = None,
+ defer_init: bool = False,
+ optimized: bool = True,
+ ) -> None:
+ if stream is None:
+ stream = StringIO()
+ self.environment = environment
+ self.name = name
+ self.filename = filename
+ self.stream = stream
+ self.created_block_context = False
+ self.defer_init = defer_init
+ self.optimizer: t.Optional[Optimizer] = None
+
+ if optimized:
+ self.optimizer = Optimizer(environment)
+
+ # aliases for imports
+ self.import_aliases: t.Dict[str, str] = {}
+
+ # a registry for all blocks. Because blocks are moved out
+ # into the global python scope they are registered here
+ self.blocks: t.Dict[str, nodes.Block] = {}
+
+ # the number of extends statements so far
+ self.extends_so_far = 0
+
+ # some templates have a rootlevel extends. In this case we
+ # can safely assume that we're a child template and do some
+ # more optimizations.
+ self.has_known_extends = False
+
+ # the current line number
+ self.code_lineno = 1
+
+ # registry of all filters and tests (global, not block local)
+ self.tests: t.Dict[str, str] = {}
+ self.filters: t.Dict[str, str] = {}
+
+ # the debug information
+ self.debug_info: t.List[t.Tuple[int, int]] = []
+ self._write_debug_info: t.Optional[int] = None
+
+ # the number of new lines before the next write()
+ self._new_lines = 0
+
+ # the line number of the last written statement
+ self._last_line = 0
+
+ # true if nothing was written so far.
+ self._first_write = True
+
+ # used by the `temporary_identifier` method to get new
+ # unique, temporary identifier
+ self._last_identifier = 0
+
+ # the current indentation
+ self._indentation = 0
+
+ # Tracks toplevel assignments
+ self._assign_stack: t.List[t.Set[str]] = []
+
+ # Tracks parameter definition blocks
+ self._param_def_block: t.List[t.Set[str]] = []
+
+ # Tracks the current context.
+ self._context_reference_stack = ["context"]
+
+ @property
+ def optimized(self) -> bool:
+ return self.optimizer is not None
+
+ # -- Various compilation helpers
+
+ def fail(self, msg: str, lineno: int) -> "te.NoReturn":
+ """Fail with a :exc:`TemplateAssertionError`."""
+ raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+ def temporary_identifier(self) -> str:
+ """Get a new unique identifier."""
+ self._last_identifier += 1
+ return f"t_{self._last_identifier}"
+
+ def buffer(self, frame: Frame) -> None:
+ """Enable buffering for the frame from that point onwards."""
+ frame.buffer = self.temporary_identifier()
+ self.writeline(f"{frame.buffer} = []")
+
+ def return_buffer_contents(
+ self, frame: Frame, force_unescaped: bool = False
+ ) -> None:
+ """Return the buffer contents of the frame."""
+ if not force_unescaped:
+ if frame.eval_ctx.volatile:
+ self.writeline("if context.eval_ctx.autoescape:")
+ self.indent()
+ self.writeline(f"return Markup(concat({frame.buffer}))")
+ self.outdent()
+ self.writeline("else:")
+ self.indent()
+ self.writeline(f"return concat({frame.buffer})")
+ self.outdent()
+ return
+ elif frame.eval_ctx.autoescape:
+ self.writeline(f"return Markup(concat({frame.buffer}))")
+ return
+ self.writeline(f"return concat({frame.buffer})")
+
+ def indent(self) -> None:
+ """Indent by one."""
+ self._indentation += 1
+
+ def outdent(self, step: int = 1) -> None:
+ """Outdent by step."""
+ self._indentation -= step
+
+ def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None:
+ """Yield or write into the frame buffer."""
+ if frame.buffer is None:
+ self.writeline("yield ", node)
+ else:
+ self.writeline(f"{frame.buffer}.append(", node)
+
+ def end_write(self, frame: Frame) -> None:
+ """End the writing process started by `start_write`."""
+ if frame.buffer is not None:
+ self.write(")")
+
+ def simple_write(
+ self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None
+ ) -> None:
+ """Simple shortcut for start_write + write + end_write."""
+ self.start_write(frame, node)
+ self.write(s)
+ self.end_write(frame)
+
+ def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None:
+ """Visit a list of nodes as block in a frame. If the current frame
+ is no buffer a dummy ``if 0: yield None`` is written automatically.
+ """
+ try:
+ self.writeline("pass")
+ for node in nodes:
+ self.visit(node, frame)
+ except CompilerExit:
+ pass
+
+ def write(self, x: str) -> None:
+ """Write a string into the output stream."""
+ if self._new_lines:
+ if not self._first_write:
+ self.stream.write("\n" * self._new_lines)
+ self.code_lineno += self._new_lines
+ if self._write_debug_info is not None:
+ self.debug_info.append((self._write_debug_info, self.code_lineno))
+ self._write_debug_info = None
+ self._first_write = False
+ self.stream.write(" " * self._indentation)
+ self._new_lines = 0
+ self.stream.write(x)
+
+ def writeline(
+ self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0
+ ) -> None:
+ """Combination of newline and write."""
+ self.newline(node, extra)
+ self.write(x)
+
+ def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None:
+ """Add one or more newlines before the next write."""
+ self._new_lines = max(self._new_lines, 1 + extra)
+ if node is not None and node.lineno != self._last_line:
+ self._write_debug_info = node.lineno
+ self._last_line = node.lineno
+
+ def signature(
+ self,
+ node: t.Union[nodes.Call, nodes.Filter, nodes.Test],
+ frame: Frame,
+ extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> None:
+ """Writes a function call to the stream for the current node.
+ A leading comma is added automatically. The extra keyword
+ arguments may not include Python keywords, otherwise a syntax
+ error could occur.  The extra keyword arguments should be given
+ as a Python dict.
+ """
+ # if any of the given keyword arguments is a python keyword
+ # we have to make sure that no invalid call is created.
+ kwarg_workaround = any(
+ is_python_keyword(t.cast(str, k))
+ for k in chain((x.key for x in node.kwargs), extra_kwargs or ())
+ )
+
+ for arg in node.args:
+ self.write(", ")
+ self.visit(arg, frame)
+
+ if not kwarg_workaround:
+ for kwarg in node.kwargs:
+ self.write(", ")
+ self.visit(kwarg, frame)
+ if extra_kwargs is not None:
+ for key, value in extra_kwargs.items():
+ self.write(f", {key}={value}")
+ if node.dyn_args:
+ self.write(", *")
+ self.visit(node.dyn_args, frame)
+
+ if kwarg_workaround:
+ if node.dyn_kwargs is not None:
+ self.write(", **dict({")
+ else:
+ self.write(", **{")
+ for kwarg in node.kwargs:
+ self.write(f"{kwarg.key!r}: ")
+ self.visit(kwarg.value, frame)
+ self.write(", ")
+ if extra_kwargs is not None:
+ for key, value in extra_kwargs.items():
+ self.write(f"{key!r}: {value}, ")
+ if node.dyn_kwargs is not None:
+ self.write("}, **")
+ self.visit(node.dyn_kwargs, frame)
+ self.write(")")
+ else:
+ self.write("}")
+
+ elif node.dyn_kwargs is not None:
+ self.write(", **")
+ self.visit(node.dyn_kwargs, frame)
+
+ def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None:
+ """Find all filter and test names used in the template and
+ assign them to variables in the compiled namespace. Checking
+ that the names are registered with the environment is done when
+ compiling the Filter and Test nodes. If the node is in an If or
+ CondExpr node, the check is done at runtime instead.
+
+ .. versionchanged:: 3.0
+ Filters and tests in If and CondExpr nodes are checked at
+ runtime instead of compile time.
+ """
+ visitor = DependencyFinderVisitor()
+
+ for node in nodes:
+ visitor.visit(node)
+
+ for id_map, names, dependency in (self.filters, visitor.filters, "filters"), (
+ self.tests,
+ visitor.tests,
+ "tests",
+ ):
+ for name in sorted(names):
+ if name not in id_map:
+ id_map[name] = self.temporary_identifier()
+
+ # add check during runtime that dependencies used inside of executed
+ # blocks are defined, as this step may be skipped during compile time
+ self.writeline("try:")
+ self.indent()
+ self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]")
+ self.outdent()
+ self.writeline("except KeyError:")
+ self.indent()
+ self.writeline("@internalcode")
+ self.writeline(f"def {id_map[name]}(*unused):")
+ self.indent()
+ self.writeline(
+ f'raise TemplateRuntimeError("No {dependency[:-1]}'
+ f' named {name!r} found.")'
+ )
+ self.outdent()
+ self.outdent()
+
+ def enter_frame(self, frame: Frame) -> None:
+ undefs = []
+ for target, (action, param) in frame.symbols.loads.items():
+ if action == VAR_LOAD_PARAMETER:
+ pass
+ elif action == VAR_LOAD_RESOLVE:
+ self.writeline(f"{target} = {self.get_resolve_func()}({param!r})")
+ elif action == VAR_LOAD_ALIAS:
+ self.writeline(f"{target} = {param}")
+ elif action == VAR_LOAD_UNDEFINED:
+ undefs.append(target)
+ else:
+ raise NotImplementedError("unknown load instruction")
+ if undefs:
+ self.writeline(f"{' = '.join(undefs)} = missing")
+
+ def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None:
+ if not with_python_scope:
+ undefs = []
+ for target in frame.symbols.loads:
+ undefs.append(target)
+ if undefs:
+ self.writeline(f"{' = '.join(undefs)} = missing")
+
+ def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str:
+ return async_value if self.environment.is_async else sync_value
+
+ def func(self, name: str) -> str:
+ return f"{self.choose_async()}def {name}"
+
+ def macro_body(
+ self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame
+ ) -> t.Tuple[Frame, MacroRef]:
+ """Dump the function def of a macro or call block."""
+ frame = frame.inner()
+ frame.symbols.analyze_node(node)
+ macro_ref = MacroRef(node)
+
+ explicit_caller = None
+ skip_special_params = set()
+ args = []
+
+ for idx, arg in enumerate(node.args):
+ if arg.name == "caller":
+ explicit_caller = idx
+ if arg.name in ("kwargs", "varargs"):
+ skip_special_params.add(arg.name)
+ args.append(frame.symbols.ref(arg.name))
+
+ undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
+
+ if "caller" in undeclared:
+ # In older Jinja versions there was a bug that allowed caller
+ # to retain the special behavior even if it was mentioned in
+ # the argument list.  However, thankfully, this only really
+ # worked if it was the last argument.  So we explicitly check
+ # for this now and error out if it is anywhere else in
+ # the argument list.
+ if explicit_caller is not None:
+ try:
+ node.defaults[explicit_caller - len(node.args)]
+ except IndexError:
+ self.fail(
+ "When defining macros or call blocks the "
+ 'special "caller" argument must be omitted '
+ "or be given a default.",
+ node.lineno,
+ )
+ else:
+ args.append(frame.symbols.declare_parameter("caller"))
+ macro_ref.accesses_caller = True
+ if "kwargs" in undeclared and "kwargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("kwargs"))
+ macro_ref.accesses_kwargs = True
+ if "varargs" in undeclared and "varargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("varargs"))
+ macro_ref.accesses_varargs = True
+
+ # macros are delayed, they never require output checks
+ frame.require_output_check = False
+ frame.symbols.analyze_node(node)
+ self.writeline(f"{self.func('macro')}({', '.join(args)}):", node)
+ self.indent()
+
+ self.buffer(frame)
+ self.enter_frame(frame)
+
+ self.push_parameter_definitions(frame)
+ for idx, arg in enumerate(node.args):
+ ref = frame.symbols.ref(arg.name)
+ self.writeline(f"if {ref} is missing:")
+ self.indent()
+ try:
+ default = node.defaults[idx - len(node.args)]
+ except IndexError:
+ self.writeline(
+ f'{ref} = undefined("parameter {arg.name!r} was not provided",'
+ f" name={arg.name!r})"
+ )
+ else:
+ self.writeline(f"{ref} = ")
+ self.visit(default, frame)
+ self.mark_parameter_stored(ref)
+ self.outdent()
+ self.pop_parameter_definitions()
+
+ self.blockvisit(node.body, frame)
+ self.return_buffer_contents(frame, force_unescaped=True)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ return frame, macro_ref
+
+ def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None:
+ """Dump the macro definition for the def created by macro_body."""
+ arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, "name", None)
+ if len(macro_ref.node.args) == 1:
+ arg_tuple += ","
+ self.write(
+ f"Macro(environment, macro, {name!r}, ({arg_tuple}),"
+ f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r},"
+ f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)"
+ )
+
+ def position(self, node: nodes.Node) -> str:
+ """Return a human readable position for the node."""
+ rv = f"line {node.lineno}"
+ if self.name is not None:
+ rv = f"{rv} in {self.name!r}"
+ return rv
+
+ def dump_local_context(self, frame: Frame) -> str:
+ items_kv = ", ".join(
+ f"{name!r}: {target}"
+ for name, target in frame.symbols.dump_stores().items()
+ )
+ return f"{{{items_kv}}}"
+
+ def write_commons(self) -> None:
+ """Writes a common preamble that is used by root and block functions.
+ Primarily this sets up common local helpers and enforces a generator
+ through a dead branch.
+ """
+ self.writeline("resolve = context.resolve_or_missing")
+ self.writeline("undefined = environment.undefined")
+ # always use the standard Undefined class for the implicit else of
+ # conditional expressions
+ self.writeline("cond_expr_undefined = Undefined")
+ self.writeline("if 0: yield None")
+
+ def push_parameter_definitions(self, frame: Frame) -> None:
+ """Pushes all parameter targets from the given frame into a local
+ stack that permits tracking of yet-to-be-assigned parameters.  In
+ particular this enables the optimization from `visit_Name` to skip
+ undefined expressions for parameters in macros as macros can reference
+ otherwise unbound parameters.
+ """
+ self._param_def_block.append(frame.symbols.dump_param_targets())
+
+ def pop_parameter_definitions(self) -> None:
+ """Pops the current parameter definitions set."""
+ self._param_def_block.pop()
+
+ def mark_parameter_stored(self, target: str) -> None:
+ """Marks a parameter in the current parameter definitions as stored.
+ This will skip the enforced undefined checks.
+ """
+ if self._param_def_block:
+ self._param_def_block[-1].discard(target)
+
+ def push_context_reference(self, target: str) -> None:
+ self._context_reference_stack.append(target)
+
+ def pop_context_reference(self) -> None:
+ self._context_reference_stack.pop()
+
+ def get_context_ref(self) -> str:
+ return self._context_reference_stack[-1]
+
+ def get_resolve_func(self) -> str:
+ target = self._context_reference_stack[-1]
+ if target == "context":
+ return "resolve"
+ return f"{target}.resolve"
+
+ def derive_context(self, frame: Frame) -> str:
+ return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})"
+
+ def parameter_is_undeclared(self, target: str) -> bool:
+ """Checks if a given target is an undeclared parameter."""
+ if not self._param_def_block:
+ return False
+ return target in self._param_def_block[-1]
+
+ def push_assign_tracking(self) -> None:
+ """Pushes a new layer for assignment tracking."""
+ self._assign_stack.append(set())
+
+ def pop_assign_tracking(self, frame: Frame) -> None:
+ """Pops the topmost level for assignment tracking and updates the
+ context variables if necessary.
+ """
+ vars = self._assign_stack.pop()
+ if (
+ not frame.block_frame
+ and not frame.loop_frame
+ and not frame.toplevel
+ or not vars
+ ):
+ return
+ public_names = [x for x in vars if x[:1] != "_"]
+ if len(vars) == 1:
+ name = next(iter(vars))
+ ref = frame.symbols.ref(name)
+ if frame.loop_frame:
+ self.writeline(f"_loop_vars[{name!r}] = {ref}")
+ return
+ if frame.block_frame:
+ self.writeline(f"_block_vars[{name!r}] = {ref}")
+ return
+ self.writeline(f"context.vars[{name!r}] = {ref}")
+ else:
+ if frame.loop_frame:
+ self.writeline("_loop_vars.update({")
+ elif frame.block_frame:
+ self.writeline("_block_vars.update({")
+ else:
+ self.writeline("context.vars.update({")
+ for idx, name in enumerate(vars):
+ if idx:
+ self.write(", ")
+ ref = frame.symbols.ref(name)
+ self.write(f"{name!r}: {ref}")
+ self.write("})")
+ if not frame.block_frame and not frame.loop_frame and public_names:
+ if len(public_names) == 1:
+ self.writeline(f"context.exported_vars.add({public_names[0]!r})")
+ else:
+ names_str = ", ".join(map(repr, public_names))
+ self.writeline(f"context.exported_vars.update(({names_str}))")
+
+ # -- Statement Visitors
+
+ def visit_Template(
+ self, node: nodes.Template, frame: t.Optional[Frame] = None
+ ) -> None:
+ assert frame is None, "no root frame allowed"
+ eval_ctx = EvalContext(self.environment, self.name)
+
+ from .runtime import exported, async_exported
+
+ if self.environment.is_async:
+ exported_names = sorted(exported + async_exported)
+ else:
+ exported_names = sorted(exported)
+
+ self.writeline("from __future__ import generator_stop") # Python < 3.7
+ self.writeline("from jinja2.runtime import " + ", ".join(exported_names))
+
+ # if we want a deferred initialization we cannot move the
+ # environment into a local name
+ envenv = "" if self.defer_init else ", environment=environment"
+
+ # do we have an extends tag at all? If not, we can save some
+ # overhead by just not processing any inheritance code.
+ have_extends = node.find(nodes.Extends) is not None
+
+ # find all blocks
+ for block in node.find_all(nodes.Block):
+ if block.name in self.blocks:
+ self.fail(f"block {block.name!r} defined twice", block.lineno)
+ self.blocks[block.name] = block
+
+ # find all imports and import them
+ for import_ in node.find_all(nodes.ImportedName):
+ if import_.importname not in self.import_aliases:
+ imp = import_.importname
+ self.import_aliases[imp] = alias = self.temporary_identifier()
+ if "." in imp:
+ module, obj = imp.rsplit(".", 1)
+ self.writeline(f"from {module} import {obj} as {alias}")
+ else:
+ self.writeline(f"import {imp} as {alias}")
+
+ # add the load name
+ self.writeline(f"name = {self.name!r}")
+
+ # generate the root render function.
+ self.writeline(
+ f"{self.func('root')}(context, missing=missing{envenv}):", extra=1
+ )
+ self.indent()
+ self.write_commons()
+
+ # process the root
+ frame = Frame(eval_ctx)
+ if "self" in find_undeclared(node.body, ("self",)):
+ ref = frame.symbols.declare_parameter("self")
+ self.writeline(f"{ref} = TemplateReference(context)")
+ frame.symbols.analyze_node(node)
+ frame.toplevel = frame.rootlevel = True
+ frame.require_output_check = have_extends and not self.has_known_extends
+ if have_extends:
+ self.writeline("parent_template = None")
+ self.enter_frame(frame)
+ self.pull_dependencies(node.body)
+ self.blockvisit(node.body, frame)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ # make sure that the parent root is called.
+ if have_extends:
+ if not self.has_known_extends:
+ self.indent()
+ self.writeline("if parent_template is not None:")
+ self.indent()
+ if not self.environment.is_async:
+ self.writeline("yield from parent_template.root_render_func(context)")
+ else:
+ self.writeline(
+ "async for event in parent_template.root_render_func(context):"
+ )
+ self.indent()
+ self.writeline("yield event")
+ self.outdent()
+ self.outdent(1 + (not self.has_known_extends))
+
+ # at this point we now have the blocks collected and can visit them too.
+ for name, block in self.blocks.items():
+ self.writeline(
+ f"{self.func('block_' + name)}(context, missing=missing{envenv}):",
+ block,
+ 1,
+ )
+ self.indent()
+ self.write_commons()
+ # It's important that we do not make this frame a child of the
+ # toplevel template. This would cause a variety of
+ # interesting issues with identifier tracking.
+ block_frame = Frame(eval_ctx)
+ block_frame.block_frame = True
+ undeclared = find_undeclared(block.body, ("self", "super"))
+ if "self" in undeclared:
+ ref = block_frame.symbols.declare_parameter("self")
+ self.writeline(f"{ref} = TemplateReference(context)")
+ if "super" in undeclared:
+ ref = block_frame.symbols.declare_parameter("super")
+ self.writeline(f"{ref} = context.super({name!r}, block_{name})")
+ block_frame.symbols.analyze_node(block)
+ block_frame.block = name
+ self.writeline("_block_vars = {}")
+ self.enter_frame(block_frame)
+ self.pull_dependencies(block.body)
+ self.blockvisit(block.body, block_frame)
+ self.leave_frame(block_frame, with_python_scope=True)
+ self.outdent()
+
+ blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks)
+ self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1)
+ debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info)
+ self.writeline(f"debug_info = {debug_kv_str!r}")
+
+ def visit_Block(self, node: nodes.Block, frame: Frame) -> None:
+ """Call a block and register it for the template."""
+ level = 0
+ if frame.toplevel:
+ # if we know that we are a child template, there is no need to
+ # check if we are one
+ if self.has_known_extends:
+ return
+ if self.extends_so_far > 0:
+ self.writeline("if parent_template is None:")
+ self.indent()
+ level += 1
+
+ if node.scoped:
+ context = self.derive_context(frame)
+ else:
+ context = self.get_context_ref()
+
+ if node.required:
+ self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node)
+ self.indent()
+ self.writeline(
+ f'raise TemplateRuntimeError("Required block {node.name!r} not found")',
+ node,
+ )
+ self.outdent()
+
+ if not self.environment.is_async and frame.buffer is None:
+ self.writeline(
+ f"yield from context.blocks[{node.name!r}][0]({context})", node
+ )
+ else:
+ self.writeline(
+ f"{self.choose_async()}for event in"
+ f" context.blocks[{node.name!r}][0]({context}):",
+ node,
+ )
+ self.indent()
+ self.simple_write("event", frame)
+ self.outdent()
+
+ self.outdent(level)
+
+ def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None:
+ """Calls the extender."""
+ if not frame.toplevel:
+ self.fail("cannot use extend from a non top-level scope", node.lineno)
+
+ # if the number of extends statements in general is zero so
+ # far, we don't have to add a check if something extended
+ # the template before this one.
+ if self.extends_so_far > 0:
+
+ # if we have a known extends we just add a template runtime
+ # error into the generated code. We could catch that at compile
+ # time too, but we prefer not to confuse users by throwing the
+ # same error at different times just "because we can".
+ if not self.has_known_extends:
+ self.writeline("if parent_template is not None:")
+ self.indent()
+ self.writeline('raise TemplateRuntimeError("extended multiple times")')
+
+ # if we have a known extends already we don't need that code here
+ # as we know that the template execution will end here.
+ if self.has_known_extends:
+ raise CompilerExit()
+ else:
+ self.outdent()
+
+ self.writeline("parent_template = environment.get_template(", node)
+ self.visit(node.template, frame)
+ self.write(f", {self.name!r})")
+ self.writeline("for name, parent_block in parent_template.blocks.items():")
+ self.indent()
+ self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
+ self.outdent()
+
+ # if this extends statement was in the root level we can take
+ # advantage of that information and simplify the generated code
+ # in the top level from this point onwards
+ if frame.rootlevel:
+ self.has_known_extends = True
+
+ # and now we have one more
+ self.extends_so_far += 1
+
+ def visit_Include(self, node: nodes.Include, frame: Frame) -> None:
+ """Handles includes."""
+ if node.ignore_missing:
+ self.writeline("try:")
+ self.indent()
+
+ func_name = "get_or_select_template"
+ if isinstance(node.template, nodes.Const):
+ if isinstance(node.template.value, str):
+ func_name = "get_template"
+ elif isinstance(node.template.value, (tuple, list)):
+ func_name = "select_template"
+ elif isinstance(node.template, (nodes.Tuple, nodes.List)):
+ func_name = "select_template"
+
+ self.writeline(f"template = environment.{func_name}(", node)
+ self.visit(node.template, frame)
+ self.write(f", {self.name!r})")
+ if node.ignore_missing:
+ self.outdent()
+ self.writeline("except TemplateNotFound:")
+ self.indent()
+ self.writeline("pass")
+ self.outdent()
+ self.writeline("else:")
+ self.indent()
+
+ skip_event_yield = False
+ if node.with_context:
+ self.writeline(
+ f"{self.choose_async()}for event in template.root_render_func("
+ "template.new_context(context.get_all(), True,"
+ f" {self.dump_local_context(frame)})):"
+ )
+ elif self.environment.is_async:
+ self.writeline(
+ "for event in (await template._get_default_module_async())"
+ "._body_stream:"
+ )
+ else:
+ self.writeline("yield from template._get_default_module()._body_stream")
+ skip_event_yield = True
+
+ if not skip_event_yield:
+ self.indent()
+ self.simple_write("event", frame)
+ self.outdent()
+
+ if node.ignore_missing:
+ self.outdent()
+
+ def _import_common(
+ self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame
+ ) -> None:
+ self.write(f"{self.choose_async('await ')}environment.get_template(")
+ self.visit(node.template, frame)
+ self.write(f", {self.name!r}).")
+
+ if node.with_context:
+ f_name = f"make_module{self.choose_async('_async')}"
+ self.write(
+ f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})"
+ )
+ else:
+ self.write(f"_get_default_module{self.choose_async('_async')}(context)")
+
+ def visit_Import(self, node: nodes.Import, frame: Frame) -> None:
+ """Visit regular imports."""
+ self.writeline(f"{frame.symbols.ref(node.target)} = ", node)
+ if frame.toplevel:
+ self.write(f"context.vars[{node.target!r}] = ")
+
+ self._import_common(node, frame)
+
+ if frame.toplevel and not node.target.startswith("_"):
+ self.writeline(f"context.exported_vars.discard({node.target!r})")
+
+ def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None:
+ """Visit named imports."""
+ self.newline(node)
+ self.write("included_template = ")
+ self._import_common(node, frame)
+ var_names = []
+ discarded_names = []
+ for name in node.names:
+ if isinstance(name, tuple):
+ name, alias = name
+ else:
+ alias = name
+ self.writeline(
+ f"{frame.symbols.ref(alias)} ="
+ f" getattr(included_template, {name!r}, missing)"
+ )
+ self.writeline(f"if {frame.symbols.ref(alias)} is missing:")
+ self.indent()
+ message = (
+ "the template {included_template.__name__!r}"
+ f" (imported on {self.position(node)})"
+ f" does not export the requested name {name!r}"
+ )
+ self.writeline(
+ f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})"
+ )
+ self.outdent()
+ if frame.toplevel:
+ var_names.append(alias)
+ if not alias.startswith("_"):
+ discarded_names.append(alias)
+
+ if var_names:
+ if len(var_names) == 1:
+ name = var_names[0]
+ self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}")
+ else:
+ names_kv = ", ".join(
+ f"{name!r}: {frame.symbols.ref(name)}" for name in var_names
+ )
+ self.writeline(f"context.vars.update({{{names_kv}}})")
+ if discarded_names:
+ if len(discarded_names) == 1:
+ self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})")
+ else:
+ names_str = ", ".join(map(repr, discarded_names))
+ self.writeline(
+ f"context.exported_vars.difference_update(({names_str}))"
+ )
+
+ def visit_For(self, node: nodes.For, frame: Frame) -> None:
+ loop_frame = frame.inner()
+ loop_frame.loop_frame = True
+ test_frame = frame.inner()
+ else_frame = frame.inner()
+
+ # try to figure out if we have an extended loop. An extended loop
+ # is necessary if the loop is in recursive mode, if the special loop
+ # variable is accessed in the body, or if the body is a scoped block.
+ extended_loop = (
+ node.recursive
+ or "loop"
+ in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",))
+ or any(block.scoped for block in node.find_all(nodes.Block))
+ )
+
+ loop_ref = None
+ if extended_loop:
+ loop_ref = loop_frame.symbols.declare_parameter("loop")
+
+ loop_frame.symbols.analyze_node(node, for_branch="body")
+ if node.else_:
+ else_frame.symbols.analyze_node(node, for_branch="else")
+
+ if node.test:
+ loop_filter_func = self.temporary_identifier()
+ test_frame.symbols.analyze_node(node, for_branch="test")
+ self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test)
+ self.indent()
+ self.enter_frame(test_frame)
+ self.writeline(self.choose_async("async for ", "for "))
+ self.visit(node.target, loop_frame)
+ self.write(" in ")
+ self.write(self.choose_async("auto_aiter(fiter)", "fiter"))
+ self.write(":")
+ self.indent()
+ self.writeline("if ", node.test)
+ self.visit(node.test, test_frame)
+ self.write(":")
+ self.indent()
+ self.writeline("yield ")
+ self.visit(node.target, loop_frame)
+ self.outdent(3)
+ self.leave_frame(test_frame, with_python_scope=True)
+
+ # if we don't have a recursive loop we have to find the shadowed
+ # variables at that point.  Because loops can be nested, but the
+ # loop variable is a special one, we have to enforce aliasing for it.
+ if node.recursive:
+ self.writeline(
+ f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node
+ )
+ self.indent()
+ self.buffer(loop_frame)
+
+ # Use the same buffer for the else frame
+ else_frame.buffer = loop_frame.buffer
+
+ # make sure the loop variable is a special one and raise a template
+ # assertion error if a loop tries to write to loop
+ if extended_loop:
+ self.writeline(f"{loop_ref} = missing")
+
+ for name in node.find_all(nodes.Name):
+ if name.ctx == "store" and name.name == "loop":
+ self.fail(
+ "Can't assign to special loop variable in for-loop target",
+ name.lineno,
+ )
+
+ if node.else_:
+ iteration_indicator = self.temporary_identifier()
+ self.writeline(f"{iteration_indicator} = 1")
+
+ self.writeline(self.choose_async("async for ", "for "), node)
+ self.visit(node.target, loop_frame)
+ if extended_loop:
+ self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(")
+ else:
+ self.write(" in ")
+
+ if node.test:
+ self.write(f"{loop_filter_func}(")
+ if node.recursive:
+ self.write("reciter")
+ else:
+ if self.environment.is_async and not extended_loop:
+ self.write("auto_aiter(")
+ self.visit(node.iter, frame)
+ if self.environment.is_async and not extended_loop:
+ self.write(")")
+ if node.test:
+ self.write(")")
+
+ if node.recursive:
+ self.write(", undefined, loop_render_func, depth):")
+ else:
+ self.write(", undefined):" if extended_loop else ":")
+
+ self.indent()
+ self.enter_frame(loop_frame)
+
+ self.writeline("_loop_vars = {}")
+ self.blockvisit(node.body, loop_frame)
+ if node.else_:
+ self.writeline(f"{iteration_indicator} = 0")
+ self.outdent()
+ self.leave_frame(
+ loop_frame, with_python_scope=node.recursive and not node.else_
+ )
+
+ if node.else_:
+ self.writeline(f"if {iteration_indicator}:")
+ self.indent()
+ self.enter_frame(else_frame)
+ self.blockvisit(node.else_, else_frame)
+ self.leave_frame(else_frame)
+ self.outdent()
+
+ # if the node was recursive we have to return the buffer contents
+ # and start the iteration code
+ if node.recursive:
+ self.return_buffer_contents(loop_frame)
+ self.outdent()
+ self.start_write(frame, node)
+ self.write(f"{self.choose_async('await ')}loop(")
+ if self.environment.is_async:
+ self.write("auto_aiter(")
+ self.visit(node.iter, frame)
+ if self.environment.is_async:
+ self.write(")")
+ self.write(", loop)")
+ self.end_write(frame)
+
+ # at the end of the iteration, clear any assignments made in the
+ # loop from the top level
+ if self._assign_stack:
+ self._assign_stack[-1].difference_update(loop_frame.symbols.stores)
+
+ def visit_If(self, node: nodes.If, frame: Frame) -> None:
+ if_frame = frame.soft()
+ self.writeline("if ", node)
+ self.visit(node.test, if_frame)
+ self.write(":")
+ self.indent()
+ self.blockvisit(node.body, if_frame)
+ self.outdent()
+ for elif_ in node.elif_:
+ self.writeline("elif ", elif_)
+ self.visit(elif_.test, if_frame)
+ self.write(":")
+ self.indent()
+ self.blockvisit(elif_.body, if_frame)
+ self.outdent()
+ if node.else_:
+ self.writeline("else:")
+ self.indent()
+ self.blockvisit(node.else_, if_frame)
+ self.outdent()
+
+ def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None:
+ macro_frame, macro_ref = self.macro_body(node, frame)
+ self.newline()
+ if frame.toplevel:
+ if not node.name.startswith("_"):
+ self.write(f"context.exported_vars.add({node.name!r})")
+ self.writeline(f"context.vars[{node.name!r}] = ")
+ self.write(f"{frame.symbols.ref(node.name)} = ")
+ self.macro_def(macro_ref, macro_frame)
+
+ def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None:
+ call_frame, macro_ref = self.macro_body(node, frame)
+ self.writeline("caller = ")
+ self.macro_def(macro_ref, call_frame)
+ self.start_write(frame, node)
+ self.visit_Call(node.call, frame, forward_caller=True)
+ self.end_write(frame)
+
+ def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None:
+ filter_frame = frame.inner()
+ filter_frame.symbols.analyze_node(node)
+ self.enter_frame(filter_frame)
+ self.buffer(filter_frame)
+ self.blockvisit(node.body, filter_frame)
+ self.start_write(frame, node)
+ self.visit_Filter(node.filter, filter_frame)
+ self.end_write(frame)
+ self.leave_frame(filter_frame)
+
+ def visit_With(self, node: nodes.With, frame: Frame) -> None:
+ with_frame = frame.inner()
+ with_frame.symbols.analyze_node(node)
+ self.enter_frame(with_frame)
+ for target, expr in zip(node.targets, node.values):
+ self.newline()
+ self.visit(target, with_frame)
+ self.write(" = ")
+ self.visit(expr, frame)
+ self.blockvisit(node.body, with_frame)
+ self.leave_frame(with_frame)
+
+ def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None:
+ self.newline(node)
+ self.visit(node.node, frame)
+
+ class _FinalizeInfo(t.NamedTuple):
+ const: t.Optional[t.Callable[..., str]]
+ src: t.Optional[str]
+
+ @staticmethod
+ def _default_finalize(value: t.Any) -> t.Any:
+ """The default finalize function if the environment isn't
+ configured with one. Or, if the environment has one, this is
+ called on that function's output for constants.
+ """
+ return str(value)
+
+ _finalize: t.Optional[_FinalizeInfo] = None
+
+ def _make_finalize(self) -> _FinalizeInfo:
+ """Build the finalize function to be used on constants and at
+ runtime. Cached so it's only created once for all output nodes.
+
+ Returns a ``namedtuple`` with the following attributes:
+
+ ``const``
+ A function to finalize constant data at compile time.
+
+ ``src``
+ Source code to output around nodes to be evaluated at
+ runtime.
+ """
+ if self._finalize is not None:
+ return self._finalize
+
+ finalize: t.Optional[t.Callable[..., t.Any]]
+ finalize = default = self._default_finalize
+ src = None
+
+ if self.environment.finalize:
+ src = "environment.finalize("
+ env_finalize = self.environment.finalize
+ pass_arg = {
+ _PassArg.context: "context",
+ _PassArg.eval_context: "context.eval_ctx",
+ _PassArg.environment: "environment",
+ }.get(
+ _PassArg.from_obj(env_finalize) # type: ignore
+ )
+ finalize = None
+
+ if pass_arg is None:
+
+ def finalize(value: t.Any) -> t.Any:
+ return default(env_finalize(value))
+
+ else:
+ src = f"{src}{pass_arg}, "
+
+ if pass_arg == "environment":
+
+ def finalize(value: t.Any) -> t.Any:
+ return default(env_finalize(self.environment, value))
+
+ self._finalize = self._FinalizeInfo(finalize, src)
+ return self._finalize
+
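+ # Usage sketch for the finalize hook handled above (illustrative): a
+ # finalize callable set on the environment is applied to every output
+ # expression, at compile time for constants when possible, otherwise at
+ # runtime via the "environment.finalize(" source emitted here. For example:
+ #
+ #   env = Environment(finalize=lambda v: "" if v is None else v)
+ #   env.from_string("{{ none }}").render()  # -> ""
+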
+ def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
+ """Given a group of constant values converted from ``Output``
+ child nodes, produce a string to write to the template module
+ source.
+ """
+ return repr(concat(group))
+
+ def _output_child_to_const(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> str:
+ """Try to optimize a child of an ``Output`` node by trying to
+ convert it to constant, finalized data at compile time.
+
+ If :exc:`Impossible` is raised, the node is not constant and
+ will be evaluated at runtime. Any other exception will also be
+ evaluated at runtime for easier debugging.
+ """
+ const = node.as_const(frame.eval_ctx)
+
+ if frame.eval_ctx.autoescape:
+ const = escape(const)
+
+ # Template data doesn't go through finalize.
+ if isinstance(node, nodes.TemplateData):
+ return str(const)
+
+ return finalize.const(const) # type: ignore
+
+ def _output_child_pre(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> None:
+ """Output extra source code before visiting a child of an
+ ``Output`` node.
+ """
+ if frame.eval_ctx.volatile:
+ self.write("(escape if context.eval_ctx.autoescape else str)(")
+ elif frame.eval_ctx.autoescape:
+ self.write("escape(")
+ else:
+ self.write("str(")
+
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(
+ self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
+ ) -> None:
+ """Output extra source code after visiting a child of an
+ ``Output`` node.
+ """
+ self.write(")")
+
+ if finalize.src is not None:
+ self.write(")")
+
+ def visit_Output(self, node: nodes.Output, frame: Frame) -> None:
+ # If an extends is active, don't render outside a block.
+ if frame.require_output_check:
+ # A top-level extends is known to exist at compile time.
+ if self.has_known_extends:
+ return
+
+ self.writeline("if parent_template is None:")
+ self.indent()
+
+ finalize = self._make_finalize()
+ body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = []
+
+ # Evaluate constants at compile time if possible. Each item in
+ # body will be either a list of static data or a node to be
+ # evaluated at runtime.
+ for child in node.nodes:
+ try:
+ if not (
+ # If the finalize function requires runtime context,
+ # constants can't be evaluated at compile time.
+ finalize.const
+ # Unless it's basic template data that won't be
+ # finalized anyway.
+ or isinstance(child, nodes.TemplateData)
+ ):
+ raise nodes.Impossible()
+
+ const = self._output_child_to_const(child, frame, finalize)
+ except (nodes.Impossible, Exception):
+ # The node was not constant and needs to be evaluated at
+ # runtime. Or another error was raised, which is easier
+ # to debug at runtime.
+ body.append(child)
+ continue
+
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
+
+ if frame.buffer is not None:
+ if len(body) == 1:
+ self.writeline(f"{frame.buffer}.append(")
+ else:
+ self.writeline(f"{frame.buffer}.extend((")
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ # A group of constant data to join and output.
+ val = self._output_const_repr(item)
+
+ if frame.buffer is None:
+ self.writeline("yield " + val)
+ else:
+ self.writeline(val + ",")
+ else:
+ if frame.buffer is None:
+ self.writeline("yield ", item)
+ else:
+ self.newline(item)
+
+ # A node to be evaluated at runtime.
+ self._output_child_pre(item, frame, finalize)
+ self.visit(item, frame)
+ self._output_child_post(item, frame, finalize)
+
+ if frame.buffer is not None:
+ self.write(",")
+
+ if frame.buffer is not None:
+ self.outdent()
+ self.writeline(")" if len(body) == 1 else "))")
+
+ if frame.require_output_check:
+ self.outdent()
+
+ def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None:
+ self.push_assign_tracking()
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(" = ")
+ self.visit(node.node, frame)
+ self.pop_assign_tracking(frame)
+
+ def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None:
+ self.push_assign_tracking()
+ block_frame = frame.inner()
+ # This is a special case. Since a set block always captures, we
+ # disable output checks. This way one can use set blocks at top
+ # level even in extended templates.
+ block_frame.require_output_check = False
+ block_frame.symbols.analyze_node(node)
+ self.enter_frame(block_frame)
+ self.buffer(block_frame)
+ self.blockvisit(node.body, block_frame)
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
+ if node.filter is not None:
+ self.visit_Filter(node.filter, block_frame)
+ else:
+ self.write(f"concat({block_frame.buffer})")
+ self.write(")")
+ self.pop_assign_tracking(frame)
+ self.leave_frame(block_frame)
+
+ # -- Expression Visitors
+
+ def visit_Name(self, node: nodes.Name, frame: Frame) -> None:
+ if node.ctx == "store" and (
+ frame.toplevel or frame.loop_frame or frame.block_frame
+ ):
+ if self._assign_stack:
+ self._assign_stack[-1].add(node.name)
+ ref = frame.symbols.ref(node.name)
+
+ # If we are looking up a variable we might have to deal with the
+ # case where it's undefined. We can skip that case if the load
+ # instruction indicates a parameter, which is always defined.
+ if node.ctx == "load":
+ load = frame.symbols.find_load(ref)
+ if not (
+ load is not None
+ and load[0] == VAR_LOAD_PARAMETER
+ and not self.parameter_is_undeclared(ref)
+ ):
+ self.write(
+ f"(undefined(name={node.name!r}) if {ref} is missing else {ref})"
+ )
+ return
+
+ self.write(ref)
+
+ def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None:
+ # NSRefs can only be used to store values; since they use the normal
+ # `foo.bar` notation they will be parsed as a normal attribute access
+ # when used anywhere but in a `set` context.
+ ref = frame.symbols.ref(node.name)
+ self.writeline(f"if not isinstance({ref}, Namespace):")
+ self.indent()
+ self.writeline(
+ "raise TemplateRuntimeError"
+ '("cannot assign attribute on non-namespace object")'
+ )
+ self.outdent()
+ self.writeline(f"{ref}[{node.attr!r}]")
+
+ def visit_Const(self, node: nodes.Const, frame: Frame) -> None:
+ val = node.as_const(frame.eval_ctx)
+ if isinstance(val, float):
+ self.write(str(val))
+ else:
+ self.write(repr(val))
+
+ def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None:
+ try:
+ self.write(repr(node.as_const(frame.eval_ctx)))
+ except nodes.Impossible:
+ self.write(
+ f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})"
+ )
+
+ def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None:
+ self.write("(")
+ idx = -1
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item, frame)
+ self.write(",)" if idx == 0 else ")")
+
+ def visit_List(self, node: nodes.List, frame: Frame) -> None:
+ self.write("[")
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item, frame)
+ self.write("]")
+
+ def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None:
+ self.write("{")
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item.key, frame)
+ self.write(": ")
+ self.visit(item.value, frame)
+ self.write("}")
+
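+ # Illustrative mapping for the literal visitors above (simplified; the
+ # items inside the literals are visited recursively, not written verbatim):
+ #
+ #   {{ (1,) }}      ->  emits "(1,)"   (note the one-item trailing comma)
+ #   {{ [1, 2] }}    ->  emits "[1, 2]"
+ #   {{ {'a': 1} }}  ->  emits "{'a': 1}"
+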
+ visit_Add = _make_binop("+")
+ visit_Sub = _make_binop("-")
+ visit_Mul = _make_binop("*")
+ visit_Div = _make_binop("/")
+ visit_FloorDiv = _make_binop("//")
+ visit_Pow = _make_binop("**")
+ visit_Mod = _make_binop("%")
+ visit_And = _make_binop("and")
+ visit_Or = _make_binop("or")
+ visit_Pos = _make_unop("+")
+ visit_Neg = _make_unop("-")
+ visit_Not = _make_unop("not ")
+
+ @optimizeconst
+ def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None:
+ if frame.eval_ctx.volatile:
+ func_name = "(markup_join if context.eval_ctx.volatile else str_join)"
+ elif frame.eval_ctx.autoescape:
+ func_name = "markup_join"
+ else:
+ func_name = "str_join"
+ self.write(f"{func_name}((")
+ for arg in node.nodes:
+ self.visit(arg, frame)
+ self.write(", ")
+ self.write("))")
+
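+ # Concat corresponds to the template ``~`` operator. A sketch of the
+ # emitted call, assuming no autoescaping so str_join is chosen:
+ #
+ #   {{ "a" ~ 1 ~ user }}  ->  str_join(("a", 1, <user expr>, ))
+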
+ @optimizeconst
+ def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None:
+ self.write("(")
+ self.visit(node.expr, frame)
+ for op in node.ops:
+ self.visit(op, frame)
+ self.write(")")
+
+ def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None:
+ self.write(f" {operators[node.op]} ")
+ self.visit(node.expr, frame)
+
+ @optimizeconst
+ def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None:
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getattr(")
+ self.visit(node.node, frame)
+ self.write(f", {node.attr!r})")
+
+ if self.environment.is_async:
+ self.write("))")
+
+ @optimizeconst
+ def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None:
+ # Slices bypass the environment getitem method.
+ if isinstance(node.arg, nodes.Slice):
+ self.visit(node.node, frame)
+ self.write("[")
+ self.visit(node.arg, frame)
+ self.write("]")
+ else:
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getitem(")
+ self.visit(node.node, frame)
+ self.write(", ")
+ self.visit(node.arg, frame)
+ self.write(")")
+
+ if self.environment.is_async:
+ self.write("))")
+
+ def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None:
+ if node.start is not None:
+ self.visit(node.start, frame)
+ self.write(":")
+ if node.stop is not None:
+ self.visit(node.stop, frame)
+ if node.step is not None:
+ self.write(":")
+ self.visit(node.step, frame)
+
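+ # Subscript sketch: plain lookups go through environment.getitem so the
+ # item-vs-attribute fallback applies, while slices index directly
+ # (simplified; async wrapping omitted):
+ #
+ #   {{ items[1] }}    ->  environment.getitem(items, 1)
+ #   {{ items[1:3] }}  ->  items[1:3]
+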
+ @contextmanager
+ def _filter_test_common(
+ self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool
+ ) -> t.Iterator[None]:
+ if self.environment.is_async:
+ self.write("await auto_await(")
+
+ if is_filter:
+ self.write(f"{self.filters[node.name]}(")
+ func = self.environment.filters.get(node.name)
+ else:
+ self.write(f"{self.tests[node.name]}(")
+ func = self.environment.tests.get(node.name)
+
+ # When inside an If or CondExpr frame, allow the filter to be
+ # undefined at compile time and only raise an error if it's
+ # actually called at runtime. See pull_dependencies.
+ if func is None and not frame.soft_frame:
+ type_name = "filter" if is_filter else "test"
+ self.fail(f"No {type_name} named {node.name!r}.", node.lineno)
+
+ pass_arg = {
+ _PassArg.context: "context",
+ _PassArg.eval_context: "context.eval_ctx",
+ _PassArg.environment: "environment",
+ }.get(
+ _PassArg.from_obj(func) # type: ignore
+ )
+
+ if pass_arg is not None:
+ self.write(f"{pass_arg}, ")
+
+ # Back to the visitor function to handle visiting the target of
+ # the filter or test.
+ yield
+
+ self.signature(node, frame)
+ self.write(")")
+
+ if self.environment.is_async:
+ self.write(")")
+
+ @optimizeconst
+ def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None:
+ with self._filter_test_common(node, frame, True):
+ # If the filter node is None, we are inside a filter block
+ # and want to write to the current buffer.
+ if node.node is not None:
+ self.visit(node.node, frame)
+ elif frame.eval_ctx.volatile:
+ self.write(
+ f"(Markup(concat({frame.buffer}))"
+ f" if context.eval_ctx.autoescape else concat({frame.buffer}))"
+ )
+ elif frame.eval_ctx.autoescape:
+ self.write(f"Markup(concat({frame.buffer}))")
+ else:
+ self.write(f"concat({frame.buffer})")
+
+ @optimizeconst
+ def visit_Test(self, node: nodes.Test, frame: Frame) -> None:
+ with self._filter_test_common(node, frame, False):
+ self.visit(node.node, frame)
+
+ @optimizeconst
+ def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None:
+ frame = frame.soft()
+
+ def write_expr2() -> None:
+ if node.expr2 is not None:
+ self.visit(node.expr2, frame)
+ return
+
+ self.write(
+ f'cond_expr_undefined("the inline if-expression on'
+ f" {self.position(node)} evaluated to false and no else"
+ f' section was defined.")'
+ )
+
+ self.write("(")
+ self.visit(node.expr1, frame)
+ self.write(" if ")
+ self.visit(node.test, frame)
+ self.write(" else ")
+ write_expr2()
+ self.write(")")
+
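+ # Inline if-expression sketch: with no else branch, the false path
+ # evaluates to an undefined value that raises when used (simplified):
+ #
+ #   {{ a if cond else b }}  ->  (a if cond else b)
+ #   {{ a if cond }}         ->  (a if cond else cond_expr_undefined("..."))
+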
+ @optimizeconst
+ def visit_Call(
+ self, node: nodes.Call, frame: Frame, forward_caller: bool = False
+ ) -> None:
+ if self.environment.is_async:
+ self.write("await auto_await(")
+ if self.environment.sandboxed:
+ self.write("environment.call(context, ")
+ else:
+ self.write("context.call(")
+ self.visit(node.node, frame)
+ extra_kwargs = {"caller": "caller"} if forward_caller else None
+ loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {}
+ block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {}
+ if extra_kwargs:
+ extra_kwargs.update(loop_kwargs, **block_kwargs)
+ elif loop_kwargs or block_kwargs:
+ extra_kwargs = dict(loop_kwargs, **block_kwargs)
+ self.signature(node, frame, extra_kwargs)
+ self.write(")")
+ if self.environment.is_async:
+ self.write(")")
+
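+ # Call sketch: calls are routed through the context (or, when sandboxed,
+ # through environment.call) so argument handling and security checks stay
+ # in one place (simplified; async and loop/block kwargs omitted):
+ #
+ #   {{ fn(1, x=2) }}  ->  context.call(fn, 1, x=2)
+ #   sandboxed:        ->  environment.call(context, fn, 1, x=2)
+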
+ def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None:
+ self.write(node.key + "=")
+ self.visit(node.value, frame)
+
+ # -- Unused nodes for extensions
+
+ def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None:
+ self.write("Markup(")
+ self.visit(node.expr, frame)
+ self.write(")")
+
+ def visit_MarkSafeIfAutoescape(
+ self, node: nodes.MarkSafeIfAutoescape, frame: Frame
+ ) -> None:
+ self.write("(Markup if context.eval_ctx.autoescape else identity)(")
+ self.visit(node.expr, frame)
+ self.write(")")
+
+ def visit_EnvironmentAttribute(
+ self, node: nodes.EnvironmentAttribute, frame: Frame
+ ) -> None:
+ self.write("environment." + node.name)
+
+ def visit_ExtensionAttribute(
+ self, node: nodes.ExtensionAttribute, frame: Frame
+ ) -> None:
+ self.write(f"environment.extensions[{node.identifier!r}].{node.name}")
+
+ def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None:
+ self.write(self.import_aliases[node.importname])
+
+ def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None:
+ self.write(node.name)
+
+ def visit_ContextReference(
+ self, node: nodes.ContextReference, frame: Frame
+ ) -> None:
+ self.write("context")
+
+ def visit_DerivedContextReference(
+ self, node: nodes.DerivedContextReference, frame: Frame
+ ) -> None:
+ self.write(self.derive_context(frame))
+
+ def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None:
+ self.writeline("continue", node)
+
+ def visit_Break(self, node: nodes.Break, frame: Frame) -> None:
+ self.writeline("break", node)
+
+ def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None:
+ scope_frame = frame.inner()
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+
+ def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None:
+ ctx = self.temporary_identifier()
+ self.writeline(f"{ctx} = {self.derive_context(frame)}")
+ self.writeline(f"{ctx}.vars = ")
+ self.visit(node.context, frame)
+ self.push_context_reference(ctx)
+
+ scope_frame = frame.inner(isolated=True)
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+ self.pop_context_reference()
+
+ def visit_EvalContextModifier(
+ self, node: nodes.EvalContextModifier, frame: Frame
+ ) -> None:
+ for keyword in node.options:
+ self.writeline(f"context.eval_ctx.{keyword.key} = ")
+ self.visit(keyword.value, frame)
+ try:
+ val = keyword.value.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ frame.eval_ctx.volatile = True
+ else:
+ setattr(frame.eval_ctx, keyword.key, val)
+
+ def visit_ScopedEvalContextModifier(
+ self, node: nodes.ScopedEvalContextModifier, frame: Frame
+ ) -> None:
+ old_ctx_name = self.temporary_identifier()
+ saved_ctx = frame.eval_ctx.save()
+ self.writeline(f"{old_ctx_name} = context.eval_ctx.save()")
+ self.visit_EvalContextModifier(node, frame)
+ for child in node.body:
+ self.visit(child, frame)
+ frame.eval_ctx.revert(saved_ctx)
+ self.writeline(f"context.eval_ctx.revert({old_ctx_name})")
diff --git a/lib/spack/external/_vendoring/jinja2/constants.py b/lib/spack/external/_vendoring/jinja2/constants.py
new file mode 100644
index 0000000000..41a1c23b0a
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/constants.py
@@ -0,0 +1,20 @@
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = """\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate"""
diff --git a/lib/spack/external/_vendoring/jinja2/debug.py b/lib/spack/external/_vendoring/jinja2/debug.py
new file mode 100644
index 0000000000..805866bd6f
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/debug.py
@@ -0,0 +1,259 @@
+import platform
+import sys
+import typing as t
+from types import CodeType
+from types import TracebackType
+
+from .exceptions import TemplateSyntaxError
+from .utils import internal_code
+from .utils import missing
+
+if t.TYPE_CHECKING:
+ from .runtime import Context
+
+
+def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
+ """Rewrite the current exception to replace any tracebacks from
+ within compiled template code with tracebacks that look like they
+ came from the template source.
+
+ This must be called within an ``except`` block.
+
+ :param source: For ``TemplateSyntaxError``, the original source if
+ known.
+ :return: The original exception with the rewritten traceback.
+ """
+ _, exc_value, tb = sys.exc_info()
+ exc_value = t.cast(BaseException, exc_value)
+ tb = t.cast(TracebackType, tb)
+
+ if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
+ exc_value.translated = True
+ exc_value.source = source
+ # Remove the old traceback, otherwise the frames from the
+ # compiler still show up.
+ exc_value.with_traceback(None)
+ # Outside of runtime, so the frame isn't executing template
+ # code, but it still needs to point at the template.
+ tb = fake_traceback(
+ exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
+ )
+ else:
+ # Skip the frame for the render function.
+ tb = tb.tb_next
+
+ stack = []
+
+ # Build the stack of traceback objects, replacing any in template
+ # code with the source file and line information.
+ while tb is not None:
+ # Skip frames decorated with @internalcode. These are internal
+ # calls that aren't useful in template debugging output.
+ if tb.tb_frame.f_code in internal_code:
+ tb = tb.tb_next
+ continue
+
+ template = tb.tb_frame.f_globals.get("__jinja_template__")
+
+ if template is not None:
+ lineno = template.get_corresponding_lineno(tb.tb_lineno)
+ fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+ stack.append(fake_tb)
+ else:
+ stack.append(tb)
+
+ tb = tb.tb_next
+
+ tb_next = None
+
+ # Assign tb_next in reverse to avoid circular references.
+ for tb in reversed(stack):
+ tb_next = tb_set_next(tb, tb_next)
+
+ return exc_value.with_traceback(tb_next)
+
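+# Minimal usage sketch for rewrite_traceback_stack (this mirrors how
+# Environment.handle_exception calls it; it must run inside an except block):
+#
+#   try:
+#       template.render()
+#   except Exception:
+#       raise rewrite_traceback_stack()
+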
+
+def fake_traceback( # type: ignore
+ exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
+) -> TracebackType:
+ """Produce a new traceback object that looks like it came from the
+ template source instead of the compiled code. The filename, line
+ number, and location name will point to the template, and the local
+ variables will be the current template context.
+
+ :param exc_value: The original exception to be re-raised to create
+ the new traceback.
+ :param tb: The original traceback to get the local variables and
+ code info from.
+ :param filename: The template filename.
+ :param lineno: The line number in the template source.
+ """
+ if tb is not None:
+ # Replace the real locals with the context that would be
+ # available at that point in the template.
+ locals = get_template_locals(tb.tb_frame.f_locals)
+ locals.pop("__jinja_exception__", None)
+ else:
+ locals = {}
+
+ globals = {
+ "__name__": filename,
+ "__file__": filename,
+ "__jinja_exception__": exc_value,
+ }
+ # Raise an exception at the correct line number.
+ code: CodeType = compile(
+ "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec"
+ )
+
+ # Build a new code object that points to the template file and
+ # replaces the location with a block name.
+ location = "template"
+
+ if tb is not None:
+ function = tb.tb_frame.f_code.co_name
+
+ if function == "root":
+ location = "top-level template code"
+ elif function.startswith("block_"):
+ location = f"block {function[6:]!r}"
+
+ if sys.version_info >= (3, 8):
+ code = code.replace(co_name=location)
+ else:
+ code = CodeType(
+ code.co_argcount,
+ code.co_kwonlyargcount,
+ code.co_nlocals,
+ code.co_stacksize,
+ code.co_flags,
+ code.co_code,
+ code.co_consts,
+ code.co_names,
+ code.co_varnames,
+ code.co_filename,
+ location,
+ code.co_firstlineno,
+ code.co_lnotab,
+ code.co_freevars,
+ code.co_cellvars,
+ )
+
+ # Execute the new code, which is guaranteed to raise, and return
+ # the new traceback without this frame.
+ try:
+ exec(code, globals, locals)
+ except BaseException:
+ return sys.exc_info()[2].tb_next # type: ignore
+
+
+def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]:
+ """Based on the runtime locals, get the context that would be
+ available at that point in the template.
+ """
+ # Start with the current template context.
+ ctx: "t.Optional[Context]" = real_locals.get("context")
+
+ if ctx is not None:
+ data: t.Dict[str, t.Any] = ctx.get_all().copy()
+ else:
+ data = {}
+
+ # Might be in a derived context that only sets local variables
+ # rather than pushing a context. Local variables follow the scheme
+ # l_depth_name. Find the highest-depth local that has a value for
+ # each name.
+ local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {}
+
+ for name, value in real_locals.items():
+ if not name.startswith("l_") or value is missing:
+ # Not a template variable, or no longer relevant.
+ continue
+
+ try:
+ _, depth_str, name = name.split("_", 2)
+ depth = int(depth_str)
+ except ValueError:
+ continue
+
+ cur_depth = local_overrides.get(name, (-1,))[0]
+
+ if cur_depth < depth:
+ local_overrides[name] = (depth, value)
+
+ # Modify the context with any derived context.
+ for name, (_, value) in local_overrides.items():
+ if value is missing:
+ data.pop(name, None)
+ else:
+ data[name] = value
+
+ return data
+
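+# Behavior sketch for get_template_locals: locals follow the
+# l_<depth>_<name> scheme and the highest depth wins, with ``missing``
+# dropping a name (assuming no "context" key in the mapping):
+#
+#   get_template_locals({"l_0_foo": 1, "l_1_foo": 2, "l_0_bar": missing})
+#   # -> {"foo": 2}
+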
+
+if sys.version_info >= (3, 7):
+ # tb_next is directly assignable as of Python 3.7
+ def tb_set_next(
+ tb: TracebackType, tb_next: t.Optional[TracebackType]
+ ) -> TracebackType:
+ tb.tb_next = tb_next
+ return tb
+
+
+elif platform.python_implementation() == "PyPy":
+ # PyPy might have special support, but won't work with ctypes.
+ try:
+ import tputil # type: ignore
+ except ImportError:
+ # Without tproxy support, use the original traceback.
+ def tb_set_next(
+ tb: TracebackType, tb_next: t.Optional[TracebackType]
+ ) -> TracebackType:
+ return tb
+
+ else:
+ # With tproxy support, create a proxy around the traceback that
+ # returns the new tb_next.
+ def tb_set_next(
+ tb: TracebackType, tb_next: t.Optional[TracebackType]
+ ) -> TracebackType:
+ def controller(op): # type: ignore
+ if op.opname == "__getattribute__" and op.args[0] == "tb_next":
+ return tb_next
+
+ return op.delegate()
+
+ return tputil.make_proxy(controller, obj=tb) # type: ignore
+
+
+else:
+ # Use ctypes to assign tb_next at the C level since it's read-only
+ # from Python.
+ import ctypes
+
+ class _CTraceback(ctypes.Structure):
+ _fields_ = [
+ # Extra PyObject slots when compiled with Py_TRACE_REFS.
+ ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
+ # Only care about tb_next as an object, not a traceback.
+ ("tb_next", ctypes.py_object),
+ ]
+
+ def tb_set_next(
+ tb: TracebackType, tb_next: t.Optional[TracebackType]
+ ) -> TracebackType:
+ c_tb = _CTraceback.from_address(id(tb))
+
+ # Clear out the old tb_next.
+ if tb.tb_next is not None:
+ c_tb_next = ctypes.py_object(tb.tb_next)
+ c_tb.tb_next = ctypes.py_object()
+ ctypes.pythonapi.Py_DecRef(c_tb_next)
+
+ # Assign the new tb_next.
+ if tb_next is not None:
+ c_tb_next = ctypes.py_object(tb_next)
+ ctypes.pythonapi.Py_IncRef(c_tb_next)
+ c_tb.tb_next = c_tb_next
+
+ return tb
diff --git a/lib/spack/external/_vendoring/jinja2/defaults.py b/lib/spack/external/_vendoring/jinja2/defaults.py
new file mode 100644
index 0000000000..638cad3d2d
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/defaults.py
@@ -0,0 +1,48 @@
+import typing as t
+
+from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
+from .tests import TESTS as DEFAULT_TESTS # noqa: F401
+from .utils import Cycler
+from .utils import generate_lorem_ipsum
+from .utils import Joiner
+from .utils import Namespace
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = "{%"
+BLOCK_END_STRING = "%}"
+VARIABLE_START_STRING = "{{"
+VARIABLE_END_STRING = "}}"
+COMMENT_START_STRING = "{#"
+COMMENT_END_STRING = "#}"
+LINE_STATEMENT_PREFIX: t.Optional[str] = None
+LINE_COMMENT_PREFIX: t.Optional[str] = None
+TRIM_BLOCKS = False
+LSTRIP_BLOCKS = False
+NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n"
+KEEP_TRAILING_NEWLINE = False
+
+# default filters, tests and namespace
+
+DEFAULT_NAMESPACE = {
+ "range": range,
+ "dict": dict,
+ "lipsum": generate_lorem_ipsum,
+ "cycler": Cycler,
+ "joiner": Joiner,
+ "namespace": Namespace,
+}
+
+# default policies
+DEFAULT_POLICIES: t.Dict[str, t.Any] = {
+ "compiler.ascii_str": True,
+ "urlize.rel": "noopener",
+ "urlize.target": None,
+ "urlize.extra_schemes": None,
+ "truncate.leeway": 5,
+ "json.dumps_function": None,
+ "json.dumps_kwargs": {"sort_keys": True},
+ "ext.i18n.trimmed": False,
+}
diff --git a/lib/spack/external/_vendoring/jinja2/environment.py b/lib/spack/external/_vendoring/jinja2/environment.py
new file mode 100644
index 0000000000..a231d9cd57
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/environment.py
@@ -0,0 +1,1661 @@
+"""Classes for managing templates and their runtime and compile time
+options.
+"""
+import os
+import sys
+import typing
+import typing as t
+import weakref
+from collections import ChainMap
+from functools import lru_cache
+from functools import partial
+from functools import reduce
+from types import CodeType
+
+from markupsafe import Markup
+
+from . import nodes
+from .compiler import CodeGenerator
+from .compiler import generate
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import DEFAULT_FILTERS
+from .defaults import DEFAULT_NAMESPACE
+from .defaults import DEFAULT_POLICIES
+from .defaults import DEFAULT_TESTS
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .lexer import get_lexer
+from .lexer import Lexer
+from .lexer import TokenStream
+from .nodes import EvalContext
+from .parser import Parser
+from .runtime import Context
+from .runtime import new_context
+from .runtime import Undefined
+from .utils import _PassArg
+from .utils import concat
+from .utils import consume
+from .utils import import_string
+from .utils import internalcode
+from .utils import LRUCache
+from .utils import missing
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .bccache import BytecodeCache
+ from .ext import Extension
+ from .loaders import BaseLoader
+
+_env_bound = t.TypeVar("_env_bound", bound="Environment")
+
+
+# For direct template usage we keep up to ten living environments.
+@lru_cache(maxsize=10)
+def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound:
+ """Return a new spontaneous environment. A spontaneous environment
+ is used for templates created directly rather than through an
+ existing environment.
+
+ :param cls: Environment class to create.
+ :param args: Positional arguments passed to environment.
+ """
+ env = cls(*args)
+ env.shared = True
+ return env
+
+
+def create_cache(
+ size: int,
+) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]:
+ """Return the cache class for the given size."""
+ if size == 0:
+ return None
+
+ if size < 0:
+ return {}
+
+ return LRUCache(size) # type: ignore
+
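+# Cache-size semantics sketch for create_cache above:
+#
+#   create_cache(0)    -> None          (caching disabled)
+#   create_cache(-1)   -> {}            (unbounded dict cache)
+#   create_cache(400)  -> LRUCache(400)
+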
+
+def copy_cache(
+ cache: t.Optional[t.MutableMapping],
+) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]:
+ """Create an empty copy of the given cache."""
+ if cache is None:
+ return None
+
+ if type(cache) is dict:
+ return {}
+
+ return LRUCache(cache.capacity) # type: ignore
+
+
+def load_extensions(
+ environment: "Environment",
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]],
+) -> t.Dict[str, "Extension"]:
+ """Load the extensions from the list and bind it to the environment.
+ Returns a dict of instantiated extensions.
+ """
+ result = {}
+
+ for extension in extensions:
+ if isinstance(extension, str):
+ extension = t.cast(t.Type["Extension"], import_string(extension))
+
+ result[extension.identifier] = extension(environment)
+
+ return result
+
+
+def _environment_config_check(environment: "Environment") -> "Environment":
+ """Perform a sanity check on the environment."""
+ assert issubclass(
+ environment.undefined, Undefined
+ ), "'undefined' must be a subclass of 'jinja2.Undefined'."
+ assert (
+ environment.block_start_string
+ != environment.variable_start_string
+ != environment.comment_start_string
+ ), "block, variable and comment start strings must be different."
+ assert environment.newline_sequence in {
+ "\r",
+ "\r\n",
+ "\n",
+ }, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'."
+ return environment
+
+
+class Environment:
+ r"""The core component of Jinja is the `Environment`. It contains
+ important shared variables like configuration, filters, tests,
+ globals and others. Instances of this class may be modified if
+ they are not shared and if no template has been loaded so far.
+ Modifications on environments after the first template has been
+ loaded will lead to surprising effects and undefined behavior.
+
+ Here are the possible initialization parameters:
+
+ `block_start_string`
+ The string marking the beginning of a block. Defaults to ``'{%'``.
+
+ `block_end_string`
+ The string marking the end of a block. Defaults to ``'%}'``.
+
+ `variable_start_string`
+ The string marking the beginning of a print statement.
+ Defaults to ``'{{'``.
+
+ `variable_end_string`
+ The string marking the end of a print statement. Defaults to
+ ``'}}'``.
+
+ `comment_start_string`
+ The string marking the beginning of a comment. Defaults to ``'{#'``.
+
+ `comment_end_string`
+ The string marking the end of a comment. Defaults to ``'#}'``.
+
+ `line_statement_prefix`
+ If given and a string, this will be used as the prefix for line-based
+ statements. See also :ref:`line-statements`.
+
+ `line_comment_prefix`
+ If given and a string, this will be used as the prefix for line-based
+ comments. See also :ref:`line-statements`.
+
+ .. versionadded:: 2.2
+
+ `trim_blocks`
+ If this is set to ``True`` the first newline after a block is
+ removed (block, not variable tag!). Defaults to `False`.
+
+ `lstrip_blocks`
+ If this is set to ``True`` leading spaces and tabs are stripped
+ from the start of a line to a block. Defaults to `False`.
+
+ `newline_sequence`
+ The sequence that starts a newline. Must be one of ``'\r'``,
+ ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
+ useful default for Linux and OS X systems as well as web
+ applications.
+
+ `keep_trailing_newline`
+ Preserve the trailing newline when rendering templates.
+ The default is ``False``, which causes a single newline,
+ if present, to be stripped from the end of the template.
+
+ .. versionadded:: 2.7
+
+ `extensions`
+ List of Jinja extensions to use. This can either be import paths
+ as strings or extension classes. For more information have a
+ look at :ref:`the extensions documentation <jinja-extensions>`.
+
+ `optimized`
+ Whether the optimizer should be enabled. Defaults to ``True``.
+
+ `undefined`
+ :class:`Undefined` or a subclass of it that is used to represent
+ undefined values in the template.
+
+ `finalize`
+ A callable that can be used to process the result of a variable
+ expression before it is output. For example one can convert
+ ``None`` implicitly into an empty string here.
+
+ `autoescape`
+ If set to ``True`` the XML/HTML autoescaping feature is enabled by
+ default. For more details about autoescaping see
+ :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
+ be a callable that is passed the template name and has to
+ return ``True`` or ``False`` depending on whether autoescape should
+ be enabled by default.
+
+ .. versionchanged:: 2.4
+ `autoescape` can now be a function
+
+ `loader`
+ The template loader for this environment.
+
+ `cache_size`
+ The size of the cache. By default this is ``400``, which means
+ that if more than 400 templates are loaded the loader will clean
+ out the least recently used template. If the cache size is set to
+ ``0``, templates are recompiled all the time; if the cache size is
+ ``-1``, the cache will not be cleaned.
+
+ .. versionchanged:: 2.8
+ The cache size was increased to 400 from a low 50.
+
+ `auto_reload`
+ Some loaders load templates from locations where the template
+ sources may change (i.e. the file system or a database). If
+ ``auto_reload`` is set to ``True`` (the default), the loader checks
+ on every template request whether the source has changed and, if
+ so, reloads the template. For higher performance it's possible to
+ disable that.
+
+ `bytecode_cache`
+ If set to a bytecode cache object, this object will provide a
+ cache for the internal Jinja bytecode so that templates don't
+ have to be parsed if they were not changed.
+
+ See :ref:`bytecode-cache` for more information.
+
+ `enable_async`
+ If set to ``True``, this enables async template execution, which
+ allows using async functions and generators.
+ """
+
+ #: If this environment is sandboxed. Modifying this variable won't make
+ #: the environment sandboxed though. For a real sandboxed environment
+ #: have a look at jinja2.sandbox. This flag alone controls the code
+ #: generation by the compiler.
+ sandboxed = False
+
+ #: True if the environment is just an overlay
+ overlayed = False
+
+ #: the environment this environment is linked to if it is an overlay
+ linked_to: t.Optional["Environment"] = None
+
+ #: shared environments have this set to `True`. A shared environment
+ #: must not be modified.
+ shared = False
+
+ #: the class that is used for code generation. See
+ #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+ code_generator_class: t.Type["CodeGenerator"] = CodeGenerator
+
+ #: the context class that is used for templates. See
+ #: :class:`~jinja2.runtime.Context` for more information.
+ context_class: t.Type[Context] = Context
+
+ template_class: t.Type["Template"]
+
+ def __init__(
+ self,
+ block_start_string: str = BLOCK_START_STRING,
+ block_end_string: str = BLOCK_END_STRING,
+ variable_start_string: str = VARIABLE_START_STRING,
+ variable_end_string: str = VARIABLE_END_STRING,
+ comment_start_string: str = COMMENT_START_STRING,
+ comment_end_string: str = COMMENT_END_STRING,
+ line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
+ line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
+ trim_blocks: bool = TRIM_BLOCKS,
+ lstrip_blocks: bool = LSTRIP_BLOCKS,
+ newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
+ keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
+ optimized: bool = True,
+ undefined: t.Type[Undefined] = Undefined,
+ finalize: t.Optional[t.Callable[..., t.Any]] = None,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
+ loader: t.Optional["BaseLoader"] = None,
+ cache_size: int = 400,
+ auto_reload: bool = True,
+ bytecode_cache: t.Optional["BytecodeCache"] = None,
+ enable_async: bool = False,
+ ):
+ # !!Important notice!!
+ # The constructor accepts quite a few arguments that should be
+ # passed by keyword rather than position. However it's important to
+ # not change the order of arguments because it's used at least
+ # internally in those cases:
+ # - spontaneous environments (i18n extension and Template)
+ # - unittests
+ # If parameter changes are required, only add parameters at the end
+ # and don't change the existing arguments (or their defaults!).
+
+ # lexer / parser information
+ self.block_start_string = block_start_string
+ self.block_end_string = block_end_string
+ self.variable_start_string = variable_start_string
+ self.variable_end_string = variable_end_string
+ self.comment_start_string = comment_start_string
+ self.comment_end_string = comment_end_string
+ self.line_statement_prefix = line_statement_prefix
+ self.line_comment_prefix = line_comment_prefix
+ self.trim_blocks = trim_blocks
+ self.lstrip_blocks = lstrip_blocks
+ self.newline_sequence = newline_sequence
+ self.keep_trailing_newline = keep_trailing_newline
+
+ # runtime information
+ self.undefined: t.Type[Undefined] = undefined
+ self.optimized = optimized
+ self.finalize = finalize
+ self.autoescape = autoescape
+
+ # defaults
+ self.filters = DEFAULT_FILTERS.copy()
+ self.tests = DEFAULT_TESTS.copy()
+ self.globals = DEFAULT_NAMESPACE.copy()
+
+ # set the loader provided
+ self.loader = loader
+ self.cache = create_cache(cache_size)
+ self.bytecode_cache = bytecode_cache
+ self.auto_reload = auto_reload
+
+ # configurable policies
+ self.policies = DEFAULT_POLICIES.copy()
+
+ # load extensions
+ self.extensions = load_extensions(self, extensions)
+
+ self.is_async = enable_async
+ _environment_config_check(self)
+
+ def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None:
+ """Adds an extension after the environment was created.
+
+ .. versionadded:: 2.5
+ """
+ self.extensions.update(load_extensions(self, [extension]))
+
+ def extend(self, **attributes: t.Any) -> None:
+ """Add the items to the instance of the environment if they do not exist
+ yet. This is used by :ref:`extensions <writing-extensions>` to register
+ callbacks and configuration values without breaking inheritance.
+ """
+ for key, value in attributes.items():
+ if not hasattr(self, key):
+ setattr(self, key, value)
+
+ def overlay(
+ self,
+ block_start_string: str = missing,
+ block_end_string: str = missing,
+ variable_start_string: str = missing,
+ variable_end_string: str = missing,
+ comment_start_string: str = missing,
+ comment_end_string: str = missing,
+ line_statement_prefix: t.Optional[str] = missing,
+ line_comment_prefix: t.Optional[str] = missing,
+ trim_blocks: bool = missing,
+ lstrip_blocks: bool = missing,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing,
+ optimized: bool = missing,
+ undefined: t.Type[Undefined] = missing,
+ finalize: t.Optional[t.Callable[..., t.Any]] = missing,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing,
+ loader: t.Optional["BaseLoader"] = missing,
+ cache_size: int = missing,
+ auto_reload: bool = missing,
+ bytecode_cache: t.Optional["BytecodeCache"] = missing,
+ ) -> "Environment":
+ """Create a new overlay environment that shares all the data with the
+ current environment except for cache and the overridden attributes.
+ Extensions cannot be removed for an overlayed environment. An overlayed
+ environment automatically gets all the extensions of the environment it
+ is linked to plus optional extra extensions.
+
+ Creating overlays should happen after the initial environment has
+ been set up completely. Not all attributes are truly linked; some are
+ just copied over, so modifications on the original environment may
+ not shine through.
+ """
+ args = dict(locals())
+ del args["self"], args["cache_size"], args["extensions"]
+
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.overlayed = True
+ rv.linked_to = self
+
+ for key, value in args.items():
+ if value is not missing:
+ setattr(rv, key, value)
+
+ if cache_size is not missing:
+ rv.cache = create_cache(cache_size)
+ else:
+ rv.cache = copy_cache(self.cache)
+
+ rv.extensions = {}
+ for key, value in self.extensions.items():
+ rv.extensions[key] = value.bind(rv)
+ if extensions is not missing:
+ rv.extensions.update(load_extensions(rv, extensions))
+
+ return _environment_config_check(rv)
+
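+ # Usage sketch for overlay (illustrative): derive a variant environment
+ # without mutating the original, e.g. one that autoescapes:
+ #
+ #   html_env = env.overlay(autoescape=True)
+ #   html_env.linked_to is env  # -> True
+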
+ @property
+ def lexer(self) -> Lexer:
+ """The lexer for this environment."""
+ return get_lexer(self)
+
+ def iter_extensions(self) -> t.Iterator["Extension"]:
+ """Iterates over the extensions by priority."""
+ return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+
+ def getitem(
+ self, obj: t.Any, argument: t.Union[str, t.Any]
+ ) -> t.Union[t.Any, Undefined]:
+ """Get an item or attribute of an object but prefer the item."""
+ try:
+ return obj[argument]
+ except (AttributeError, TypeError, LookupError):
+ if isinstance(argument, str):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ return getattr(obj, attr)
+ except AttributeError:
+ pass
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj: t.Any, attribute: str) -> t.Any:
+ """Get an item or attribute of an object but prefer the attribute.
+ Unlike :meth:`getitem` the attribute *must* be a string.
+ """
+ try:
+ return getattr(obj, attribute)
+ except AttributeError:
+ pass
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError, AttributeError):
+ return self.undefined(obj=obj, name=attribute)
+
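+ # Lookup-precedence sketch: getitem tries obj[arg] first and falls back
+ # to attribute access for string keys; getattr does the reverse. Both
+ # return an Undefined instead of raising when nothing matches:
+ #
+ #   env.getitem({"items": 1}, "items")  # -> 1 (the key, not dict.items)
+ #   env.getattr({"items": 1}, "items")  # -> the dict.items method
+ #   env.getitem({}, "missing")          # -> Undefined
+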
+ def _filter_test_common(
+ self,
+ name: t.Union[str, Undefined],
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]],
+ kwargs: t.Optional[t.Mapping[str, t.Any]],
+ context: t.Optional[Context],
+ eval_ctx: t.Optional[EvalContext],
+ is_filter: bool,
+ ) -> t.Any:
+ if is_filter:
+ env_map = self.filters
+ type_name = "filter"
+ else:
+ env_map = self.tests
+ type_name = "test"
+
+ func = env_map.get(name) # type: ignore
+
+ if func is None:
+ msg = f"No {type_name} named {name!r}."
+
+ if isinstance(name, Undefined):
+ try:
+ name._fail_with_undefined_error()
+ except Exception as e:
+ msg = f"{msg} ({e}; did you forget to quote the callable name?)"
+
+ raise TemplateRuntimeError(msg)
+
+ args = [value, *(args if args is not None else ())]
+ kwargs = kwargs if kwargs is not None else {}
+ pass_arg = _PassArg.from_obj(func)
+
+ if pass_arg is _PassArg.context:
+ if context is None:
+ raise TemplateRuntimeError(
+ f"Attempted to invoke a context {type_name} without context."
+ )
+
+ args.insert(0, context)
+ elif pass_arg is _PassArg.eval_context:
+ if eval_ctx is None:
+ if context is not None:
+ eval_ctx = context.eval_ctx
+ else:
+ eval_ctx = EvalContext(self)
+
+ args.insert(0, eval_ctx)
+ elif pass_arg is _PassArg.environment:
+ args.insert(0, self)
+
+ return func(*args, **kwargs)
+
+ def call_filter(
+ self,
+ name: str,
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]] = None,
+ kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ context: t.Optional[Context] = None,
+ eval_ctx: t.Optional[EvalContext] = None,
+ ) -> t.Any:
+ """Invoke a filter on a value the same way the compiler does.
+
+ This might return a coroutine if the filter is running from an
+ environment in async mode and the filter supports async
+ execution. It's your responsibility to await this if needed.
+
+ .. versionadded:: 2.7
+ """
+ return self._filter_test_common(
+ name, value, args, kwargs, context, eval_ctx, True
+ )
+
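+ # Usage sketch for call_filter (assuming the default "upper" filter):
+ #
+ #   env.call_filter("upper", "hello")  # -> "HELLO"
+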
+ def call_test(
+ self,
+ name: str,
+ value: t.Any,
+ args: t.Optional[t.Sequence[t.Any]] = None,
+ kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+ context: t.Optional[Context] = None,
+ eval_ctx: t.Optional[EvalContext] = None,
+ ) -> t.Any:
+ """Invoke a test on a value the same way the compiler does.
+
+ This might return a coroutine if the test is running from an
+ environment in async mode and the test supports async execution.
+ It's your responsibility to await this if needed.
+
+ .. versionchanged:: 3.0
+ Tests support ``@pass_context``, etc. decorators. Added
+ the ``context`` and ``eval_ctx`` parameters.
+
+ .. versionadded:: 2.7
+ """
+ return self._filter_test_common(
+ name, value, args, kwargs, context, eval_ctx, False
+ )
+
+ @internalcode
+ def parse(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> nodes.Template:
+ """Parse the sourcecode and return the abstract syntax tree. This
+ tree of nodes is used by the compiler to convert the template into
+ executable source- or bytecode. This is useful for debugging or to
+ extract information from templates.
+
+ If you are :ref:`developing Jinja extensions <writing-extensions>`
+ this gives you a good overview of the node tree generated.
+ """
+ try:
+ return self._parse(source, name, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source)
+
+ def _parse(
+ self, source: str, name: t.Optional[str], filename: t.Optional[str]
+ ) -> nodes.Template:
+ """Internal parsing function used by `parse` and `compile`."""
+ return Parser(self, source, name, filename).parse()
+
+ def lex(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> t.Iterator[t.Tuple[int, str, str]]:
+ """Lex the given sourcecode and return a generator that yields
+ tokens as tuples in the form ``(lineno, token_type, value)``.
+ This can be useful for :ref:`extension development <writing-extensions>`
+ and debugging templates.
+
+ This does not perform preprocessing. If you want the preprocessing
+ of the extensions to be applied you have to filter source through
+ the :meth:`preprocess` method.
+ """
+ source = str(source)
+ try:
+ return self.lexer.tokeniter(source, name, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source)
+
+ def preprocess(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> str:
+ """Preprocesses the source with all extensions. This is automatically
+ called for all parsing and compiling methods but *not* for :meth:`lex`
+ because there you usually only want the actual source tokenized.
+ """
+ return reduce(
+ lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(),
+ str(source),
+ )
+
+ def _tokenize(
+ self,
+ source: str,
+ name: t.Optional[str],
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> TokenStream:
+ """Called by the parser to do the preprocessing and filtering
+ for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+ """
+ source = self.preprocess(source, name, filename)
+ stream = self.lexer.tokenize(source, name, filename, state)
+
+ for ext in self.iter_extensions():
+ stream = ext.filter_stream(stream) # type: ignore
+
+ if not isinstance(stream, TokenStream):
+ stream = TokenStream(stream, name, filename) # type: ignore
+
+ return stream
+
+ def _generate(
+ self,
+ source: nodes.Template,
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ defer_init: bool = False,
+ ) -> str:
+ """Internal hook that can be overridden to hook a different generate
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return generate( # type: ignore
+ source,
+ self,
+ name,
+ filename,
+ defer_init=defer_init,
+ optimized=self.optimized,
+ )
+
+ def _compile(self, source: str, filename: str) -> CodeType:
+ """Internal hook that can be overridden to hook a different compile
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return compile(source, filename, "exec") # type: ignore
+
+ @typing.overload
+ def compile( # type: ignore
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: "te.Literal[False]" = False,
+ defer_init: bool = False,
+ ) -> CodeType:
+ ...
+
+ @typing.overload
+ def compile(
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: "te.Literal[True]" = ...,
+ defer_init: bool = False,
+ ) -> str:
+ ...
+
+ @internalcode
+ def compile(
+ self,
+ source: t.Union[str, nodes.Template],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ raw: bool = False,
+ defer_init: bool = False,
+ ) -> t.Union[str, CodeType]:
+ """Compile a node or template source code. The `name` parameter is
+ the load name of the template after it was joined using
+ :meth:`join_path` if necessary, not the filename on the file system.
+ the `filename` parameter is the estimated filename of the template on
+ the file system. If the template came from a database or memory this
+ can be omitted.
+
+ The return value of this method is a python code object. If the `raw`
+ parameter is `True` the return value will be a string with python
+ code equivalent to the bytecode returned otherwise. This method is
+ mainly used internally.
+
+ `defer_init` is use internally to aid the module code generator. This
+ causes the generated code to be able to import without the global
+ environment variable to be set.
+
+ .. versionadded:: 2.4
+ `defer_init` parameter added.
+ """
+ source_hint = None
+ try:
+ if isinstance(source, str):
+ source_hint = source
+ source = self._parse(source, name, filename)
+ source = self._generate(source, name, filename, defer_init=defer_init)
+ if raw:
+ return source
+ if filename is None:
+ filename = "<template>"
+ return self._compile(source, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source_hint)
+
+ def compile_expression(
+ self, source: str, undefined_to_none: bool = True
+ ) -> "TemplateExpression":
+ """A handy helper method that returns a callable that accepts keyword
+ arguments that appear as variables in the expression. If called it
+ returns the result of the expression.
+
+ This is useful if applications want to use the same rules as Jinja
+ in template "configuration files" or similar situations.
+
+ Example usage:
+
+ >>> env = Environment()
+ >>> expr = env.compile_expression('foo == 42')
+ >>> expr(foo=23)
+ False
+ >>> expr(foo=42)
+ True
+
+ By default the return value is converted to `None` if the
+ expression returns an undefined value. This can be changed
+ by setting `undefined_to_none` to `False`.
+
+ >>> env.compile_expression('var')() is None
+ True
+ >>> env.compile_expression('var', undefined_to_none=False)()
+ Undefined
+
+ .. versionadded:: 2.1
+ """
+ parser = Parser(self, source, state="variable")
+ try:
+ expr = parser.parse_expression()
+ if not parser.stream.eos:
+ raise TemplateSyntaxError(
+ "chunk after expression", parser.stream.current.lineno, None, None
+ )
+ expr.set_environment(self)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source)
+
+ body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
+ template = self.from_string(nodes.Template(body, lineno=1))
+ return TemplateExpression(template, undefined_to_none)
+
+ def compile_templates(
+ self,
+ target: t.Union[str, os.PathLike],
+ extensions: t.Optional[t.Collection[str]] = None,
+ filter_func: t.Optional[t.Callable[[str], bool]] = None,
+ zip: t.Optional[str] = "deflated",
+ log_function: t.Optional[t.Callable[[str], None]] = None,
+ ignore_errors: bool = True,
+ ) -> None:
+ """Finds all the templates the loader can find, compiles them
+ and stores them in `target`. If `zip` is `None`, instead of in a
+ zipfile, the templates will be stored in a directory.
+ By default a deflate zip algorithm is used. To switch to
+ the stored algorithm, `zip` can be set to ``'stored'``.
+
+ `extensions` and `filter_func` are passed to :meth:`list_templates`.
+ Each template returned will be compiled to the target folder or
+ zipfile.
+
+ By default template compilation errors are ignored. In case a
+ log function is provided, errors are logged. If you want template
+ syntax errors to abort the compilation you can set `ignore_errors`
+ to `False` and you will get an exception on syntax errors.
+
+ .. versionadded:: 2.4
+ """
+ from .loaders import ModuleLoader
+
+ if log_function is None:
+
+ def log_function(x: str) -> None:
+ pass
+
+ assert log_function is not None
+ assert self.loader is not None, "No loader configured."
+
+ def write_file(filename: str, data: str) -> None:
+ if zip:
+ info = ZipInfo(filename)
+ info.external_attr = 0o755 << 16
+ zip_file.writestr(info, data)
+ else:
+ with open(os.path.join(target, filename), "wb") as f:
+ f.write(data.encode("utf8"))
+
+ if zip is not None:
+ from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
+
+ zip_file = ZipFile(
+ target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
+ )
+ log_function(f"Compiling into Zip archive {target!r}")
+ else:
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ log_function(f"Compiling into folder {target!r}")
+
+ try:
+ for name in self.list_templates(extensions, filter_func):
+ source, filename, _ = self.loader.get_source(self, name)
+ try:
+ code = self.compile(source, name, filename, True, True)
+ except TemplateSyntaxError as e:
+ if not ignore_errors:
+ raise
+ log_function(f'Could not compile "{name}": {e}')
+ continue
+
+ filename = ModuleLoader.get_module_filename(name)
+
+ write_file(filename, code)
+ log_function(f'Compiled "{name}" as {filename}')
+ finally:
+ if zip:
+ zip_file.close()
+
+ log_function("Finished compiling templates")
+
+ def list_templates(
+ self,
+ extensions: t.Optional[t.Collection[str]] = None,
+ filter_func: t.Optional[t.Callable[[str], bool]] = None,
+ ) -> t.List[str]:
+ """Returns a list of templates for this environment. This requires
+ that the loader supports the loader's
+ :meth:`~BaseLoader.list_templates` method.
+
+ If there are other files in the template folder besides the
+ actual templates, the returned list can be filtered. There are two
+ ways: either `extensions` is set to a list of file extensions for
+ templates, or a `filter_func` can be provided which is a callable that
+ is passed a template name and should return `True` if it should end up
+ in the result list.
+
+ If the loader does not support that, a :exc:`TypeError` is raised.
+
+ .. versionadded:: 2.4
+ """
+ assert self.loader is not None, "No loader configured."
+ names = self.loader.list_templates()
+
+ if extensions is not None:
+ if filter_func is not None:
+ raise TypeError(
+ "either extensions or filter_func can be passed, but not both"
+ )
+
+ def filter_func(x: str) -> bool:
+ return "." in x and x.rsplit(".", 1)[1] in extensions # type: ignore
+
+ if filter_func is not None:
+ names = [name for name in names if filter_func(name)]
+
+ return names
+
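+ # Hedged examples (not upstream code) of the two filtering styles
+ # described above; they assume an ``env`` with a configured loader:
+ #
+ # env.list_templates(extensions=["html", "txt"])
+ # env.list_templates(filter_func=lambda name: not name.startswith("_"))
+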
+ def handle_exception(self, source: t.Optional[str] = None) -> "te.NoReturn":
+ """Exception handling helper. This is used internally to either raise
+ rewritten exceptions or return a rendered traceback for the template.
+ """
+ from .debug import rewrite_traceback_stack
+
+ raise rewrite_traceback_stack(source=source)
+
+ def join_path(self, template: str, parent: str) -> str:
+ """Join a template with the parent. By default all the lookups are
+ relative to the loader root so this method returns the `template`
+ parameter unchanged, but if the paths should be relative to the
+ parent template, this function can be used to calculate the real
+ template name.
+
+ Subclasses may override this method and implement template path
+ joining here.
+ """
+ return template
+
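+ # A sketch (not upstream code) of a subclass that resolves lookups
+ # relative to the parent template; it assumes '/'-separated template
+ # names, as loaders conventionally use:
+ #
+ # import posixpath
+ #
+ # class RelativeEnvironment(Environment):
+ # def join_path(self, template: str, parent: str) -> str:
+ # return posixpath.normpath(
+ # posixpath.join(posixpath.dirname(parent), template)
+ # )
+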
+ @internalcode
+ def _load_template(
+ self, name: str, globals: t.Optional[t.Mapping[str, t.Any]]
+ ) -> "Template":
+ if self.loader is None:
+ raise TypeError("no loader for this environment specified")
+ cache_key = (weakref.ref(self.loader), name)
+ if self.cache is not None:
+ template = self.cache.get(cache_key)
+ if template is not None and (
+ not self.auto_reload or template.is_up_to_date
+ ):
+ # template.globals is a ChainMap, modifying it will only
+ # affect the template, not the environment globals.
+ if globals:
+ template.globals.update(globals)
+
+ return template
+
+ template = self.loader.load(self, name, self.make_globals(globals))
+
+ if self.cache is not None:
+ self.cache[cache_key] = template
+ return template
+
+ @internalcode
+ def get_template(
+ self,
+ name: t.Union[str, "Template"],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Load a template by name with :attr:`loader` and return a
+ :class:`Template`. If the template does not exist a
+ :exc:`TemplateNotFound` exception is raised.
+
+ :param name: Name of the template to load.
+ :param parent: The name of the parent template importing this
+ template. :meth:`join_path` can be used to implement name
+ transformations with this.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+
+ .. versionchanged:: 3.0
+ If a template is loaded from cache, ``globals`` will update
+ the template's globals instead of ignoring the new values.
+
+ .. versionchanged:: 2.4
+ If ``name`` is a :class:`Template` object it is returned
+ unchanged.
+ """
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+
+ return self._load_template(name, globals)
+
+ @internalcode
+ def select_template(
+ self,
+ names: t.Iterable[t.Union[str, "Template"]],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Like :meth:`get_template`, but tries loading multiple names.
+ If none of the names can be loaded a :exc:`TemplatesNotFound`
+ exception is raised.
+
+ :param names: List of template names to try loading in order.
+ :param parent: The name of the parent template importing this
+ template. :meth:`join_path` can be used to implement name
+ transformations with this.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+
+ .. versionchanged:: 3.0
+ If a template is loaded from cache, ``globals`` will update
+ the template's globals instead of ignoring the new values.
+
+ .. versionchanged:: 2.11
+ If ``names`` is :class:`Undefined`, an :exc:`UndefinedError`
+ is raised instead. If no templates were found and ``names``
+ contains :class:`Undefined`, the message is more helpful.
+
+ .. versionchanged:: 2.4
+ If ``names`` contains a :class:`Template` object it is
+ returned unchanged.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(names, Undefined):
+ names._fail_with_undefined_error()
+
+ if not names:
+ raise TemplatesNotFound(
+ message="Tried to select from an empty list of templates."
+ )
+
+ for name in names:
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ try:
+ return self._load_template(name, globals)
+ except (TemplateNotFound, UndefinedError):
+ pass
+ raise TemplatesNotFound(names) # type: ignore
+
+ @internalcode
+ def get_or_select_template(
+ self,
+ template_name_or_list: t.Union[
+ str, "Template", t.List[t.Union[str, "Template"]]
+ ],
+ parent: t.Optional[str] = None,
+ globals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Use :meth:`select_template` if an iterable of template names
+ is given, or :meth:`get_template` if one name is given.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(template_name_or_list, (str, Undefined)):
+ return self.get_template(template_name_or_list, parent, globals)
+ elif isinstance(template_name_or_list, Template):
+ return template_name_or_list
+ return self.select_template(template_name_or_list, parent, globals)
+
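+ # Hedged usage sketch (not upstream code); the template names below are
+ # hypothetical:
+ #
+ # t = env.get_template("index.html")
+ # t = env.select_template(["theme/index.html", "index.html"])
+ # t = env.get_or_select_template("index.html") # str -> get_template
+ # t = env.get_or_select_template(["a.html", "b.html"]) # list -> select_template
+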
+ def from_string(
+ self,
+ source: t.Union[str, nodes.Template],
+ globals: t.Optional[t.Mapping[str, t.Any]] = None,
+ template_class: t.Optional[t.Type["Template"]] = None,
+ ) -> "Template":
+ """Load a template from a source string without using
+ :attr:`loader`.
+
+ :param source: Jinja source to compile into a template.
+ :param globals: Extend the environment :attr:`globals` with
+ these extra variables available for all renders of this
+ template. If the template has already been loaded and
+ cached, its globals are updated with any new items.
+ :param template_class: Return an instance of this
+ :class:`Template` class.
+ """
+ gs = self.make_globals(globals)
+ cls = template_class or self.template_class
+ return cls.from_code(self, self.compile(source), gs, None)
+
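+ # Hedged example (not upstream code):
+ #
+ # t = Environment().from_string("Hello {{ name }}!")
+ # t.render(name="World") # -> 'Hello World!'
+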
+ def make_globals(
+ self, d: t.Optional[t.Mapping[str, t.Any]]
+ ) -> t.MutableMapping[str, t.Any]:
+ """Make the globals map for a template. Any given template
+ globals overlay the environment :attr:`globals`.
+
+ Returns a :class:`collections.ChainMap`. This allows any changes
+ to a template's globals to only affect that template, while
+ changes to the environment's globals are still reflected.
+ However, avoid modifying any globals after a template is loaded.
+
+ :param d: Dict of template-specific globals.
+
+ .. versionchanged:: 3.0
+ Use :class:`collections.ChainMap` to always prevent mutating
+ environment globals.
+ """
+ if d is None:
+ d = {}
+
+ return ChainMap(d, self.globals)
+
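+# A small sketch (not upstream code) of the ChainMap behavior described in
+# ``make_globals``: template globals shadow environment globals without
+# mutating them.
+#
+# env = Environment()
+# env.globals["who"] = "world"
+# g = env.make_globals({"who": "template"})
+# g["who"] # -> 'template'
+# env.globals["who"] # -> 'world' (unchanged)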
+
+class Template:
+ """A compiled template that can be rendered.
+
+ Use the methods on :class:`Environment` to create or load templates.
+ The environment is used to configure how templates are compiled and
+ behave.
+
+ It is also possible to create a template object directly. This is
+ not usually recommended. The constructor takes most of the same
+ arguments as :class:`Environment`. All templates created with the
+ same environment arguments share the same ephemeral ``Environment``
+ instance behind the scenes.
+
+ A template object should be considered immutable. Modifications on
+ the object are not supported.
+ """
+
+ #: Type of environment to create when creating a template directly
+ #: rather than through an existing environment.
+ environment_class: t.Type[Environment] = Environment
+
+ environment: Environment
+ globals: t.MutableMapping[str, t.Any]
+ name: t.Optional[str]
+ filename: t.Optional[str]
+ blocks: t.Dict[str, t.Callable[[Context], t.Iterator[str]]]
+ root_render_func: t.Callable[[Context], t.Iterator[str]]
+ _module: t.Optional["TemplateModule"]
+ _debug_info: str
+ _uptodate: t.Optional[t.Callable[[], bool]]
+
+ def __new__(
+ cls,
+ source: t.Union[str, nodes.Template],
+ block_start_string: str = BLOCK_START_STRING,
+ block_end_string: str = BLOCK_END_STRING,
+ variable_start_string: str = VARIABLE_START_STRING,
+ variable_end_string: str = VARIABLE_END_STRING,
+ comment_start_string: str = COMMENT_START_STRING,
+ comment_end_string: str = COMMENT_END_STRING,
+ line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
+ line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
+ trim_blocks: bool = TRIM_BLOCKS,
+ lstrip_blocks: bool = LSTRIP_BLOCKS,
+ newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
+ keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
+ extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
+ optimized: bool = True,
+ undefined: t.Type[Undefined] = Undefined,
+ finalize: t.Optional[t.Callable[..., t.Any]] = None,
+ autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
+ enable_async: bool = False,
+ ) -> t.Any: # it returns a `Template`, but this breaks the sphinx build...
+ env = get_spontaneous_environment(
+ cls.environment_class, # type: ignore
+ block_start_string,
+ block_end_string,
+ variable_start_string,
+ variable_end_string,
+ comment_start_string,
+ comment_end_string,
+ line_statement_prefix,
+ line_comment_prefix,
+ trim_blocks,
+ lstrip_blocks,
+ newline_sequence,
+ keep_trailing_newline,
+ frozenset(extensions),
+ optimized,
+ undefined, # type: ignore
+ finalize,
+ autoescape,
+ None,
+ 0,
+ False,
+ None,
+ enable_async,
+ )
+ return env.from_string(source, template_class=cls)
+
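+ # Hedged example (not upstream code) of creating a template directly,
+ # which the class docstring permits but does not generally recommend:
+ #
+ # t = Template("Hello {{ name }}!")
+ # t.render(name="World") # -> 'Hello World!'
+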
+ @classmethod
+ def from_code(
+ cls,
+ environment: Environment,
+ code: CodeType,
+ globals: t.MutableMapping[str, t.Any],
+ uptodate: t.Optional[t.Callable[[], bool]] = None,
+ ) -> "Template":
+ """Creates a template object from compiled code and the globals. This
+ is used by the loaders and environment to create a template object.
+ """
+ namespace = {"environment": environment, "__file__": code.co_filename}
+ exec(code, namespace)
+ rv = cls._from_namespace(environment, namespace, globals)
+ rv._uptodate = uptodate
+ return rv
+
+ @classmethod
+ def from_module_dict(
+ cls,
+ environment: Environment,
+ module_dict: t.MutableMapping[str, t.Any],
+ globals: t.MutableMapping[str, t.Any],
+ ) -> "Template":
+ """Creates a template object from a module. This is used by the
+ module loader to create a template object.
+
+ .. versionadded:: 2.4
+ """
+ return cls._from_namespace(environment, module_dict, globals)
+
+ @classmethod
+ def _from_namespace(
+ cls,
+ environment: Environment,
+ namespace: t.MutableMapping[str, t.Any],
+ globals: t.MutableMapping[str, t.Any],
+ ) -> "Template":
+ t: "Template" = object.__new__(cls)
+ t.environment = environment
+ t.globals = globals
+ t.name = namespace["name"]
+ t.filename = namespace["__file__"]
+ t.blocks = namespace["blocks"]
+
+ # render function and module
+ t.root_render_func = namespace["root"] # type: ignore
+ t._module = None
+
+ # debug and loader helpers
+ t._debug_info = namespace["debug_info"]
+ t._uptodate = None
+
+ # store the reference
+ namespace["environment"] = environment
+ namespace["__jinja_template__"] = t
+
+ return t
+
+ def render(self, *args: t.Any, **kwargs: t.Any) -> str:
+ """This method accepts the same arguments as the `dict` constructor:
+ A dict, a dict subclass or some keyword arguments. If no arguments
+ are given the context will be empty. These two calls do the same::
+
+ template.render(knights='that say nih')
+ template.render({'knights': 'that say nih'})
+
+ This will return the rendered template as a string.
+ """
+ if self.environment.is_async:
+ import asyncio
+
+ close = False
+
+ if sys.version_info < (3, 7):
+ loop = asyncio.get_event_loop()
+ else:
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ close = True
+
+ try:
+ return loop.run_until_complete(self.render_async(*args, **kwargs))
+ finally:
+ if close:
+ loop.close()
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return concat(self.root_render_func(ctx)) # type: ignore
+ except Exception:
+ self.environment.handle_exception()
+
+ async def render_async(self, *args: t.Any, **kwargs: t.Any) -> str:
+ """This works similar to :meth:`render` but returns a coroutine
+ that when awaited returns the entire rendered template string. This
+ requires the async feature to be enabled.
+
+ Example usage::
+
+ await template.render_async(knights='that say nih; asynchronously')
+ """
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return concat([n async for n in self.root_render_func(ctx)]) # type: ignore
+ except Exception:
+ return self.environment.handle_exception()
+
+ def stream(self, *args: t.Any, **kwargs: t.Any) -> "TemplateStream":
+ """Works exactly like :meth:`generate` but returns a
+ :class:`TemplateStream`.
+ """
+ return TemplateStream(self.generate(*args, **kwargs))
+
+ def generate(self, *args: t.Any, **kwargs: t.Any) -> t.Iterator[str]:
+ """For very large templates it can be useful to not render the whole
+ template at once but evaluate each statement after another and yield
+ piece for piece. This method basically does exactly that and returns
+ a generator that yields one item after another as strings.
+
+ It accepts the same arguments as :meth:`render`.
+ """
+ if self.environment.is_async:
+ import asyncio
+
+ async def to_list() -> t.List[str]:
+ return [x async for x in self.generate_async(*args, **kwargs)]
+
+ if sys.version_info < (3, 7):
+ loop = asyncio.get_event_loop()
+ out = loop.run_until_complete(to_list())
+ else:
+ out = asyncio.run(to_list())
+
+ yield from out
+ return
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ yield from self.root_render_func(ctx) # type: ignore
+ except Exception:
+ yield self.environment.handle_exception()
+
+ async def generate_async(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.AsyncIterator[str]:
+ """An async version of :meth:`generate`. Works very similarly but
+ returns an async iterator instead.
+ """
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ async for event in self.root_render_func(ctx): # type: ignore
+ yield event
+ except Exception:
+ yield self.environment.handle_exception()
+
+ def new_context(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> Context:
+ """Create a new :class:`Context` for this template. The vars
+ provided will be passed to the template. By default the globals
+ are added to the context. If shared is set to `True` the data
+ is passed as-is to the context without adding the globals.
+
+ `locals` can be a dict of local variables for internal usage.
+ """
+ return new_context(
+ self.environment, self.name, self.blocks, vars, shared, self.globals, locals
+ )
+
+ def make_module(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "TemplateModule":
+ """This method works like the :attr:`module` attribute when called
+ without arguments but it will evaluate the template on every call
+ rather than caching it. It's also possible to provide
+ a dict which is then used as context. The arguments are the same
+ as for the :meth:`new_context` method.
+ """
+ ctx = self.new_context(vars, shared, locals)
+ return TemplateModule(self, ctx)
+
+ async def make_module_async(
+ self,
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+ ) -> "TemplateModule":
+ """As template module creation can invoke template code for
+ asynchronous executions this method must be used instead of the
+ normal :meth:`make_module` one. Likewise the module attribute
+ becomes unavailable in async mode.
+ """
+ ctx = self.new_context(vars, shared, locals)
+ return TemplateModule(
+ self, ctx, [x async for x in self.root_render_func(ctx)] # type: ignore
+ )
+
+ @internalcode
+ def _get_default_module(self, ctx: t.Optional[Context] = None) -> "TemplateModule":
+ """If a context is passed in, this means that the template was
+ imported. Imported templates have access to the current
+ template's globals by default, but they can only be accessed via
+ the context during runtime.
+
+ If there are new globals, we need to create a new module because
+ the cached module is already rendered and will not have access
+ to globals from the current context. This new module is not
+ cached because the template can be imported elsewhere, and it
+ should have access to only the current template's globals.
+ """
+ if self.environment.is_async:
+ raise RuntimeError("Module is not available in async mode.")
+
+ if ctx is not None:
+ keys = ctx.globals_keys - self.globals.keys()
+
+ if keys:
+ return self.make_module({k: ctx.parent[k] for k in keys})
+
+ if self._module is None:
+ self._module = self.make_module()
+
+ return self._module
+
+ async def _get_default_module_async(
+ self, ctx: t.Optional[Context] = None
+ ) -> "TemplateModule":
+ if ctx is not None:
+ keys = ctx.globals_keys - self.globals.keys()
+
+ if keys:
+ return await self.make_module_async({k: ctx.parent[k] for k in keys})
+
+ if self._module is None:
+ self._module = await self.make_module_async()
+
+ return self._module
+
+ @property
+ def module(self) -> "TemplateModule":
+ """The template as module. This is used for imports in the
+ template runtime but is also useful if one wants to access
+ exported template variables from the Python layer:
+
+ >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+ >>> str(t.module)
+ '23'
+ >>> t.module.foo() == u'42'
+ True
+
+ This attribute is not available if async mode is enabled.
+ """
+ return self._get_default_module()
+
+ def get_corresponding_lineno(self, lineno: int) -> int:
+ """Return the source line number of a line number in the
+ generated bytecode as they are not in sync.
+ """
+ for template_line, code_line in reversed(self.debug_info):
+ if code_line <= lineno:
+ return template_line
+ return 1
+
+ @property
+ def is_up_to_date(self) -> bool:
+ """If this variable is `False` there is a newer version available."""
+ if self._uptodate is None:
+ return True
+ return self._uptodate()
+
+ @property
+ def debug_info(self) -> t.List[t.Tuple[int, int]]:
+ """The debug info mapping."""
+ if self._debug_info:
+ return [
+ tuple(map(int, x.split("="))) # type: ignore
+ for x in self._debug_info.split("&")
+ ]
+
+ return []
+
+ def __repr__(self) -> str:
+ if self.name is None:
+ name = f"memory:{id(self):x}"
+ else:
+ name = repr(self.name)
+ return f"<{type(self).__name__} {name}>"
+
+
+class TemplateModule:
+ """Represents an imported template. All the exported names of the
+ template are available as attributes on this object. Additionally
+ converting it into a string renders the contents.
+ """
+
+ def __init__(
+ self,
+ template: Template,
+ context: Context,
+ body_stream: t.Optional[t.Iterable[str]] = None,
+ ) -> None:
+ if body_stream is None:
+ if context.environment.is_async:
+ raise RuntimeError(
+ "Async mode requires a body stream to be passed to"
+ " a template module. Use the async methods of the"
+ " API you are using."
+ )
+
+ body_stream = list(template.root_render_func(context)) # type: ignore
+
+ self._body_stream = body_stream
+ self.__dict__.update(context.get_exported())
+ self.__name__ = template.name
+
+ def __html__(self) -> Markup:
+ return Markup(concat(self._body_stream))
+
+ def __str__(self) -> str:
+ return concat(self._body_stream)
+
+ def __repr__(self) -> str:
+ if self.__name__ is None:
+ name = f"memory:{id(self):x}"
+ else:
+ name = repr(self.__name__)
+ return f"<{type(self).__name__} {name}>"
+
+
+class TemplateExpression:
+ """The :meth:`jinja2.Environment.compile_expression` method returns an
+ instance of this object. It wraps a single compiled expression in a
+ template and makes it callable, returning the expression's value.
+ """
+
+ def __init__(self, template: Template, undefined_to_none: bool) -> None:
+ self._template = template
+ self._undefined_to_none = undefined_to_none
+
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Optional[t.Any]:
+ context = self._template.new_context(dict(*args, **kwargs))
+ consume(self._template.root_render_func(context)) # type: ignore
+ rv = context.vars["result"]
+ if self._undefined_to_none and isinstance(rv, Undefined):
+ rv = None
+ return rv
+
+
+class TemplateStream:
+ """A template stream works pretty much like an ordinary python generator
+ but it can buffer multiple items to reduce the number of total iterations.
+ Per default the output is unbuffered which means that for every unbuffered
+ instruction in the template one string is yielded.
+
+ If buffering is enabled with a buffer size of 5, five items are combined
+ into a new string. This is mainly useful if you are streaming
+ big templates to a client via WSGI which flushes after each iteration.
+ """
+
+ def __init__(self, gen: t.Iterator[str]) -> None:
+ self._gen = gen
+ self.disable_buffering()
+
+ def dump(
+ self,
+ fp: t.Union[str, t.IO],
+ encoding: t.Optional[str] = None,
+ errors: t.Optional[str] = "strict",
+ ) -> None:
+ """Dump the complete stream into a file or file-like object.
+ By default strings are written; if you want to encode
+ before writing, specify an `encoding`.
+
+ Example usage::
+
+ Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+ """
+ close = False
+
+ if isinstance(fp, str):
+ if encoding is None:
+ encoding = "utf-8"
+
+ fp = open(fp, "wb")
+ close = True
+ try:
+ if encoding is not None:
+ iterable = (x.encode(encoding, errors) for x in self) # type: ignore
+ else:
+ iterable = self # type: ignore
+
+ if hasattr(fp, "writelines"):
+ fp.writelines(iterable)
+ else:
+ for item in iterable:
+ fp.write(item)
+ finally:
+ if close:
+ fp.close()
+
+ def disable_buffering(self) -> None:
+ """Disable the output buffering."""
+ self._next = partial(next, self._gen)
+ self.buffered = False
+
+ def _buffered_generator(self, size: int) -> t.Iterator[str]:
+ buf: t.List[str] = []
+ c_size = 0
+ push = buf.append
+
+ while True:
+ try:
+ while c_size < size:
+ c = next(self._gen)
+ push(c)
+ if c:
+ c_size += 1
+ except StopIteration:
+ if not c_size:
+ return
+ yield concat(buf)
+ del buf[:]
+ c_size = 0
+
+ def enable_buffering(self, size: int = 5) -> None:
+ """Enable buffering. Buffer `size` items before yielding them."""
+ if size <= 1:
+ raise ValueError("buffer size too small")
+
+ self.buffered = True
+ self._next = partial(next, self._buffered_generator(size))
+
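+ # Hedged usage sketch (not upstream code); 'hello.html' is a
+ # hypothetical output path:
+ #
+ # stream = Template("{% for i in x %}{{ i }}{% endfor %}").stream(x=range(100))
+ # stream.enable_buffering(size=10) # combine ~10 outputs per yielded string
+ # stream.dump("hello.html")
+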
+ def __iter__(self) -> "TemplateStream":
+ return self
+
+ def __next__(self) -> str:
+ return self._next() # type: ignore
+
+
+# hook in default template class. if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
diff --git a/lib/spack/external/_vendoring/jinja2/exceptions.py b/lib/spack/external/_vendoring/jinja2/exceptions.py
new file mode 100644
index 0000000000..082ebe8f22
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/exceptions.py
@@ -0,0 +1,166 @@
+import typing as t
+
+if t.TYPE_CHECKING:
+ from .runtime import Undefined
+
+
+class TemplateError(Exception):
+ """Baseclass for all template errors."""
+
+ def __init__(self, message: t.Optional[str] = None) -> None:
+ super().__init__(message)
+
+ @property
+ def message(self) -> t.Optional[str]:
+ return self.args[0] if self.args else None
+
+
+class TemplateNotFound(IOError, LookupError, TemplateError):
+ """Raised if a template does not exist.
+
+ .. versionchanged:: 2.11
+ If the given name is :class:`Undefined` and no message was
+ provided, an :exc:`UndefinedError` is raised.
+ """
+
+ # Silence the Python warning about message being deprecated since
+ # it's not valid here.
+ message: t.Optional[str] = None
+
+ def __init__(
+ self,
+ name: t.Optional[t.Union[str, "Undefined"]],
+ message: t.Optional[str] = None,
+ ) -> None:
+ IOError.__init__(self, name)
+
+ if message is None:
+ from .runtime import Undefined
+
+ if isinstance(name, Undefined):
+ name._fail_with_undefined_error()
+
+ message = name
+
+ self.message = message
+ self.name = name
+ self.templates = [name]
+
+ def __str__(self) -> str:
+ return str(self.message)
+
+
+class TemplatesNotFound(TemplateNotFound):
+ """Like :class:`TemplateNotFound` but raised if multiple templates
+ are selected. This is a subclass of :class:`TemplateNotFound`
+ exception, so just catching the base exception will catch both.
+
+ .. versionchanged:: 2.11
+ If a name in the list of names is :class:`Undefined`, a message
+ about it being undefined is shown rather than the empty string.
+
+ .. versionadded:: 2.2
+ """
+
+ def __init__(
+ self,
+ names: t.Sequence[t.Union[str, "Undefined"]] = (),
+ message: t.Optional[str] = None,
+ ) -> None:
+ if message is None:
+ from .runtime import Undefined
+
+ parts = []
+
+ for name in names:
+ if isinstance(name, Undefined):
+ parts.append(name._undefined_message)
+ else:
+ parts.append(name)
+
+ parts_str = ", ".join(map(str, parts))
+ message = f"none of the templates given were found: {parts_str}"
+
+ super().__init__(names[-1] if names else None, message)
+ self.templates = list(names)
+
+
+class TemplateSyntaxError(TemplateError):
+ """Raised to tell the user that there is a problem with the template."""
+
+ def __init__(
+ self,
+ message: str,
+ lineno: int,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> None:
+ super().__init__(message)
+ self.lineno = lineno
+ self.name = name
+ self.filename = filename
+ self.source: t.Optional[str] = None
+
+ # this is set to True if the debug.translate_syntax_error
+ # function translated the syntax error into a new traceback
+ self.translated = False
+
+ def __str__(self) -> str:
+ # for translated errors we only return the message
+ if self.translated:
+ return t.cast(str, self.message)
+
+ # otherwise attach some stuff
+ location = f"line {self.lineno}"
+ name = self.filename or self.name
+ if name:
+ location = f'File "{name}", {location}'
+ lines = [t.cast(str, self.message), " " + location]
+
+ # if the source is set, add the line to the output
+ if self.source is not None:
+ try:
+ line = self.source.splitlines()[self.lineno - 1]
+ except IndexError:
+ pass
+ else:
+ lines.append(" " + line.strip())
+
+ return "\n".join(lines)
+
+ def __reduce__(self): # type: ignore
+ # https://bugs.python.org/issue1692335 Exceptions that take
+ # multiple required arguments have problems with pickling.
+ # Without this, raises TypeError: __init__() missing 1 required
+ # positional argument: 'lineno'
+ return self.__class__, (self.message, self.lineno, self.name, self.filename)
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+ """Like a template syntax error, but covers cases where something in the
+ template caused an error at compile time that wasn't necessarily caused
+ by a syntax error. However it's a direct subclass of
+ :exc:`TemplateSyntaxError` and has the same attributes.
+ """
+
+
+class TemplateRuntimeError(TemplateError):
+ """A generic runtime error in the template engine. Under some situations
+ Jinja may raise this exception.
+ """
+
+
+class UndefinedError(TemplateRuntimeError):
+ """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+ """Raised if a template tries to do something insecure if the
+ sandbox is enabled.
+ """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+ """This error is raised if a filter was called with inappropriate
+ arguments.
+ """
diff --git a/lib/spack/external/_vendoring/jinja2/ext.py b/lib/spack/external/_vendoring/jinja2/ext.py
new file mode 100644
index 0000000000..3e982930c4
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/ext.py
@@ -0,0 +1,879 @@
+"""Extension API for adding custom tags and behavior."""
+import pprint
+import re
+import typing as t
+import warnings
+
+from markupsafe import Markup
+
+from . import defaults
+from . import nodes
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .runtime import concat # type: ignore
+from .runtime import Context
+from .runtime import Undefined
+from .utils import import_string
+from .utils import pass_context
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .lexer import Token
+ from .lexer import TokenStream
+ from .parser import Parser
+
+ class _TranslationsBasic(te.Protocol):
+ def gettext(self, message: str) -> str:
+ ...
+
+ def ngettext(self, singular: str, plural: str, n: int) -> str:
+ ...
+
+ class _TranslationsContext(_TranslationsBasic):
+ def pgettext(self, context: str, message: str) -> str:
+ ...
+
+ def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
+ ...
+
+ _SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
+
+
+# I18N functions available in Jinja templates. If the I18N library
+# provides ugettext, it will be assigned to gettext.
+GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
+ "_",
+ "gettext",
+ "ngettext",
+ "pgettext",
+ "npgettext",
+)
+_ws_re = re.compile(r"\s*\n\s*")
+
+
+class Extension:
+ """Extensions can be used to add extra functionality to the Jinja template
+ system at the parser level. Custom extensions are bound to an environment
+ but may not store environment specific data on `self`. The reason for
+ this is that an extension can be bound to another environment (for
+ overlays) by creating a copy and reassigning the `environment` attribute.
+
+ As extensions are created by the environment they cannot accept any
+ arguments for configuration. One may want to work around that by using
+ a factory function, but that is not possible as extensions are identified
+ by their import name. The correct way to configure the extension is
+ storing the configuration values on the environment. Because the
+ environment then acts as central configuration storage, attribute
+ names may clash, which is why extensions have to ensure that the names
+ they choose for configuration are not too generic. ``prefix``, for example,
+ is a terrible name; ``fragment_cache_prefix``, on the other hand, is a good
+ name as it includes the name of the extension (fragment cache).
+ """
+
+ identifier: t.ClassVar[str]
+
+ def __init_subclass__(cls) -> None:
+ cls.identifier = f"{cls.__module__}.{cls.__name__}"
+
+ #: if this extension parses this is the list of tags it's listening to.
+ tags: t.Set[str] = set()
+
+ #: the priority of that extension. This is especially useful for
+ #: extensions that preprocess values. A lower value means higher
+ #: priority.
+ #:
+ #: .. versionadded:: 2.4
+ priority = 100
+
+ def __init__(self, environment: Environment) -> None:
+ self.environment = environment
+
+ def bind(self, environment: Environment) -> "Extension":
+ """Create a copy of this extension bound to another environment."""
+ rv = t.cast(Extension, object.__new__(self.__class__))
+ rv.__dict__.update(self.__dict__)
+ rv.environment = environment
+ return rv
+
+ def preprocess(
+ self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
+ ) -> str:
+ """This method is called before the actual lexing and can be used to
+ preprocess the source. The `filename` is optional. The return value
+ must be the preprocessed source.
+ """
+ return source
+
+ def filter_stream(
+ self, stream: "TokenStream"
+ ) -> t.Union["TokenStream", t.Iterable["Token"]]:
+ """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+ to filter tokens returned. This method has to return an iterable of
+ :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
+ :class:`~jinja2.lexer.TokenStream`.
+ """
+ return stream
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one or a
+ list of multiple nodes.
+ """
+ raise NotImplementedError()
+
+ def attr(
+ self, name: str, lineno: t.Optional[int] = None
+ ) -> nodes.ExtensionAttribute:
+ """Return an attribute node for the current extension. This is useful
+ to pass constants on extensions to generated template code.
+
+ ::
+
+ self.attr('_my_attribute', lineno=lineno)
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+ def call_method(
+ self,
+ name: str,
+ args: t.Optional[t.List[nodes.Expr]] = None,
+ kwargs: t.Optional[t.List[nodes.Keyword]] = None,
+ dyn_args: t.Optional[nodes.Expr] = None,
+ dyn_kwargs: t.Optional[nodes.Expr] = None,
+ lineno: t.Optional[int] = None,
+ ) -> nodes.Call:
+ """Call a method of the extension. This is a shortcut for
+ :meth:`attr` + :class:`jinja2.nodes.Call`.
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = []
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
+
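+# A minimal sketch (not upstream code) of a custom extension; the tag name
+# ``upper`` and its behavior are hypothetical:
+#
+# class UpperExtension(Extension):
+# tags = {"upper"}
+#
+# def parse(self, parser):
+# lineno = next(parser.stream).lineno
+# body = parser.parse_statements(("name:endupper",), drop_needle=True)
+# return nodes.FilterBlock(
+# body, nodes.Filter(None, "upper", [], [], None, None), lineno=lineno
+# )
+#
+# With ``Environment(extensions=[UpperExtension])`` the template
+# "{% upper %}text{% endupper %}" renders as "TEXT".
+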
+
+@pass_context
+def _gettext_alias(
+ __context: Context, *args: t.Any, **kwargs: t.Any
+) -> t.Union[t.Any, Undefined]:
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
+
+
+def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
+ rv = __context.call(func, __string)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping literal '%' as '%%',
+ # which the trans-block parser already does for static text.
+ return rv % variables # type: ignore
+
+ return gettext
+
+
+def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
+ @pass_context
+ def ngettext(
+ __context: Context,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __singular, __plural, __num)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return ngettext
+
+
+def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
+ @pass_context
+ def pgettext(
+ __context: Context, __string_ctx: str, __string: str, **variables: t.Any
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ rv = __context.call(func, __string_ctx, __string)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return pgettext
+
+
+def _make_new_npgettext(
+ func: t.Callable[[str, str, str, int], str]
+) -> t.Callable[..., str]:
+ @pass_context
+ def npgettext(
+ __context: Context,
+ __string_ctx: str,
+ __singular: str,
+ __plural: str,
+ __num: int,
+ **variables: t.Any,
+ ) -> str:
+ variables.setdefault("context", __string_ctx)
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __string_ctx, __singular, __plural, __num)
+
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables # type: ignore
+
+ return npgettext
+
+
+class InternationalizationExtension(Extension):
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
+
+ # TODO: the i18n extension is currently reevaluating values in a few
+ # situations. Take this example:
+ # {% trans count=something() %}{{ count }} foo{% pluralize
+ # %}{{ count }} foos{% endtrans %}
+ # something is called twice here. One time for the gettext value and
+ # the other time for the n-parameter of the ngettext function.
+
+ def __init__(self, environment: Environment) -> None:
+ super().__init__(environment)
+ environment.globals["_"] = _gettext_alias
+ environment.extend(
+ install_gettext_translations=self._install,
+ install_null_translations=self._install_null,
+ install_gettext_callables=self._install_callables,
+ uninstall_gettext_translations=self._uninstall,
+ extract_translations=self._extract,
+ newstyle_gettext=False,
+ )
+
+ def _install(
+ self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
+ ) -> None:
+ # ugettext and ungettext are preferred in case the I18N library
+ # is providing compatibility with older Python versions.
+ gettext = getattr(translations, "ugettext", None)
+ if gettext is None:
+ gettext = translations.gettext
+ ngettext = getattr(translations, "ungettext", None)
+ if ngettext is None:
+ ngettext = translations.ngettext
+
+ pgettext = getattr(translations, "pgettext", None)
+ npgettext = getattr(translations, "npgettext", None)
+ self._install_callables(
+ gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
+ )
+
+ def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
+ import gettext
+
+ translations = gettext.NullTranslations()
+
+ if hasattr(translations, "pgettext"):
+ # Python >= 3.8 provides NullTranslations.pgettext
+ pgettext = translations.pgettext # type: ignore
+ else:
+
+ def pgettext(c: str, s: str) -> str:
+ return s
+
+ if hasattr(translations, "npgettext"):
+ npgettext = translations.npgettext # type: ignore
+ else:
+
+ def npgettext(c: str, s: str, p: str, n: int) -> str:
+ return s if n == 1 else p
+
+ self._install_callables(
+ gettext=translations.gettext,
+ ngettext=translations.ngettext,
+ newstyle=newstyle,
+ pgettext=pgettext,
+ npgettext=npgettext,
+ )
+
+ def _install_callables(
+ self,
+ gettext: t.Callable[[str], str],
+ ngettext: t.Callable[[str, str, int], str],
+ newstyle: t.Optional[bool] = None,
+ pgettext: t.Optional[t.Callable[[str, str], str]] = None,
+ npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
+ ) -> None:
+ if newstyle is not None:
+ self.environment.newstyle_gettext = newstyle # type: ignore
+ if self.environment.newstyle_gettext: # type: ignore
+ gettext = _make_new_gettext(gettext)
+ ngettext = _make_new_ngettext(ngettext)
+
+ if pgettext is not None:
+ pgettext = _make_new_pgettext(pgettext)
+
+ if npgettext is not None:
+ npgettext = _make_new_npgettext(npgettext)
+
+ self.environment.globals.update(
+ gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
+ )
+
+ def _uninstall(self, translations: "_SupportedTranslations") -> None:
+ for key in ("gettext", "ngettext", "pgettext", "npgettext"):
+ self.environment.globals.pop(key, None)
+
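+ # Hedged usage sketch (not upstream code): enable the i18n extension and
+ # install null translations so ``{% trans %}`` blocks render unchanged:
+ #
+ # env = Environment(extensions=["jinja2.ext.i18n"])
+ # env.install_null_translations(newstyle=True)
+ # env.from_string("{% trans %}Hello {{ user }}!{% endtrans %}").render(user="x")
+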
+ def _extract(
+ self,
+ source: t.Union[str, nodes.Template],
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ ) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+ ]:
+ if isinstance(source, str):
+ source = self.environment.parse(source)
+ return extract_from_ast(source, gettext_functions)
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
+ """Parse a translatable tag."""
+ lineno = next(parser.stream).lineno
+ num_called_num = False
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+ # a later stage.
+ plural_expr: t.Optional[nodes.Expr] = None
+ plural_expr_assignment: t.Optional[nodes.Assign] = None
+ variables: t.Dict[str, nodes.Expr] = {}
+ trimmed = None
+ while parser.stream.current.type != "block_end":
+ if variables:
+ parser.stream.expect("comma")
+
+ # skip colon for python compatibility
+ if parser.stream.skip_if("colon"):
+ break
+
+ token = parser.stream.expect("name")
+ if token.value in variables:
+ parser.fail(
+ f"translatable variable {token.value!r} defined twice.",
+ token.lineno,
+ exc=TemplateAssertionError,
+ )
+
+ # expressions
+ if parser.stream.current.type == "assign":
+ next(parser.stream)
+ variables[token.value] = var = parser.parse_expression()
+ elif trimmed is None and token.value in ("trimmed", "notrimmed"):
+ trimmed = token.value == "trimmed"
+ continue
+ else:
+ variables[token.value] = var = nodes.Name(token.value, "load")
+
+ if plural_expr is None:
+ if isinstance(var, nodes.Call):
+ plural_expr = nodes.Name("_trans", "load")
+ variables[token.value] = plural_expr
+ plural_expr_assignment = nodes.Assign(
+ nodes.Name("_trans", "store"), var
+ )
+ else:
+ plural_expr = var
+ num_called_num = token.value == "num"
+
+ parser.stream.expect("block_end")
+
+ plural = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test("name:pluralize"):
+ have_plural = True
+ next(parser.stream)
+ if parser.stream.current.type != "block_end":
+ token = parser.stream.expect("name")
+ if token.value not in variables:
+ parser.fail(
+ f"unknown variable {token.value!r} for pluralization",
+ token.lineno,
+ exc=TemplateAssertionError,
+ )
+ plural_expr = variables[token.value]
+ num_called_num = token.value == "num"
+ parser.stream.expect("block_end")
+ plural_names, plural = self._parse_block(parser, False)
+ next(parser.stream)
+ referenced.update(plural_names)
+ else:
+ next(parser.stream)
+
+ # register free names as simple name expressions
+ for name in referenced:
+ if name not in variables:
+ variables[name] = nodes.Name(name, "load")
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ parser.fail("pluralize without variables", lineno)
+
+ if trimmed is None:
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
+ if trimmed:
+ singular = self._trim_whitespace(singular)
+ if plural:
+ plural = self._trim_whitespace(plural)
+
+ node = self._make_node(
+ singular,
+ plural,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
+ node.set_lineno(lineno)
+ if plural_expr_assignment is not None:
+ return [plural_expr_assignment, node]
+ else:
+ return node
+
+ def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
+ return _ws_re.sub(" ", string.strip())
+
+ def _parse_block(
+ self, parser: "Parser", allow_pluralize: bool
+ ) -> t.Tuple[t.List[str], str]:
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+
+ while True:
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
+ next(parser.stream)
+ elif parser.stream.current.type == "variable_begin":
+ next(parser.stream)
+ name = parser.stream.expect("name").value
+ referenced.append(name)
+ buf.append(f"%({name})s")
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
+ next(parser.stream)
+ if parser.stream.current.test("name:endtrans"):
+ break
+ elif parser.stream.current.test("name:pluralize"):
+ if allow_pluralize:
+ break
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ parser.fail(
+ "control structures in translatable sections are not allowed"
+ )
+ elif parser.stream.eos:
+ parser.fail("unclosed translation block")
+ else:
+ raise RuntimeError("internal parser error")
+
+ return referenced, concat(buf)
+
+ def _make_node(
+ self,
+ singular: str,
+ plural: t.Optional[str],
+ variables: t.Dict[str, nodes.Expr],
+ plural_expr: t.Optional[nodes.Expr],
+ vars_referenced: bool,
+ num_called_num: bool,
+ ) -> nodes.Output:
+ """Generates a useful node from the data provided."""
+ newstyle = self.environment.newstyle_gettext # type: ignore
+ node: nodes.Expr
+
+ # If no variables are referenced, undo the '%%' escaping that is
+ # only needed for old-style gettext invocations with variables.
+ if not vars_referenced and not newstyle:
+ singular = singular.replace("%%", "%")
+ if plural:
+ plural = plural.replace("%%", "%")
+
+ # singular only:
+ if plural_expr is None:
+ gettext = nodes.Name("gettext", "load")
+ node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
+
+ # singular and plural
+ else:
+ ngettext = nodes.Name("ngettext", "load")
+ node = nodes.Call(
+ ngettext,
+ [nodes.Const(singular), nodes.Const(plural), plural_expr],
+ [],
+ None,
+ None,
+ )
+
+ # in case newstyle gettext is used, the method is powerful
+ # enough to handle the variable expansion and autoescape
+ # handling itself
+ if newstyle:
+ for key, value in variables.items():
+ # the function adds that later anyway in case num was
+ # called num, so just skip it.
+ if num_called_num and key == "num":
+ continue
+ node.kwargs.append(nodes.Keyword(key, value))
+
+ # otherwise do that here
+ else:
+ # mark the return value as safe if we are in an
+ # environment with autoescaping turned on
+ node = nodes.MarkSafeIfAutoescape(node)
+ if variables:
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
+ return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+ """Adds a `do` tag to Jinja that works like the print statement just
+ that it doesn't print the return value.
+ """
+
+ tags = {"do"}
+
+ def parse(self, parser: "Parser") -> nodes.ExprStmt:
+ node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+ node.node = parser.parse_tuple()
+ return node
+
+
+class LoopControlExtension(Extension):
+ """Adds break and continue to the template engine."""
+
+ tags = {"break", "continue"}
+
+ def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
+ token = next(parser.stream)
+ if token.value == "break":
+ return nodes.Break(lineno=token.lineno)
+ return nodes.Continue(lineno=token.lineno)
+
+
+class WithExtension(Extension):
+ def __init__(self, environment: Environment) -> None:
+ super().__init__(environment)
+ warnings.warn(
+ "The 'with' extension is deprecated and will be removed in"
+ " Jinja 3.1. This is built in now.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+
+class AutoEscapeExtension(Extension):
+ def __init__(self, environment: Environment) -> None:
+ super().__init__(environment)
+ warnings.warn(
+ "The 'autoescape' extension is deprecated and will be"
+ " removed in Jinja 3.1. This is built in now.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+ <pre>{% debug %}</pre>
+
+ .. code-block:: text
+
+ {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
+ ...,
+ 'namespace': <class 'jinja2.utils.Namespace'>},
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser: "Parser") -> nodes.Output:
+ lineno = parser.stream.expect("name:debug").lineno
+ context = nodes.ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context: Context) -> str:
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ return pprint.pformat(result, depth=3, compact=True)
+
+
+def extract_from_ast(
+ ast: nodes.Template,
+ gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
+ babel_style: bool = True,
+) -> t.Iterator[
+ t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
+]:
+ """Extract localizable strings from the given template node. Per
+ default this function returns matches in babel style that means non string
+ parameters as well as keyword arguments are returned as `None`. This
+ allows Babel to figure out what you really meant if you are using
+ gettext functions that allow keyword arguments for placeholder expansion.
+ If you don't want that behavior set the `babel_style` parameter to `False`
+ which causes only strings to be returned and parameters are always stored
+ in tuples. As a consequence invalid gettext calls (calls without a single
+ string parameter or string parameters after non-string parameters) are
+ skipped.
+
+ This example explains the behavior:
+
+ >>> from jinja2 import Environment
+ >>> env = Environment()
+ >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+ >>> list(extract_from_ast(node))
+ [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+ >>> list(extract_from_ast(node, babel_style=False))
+ [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string, or a tuple of strings for functions
+ with multiple string arguments.
+
+ This extraction function operates on the AST and is therefore unable
+ to extract any comments. For comment support you have to use the Babel
+ extraction interface or extract comments yourself.
+ """
+ out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]
+
+ for node in ast.find_all(nodes.Call):
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
+ continue
+
+ strings: t.List[t.Optional[str]] = []
+
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ for _ in node.kwargs:
+ strings.append(None)
+ if node.dyn_args is not None:
+ strings.append(None)
+ if node.dyn_kwargs is not None:
+ strings.append(None)
+
+ if not babel_style:
+ out = tuple(x for x in strings if x is not None)
+
+ if not out:
+ continue
+ else:
+ if len(strings) == 1:
+ out = strings[0]
+ else:
+ out = tuple(strings)
+
+ yield node.lineno, node.node.name, out
+
+
+class _CommentFinder:
+ """Helper class to find comments in a token stream. Can only
+ find comments for gettext calls forwards. Once the comment
+ from line 4 is found, a comment for line 1 will not return a
+ usable value.
+ """
+
+ def __init__(
+ self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
+ ) -> None:
+ self.tokens = tokens
+ self.comment_tags = comment_tags
+ self.offset = 0
+ self.last_lineno = 0
+
+ def find_backwards(self, offset: int) -> t.List[str]:
+ try:
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
+ try:
+ prefix, comment = token_value.split(None, 1)
+ except ValueError:
+ continue
+ if prefix in self.comment_tags:
+ return [comment.rstrip()]
+ return []
+ finally:
+ self.offset = offset
+
+ def find_comments(self, lineno: int) -> t.List[str]:
+ if not self.comment_tags or self.last_lineno > lineno:
+ return []
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
+ if token_lineno > lineno:
+ return self.find_backwards(self.offset + idx)
+ return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(
+ fileobj: t.BinaryIO,
+ keywords: t.Sequence[str],
+ comment_tags: t.Sequence[str],
+ options: t.Dict[str, t.Any],
+) -> t.Iterator[
+ t.Tuple[
+ int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
+ ]
+]:
+ """Babel extraction method for Jinja templates.
+
+ .. versionchanged:: 2.3
+ Basic support for translation comments was added. If `comment_tags`
+ is now set to a list of keywords for extraction, the extractor will
+ try to find the best preceding comment that begins with one of the
+ keywords. For best results, make sure to not have more than one
+ gettext call in one line of code and the matching comment in the
+ same line or the line before.
+
+ .. versionchanged:: 2.5.1
+ The `newstyle_gettext` flag can be set to `True` to enable newstyle
+ gettext calls.
+
+ .. versionchanged:: 2.7
+ A `silent` option can now be provided. If set to `False` template
+ syntax errors are propagated instead of being ignored.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results.
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
+ """
+ extensions: t.Dict[t.Type[Extension], None] = {}
+
+ for extension_name in options.get("extensions", "").split(","):
+ extension_name = extension_name.strip()
+
+ if not extension_name:
+ continue
+
+ extensions[import_string(extension_name)] = None
+
+ if InternationalizationExtension not in extensions:
+ extensions[InternationalizationExtension] = None
+
+ def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
+ return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
+
+ silent = getbool(options, "silent", True)
+ environment = Environment(
+ options.get("block_start_string", defaults.BLOCK_START_STRING),
+ options.get("block_end_string", defaults.BLOCK_END_STRING),
+ options.get("variable_start_string", defaults.VARIABLE_START_STRING),
+ options.get("variable_end_string", defaults.VARIABLE_END_STRING),
+ options.get("comment_start_string", defaults.COMMENT_START_STRING),
+ options.get("comment_end_string", defaults.COMMENT_END_STRING),
+ options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
+ defaults.NEWLINE_SEQUENCE,
+ getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
+ tuple(extensions),
+ cache_size=0,
+ auto_reload=False,
+ )
+
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
+ environment.newstyle_gettext = True # type: ignore
+
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
+ try:
+ node = environment.parse(source)
+ tokens = list(environment.lex(environment.preprocess(source)))
+ except TemplateSyntaxError:
+ if not silent:
+ raise
+ # skip templates with syntax errors
+ return
+
+ finder = _CommentFinder(tokens, comment_tags)
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, finder.find_comments(lineno)
+
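+# Hedged usage sketch (not upstream code). ``babel_extract`` is normally
+# wired up through a Babel mapping file rather than called directly, e.g.:
+#
+# # babel.cfg (hypothetical)
+# [jinja2: templates/**.html]
+# extensions = jinja2.ext.do
+# trimmed = true
+#
+# A direct call takes a binary file object plus keywords and options:
+#
+# with open("templates/index.html", "rb") as f:
+# for lineno, func, message, comments in babel_extract(
+# f, GETTEXT_FUNCTIONS, ["NOTE:"], {"trimmed": "true"}
+# ):
+# print(lineno, func, message, comments)
+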
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+with_ = WithExtension
+autoescape = AutoEscapeExtension
+debug = DebugExtension
diff --git a/lib/spack/external/_vendoring/jinja2/filters.py b/lib/spack/external/_vendoring/jinja2/filters.py
new file mode 100644
index 0000000000..ffb98bf4e3
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/filters.py
@@ -0,0 +1,1824 @@
+"""Built-in template filters used with the ``|`` operator."""
+import math
+import random
+import re
+import typing
+import typing as t
+import warnings
+from collections import abc
+from itertools import chain
+from itertools import groupby
+
+from markupsafe import escape
+from markupsafe import Markup
+from markupsafe import soft_str
+
+from .async_utils import async_variant
+from .async_utils import auto_aiter
+from .async_utils import auto_await
+from .async_utils import auto_to_list
+from .exceptions import FilterArgumentError
+from .runtime import Undefined
+from .utils import htmlsafe_json_dumps
+from .utils import pass_context
+from .utils import pass_environment
+from .utils import pass_eval_context
+from .utils import pformat
+from .utils import url_quote
+from .utils import urlize
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+ from .nodes import EvalContext
+ from .runtime import Context
+ from .sandbox import SandboxedEnvironment # noqa: F401
+
+ class HasHTML(te.Protocol):
+ def __html__(self) -> str:
+ pass
+
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+K = t.TypeVar("K")
+V = t.TypeVar("V")
+
+
+def contextfilter(f: F) -> F:
+ """Pass the context as the first argument to the decorated function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use :func:`~jinja2.pass_context`
+ instead.
+ """
+ warnings.warn(
+ "'contextfilter' is renamed to 'pass_context', the old name"
+ " will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_context(f)
+
+
+def evalcontextfilter(f: F) -> F:
+ """Pass the eval context as the first argument to the decorated
+ function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use
+ :func:`~jinja2.pass_eval_context` instead.
+
+ .. versionadded:: 2.4
+ """
+ warnings.warn(
+ "'evalcontextfilter' is renamed to 'pass_eval_context', the old"
+ " name will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_eval_context(f)
+
+
+def environmentfilter(f: F) -> F:
+ """Pass the environment as the first argument to the decorated
+ function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use
+ :func:`~jinja2.pass_environment` instead.
+ """
+ warnings.warn(
+ "'environmentfilter' is renamed to 'pass_environment', the old"
+ " name will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_environment(f)
+
+
+def ignore_case(value: V) -> V:
+ """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
+ to lowercase and returns other types as-is."""
+ if isinstance(value, str):
+ return t.cast(V, value.lower())
+
+ return value
+
+
+def make_attrgetter(
+ environment: "Environment",
+ attribute: t.Optional[t.Union[str, int]],
+ postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
+ default: t.Optional[t.Any] = None,
+) -> t.Callable[[t.Any], t.Any]:
+ """Returns a callable that looks up the given attribute from a
+ passed object with the rules of the environment. Dots are allowed
+ to access attributes of attributes. Integer parts in paths are
+ looked up as integers.
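+
+ For example, ``make_attrgetter(env, "user.0.name")`` returns a callable
+ roughly equivalent to
+ ``lambda item: env.getitem(env.getitem(env.getitem(item, "user"), 0), "name")``.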
+ """
+ parts = _prepare_attribute_parts(attribute)
+
+ def attrgetter(item: t.Any) -> t.Any:
+ for part in parts:
+ item = environment.getitem(item, part)
+
+ if default is not None and isinstance(item, Undefined):
+ item = default
+
+ if postprocess is not None:
+ item = postprocess(item)
+
+ return item
+
+ return attrgetter
+
+
+def make_multi_attrgetter(
+ environment: "Environment",
+ attribute: t.Optional[t.Union[str, int]],
+ postprocess: t.Optional[t.Callable[[t.Any], t.Any]] = None,
+) -> t.Callable[[t.Any], t.List[t.Any]]:
+ """Returns a callable that looks up the given comma separated
+ attributes from a passed object with the rules of the environment.
+ Dots are allowed to access attributes of each attribute. Integer
+ parts in paths are looked up as integers.
+
+ The value returned by the returned callable is a list of extracted
+ attribute values.
+
+ Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
+ """
+ if isinstance(attribute, str):
+ split: t.Sequence[t.Union[str, int, None]] = attribute.split(",")
+ else:
+ split = [attribute]
+
+ parts = [_prepare_attribute_parts(item) for item in split]
+
+ def attrgetter(item: t.Any) -> t.List[t.Any]:
+ items = [None] * len(parts)
+
+ for i, attribute_part in enumerate(parts):
+ item_i = item
+
+ for part in attribute_part:
+ item_i = environment.getitem(item_i, part)
+
+ if postprocess is not None:
+ item_i = postprocess(item_i)
+
+ items[i] = item_i
+
+ return items
+
+ return attrgetter
+
+
+def _prepare_attribute_parts(
+ attr: t.Optional[t.Union[str, int]]
+) -> t.List[t.Union[str, int]]:
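+ # e.g. "address.city.0" -> ["address", "city", 0]; None -> []; 1 -> [1]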
+ if attr is None:
+ return []
+
+ if isinstance(attr, str):
+ return [int(x) if x.isdigit() else x for x in attr.split(".")]
+
+ return [attr]
+
+
+def do_forceescape(value: "t.Union[str, HasHTML]") -> Markup:
+ """Enforce HTML escaping. This will probably double escape variables."""
+ if hasattr(value, "__html__"):
+ value = t.cast("HasHTML", value).__html__()
+
+ return escape(str(value))
+
+
+def do_urlencode(
+ value: t.Union[str, t.Mapping[str, t.Any], t.Iterable[t.Tuple[str, t.Any]]]
+) -> str:
+ """Quote data for use in a URL path or query using UTF-8.
+
+ Basic wrapper around :func:`urllib.parse.quote` when given a
+ string, or :func:`urllib.parse.urlencode` for a dict or iterable.
+
+ :param value: Data to quote. A string will be quoted directly. A
+ dict or iterable of ``(key, value)`` pairs will be joined as a
+ query string.
+
+ When given a string, "/" is not quoted. HTTP servers treat "/" and
+ "%2F" equivalently in paths. If you need quoted slashes, use the
+ ``|replace("/", "%2F")`` filter.
+
+ .. versionadded:: 2.7
+ """
+ if isinstance(value, str) or not isinstance(value, abc.Iterable):
+ return url_quote(value)
+
+ if isinstance(value, dict):
+ items: t.Iterable[t.Tuple[str, t.Any]] = value.items()
+ else:
+ items = value # type: ignore
+
+ return "&".join(
+ f"{url_quote(k, for_qs=True)}={url_quote(v, for_qs=True)}" for k, v in items
+ )
+
+
+@pass_eval_context
+def do_replace(
+ eval_ctx: "EvalContext", s: str, old: str, new: str, count: t.Optional[int] = None
+) -> str:
+ """Return a copy of the value with all occurrences of a substring
+ replaced with a new one. The first argument is the substring
+ that should be replaced, the second is the replacement string.
+ If the optional third argument ``count`` is given, only the first
+ ``count`` occurrences are replaced:
+
+ .. sourcecode:: jinja
+
+ {{ "Hello World"|replace("Hello", "Goodbye") }}
+ -> Goodbye World
+
+ {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+ -> d'oh, d'oh, aaargh
+ """
+ if count is None:
+ count = -1
+
+ if not eval_ctx.autoescape:
+ return str(s).replace(str(old), str(new), count)
+
+ if (
+ hasattr(old, "__html__")
+ or hasattr(new, "__html__")
+ and not hasattr(s, "__html__")
+ ):
+ s = escape(s)
+ else:
+ s = soft_str(s)
+
+ return s.replace(soft_str(old), soft_str(new), count)
+
+
+def do_upper(s: str) -> str:
+ """Convert a value to uppercase."""
+ return soft_str(s).upper()
+
+
+def do_lower(s: str) -> str:
+ """Convert a value to lowercase."""
+ return soft_str(s).lower()
+
+
+@pass_eval_context
+def do_xmlattr(
+ eval_ctx: "EvalContext", d: t.Mapping[str, t.Any], autospace: bool = True
+) -> str:
+ """Create an SGML/XML attribute string based on the items in a dict.
+ All values that are neither `none` nor `undefined` are automatically
+ escaped:
+
+ .. sourcecode:: html+jinja
+
+ <ul{{ {'class': 'my_list', 'missing': none,
+ 'id': 'list-%d'|format(variable)}|xmlattr }}>
+ ...
+ </ul>
+
+ Results in something like this:
+
+ .. sourcecode:: html
+
+ <ul class="my_list" id="list-42">
+ ...
+ </ul>
+
+ As you can see, it automatically prepends a space in front of the
+ items if the filter returned something, unless the second parameter
+ is false.
+ """
+ rv = " ".join(
+ f'{escape(key)}="{escape(value)}"'
+ for key, value in d.items()
+ if value is not None and not isinstance(value, Undefined)
+ )
+
+ if autospace and rv:
+ rv = " " + rv
+
+ if eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ return rv
+
+
+def do_capitalize(s: str) -> str:
+ """Capitalize a value. The first character will be uppercase, all others
+ lowercase.
+ """
+ return soft_str(s).capitalize()
+
+
+_word_beginning_split_re = re.compile(r"([-\s({\[<]+)")
+
+
+def do_title(s: str) -> str:
+ """Return a titlecased version of the value. I.e. words will start with
+ uppercase letters, all remaining characters are lowercase.
+ """
+ return "".join(
+ [
+ item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_str(s))
+ if item
+ ]
+ )
+
+
+def do_dictsort(
+ value: t.Mapping[K, V],
+ case_sensitive: bool = False,
+ by: 'te.Literal["key", "value"]' = "key",
+ reverse: bool = False,
+) -> t.List[t.Tuple[K, V]]:
+ """Sort a dict and yield (key, value) pairs. Python dicts may not
+ be in the order you want to display them in, so sort them first.
+
+ .. sourcecode:: jinja
+
+ {% for key, value in mydict|dictsort %}
+ sort the dict by key, case insensitive
+
+ {% for key, value in mydict|dictsort(reverse=true) %}
+ sort the dict by key, case insensitive, reverse order
+
+ {% for key, value in mydict|dictsort(true) %}
+ sort the dict by key, case sensitive
+
+ {% for key, value in mydict|dictsort(false, 'value') %}
+ sort the dict by value, case insensitive
+ """
+ if by == "key":
+ pos = 0
+ elif by == "value":
+ pos = 1
+ else:
+ raise FilterArgumentError('You can only sort by either "key" or "value"')
+
+ def sort_func(item: t.Tuple[t.Any, t.Any]) -> t.Any:
+ value = item[pos]
+
+ if not case_sensitive:
+ value = ignore_case(value)
+
+ return value
+
+ return sorted(value.items(), key=sort_func, reverse=reverse)
+
+
+@pass_environment
+def do_sort(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ reverse: bool = False,
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.List[V]":
+ """Sort an iterable using Python's :func:`sorted`.
+
+ .. sourcecode:: jinja
+
+ {% for city in cities|sort %}
+ ...
+ {% endfor %}
+
+ :param reverse: Sort descending instead of ascending.
+ :param case_sensitive: When sorting strings, sort upper and lower
+ case separately.
+ :param attribute: When sorting objects or dicts, an attribute or
+ key to sort by. Can use dot notation like ``"address.city"``.
+ Can be a list of attributes like ``"age,name"``.
+
+ The sort is stable; it does not change the relative order of
+ elements that compare equal. This makes it possible to chain
+ sorts on different attributes and orderings.
+
+ .. sourcecode:: jinja
+
+ {% for user in users|sort(attribute="name")
+ |sort(reverse=true, attribute="age") %}
+ ...
+ {% endfor %}
+
+ As a shortcut to chaining when the direction is the same for all
+ attributes, pass a comma separated list of attributes.
+
+ .. sourcecode:: jinja
+
+ {% for user in users|sort(attribute="age,name") %}
+ ...
+ {% endfor %}
+
+ .. versionchanged:: 2.11.0
+ The ``attribute`` parameter can be a comma separated list of
+ attributes, e.g. ``"age,name"``.
+
+ .. versionchanged:: 2.6
+ The ``attribute`` parameter was added.
+ """
+ key_func = make_multi_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ return sorted(value, key=key_func, reverse=reverse)
+
+
+@pass_environment
+def do_unique(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Iterator[V]":
+ """Returns a list of unique items from the given iterable.
+
+ .. sourcecode:: jinja
+
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
+ -> ['foo', 'bar', 'foobar']
+
+ The unique items are yielded in the same order as their first occurrence in
+ the iterable passed to the filter.
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Filter objects with unique values for this attribute.
+ """
+ getter = make_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ seen = set()
+
+ for item in value:
+ key = getter(item)
+
+ if key not in seen:
+ seen.add(key)
+ yield item
+
+
+def _min_or_max(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ func: "t.Callable[..., V]",
+ case_sensitive: bool,
+ attribute: t.Optional[t.Union[str, int]],
+) -> "t.Union[V, Undefined]":
+ it = iter(value)
+
+ try:
+ first = next(it)
+ except StopIteration:
+ return environment.undefined("No aggregated item, sequence was empty.")
+
+ key_func = make_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ return func(chain([first], it), key=key_func)
+
+
+@pass_environment
+def do_min(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Union[V, Undefined]":
+ """Return the smallest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|min }}
+ -> 1
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the min value of this attribute.
+ """
+ return _min_or_max(environment, value, min, case_sensitive, attribute)
+
+
+@pass_environment
+def do_max(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ case_sensitive: bool = False,
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> "t.Union[V, Undefined]":
+ """Return the largest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|max }}
+ -> 3
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the max value of this attribute.
+ """
+ return _min_or_max(environment, value, max, case_sensitive, attribute)
+
+
+def do_default(
+ value: V,
+ default_value: V = "", # type: ignore
+ boolean: bool = False,
+) -> V:
+ """If the value is undefined it will return the passed default value,
+ otherwise the value of the variable:
+
+ .. sourcecode:: jinja
+
+ {{ my_variable|default('my_variable is not defined') }}
+
+ This will output the value of ``my_variable`` if the variable was
+ defined, otherwise ``'my_variable is not defined'``. If you want
+ to use default with variables that evaluate to false you have to
+ set the second parameter to `true`:
+
+ .. sourcecode:: jinja
+
+ {{ ''|default('the string was empty', true) }}
+
+ .. versionchanged:: 2.11
+ It's now possible to configure the :class:`~jinja2.Environment` with
+ :class:`~jinja2.ChainableUndefined` to make the `default` filter work
+ on nested elements and attributes that may contain undefined values
+ in the chain without getting an :exc:`~jinja2.UndefinedError`.
+ """
+ if isinstance(value, Undefined) or (boolean and not value):
+ return default_value
+
+ return value
+
+
+@pass_eval_context
+def sync_do_join(
+ eval_ctx: "EvalContext",
+ value: t.Iterable,
+ d: str = "",
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> str:
+ """Return a string which is the concatenation of the strings in the
+ sequence. The separator between elements is an empty string by
+ default; you can define it with the optional parameter:
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|join('|') }}
+ -> 1|2|3
+
+ {{ [1, 2, 3]|join }}
+ -> 123
+
+ It is also possible to join certain attributes of an object:
+
+ .. sourcecode:: jinja
+
+ {{ users|join(', ', attribute='username') }}
+
+ .. versionadded:: 2.6
+ The `attribute` parameter was added.
+ """
+ if attribute is not None:
+ value = map(make_attrgetter(eval_ctx.environment, attribute), value)
+
+ # no automatic escaping? then joining is a lot easier
+ if not eval_ctx.autoescape:
+ return str(d).join(map(str, value))
+
+ # if the delimiter doesn't have an html representation we check
+ # if any of the items has. If yes we do a coercion to Markup
+ if not hasattr(d, "__html__"):
+ value = list(value)
+ do_escape = False
+
+ for idx, item in enumerate(value):
+ if hasattr(item, "__html__"):
+ do_escape = True
+ else:
+ value[idx] = str(item)
+
+ if do_escape:
+ d = escape(d)
+ else:
+ d = str(d)
+
+ return d.join(value)
+
+ # no html involved, do normal joining
+ return soft_str(d).join(map(soft_str, value))
+
+
+@async_variant(sync_do_join) # type: ignore
+async def do_join(
+ eval_ctx: "EvalContext",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ d: str = "",
+ attribute: t.Optional[t.Union[str, int]] = None,
+) -> str:
+ return sync_do_join(eval_ctx, await auto_to_list(value), d, attribute)
+
+
+def do_center(value: str, width: int = 80) -> str:
+ """Centers the value in a field of a given width."""
+ return soft_str(value).center(width)
+
+
+@pass_environment
+def sync_do_first(
+ environment: "Environment", seq: "t.Iterable[V]"
+) -> "t.Union[V, Undefined]":
+ """Return the first item of a sequence."""
+ try:
+ return next(iter(seq))
+ except StopIteration:
+ return environment.undefined("No first item, sequence was empty.")
+
+
+@async_variant(sync_do_first) # type: ignore
+async def do_first(
+ environment: "Environment", seq: "t.Union[t.AsyncIterable[V], t.Iterable[V]]"
+) -> "t.Union[V, Undefined]":
+ try:
+ return await auto_aiter(seq).__anext__()
+ except StopAsyncIteration:
+ return environment.undefined("No first item, sequence was empty.")
+
+
+@pass_environment
+def do_last(
+ environment: "Environment", seq: "t.Reversible[V]"
+) -> "t.Union[V, Undefined]":
+ """Return the last item of a sequence.
+
+ Note: Does not work with generators. You may want to explicitly
+ convert it to a list:
+
+ .. sourcecode:: jinja
+
+ {{ data | selectattr('name', '==', 'Jinja') | list | last }}
+ """
+ try:
+ return next(iter(reversed(seq)))
+ except StopIteration:
+ return environment.undefined("No last item, sequence was empty.")
+
+
+# No async do_last, it may not be safe in async mode.
+
+
+@pass_context
+def do_random(context: "Context", seq: "t.Sequence[V]") -> "t.Union[V, Undefined]":
+ """Return a random item from the sequence."""
+ try:
+ return random.choice(seq)
+ except IndexError:
+ return context.environment.undefined("No random item, sequence was empty.")
+
+
+def do_filesizeformat(value: t.Union[str, float, int], binary: bool = False) -> str:
+ """Format the value like a 'human-readable' file size (i.e. 13 kB,
+ 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
+ Giga, etc.), if the second parameter is set to `True` the binary
+ prefixes are used (Mebi, Gibi).
+ """
+ bytes = float(value)
+ base = 1024 if binary else 1000
+ prefixes = [
+ ("KiB" if binary else "kB"),
+ ("MiB" if binary else "MB"),
+ ("GiB" if binary else "GB"),
+ ("TiB" if binary else "TB"),
+ ("PiB" if binary else "PB"),
+ ("EiB" if binary else "EB"),
+ ("ZiB" if binary else "ZB"),
+ ("YiB" if binary else "YB"),
+ ]
+
+ if bytes == 1:
+ return "1 Byte"
+ elif bytes < base:
+ return f"{int(bytes)} Bytes"
+ else:
+ for i, prefix in enumerate(prefixes):
+ unit = base ** (i + 2)
+
+ if bytes < unit:
+ return f"{base * bytes / unit:.1f} {prefix}"
+
+ return f"{base * bytes / unit:.1f} {prefix}"
+
+
+def do_pprint(value: t.Any) -> str:
+ """Pretty print a variable. Useful for debugging."""
+ return pformat(value)
+
+
+_uri_scheme_re = re.compile(r"^([\w.+-]{2,}:(/){0,2})$")
+
+
+@pass_eval_context
+def do_urlize(
+ eval_ctx: "EvalContext",
+ value: str,
+ trim_url_limit: t.Optional[int] = None,
+ nofollow: bool = False,
+ target: t.Optional[str] = None,
+ rel: t.Optional[str] = None,
+ extra_schemes: t.Optional[t.Iterable[str]] = None,
+) -> str:
+ """Convert URLs in text into clickable links.
+
+ This may not recognize links in some situations. Usually, a more
+ comprehensive formatter, such as a Markdown library, is a better
+ choice.
+
+ Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
+ addresses. Links with trailing punctuation (periods, commas, closing
+ parentheses) and leading punctuation (opening parentheses) are
+ recognized excluding the punctuation. Email addresses that include
+ header fields are not recognized (for example,
+ ``mailto:address@example.com?cc=copy@example.com``).
+
+ :param value: Original text containing URLs to link.
+ :param trim_url_limit: Shorten displayed URL values to this length.
+ :param nofollow: Add the ``rel=nofollow`` attribute to links.
+ :param target: Add the ``target`` attribute to links.
+ :param rel: Add the ``rel`` attribute to links.
+ :param extra_schemes: Recognize URLs that start with these schemes
+ in addition to the default behavior. Defaults to
+ ``env.policies["urlize.extra_schemes"]``, which defaults to no
+ extra schemes.
+
+ .. versionchanged:: 3.0
+ The ``extra_schemes`` parameter was added.
+
+ .. versionchanged:: 3.0
+ Generate ``https://`` links for URLs without a scheme.
+
+ .. versionchanged:: 3.0
+ The parsing rules were updated. Recognize email addresses with
+ or without the ``mailto:`` scheme. Validate IP addresses. Ignore
+ parentheses and brackets in more cases.
+
+ .. versionchanged:: 2.8
+ The ``target`` parameter was added.
+ """
+ policies = eval_ctx.environment.policies
+ rel_parts = set((rel or "").split())
+
+ if nofollow:
+ rel_parts.add("nofollow")
+
+ rel_parts.update((policies["urlize.rel"] or "").split())
+ rel = " ".join(sorted(rel_parts)) or None
+
+ if target is None:
+ target = policies["urlize.target"]
+
+ if extra_schemes is None:
+ extra_schemes = policies["urlize.extra_schemes"] or ()
+
+ for scheme in extra_schemes:
+ if _uri_scheme_re.fullmatch(scheme) is None:
+ raise FilterArgumentError(f"{scheme!r} is not a valid URI scheme prefix.")
+
+ rv = urlize(
+ value,
+ trim_url_limit=trim_url_limit,
+ rel=rel,
+ target=target,
+ extra_schemes=extra_schemes,
+ )
+
+ if eval_ctx.autoescape:
+ rv = Markup(rv)
+
+ return rv
+
+
+def do_indent(
+ s: str, width: t.Union[int, str] = 4, first: bool = False, blank: bool = False
+) -> str:
+ """Return a copy of the string with each line indented by 4 spaces. The
+ first line and blank lines are not indented by default.
+
+ :param width: Number of spaces, or a string, to indent by.
+ :param first: Don't skip indenting the first line.
+ :param blank: Don't skip indenting empty lines.
+
+ .. versionchanged:: 3.0
+ ``width`` can be a string.
+
+ .. versionchanged:: 2.10
+ Blank lines are not indented by default.
+
+ Rename the ``indentfirst`` argument to ``first``.
+ """
+ if isinstance(width, str):
+ indention = width
+ else:
+ indention = " " * width
+
+ newline = "\n"
+
+ if isinstance(s, Markup):
+ indention = Markup(indention)
+ newline = Markup(newline)
+
+ s += newline # this quirk is necessary for splitlines method
+
+ if blank:
+ rv = (newline + indention).join(s.splitlines())
+ else:
+ lines = s.splitlines()
+ rv = lines.pop(0)
+
+ if lines:
+ rv += newline + newline.join(
+ indention + line if line else line for line in lines
+ )
+
+ if first:
+ rv = indention + rv
+
+ return rv
+
+
+@pass_environment
+def do_truncate(
+ env: "Environment",
+ s: str,
+ length: int = 255,
+ killwords: bool = False,
+ end: str = "...",
+ leeway: t.Optional[int] = None,
+) -> str:
+ """Return a truncated copy of the string. The length is specified
+ with the first parameter which defaults to ``255``. If the second
+ parameter is ``true`` the filter will cut the text at length. Otherwise
+ it will discard the last word. If the text was in fact
+ truncated it will append an ellipsis sign (``"..."``). If you want a
+ different ellipsis sign than ``"..."`` you can specify it using the
+ third parameter. Strings that only exceed the length by the tolerance
+ margin given in the fourth parameter will not be truncated.
+
+ .. sourcecode:: jinja
+
+ {{ "foo bar baz qux"|truncate(9) }}
+ -> "foo..."
+ {{ "foo bar baz qux"|truncate(9, True) }}
+ -> "foo ba..."
+ {{ "foo bar baz qux"|truncate(11) }}
+ -> "foo bar baz qux"
+ {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
+ -> "foo bar..."
+
+ The default leeway is 5 on newer Jinja versions (it was 0 before),
+ and it can be reconfigured globally.
+ """
+ if leeway is None:
+ leeway = env.policies["truncate.leeway"]
+
+ assert length >= len(end), f"expected length >= {len(end)}, got {length}"
+ assert leeway >= 0, f"expected leeway >= 0, got {leeway}"
+
+ if len(s) <= length + leeway:
+ return s
+
+ if killwords:
+ return s[: length - len(end)] + end
+
+ result = s[: length - len(end)].rsplit(" ", 1)[0]
+ return result + end
+
+
+@pass_environment
+def do_wordwrap(
+ environment: "Environment",
+ s: str,
+ width: int = 79,
+ break_long_words: bool = True,
+ wrapstring: t.Optional[str] = None,
+ break_on_hyphens: bool = True,
+) -> str:
+ """Wrap a string to the given width. Existing newlines are treated
+ as paragraphs to be wrapped separately.
+
+ :param s: Original text to wrap.
+ :param width: Maximum length of wrapped lines.
+ :param break_long_words: If a word is longer than ``width``, break
+ it across lines.
+ :param break_on_hyphens: If a word contains hyphens, it may be split
+ across lines.
+ :param wrapstring: String to join each wrapped line. Defaults to
+ :attr:`Environment.newline_sequence`.
+
+ .. versionchanged:: 2.11
+ Existing newlines are treated as paragraphs wrapped separately.
+
+ .. versionchanged:: 2.11
+ Added the ``break_on_hyphens`` parameter.
+
+ .. versionchanged:: 2.7
+ Added the ``wrapstring`` parameter.
+ """
+ import textwrap
+
+ if wrapstring is None:
+ wrapstring = environment.newline_sequence
+
+ # textwrap.wrap doesn't consider existing newlines when wrapping.
+ # If the string has a newline before width, wrap will still insert
+ # a newline at width, resulting in a short line. Instead, split and
+ # wrap each paragraph individually.
+ return wrapstring.join(
+ [
+ wrapstring.join(
+ textwrap.wrap(
+ line,
+ width=width,
+ expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words,
+ break_on_hyphens=break_on_hyphens,
+ )
+ )
+ for line in s.splitlines()
+ ]
+ )
+
+
+_word_re = re.compile(r"\w+")
+
+
+def do_wordcount(s: str) -> int:
+ """Count the words in that string."""
+ return len(_word_re.findall(soft_str(s)))
+
+
+def do_int(value: t.Any, default: int = 0, base: int = 10) -> int:
+ """Convert the value into an integer. If the
+ conversion doesn't work it will return ``0``. You can
+ override this default using the first parameter. You
+ can also override the default base (10) in the second
+ parameter, which handles input with prefixes such as
+ 0b, 0o and 0x for bases 2, 8 and 16 respectively.
+ The base is ignored for decimal numbers and non-string values.
+ """
+ try:
+ if isinstance(value, str):
+ return int(value, base)
+
+ return int(value)
+ except (TypeError, ValueError):
+ # this quirk is necessary so that "42.23"|int gives 42.
+ try:
+ return int(float(value))
+ except (TypeError, ValueError):
+ return default
+
+
+def do_float(value: t.Any, default: float = 0.0) -> float:
+ """Convert the value into a floating point number. If the
+ conversion doesn't work it will return ``0.0``. You can
+ override this default using the first parameter.
+ """
+ try:
+ return float(value)
+ except (TypeError, ValueError):
+ return default
+
+
+def do_format(value: str, *args: t.Any, **kwargs: t.Any) -> str:
+ """Apply the given values to a `printf-style`_ format string, like
+ ``string % values``.
+
+ .. sourcecode:: jinja
+
+ {{ "%s, %s!"|format(greeting, name) }}
+ -> Hello, World!
+
+ In most cases it should be more convenient and efficient to use the
+ ``%`` operator or :meth:`str.format`.
+
+ .. code-block:: text
+
+ {{ "%s, %s!" % (greeting, name) }}
+ {{ "{}, {}!".format(greeting, name) }}
+
+ .. _printf-style: https://docs.python.org/library/stdtypes.html
+ #printf-style-string-formatting
+ """
+ if args and kwargs:
+ raise FilterArgumentError(
+ "can't handle positional and keyword arguments at the same time"
+ )
+
+ return soft_str(value) % (kwargs or args)
+
+
+def do_trim(value: str, chars: t.Optional[str] = None) -> str:
+ """Strip leading and trailing characters, by default whitespace."""
+ return soft_str(value).strip(chars)
+
+
+def do_striptags(value: "t.Union[str, HasHTML]") -> str:
+ """Strip SGML/XML tags and replace adjacent whitespace by one space."""
+ if hasattr(value, "__html__"):
+ value = t.cast("HasHTML", value).__html__()
+
+ return Markup(str(value)).striptags()
+
+
+def sync_do_slice(
+ value: "t.Collection[V]", slices: int, fill_with: "t.Optional[V]" = None
+) -> "t.Iterator[t.List[V]]":
+ """Slice an iterator and return a list of lists containing
+ those items. Useful if you want to create a div containing
+ three ul tags that represent columns:
+
+ .. sourcecode:: html+jinja
+
+ <div class="columnwrapper">
+ {%- for column in items|slice(3) %}
+ <ul class="column-{{ loop.index }}">
+ {%- for item in column %}
+ <li>{{ item }}</li>
+ {%- endfor %}
+ </ul>
+ {%- endfor %}
+ </div>
+
+ If you pass it a second argument it's used to fill missing
+ values on the last iteration.
+ """
+ seq = list(value)
+ length = len(seq)
+ items_per_slice = length // slices
+ slices_with_extra = length % slices
+ offset = 0
+
+ for slice_number in range(slices):
+ start = offset + slice_number * items_per_slice
+
+ if slice_number < slices_with_extra:
+ offset += 1
+
+ end = offset + (slice_number + 1) * items_per_slice
+ tmp = seq[start:end]
+
+ if fill_with is not None and slice_number >= slices_with_extra:
+ tmp.append(fill_with)
+
+ yield tmp
+
+
+@async_variant(sync_do_slice) # type: ignore
+async def do_slice(
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ slices: int,
+ fill_with: t.Optional[t.Any] = None,
+) -> "t.Iterator[t.List[V]]":
+ return sync_do_slice(await auto_to_list(value), slices, fill_with)
+
+
+def do_batch(
+ value: "t.Iterable[V]", linecount: int, fill_with: "t.Optional[V]" = None
+) -> "t.Iterator[t.List[V]]":
+ """
+ A filter that batches items. It works much like `slice`, just the
+ other way round. It returns a list of lists with the given number
+ of items. If you provide a second parameter, it is used to fill up
+ missing items. See this example:
+
+ .. sourcecode:: html+jinja
+
+ <table>
+ {%- for row in items|batch(3, '&nbsp;') %}
+ <tr>
+ {%- for column in row %}
+ <td>{{ column }}</td>
+ {%- endfor %}
+ </tr>
+ {%- endfor %}
+ </table>
+ """
+ tmp: "t.List[V]" = []
+
+ for item in value:
+ if len(tmp) == linecount:
+ yield tmp
+ tmp = []
+
+ tmp.append(item)
+
+ if tmp:
+ if fill_with is not None and len(tmp) < linecount:
+ tmp += [fill_with] * (linecount - len(tmp))
+
+ yield tmp
+
+
+def do_round(
+ value: float,
+ precision: int = 0,
+ method: 'te.Literal["common", "ceil", "floor"]' = "common",
+) -> float:
+ """Round the number to a given precision. The first
+ parameter specifies the precision (default is ``0``), the
+ second the rounding method:
+
+ - ``'common'`` rounds either up or down
+ - ``'ceil'`` always rounds up
+ - ``'floor'`` always rounds down
+
+ If you don't specify a method ``'common'`` is used.
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round }}
+ -> 43.0
+ {{ 42.55|round(1, 'floor') }}
+ -> 42.5
+
+ Note that even if rounded to 0 precision, a float is returned. If
+ you need a real integer, pipe it through `int`:
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round|int }}
+ -> 43
+ """
+ if method not in {"common", "ceil", "floor"}:
+ raise FilterArgumentError("method must be common, ceil or floor")
+
+ if method == "common":
+ return round(value, precision)
+
+ func = getattr(math, method)
+ return t.cast(float, func(value * (10 ** precision)) / (10 ** precision))
+
+
+class _GroupTuple(t.NamedTuple):
+ grouper: t.Any
+ list: t.List
+
+ # Use the regular tuple repr to hide this subclass if users print
+ # out the value during debugging.
+ def __repr__(self) -> str:
+ return tuple.__repr__(self)
+
+ def __str__(self) -> str:
+ return tuple.__str__(self)
+
+
+@pass_environment
+def sync_do_groupby(
+ environment: "Environment",
+ value: "t.Iterable[V]",
+ attribute: t.Union[str, int],
+ default: t.Optional[t.Any] = None,
+) -> "t.List[t.Tuple[t.Any, t.List[V]]]":
+ """Group a sequence of objects by an attribute using Python's
+ :func:`itertools.groupby`. The attribute can use dot notation for
+ nested access, like ``"address.city"``. Unlike Python's ``groupby``,
+ the values are sorted first so only one group is returned for each
+ unique value.
+
+ For example, a list of ``User`` objects with a ``city`` attribute
+ can be rendered in groups. In this example, ``grouper`` refers to
+ the ``city`` value of the group.
+
+ .. sourcecode:: html+jinja
+
+ <ul>{% for city, items in users|groupby("city") %}
+ <li>{{ city }}
+ <ul>{% for user in items %}
+ <li>{{ user.name }}
+ {% endfor %}</ul>
+ </li>
+ {% endfor %}</ul>
+
+ ``groupby`` yields namedtuples of ``(grouper, list)``, which
+ can be used instead of the tuple unpacking above. ``grouper`` is the
+ value of the attribute, and ``list`` is the items with that value.
+
+ .. sourcecode:: html+jinja
+
+ <ul>{% for group in users|groupby("city") %}
+ <li>{{ group.grouper }}: {{ group.list|join(", ") }}
+ {% endfor %}</ul>
+
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ <ul>{% for city, items in users|groupby("city", default="NY") %}
+ <li>{{ city }}: {{ items|map(attribute="name")|join(", ") }}</li>
+ {% endfor %}</ul>
+
+ .. versionchanged:: 3.0
+ Added the ``default`` parameter.
+
+ .. versionchanged:: 2.6
+ The attribute supports dot notation for nested access.
+ """
+ expr = make_attrgetter(environment, attribute, default=default)
+ return [
+ _GroupTuple(key, list(values))
+ for key, values in groupby(sorted(value, key=expr), expr)
+ ]
+
+
+@async_variant(sync_do_groupby) # type: ignore
+async def do_groupby(
+ environment: "Environment",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ attribute: t.Union[str, int],
+ default: t.Optional[t.Any] = None,
+) -> "t.List[t.Tuple[t.Any, t.List[V]]]":
+ expr = make_attrgetter(environment, attribute, default=default)
+ return [
+ _GroupTuple(key, await auto_to_list(values))
+ for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
+ ]
+
+
+@pass_environment
+def sync_do_sum(
+ environment: "Environment",
+ iterable: "t.Iterable[V]",
+ attribute: t.Optional[t.Union[str, int]] = None,
+ start: V = 0, # type: ignore
+) -> V:
+ """Returns the sum of a sequence of numbers plus the value of parameter
+ 'start' (which defaults to 0). When the sequence is empty it returns
+ start.
+
+ It is also possible to sum up only certain attributes:
+
+ .. sourcecode:: jinja
+
+ Total: {{ items|sum(attribute='price') }}
+
+ .. versionchanged:: 2.6
+ The `attribute` parameter was added to allow summing up over
+ attributes. Also, the `start` parameter was moved to the right.
+ """
+ if attribute is not None:
+ iterable = map(make_attrgetter(environment, attribute), iterable)
+
+ return sum(iterable, start)
+
+
+@async_variant(sync_do_sum) # type: ignore
+async def do_sum(
+ environment: "Environment",
+ iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ attribute: t.Optional[t.Union[str, int]] = None,
+ start: V = 0, # type: ignore
+) -> V:
+ rv = start
+
+ if attribute is not None:
+ func = make_attrgetter(environment, attribute)
+ else:
+
+ def func(x: V) -> V:
+ return x
+
+ async for item in auto_aiter(iterable):
+ rv += func(item)
+
+ return rv
+
+
+def sync_do_list(value: "t.Iterable[V]") -> "t.List[V]":
+ """Convert the value into a list. If it was a string the returned list
+ will be a list of characters.
+ """
+ return list(value)
+
+
+@async_variant(sync_do_list) # type: ignore
+async def do_list(value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]") -> "t.List[V]":
+ return await auto_to_list(value)
+
+
+def do_mark_safe(value: str) -> Markup:
+ """Mark the value as safe which means that in an environment with automatic
+ escaping enabled this variable will not be escaped.
+ """
+ return Markup(value)
+
+
+def do_mark_unsafe(value: str) -> str:
+ """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
+ return str(value)
+
+
+@typing.overload
+def do_reverse(value: str) -> str:
+ ...
+
+
+@typing.overload
+def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]":
+ ...
+
+
+def do_reverse(value: t.Union[str, t.Iterable[V]]) -> t.Union[str, t.Iterable[V]]:
+ """Reverse the object or return an iterator that iterates over it the other
+ way round.
+ """
+ if isinstance(value, str):
+ return value[::-1]
+
+ try:
+ return reversed(value) # type: ignore
+ except TypeError:
+ try:
+ rv = list(value)
+ rv.reverse()
+ return rv
+ except TypeError as e:
+ raise FilterArgumentError("argument must be iterable") from e
+
+
+@pass_environment
+def do_attr(
+ environment: "Environment", obj: t.Any, name: str
+) -> t.Union[Undefined, t.Any]:
+ """Get an attribute of an object. ``foo|attr("bar")`` works like
+ ``foo.bar`` just that always an attribute is returned and items are not
+ looked up.
+
+ See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+ """
+ try:
+ name = str(name)
+ except UnicodeError:
+ pass
+ else:
+ try:
+ value = getattr(obj, name)
+ except AttributeError:
+ pass
+ else:
+ if environment.sandboxed:
+ environment = t.cast("SandboxedEnvironment", environment)
+
+ if not environment.is_safe_attribute(obj, name, value):
+ return environment.unsafe_undefined(obj, name)
+
+ return value
+
+ return environment.undefined(obj=obj, name=name)
+
+
+@typing.overload
+def sync_do_map(
+ context: "Context", value: t.Iterable, name: str, *args: t.Any, **kwargs: t.Any
+) -> t.Iterable:
+ ...
+
+
+@typing.overload
+def sync_do_map(
+ context: "Context",
+ value: t.Iterable,
+ *,
+ attribute: str = ...,
+ default: t.Optional[t.Any] = None,
+) -> t.Iterable:
+ ...
+
+
+@pass_context
+def sync_do_map(
+ context: "Context", value: t.Iterable, *args: t.Any, **kwargs: t.Any
+) -> t.Iterable:
+ """Applies a filter on a sequence of objects or looks up an attribute.
+ This is useful when dealing with lists of objects but you are really
+ only interested in a certain value of it.
+
+ The basic usage is mapping on an attribute. Imagine you have a list
+ of users but you are only interested in a list of usernames:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ users|map(attribute='username')|join(', ') }}
+
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
+
+ Alternatively you can let it invoke a filter by passing the name of the
+ filter and the arguments afterwards. A good example would be applying a
+ text conversion filter on a sequence:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ titles|map('lower')|join(', ') }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u.username for u in users)
+ (getattr(u, "username", "Anonymous") for u in users)
+ (do_lower(x) for x in titles)
+
+ .. versionchanged:: 2.11.0
+ Added the ``default`` parameter.
+
+ .. versionadded:: 2.7
+ """
+ if value:
+ func = prepare_map(context, args, kwargs)
+
+ for item in value:
+ yield func(item)
+
+
+@typing.overload
+def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ name: str,
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> t.Iterable:
+ ...
+
+
+@typing.overload
+def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ *,
+ attribute: str = ...,
+ default: t.Optional[t.Any] = None,
+) -> t.Iterable:
+ ...
+
+
+@async_variant(sync_do_map) # type: ignore
+async def do_map(
+ context: "Context",
+ value: t.Union[t.AsyncIterable, t.Iterable],
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> t.AsyncIterable:
+ if value:
+ func = prepare_map(context, args, kwargs)
+
+ async for item in auto_aiter(value):
+ yield await auto_await(func(item))
+
+
+@pass_context
+def sync_do_select(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
+ """Filters a sequence of objects by applying a test to each object,
+ and only selecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|select("odd") }}
+ {{ numbers|select("odd") }}
+ {{ numbers|select("divisibleby", 3) }}
+ {{ numbers|select("lessthan", 42) }}
+ {{ strings|select("equalto", "mystring") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if test_odd(n))
+ (n for n in numbers if test_divisibleby(n, 3))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(context, value, args, kwargs, lambda x: x, False)
+
+
+@async_variant(sync_do_select) # type: ignore
+async def do_select(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: x, False)
+
+
+@pass_context
+def sync_do_reject(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
+ """Filters a sequence of objects by applying a test to each object,
+ and rejecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|reject("odd") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if not test_odd(n))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(context, value, args, kwargs, lambda x: not x, False)
+
+
+@async_variant(sync_do_reject) # type: ignore
+async def do_reject(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: not x, False)
+
+
+@pass_context
+def sync_do_selectattr(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and only selecting the objects with the
+ test succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ users|selectattr("is_active") }}
+ {{ users|selectattr("email", "none") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (user for user in users if user.is_active)
+ (user for user in users if test_none(user.email))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(context, value, args, kwargs, lambda x: x, True)
+
+
+@async_variant(sync_do_selectattr) # type: ignore
+async def do_selectattr(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: x, True)
+
+
+@pass_context
+def sync_do_rejectattr(
+ context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
+) -> "t.Iterator[V]":
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and rejecting the objects with the test
+ succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ .. sourcecode:: jinja
+
+ {{ users|rejectattr("is_active") }}
+ {{ users|rejectattr("email", "none") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (user for user in users if not user.is_active)
+ (user for user in users if not test_none(user.email))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(context, value, args, kwargs, lambda x: not x, True)
+
+
+@async_variant(sync_do_rejectattr) # type: ignore
+async def do_rejectattr(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ *args: t.Any,
+ **kwargs: t.Any,
+) -> "t.AsyncIterator[V]":
+ return async_select_or_reject(context, value, args, kwargs, lambda x: not x, True)
+
+
+@pass_eval_context
+def do_tojson(
+ eval_ctx: "EvalContext", value: t.Any, indent: t.Optional[int] = None
+) -> Markup:
+ """Serialize an object to a string of JSON, and mark it safe to
+ render in HTML. This filter is only for use in HTML documents.
+
+ The returned string is safe to render in HTML documents and
+ ``<script>`` tags. The exception is in HTML attributes that are
+ double quoted; either use single quotes or the ``|forceescape``
+ filter.
+
+ :param value: The object to serialize to JSON.
+ :param indent: The ``indent`` parameter passed to ``dumps``, for
+ pretty-printing the value.
+
+ .. versionadded:: 2.9
+ """
+ policies = eval_ctx.environment.policies
+ dumps = policies["json.dumps_function"]
+ kwargs = policies["json.dumps_kwargs"]
+
+ if indent is not None:
+ kwargs = kwargs.copy()
+ kwargs["indent"] = indent
+
+ return htmlsafe_json_dumps(value, dumps=dumps, **kwargs)
+
+
+def prepare_map(
+ context: "Context", args: t.Tuple, kwargs: t.Dict[str, t.Any]
+) -> t.Callable[[t.Any], t.Any]:
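+ # Two call styles reach this helper: users|map(attribute="name", default="-")
+ # takes the attribute branch below, while titles|map("lower") treats
+ # args[0] as a filter name and falls into the else branch.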
+ if not args and "attribute" in kwargs:
+ attribute = kwargs.pop("attribute")
+ default = kwargs.pop("default", None)
+
+ if kwargs:
+ raise FilterArgumentError(
+ f"Unexpected keyword argument {next(iter(kwargs))!r}"
+ )
+
+ func = make_attrgetter(context.environment, attribute, default=default)
+ else:
+ try:
+ name = args[0]
+ args = args[1:]
+ except LookupError:
+ raise FilterArgumentError("map requires a filter argument") from None
+
+ def func(item: t.Any) -> t.Any:
+ return context.environment.call_filter(
+ name, item, args, kwargs, context=context
+ )
+
+ return func
+
+
+def prepare_select_or_reject(
+ context: "Context",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> t.Callable[[t.Any], t.Any]:
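+ # e.g. for users|selectattr("email", "none"): args = ("email", "none") and
+ # lookup_attr=True, so transfunc fetches the attribute and func applies the
+ # "none" test; when no test name is given at args[off], plain truthiness
+ # (bool) is used instead.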
+ if lookup_attr:
+ try:
+ attr = args[0]
+ except LookupError:
+ raise FilterArgumentError("Missing parameter for attribute name") from None
+
+ transfunc = make_attrgetter(context.environment, attr)
+ off = 1
+ else:
+ off = 0
+
+ def transfunc(x: V) -> V:
+ return x
+
+ try:
+ name = args[off]
+ args = args[1 + off :]
+
+ def func(item: t.Any) -> t.Any:
+ return context.environment.call_test(name, item, args, kwargs)
+
+ except LookupError:
+ func = bool # type: ignore
+
+ return lambda item: modfunc(func(transfunc(item)))
+
+
+def select_or_reject(
+ context: "Context",
+ value: "t.Iterable[V]",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> "t.Iterator[V]":
+ if value:
+ func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
+
+ for item in value:
+ if func(item):
+ yield item
+
+
+async def async_select_or_reject(
+ context: "Context",
+ value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
+ args: t.Tuple,
+ kwargs: t.Dict[str, t.Any],
+ modfunc: t.Callable[[t.Any], t.Any],
+ lookup_attr: bool,
+) -> "t.AsyncIterator[V]":
+ if value:
+ func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
+
+ async for item in auto_aiter(value):
+ if func(item):
+ yield item
+
+
+FILTERS = {
+ "abs": abs,
+ "attr": do_attr,
+ "batch": do_batch,
+ "capitalize": do_capitalize,
+ "center": do_center,
+ "count": len,
+ "d": do_default,
+ "default": do_default,
+ "dictsort": do_dictsort,
+ "e": escape,
+ "escape": escape,
+ "filesizeformat": do_filesizeformat,
+ "first": do_first,
+ "float": do_float,
+ "forceescape": do_forceescape,
+ "format": do_format,
+ "groupby": do_groupby,
+ "indent": do_indent,
+ "int": do_int,
+ "join": do_join,
+ "last": do_last,
+ "length": len,
+ "list": do_list,
+ "lower": do_lower,
+ "map": do_map,
+ "min": do_min,
+ "max": do_max,
+ "pprint": do_pprint,
+ "random": do_random,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "replace": do_replace,
+ "reverse": do_reverse,
+ "round": do_round,
+ "safe": do_mark_safe,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "slice": do_slice,
+ "sort": do_sort,
+ "string": soft_str,
+ "striptags": do_striptags,
+ "sum": do_sum,
+ "title": do_title,
+ "trim": do_trim,
+ "truncate": do_truncate,
+ "unique": do_unique,
+ "upper": do_upper,
+ "urlencode": do_urlencode,
+ "urlize": do_urlize,
+ "wordcount": do_wordcount,
+ "wordwrap": do_wordwrap,
+ "xmlattr": do_xmlattr,
+ "tojson": do_tojson,
+}
diff --git a/lib/spack/external/_vendoring/jinja2/idtracking.py b/lib/spack/external/_vendoring/jinja2/idtracking.py
new file mode 100644
index 0000000000..38c525ee31
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/idtracking.py
@@ -0,0 +1,318 @@
+import typing as t
+
+from . import nodes
+from .visitor import NodeVisitor
+
+VAR_LOAD_PARAMETER = "param"
+VAR_LOAD_RESOLVE = "resolve"
+VAR_LOAD_ALIAS = "alias"
+VAR_LOAD_UNDEFINED = "undefined"
+
+
+def find_symbols(
+ nodes: t.Iterable[nodes.Node], parent_symbols: t.Optional["Symbols"] = None
+) -> "Symbols":
+ sym = Symbols(parent=parent_symbols)
+ visitor = FrameSymbolVisitor(sym)
+ for node in nodes:
+ visitor.visit(node)
+ return sym
+
+
+def symbols_for_node(
+ node: nodes.Node, parent_symbols: t.Optional["Symbols"] = None
+) -> "Symbols":
+ sym = Symbols(parent=parent_symbols)
+ sym.analyze_node(node)
+ return sym
+
+
+class Symbols:
+ def __init__(
+ self, parent: t.Optional["Symbols"] = None, level: t.Optional[int] = None
+ ) -> None:
+ if level is None:
+ if parent is None:
+ level = 0
+ else:
+ level = parent.level + 1
+
+ self.level: int = level
+ self.parent = parent
+ self.refs: t.Dict[str, str] = {}
+ self.loads: t.Dict[str, t.Any] = {}
+ self.stores: t.Set[str] = set()
+
+ def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
+ visitor = RootVisitor(self)
+ visitor.visit(node, **kwargs)
+
+ def _define_ref(
+ self, name: str, load: t.Optional[t.Tuple[str, t.Optional[str]]] = None
+ ) -> str:
+ ident = f"l_{self.level}_{name}"
+ self.refs[name] = ident
+ if load is not None:
+ self.loads[ident] = load
+ return ident
+
+ def find_load(self, target: str) -> t.Optional[t.Any]:
+ if target in self.loads:
+ return self.loads[target]
+
+ if self.parent is not None:
+ return self.parent.find_load(target)
+
+ return None
+
+ def find_ref(self, name: str) -> t.Optional[str]:
+ if name in self.refs:
+ return self.refs[name]
+
+ if self.parent is not None:
+ return self.parent.find_ref(name)
+
+ return None
+
+ def ref(self, name: str) -> str:
+ rv = self.find_ref(name)
+ if rv is None:
+ raise AssertionError(
+ "Tried to resolve a name to a reference that was"
+ f" unknown to the frame ({name!r})"
+ )
+ return rv
+
+ def copy(self) -> "Symbols":
+ rv = t.cast(Symbols, object.__new__(self.__class__))
+ rv.__dict__.update(self.__dict__)
+ rv.refs = self.refs.copy()
+ rv.loads = self.loads.copy()
+ rv.stores = self.stores.copy()
+ return rv
+
+ def store(self, name: str) -> None:
+ self.stores.add(name)
+
+ # If we have not seen the name referenced yet, we need to figure
+ # out what to set it to.
+ if name not in self.refs:
+ # If there is a parent scope we check if the name has a
+ # reference there. If it does it means we might have to alias
+ # to a variable there.
+ if self.parent is not None:
+ outer_ref = self.parent.find_ref(name)
+ if outer_ref is not None:
+ self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
+ return
+
+ # Otherwise we can just set it to undefined.
+ self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
+
+ def declare_parameter(self, name: str) -> str:
+ self.stores.add(name)
+ return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
+
+ def load(self, name: str) -> None:
+ if self.find_ref(name) is None:
+ self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
+
+ def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
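+ # Count in how many branches each newly stored name appears; a name
+ # missing from at least one branch must still be loadable after the
+ # branches rejoin, so it falls back to an outer alias or a runtime
+ # resolve below.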
+ stores: t.Dict[str, int] = {}
+ for branch in branch_symbols:
+ for target in branch.stores:
+ if target in self.stores:
+ continue
+ stores[target] = stores.get(target, 0) + 1
+
+ for sym in branch_symbols:
+ self.refs.update(sym.refs)
+ self.loads.update(sym.loads)
+ self.stores.update(sym.stores)
+
+ for name, branch_count in stores.items():
+ if branch_count == len(branch_symbols):
+ continue
+
+ target = self.find_ref(name) # type: ignore
+ assert target is not None, "should not happen"
+
+ if self.parent is not None:
+ outer_target = self.parent.find_ref(name)
+ if outer_target is not None:
+ self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
+ continue
+ self.loads[target] = (VAR_LOAD_RESOLVE, name)
+
+ def dump_stores(self) -> t.Dict[str, str]:
+ rv: t.Dict[str, str] = {}
+ node: t.Optional["Symbols"] = self
+
+ while node is not None:
+ for name in sorted(node.stores):
+ if name not in rv:
+ rv[name] = self.find_ref(name) # type: ignore
+
+ node = node.parent
+
+ return rv
+
+ def dump_param_targets(self) -> t.Set[str]:
+ rv = set()
+ node: t.Optional["Symbols"] = self
+
+ while node is not None:
+ for target, (instr, _) in node.loads.items():
+ if instr == VAR_LOAD_PARAMETER:
+ rv.add(target)
+
+ node = node.parent
+
+ return rv
+
+
+class RootVisitor(NodeVisitor):
+ def __init__(self, symbols: "Symbols") -> None:
+ self.sym_visitor = FrameSymbolVisitor(symbols)
+
+ def _simple_visit(self, node: nodes.Node, **kwargs: t.Any) -> None:
+ for child in node.iter_child_nodes():
+ self.sym_visitor.visit(child)
+
+ visit_Template = _simple_visit
+ visit_Block = _simple_visit
+ visit_Macro = _simple_visit
+ visit_FilterBlock = _simple_visit
+ visit_Scope = _simple_visit
+ visit_If = _simple_visit
+ visit_ScopedEvalContextModifier = _simple_visit
+
+ def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
+ for child in node.iter_child_nodes(exclude=("call",)):
+ self.sym_visitor.visit(child)
+
+ def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_For(
+ self, node: nodes.For, for_branch: str = "body", **kwargs: t.Any
+ ) -> None:
+ if for_branch == "body":
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ branch = node.body
+ elif for_branch == "else":
+ branch = node.else_
+ elif for_branch == "test":
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ if node.test is not None:
+ self.sym_visitor.visit(node.test)
+ return
+ else:
+ raise RuntimeError("Unknown for branch")
+
+ if branch:
+ for item in branch:
+ self.sym_visitor.visit(item)
+
+ def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
+ for target in node.targets:
+ self.sym_visitor.visit(target)
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def generic_visit(self, node: nodes.Node, *args: t.Any, **kwargs: t.Any) -> None:
+ raise NotImplementedError(f"Cannot find symbols for {type(node).__name__!r}")
+
+
+class FrameSymbolVisitor(NodeVisitor):
+ """A visitor for `Frame.inspect`."""
+
+ def __init__(self, symbols: "Symbols") -> None:
+ self.symbols = symbols
+
+ def visit_Name(
+ self, node: nodes.Name, store_as_param: bool = False, **kwargs: t.Any
+ ) -> None:
+ """All assignments to names go through this function."""
+ if store_as_param or node.ctx == "param":
+ self.symbols.declare_parameter(node.name)
+ elif node.ctx == "store":
+ self.symbols.store(node.name)
+ elif node.ctx == "load":
+ self.symbols.load(node.name)
+
+ def visit_NSRef(self, node: nodes.NSRef, **kwargs: t.Any) -> None:
+ self.symbols.load(node.name)
+
+ def visit_If(self, node: nodes.If, **kwargs: t.Any) -> None:
+ self.visit(node.test, **kwargs)
+ original_symbols = self.symbols
+
+ def inner_visit(nodes: t.Iterable[nodes.Node]) -> "Symbols":
+ self.symbols = rv = original_symbols.copy()
+
+ for subnode in nodes:
+ self.visit(subnode, **kwargs)
+
+ self.symbols = original_symbols
+ return rv
+
+ body_symbols = inner_visit(node.body)
+ elif_symbols = inner_visit(node.elif_)
+ else_symbols = inner_visit(node.else_ or ())
+ self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
+
+ def visit_Macro(self, node: nodes.Macro, **kwargs: t.Any) -> None:
+ self.symbols.store(node.name)
+
+ def visit_Import(self, node: nodes.Import, **kwargs: t.Any) -> None:
+ self.generic_visit(node, **kwargs)
+ self.symbols.store(node.target)
+
+ def visit_FromImport(self, node: nodes.FromImport, **kwargs: t.Any) -> None:
+ self.generic_visit(node, **kwargs)
+
+ for name in node.names:
+ if isinstance(name, tuple):
+ self.symbols.store(name[1])
+ else:
+ self.symbols.store(name)
+
+ def visit_Assign(self, node: nodes.Assign, **kwargs: t.Any) -> None:
+ """Visit assignments in the correct order."""
+ self.visit(node.node, **kwargs)
+ self.visit(node.target, **kwargs)
+
+ def visit_For(self, node: nodes.For, **kwargs: t.Any) -> None:
+ """Visiting stops at for blocks. However the block sequence
+ is visited as part of the outer scope.
+ """
+ self.visit(node.iter, **kwargs)
+
+ def visit_CallBlock(self, node: nodes.CallBlock, **kwargs: t.Any) -> None:
+ self.visit(node.call, **kwargs)
+
+ def visit_FilterBlock(self, node: nodes.FilterBlock, **kwargs: t.Any) -> None:
+ self.visit(node.filter, **kwargs)
+
+ def visit_With(self, node: nodes.With, **kwargs: t.Any) -> None:
+ for target in node.values:
+ self.visit(target)
+
+ def visit_AssignBlock(self, node: nodes.AssignBlock, **kwargs: t.Any) -> None:
+ """Stop visiting at block assigns."""
+ self.visit(node.target, **kwargs)
+
+ def visit_Scope(self, node: nodes.Scope, **kwargs: t.Any) -> None:
+ """Stop visiting at scopes."""
+
+ def visit_Block(self, node: nodes.Block, **kwargs: t.Any) -> None:
+ """Stop visiting at blocks."""
+
+ def visit_OverlayScope(self, node: nodes.OverlayScope, **kwargs: t.Any) -> None:
+ """Do not visit into overlay scopes."""
diff --git a/lib/spack/external/_vendoring/jinja2/lexer.py b/lib/spack/external/_vendoring/jinja2/lexer.py
new file mode 100644
index 0000000000..c25ab0f690
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/lexer.py
@@ -0,0 +1,869 @@
+"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
+is used to do some preprocessing. It filters out invalid operators like
+the bitshift operators we don't allow in templates. It separates
+template code and Python code in expressions.
+"""
+import re
+import typing as t
+from ast import literal_eval
+from collections import deque
+from sys import intern
+
+from ._identifier import pattern as name_re
+from .exceptions import TemplateSyntaxError
+from .utils import LRUCache
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache: t.MutableMapping[t.Tuple, "Lexer"] = LRUCache(50) # type: ignore
+
+# static regular expressions
+whitespace_re = re.compile(r"\s+")
+newline_re = re.compile(r"(\r\n|\r|\n)")
+string_re = re.compile(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
+)
+integer_re = re.compile(
+ r"""
+ (
+ 0b(_?[0-1])+ # binary
+ |
+ 0o(_?[0-7])+ # octal
+ |
+ 0x(_?[\da-f])+ # hex
+ |
+ [1-9](_?\d)* # decimal
+ |
+ 0(_?0)* # decimal zero
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
+float_re = re.compile(
+ r"""
+ (?<!\.) # doesn't start with a .
+ (\d+_)*\d+ # digits, possibly _ separated
+ (
+ (\.(\d+_)*\d+)? # optional fractional part
+ e[+\-]?(\d+_)*\d+ # exponent part
+ |
+ \.(\d+_)*\d+ # required fractional part
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
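+# Illustrative examples, not part of the vendored source, of literals the
+# two regexes above accept:
+#
+#   integer_re: "42", "1_000", "0b1010", "0o755", "0xFF"
+#   float_re:   "2.5", "1_000.25", "1e10", "2.5e-3" (but not "1." or ".5";
+#               the lookbehind also rejects digits directly after a ".")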
+
+# intern the tokens and keep references to them
+TOKEN_ADD = intern("add")
+TOKEN_ASSIGN = intern("assign")
+TOKEN_COLON = intern("colon")
+TOKEN_COMMA = intern("comma")
+TOKEN_DIV = intern("div")
+TOKEN_DOT = intern("dot")
+TOKEN_EQ = intern("eq")
+TOKEN_FLOORDIV = intern("floordiv")
+TOKEN_GT = intern("gt")
+TOKEN_GTEQ = intern("gteq")
+TOKEN_LBRACE = intern("lbrace")
+TOKEN_LBRACKET = intern("lbracket")
+TOKEN_LPAREN = intern("lparen")
+TOKEN_LT = intern("lt")
+TOKEN_LTEQ = intern("lteq")
+TOKEN_MOD = intern("mod")
+TOKEN_MUL = intern("mul")
+TOKEN_NE = intern("ne")
+TOKEN_PIPE = intern("pipe")
+TOKEN_POW = intern("pow")
+TOKEN_RBRACE = intern("rbrace")
+TOKEN_RBRACKET = intern("rbracket")
+TOKEN_RPAREN = intern("rparen")
+TOKEN_SEMICOLON = intern("semicolon")
+TOKEN_SUB = intern("sub")
+TOKEN_TILDE = intern("tilde")
+TOKEN_WHITESPACE = intern("whitespace")
+TOKEN_FLOAT = intern("float")
+TOKEN_INTEGER = intern("integer")
+TOKEN_NAME = intern("name")
+TOKEN_STRING = intern("string")
+TOKEN_OPERATOR = intern("operator")
+TOKEN_BLOCK_BEGIN = intern("block_begin")
+TOKEN_BLOCK_END = intern("block_end")
+TOKEN_VARIABLE_BEGIN = intern("variable_begin")
+TOKEN_VARIABLE_END = intern("variable_end")
+TOKEN_RAW_BEGIN = intern("raw_begin")
+TOKEN_RAW_END = intern("raw_end")
+TOKEN_COMMENT_BEGIN = intern("comment_begin")
+TOKEN_COMMENT_END = intern("comment_end")
+TOKEN_COMMENT = intern("comment")
+TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
+TOKEN_LINESTATEMENT_END = intern("linestatement_end")
+TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
+TOKEN_LINECOMMENT_END = intern("linecomment_end")
+TOKEN_LINECOMMENT = intern("linecomment")
+TOKEN_DATA = intern("data")
+TOKEN_INITIAL = intern("initial")
+TOKEN_EOF = intern("eof")
+
+# bind operators to token types
+operators = {
+ "+": TOKEN_ADD,
+ "-": TOKEN_SUB,
+ "/": TOKEN_DIV,
+ "//": TOKEN_FLOORDIV,
+ "*": TOKEN_MUL,
+ "%": TOKEN_MOD,
+ "**": TOKEN_POW,
+ "~": TOKEN_TILDE,
+ "[": TOKEN_LBRACKET,
+ "]": TOKEN_RBRACKET,
+ "(": TOKEN_LPAREN,
+ ")": TOKEN_RPAREN,
+ "{": TOKEN_LBRACE,
+ "}": TOKEN_RBRACE,
+ "==": TOKEN_EQ,
+ "!=": TOKEN_NE,
+ ">": TOKEN_GT,
+ ">=": TOKEN_GTEQ,
+ "<": TOKEN_LT,
+ "<=": TOKEN_LTEQ,
+ "=": TOKEN_ASSIGN,
+ ".": TOKEN_DOT,
+ ":": TOKEN_COLON,
+ "|": TOKEN_PIPE,
+ ",": TOKEN_COMMA,
+ ";": TOKEN_SEMICOLON,
+}
+
+reverse_operators = {v: k for k, v in operators.items()}
+assert len(operators) == len(reverse_operators), "operators dropped"
+operator_re = re.compile(
+ f"({'|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))})"
+)
+
+ignored_tokens = frozenset(
+ [
+ TOKEN_COMMENT_BEGIN,
+ TOKEN_COMMENT,
+ TOKEN_COMMENT_END,
+ TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN,
+ TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT,
+ ]
+)
+ignore_if_empty = frozenset(
+ [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
+)
+
+
+def _describe_token_type(token_type: str) -> str:
+ if token_type in reverse_operators:
+ return reverse_operators[token_type]
+
+ return {
+ TOKEN_COMMENT_BEGIN: "begin of comment",
+ TOKEN_COMMENT_END: "end of comment",
+ TOKEN_COMMENT: "comment",
+ TOKEN_LINECOMMENT: "comment",
+ TOKEN_BLOCK_BEGIN: "begin of statement block",
+ TOKEN_BLOCK_END: "end of statement block",
+ TOKEN_VARIABLE_BEGIN: "begin of print statement",
+ TOKEN_VARIABLE_END: "end of print statement",
+ TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
+ TOKEN_LINESTATEMENT_END: "end of line statement",
+ TOKEN_DATA: "template data / text",
+ TOKEN_EOF: "end of template",
+ }.get(token_type, token_type)
+
+
+def describe_token(token: "Token") -> str:
+ """Returns a description of the token."""
+ if token.type == TOKEN_NAME:
+ return token.value
+
+ return _describe_token_type(token.type)
+
+
+def describe_token_expr(expr: str) -> str:
+ """Like `describe_token` but for token expressions."""
+ if ":" in expr:
+ type, value = expr.split(":", 1)
+
+ if type == TOKEN_NAME:
+ return value
+ else:
+ type = expr
+
+ return _describe_token_type(type)
+
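+# A quick sketch, not from the vendored source, of how the helper above
+# renders token expressions:
+#
+#   describe_token_expr("name:for")  -> "for"
+#   describe_token_expr("block_end") -> "end of statement block"
+#   describe_token_expr("integer")   -> "integer"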
+
+def count_newlines(value: str) -> int:
+ """Count the number of newline characters in the string. This is
+ useful for extensions that filter a stream.
+ """
+ return len(newline_re.findall(value))
+
+
+def compile_rules(environment: "Environment") -> t.List[t.Tuple[str, str]]:
+ """Compiles all the rules from the environment into a list of rules."""
+ e = re.escape
+ rules = [
+ (
+ len(environment.comment_start_string),
+ TOKEN_COMMENT_BEGIN,
+ e(environment.comment_start_string),
+ ),
+ (
+ len(environment.block_start_string),
+ TOKEN_BLOCK_BEGIN,
+ e(environment.block_start_string),
+ ),
+ (
+ len(environment.variable_start_string),
+ TOKEN_VARIABLE_BEGIN,
+ e(environment.variable_start_string),
+ ),
+ ]
+
+ if environment.line_statement_prefix is not None:
+ rules.append(
+ (
+ len(environment.line_statement_prefix),
+ TOKEN_LINESTATEMENT_BEGIN,
+ r"^[ \t\v]*" + e(environment.line_statement_prefix),
+ )
+ )
+ if environment.line_comment_prefix is not None:
+ rules.append(
+ (
+ len(environment.line_comment_prefix),
+ TOKEN_LINECOMMENT_BEGIN,
+ r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
+ )
+ )
+
+ return [x[1:] for x in sorted(rules, reverse=True)]
+
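+# Illustrative example, not part of the vendored source: the rules are
+# sorted longest delimiter first, so with custom delimiters that share a
+# prefix -- say "<%" for blocks and "<%=" for variables -- the longer
+# "<%=" is tried before "<%" and the lexer picks the right rule.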
+
+class Failure:
+ """Class that raises a `TemplateSyntaxError` if called.
+ Used by the `Lexer` to specify known errors.
+ """
+
+ def __init__(
+ self, message: str, cls: t.Type[TemplateSyntaxError] = TemplateSyntaxError
+ ) -> None:
+ self.message = message
+ self.error_class = cls
+
+ def __call__(self, lineno: int, filename: str) -> "te.NoReturn":
+ raise self.error_class(self.message, lineno, filename)
+
+
+class Token(t.NamedTuple):
+ lineno: int
+ type: str
+ value: str
+
+ def __str__(self) -> str:
+ return describe_token(self)
+
+ def test(self, expr: str) -> bool:
+ """Test a token against a token expression. This can either be a
+ token type or ``'token_type:token_value'``. This can only test
+ against string values and types.
+ """
+ # here we do a regular string equality check as test_any is usually
+ # passed an iterable of non-interned strings.
+ if self.type == expr:
+ return True
+
+ if ":" in expr:
+ return expr.split(":", 1) == [self.type, self.value]
+
+ return False
+
+ def test_any(self, *iterable: str) -> bool:
+ """Test against multiple token expressions."""
+ return any(self.test(expr) for expr in iterable)
+
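+# A minimal sketch, not part of the vendored source, of the token
+# expressions ``Token.test`` accepts: a bare type matches any value,
+# while ``type:value`` must match both.
+#
+#   tok = Token(1, "name", "endfor")
+#   tok.test("name")                         # True
+#   tok.test("name:endfor")                  # True
+#   tok.test("name:endif")                   # False
+#   tok.test_any("integer", "name:endfor")   # True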
+
+class TokenStreamIterator:
+ """The iterator for tokenstreams. Iterate over the stream
+ until the eof token is reached.
+ """
+
+ def __init__(self, stream: "TokenStream") -> None:
+ self.stream = stream
+
+ def __iter__(self) -> "TokenStreamIterator":
+ return self
+
+ def __next__(self) -> Token:
+ token = self.stream.current
+
+ if token.type is TOKEN_EOF:
+ self.stream.close()
+ raise StopIteration
+
+ next(self.stream)
+ return token
+
+
+class TokenStream:
+ """A token stream is an iterable that yields :class:`Token`\\s. The
+ parser, however, does not iterate over it but calls :meth:`next` to go
+ one token ahead. The currently active token is stored as :attr:`current`.
+ """
+
+ def __init__(
+ self,
+ generator: t.Iterable[Token],
+ name: t.Optional[str],
+ filename: t.Optional[str],
+ ):
+ self._iter = iter(generator)
+ self._pushed: "te.Deque[Token]" = deque()
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.current = Token(1, TOKEN_INITIAL, "")
+ next(self)
+
+ def __iter__(self) -> TokenStreamIterator:
+ return TokenStreamIterator(self)
+
+ def __bool__(self) -> bool:
+ return bool(self._pushed) or self.current.type is not TOKEN_EOF
+
+ @property
+ def eos(self) -> bool:
+ """Are we at the end of the stream?"""
+ return not self
+
+ def push(self, token: Token) -> None:
+ """Push a token back to the stream."""
+ self._pushed.append(token)
+
+ def look(self) -> Token:
+ """Look at the next token."""
+ old_token = next(self)
+ result = self.current
+ self.push(result)
+ self.current = old_token
+ return result
+
+ def skip(self, n: int = 1) -> None:
+ """Got n tokens ahead."""
+ for _ in range(n):
+ next(self)
+
+ def next_if(self, expr: str) -> t.Optional[Token]:
+ """Perform the token test and return the token if it matched.
+ Otherwise the return value is `None`.
+ """
+ if self.current.test(expr):
+ return next(self)
+
+ return None
+
+ def skip_if(self, expr: str) -> bool:
+ """Like :meth:`next_if` but only returns `True` or `False`."""
+ return self.next_if(expr) is not None
+
+ def __next__(self) -> Token:
+ """Go one token ahead and return the old one.
+
+ Use the built-in :func:`next` instead of calling this directly.
+ """
+ rv = self.current
+
+ if self._pushed:
+ self.current = self._pushed.popleft()
+ elif self.current.type is not TOKEN_EOF:
+ try:
+ self.current = next(self._iter)
+ except StopIteration:
+ self.close()
+
+ return rv
+
+ def close(self) -> None:
+ """Close the stream."""
+ self.current = Token(self.current.lineno, TOKEN_EOF, "")
+ self._iter = iter(())
+ self.closed = True
+
+ def expect(self, expr: str) -> Token:
+ """Expect a given token type and return it. This accepts the same
+ argument as :meth:`jinja2.lexer.Token.test`.
+ """
+ if not self.current.test(expr):
+ expr = describe_token_expr(expr)
+
+ if self.current.type is TOKEN_EOF:
+ raise TemplateSyntaxError(
+ f"unexpected end of template, expected {expr!r}.",
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+
+ raise TemplateSyntaxError(
+ f"expected token {expr!r}, got {describe_token(self.current)!r}",
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+
+ return next(self)
+
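+# Usage sketch, not part of the vendored source: the parser consumes a
+# TokenStream through ``expect``/``next_if`` rather than plain iteration,
+# e.g. for the template source "{{ foo }}":
+#
+#   stream = get_lexer(env).tokenize("{{ foo }}", name="<template>")
+#   stream.expect("variable_begin")
+#   tok = stream.expect("name")   # Token(lineno=1, type='name', value='foo')
+#   stream.expect("variable_end")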
+
+def get_lexer(environment: "Environment") -> "Lexer":
+ """Return a lexer which is probably cached."""
+ key = (
+ environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline,
+ )
+ lexer = _lexer_cache.get(key)
+
+ if lexer is None:
+ _lexer_cache[key] = lexer = Lexer(environment)
+
+ return lexer
+
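+# Note, illustrative only: the cache key above is built purely from the
+# delimiter and whitespace settings, so two identically configured
+# environments share one ``Lexer`` instance:
+#
+#   assert get_lexer(Environment()) is get_lexer(Environment())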
+
+class OptionalLStrip(tuple):
+ """A special tuple for marking a point in the state that can have
+ lstrip applied.
+ """
+
+ __slots__ = ()
+
+ # Even though it looks like a no-op, creating instances fails
+ # without this.
+ def __new__(cls, *members, **kwargs): # type: ignore
+ return super().__new__(cls, members)
+
+
+class _Rule(t.NamedTuple):
+ pattern: t.Pattern[str]
+ tokens: t.Union[str, t.Tuple[str, ...], t.Tuple[Failure]]
+ command: t.Optional[str]
+
+
+class Lexer:
+ """Class that implements a lexer for a given environment. Automatically
+ created by the environment class, usually you don't have to do that.
+
+ Note that the lexer is not automatically bound to an environment.
+ Multiple environments can share the same lexer.
+ """
+
+ def __init__(self, environment: "Environment") -> None:
+ # shortcuts
+ e = re.escape
+
+ def c(x: str) -> t.Pattern[str]:
+ return re.compile(x, re.M | re.S)
+
+ # lexing rules for tags
+ tag_rules: t.List[_Rule] = [
+ _Rule(whitespace_re, TOKEN_WHITESPACE, None),
+ _Rule(float_re, TOKEN_FLOAT, None),
+ _Rule(integer_re, TOKEN_INTEGER, None),
+ _Rule(name_re, TOKEN_NAME, None),
+ _Rule(string_re, TOKEN_STRING, None),
+ _Rule(operator_re, TOKEN_OPERATOR, None),
+ ]
+
+ # assemble the root lexing rule. because "|" in regular expressions
+ # matches the first alternative rather than the longest one, we have
+ # to sort the rules by length so that the lexer keeps working as
+ # expected when delimiters share a prefix, e.g. <% for blocks and
+ # <%= for variables (if someone wants ASP-like syntax). variables
+ # are just part of the rules if variable processing is required.
+ root_tag_rules = compile_rules(environment)
+
+ block_start_re = e(environment.block_start_string)
+ block_end_re = e(environment.block_end_string)
+ comment_end_re = e(environment.comment_end_string)
+ variable_end_re = e(environment.variable_end_string)
+
+ # block suffix if trimming is enabled
+ block_suffix_re = "\\n?" if environment.trim_blocks else ""
+
+ # If lstrip is enabled, it should not be applied if there is any
+ # non-whitespace between the newline and block.
+ self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
+
+ self.newline_sequence = environment.newline_sequence
+ self.keep_trailing_newline = environment.keep_trailing_newline
+
+ root_raw_re = (
+ fr"(?P<raw_begin>{block_start_re}(\-|\+|)\s*raw\s*"
+ fr"(?:\-{block_end_re}\s*|{block_end_re}))"
+ )
+ root_parts_re = "|".join(
+ [root_raw_re] + [fr"(?P<{n}>{r}(\-|\+|))" for n, r in root_tag_rules]
+ )
+
+ # global lexing rules
+ self.rules: t.Dict[str, t.List[_Rule]] = {
+ "root": [
+ # directives
+ _Rule(
+ c(fr"(.*?)(?:{root_parts_re})"),
+ OptionalLStrip(TOKEN_DATA, "#bygroup"), # type: ignore
+ "#bygroup",
+ ),
+ # data
+ _Rule(c(".+"), TOKEN_DATA, None),
+ ],
+ # comments
+ TOKEN_COMMENT_BEGIN: [
+ _Rule(
+ c(
+ fr"(.*?)((?:\+{comment_end_re}|\-{comment_end_re}\s*"
+ fr"|{comment_end_re}{block_suffix_re}))"
+ ),
+ (TOKEN_COMMENT, TOKEN_COMMENT_END),
+ "#pop",
+ ),
+ _Rule(c(r"(.)"), (Failure("Missing end of comment tag"),), None),
+ ],
+ # blocks
+ TOKEN_BLOCK_BEGIN: [
+ _Rule(
+ c(
+ fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ fr"|{block_end_re}{block_suffix_re})"
+ ),
+ TOKEN_BLOCK_END,
+ "#pop",
+ ),
+ ]
+ + tag_rules,
+ # variables
+ TOKEN_VARIABLE_BEGIN: [
+ _Rule(
+ c(fr"\-{variable_end_re}\s*|{variable_end_re}"),
+ TOKEN_VARIABLE_END,
+ "#pop",
+ )
+ ]
+ + tag_rules,
+ # raw block
+ TOKEN_RAW_BEGIN: [
+ _Rule(
+ c(
+ fr"(.*?)((?:{block_start_re}(\-|\+|))\s*endraw\s*"
+ fr"(?:\+{block_end_re}|\-{block_end_re}\s*"
+ fr"|{block_end_re}{block_suffix_re}))"
+ ),
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END), # type: ignore
+ "#pop",
+ ),
+ _Rule(c(r"(.)"), (Failure("Missing end of raw directive"),), None),
+ ],
+ # line statements
+ TOKEN_LINESTATEMENT_BEGIN: [
+ _Rule(c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
+ ]
+ + tag_rules,
+ # line comments
+ TOKEN_LINECOMMENT_BEGIN: [
+ _Rule(
+ c(r"(.*?)()(?=\n|$)"),
+ (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
+ "#pop",
+ )
+ ],
+ }
+
+ def _normalize_newlines(self, value: str) -> str:
+ """Replace all newlines with the configured sequence in strings
+ and template data.
+ """
+ return newline_re.sub(self.newline_sequence, value)
+
+ def tokenize(
+ self,
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> TokenStream:
+ """Calls tokeniter + tokenize and wraps it in a token stream."""
+ stream = self.tokeniter(source, name, filename, state)
+ return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+ def wrap(
+ self,
+ stream: t.Iterable[t.Tuple[int, str, str]],
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ ) -> t.Iterator[Token]:
+ """This is called with the stream as returned by `tokenize` and wraps
+ every token in a :class:`Token` and converts the value.
+ """
+ for lineno, token, value_str in stream:
+ if token in ignored_tokens:
+ continue
+
+ value: t.Any = value_str
+
+ if token == TOKEN_LINESTATEMENT_BEGIN:
+ token = TOKEN_BLOCK_BEGIN
+ elif token == TOKEN_LINESTATEMENT_END:
+ token = TOKEN_BLOCK_END
+ # we are not interested in those tokens in the parser
+ elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
+ continue
+ elif token == TOKEN_DATA:
+ value = self._normalize_newlines(value_str)
+ elif token == "keyword":
+ token = value_str
+ elif token == TOKEN_NAME:
+ value = value_str
+
+ if not value.isidentifier():
+ raise TemplateSyntaxError(
+ "Invalid character in identifier", lineno, name, filename
+ )
+ elif token == TOKEN_STRING:
+ # try to unescape string
+ try:
+ value = (
+ self._normalize_newlines(value_str[1:-1])
+ .encode("ascii", "backslashreplace")
+ .decode("unicode-escape")
+ )
+ except Exception as e:
+ msg = str(e).split(":")[-1].strip()
+ raise TemplateSyntaxError(msg, lineno, name, filename) from e
+ elif token == TOKEN_INTEGER:
+ value = int(value_str.replace("_", ""), 0)
+ elif token == TOKEN_FLOAT:
+ # remove all "_" first to support more Python versions
+ value = literal_eval(value_str.replace("_", ""))
+ elif token == TOKEN_OPERATOR:
+ token = operators[value_str]
+
+ yield Token(lineno, token, value)
+
+ def tokeniter(
+ self,
+ source: str,
+ name: t.Optional[str],
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> t.Iterator[t.Tuple[int, str, str]]:
+ """This method tokenizes the text and returns the tokens in a
+ generator. Use this method if you just want to tokenize a template.
+
+ .. versionchanged:: 3.0
+ Only ``\\n``, ``\\r\\n`` and ``\\r`` are treated as line
+ breaks.
+ """
+ lines = newline_re.split(source)[::2]
+
+ if not self.keep_trailing_newline and lines[-1] == "":
+ del lines[-1]
+
+ source = "\n".join(lines)
+ pos = 0
+ lineno = 1
+ stack = ["root"]
+
+ if state is not None and state != "root":
+ assert state in ("variable", "block"), "invalid state"
+ stack.append(state + "_begin")
+
+ statetokens = self.rules[stack[-1]]
+ source_length = len(source)
+ balancing_stack: t.List[str] = []
+ lstrip_unless_re = self.lstrip_unless_re
+ newlines_stripped = 0
+ line_starting = True
+
+ while True:
+ # tokenizer loop
+ for regex, tokens, new_state in statetokens:
+ m = regex.match(source, pos)
+
+ # if no match we try again with the next rule
+ if m is None:
+ continue
+
+ # we only match blocks and variables if braces / parentheses
+ # are balanced. continue parsing with the lower rule which
+ # is the operator rule. do this only if the end tags look
+ # like operators
+ if balancing_stack and tokens in (
+ TOKEN_VARIABLE_END,
+ TOKEN_BLOCK_END,
+ TOKEN_LINESTATEMENT_END,
+ ):
+ continue
+
+ # tuples support more options
+ if isinstance(tokens, tuple):
+ groups = m.groups()
+
+ if isinstance(tokens, OptionalLStrip):
+ # Rule supports lstrip. Match will look like
+ # text, block type, whitespace control, type, control, ...
+ text = groups[0]
+ # Skipping the text and first type, every other group is the
+ # whitespace control for each type. One of the groups will be
+ # -, +, or empty string instead of None.
+ strip_sign = next(g for g in groups[2::2] if g is not None)
+
+ if strip_sign == "-":
+ # Strip all whitespace between the text and the tag.
+ stripped = text.rstrip()
+ newlines_stripped = text[len(stripped) :].count("\n")
+ groups = [stripped, *groups[1:]]
+ elif (
+ # Not marked for preserving whitespace.
+ strip_sign != "+"
+ # lstrip is enabled.
+ and lstrip_unless_re is not None
+ # Not a variable expression.
+ and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
+ ):
+ # The start of text between the last newline and the tag.
+ l_pos = text.rfind("\n") + 1
+
+ if l_pos > 0 or line_starting:
+ # If there's only whitespace between the newline and the
+ # tag, strip it.
+ if not lstrip_unless_re.search(text, l_pos):
+ groups = [text[:l_pos], *groups[1:]]
+
+ for idx, token in enumerate(tokens):
+ # failure group
+ if token.__class__ is Failure:
+ raise token(lineno, filename)
+ # bygroup is a bit more complex, in that case we
+ # yield the first named group that matched for
+ # the current token
+ elif token == "#bygroup":
+ for key, value in m.groupdict().items():
+ if value is not None:
+ yield lineno, key, value
+ lineno += value.count("\n")
+ break
+ else:
+ raise RuntimeError(
+ f"{regex!r} wanted to resolve the token dynamically"
+ " but no group matched"
+ )
+ # normal group
+ else:
+ data = groups[idx]
+
+ if data or token not in ignore_if_empty:
+ yield lineno, token, data
+
+ lineno += data.count("\n") + newlines_stripped
+ newlines_stripped = 0
+
+ # plain string tokens are just yielded as-is.
+ else:
+ data = m.group()
+
+ # update brace/parentheses balance
+ if tokens == TOKEN_OPERATOR:
+ if data == "{":
+ balancing_stack.append("}")
+ elif data == "(":
+ balancing_stack.append(")")
+ elif data == "[":
+ balancing_stack.append("]")
+ elif data in ("}", ")", "]"):
+ if not balancing_stack:
+ raise TemplateSyntaxError(
+ f"unexpected '{data}'", lineno, name, filename
+ )
+
+ expected_op = balancing_stack.pop()
+
+ if expected_op != data:
+ raise TemplateSyntaxError(
+ f"unexpected '{data}', expected '{expected_op}'",
+ lineno,
+ name,
+ filename,
+ )
+
+ # yield items
+ if data or tokens not in ignore_if_empty:
+ yield lineno, tokens, data
+
+ lineno += data.count("\n")
+
+ line_starting = m.group()[-1:] == "\n"
+ # fetch the new position into a new variable so that we can
+ # check for an internal parsing error which would result
+ # in an infinite loop
+ pos2 = m.end()
+
+ # handle state changes
+ if new_state is not None:
+ # remove the uppermost state
+ if new_state == "#pop":
+ stack.pop()
+ # resolve the new state by group checking
+ elif new_state == "#bygroup":
+ for key, value in m.groupdict().items():
+ if value is not None:
+ stack.append(key)
+ break
+ else:
+ raise RuntimeError(
+ f"{regex!r} wanted to resolve the new state dynamically"
+ f" but no group matched"
+ )
+ # direct state name given
+ else:
+ stack.append(new_state)
+
+ statetokens = self.rules[stack[-1]]
+ # we are still at the same position and there was no stack
+ # change: this would be a loop without a break condition, so
+ # raise an error instead
+ elif pos2 == pos:
+ raise RuntimeError(
+ f"{regex!r} yielded empty string without stack change"
+ )
+
+ # publish the new position and start again
+ pos = pos2
+ break
+ # if the loop terminated without a break we haven't found a single
+ # match; either we are at the end of the file or we have a problem
+ else:
+ # end of text
+ if pos >= source_length:
+ return
+
+ # something went wrong
+ raise TemplateSyntaxError(
+ f"unexpected char {source[pos]!r} at {pos}", lineno, name, filename
+ )
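+ # Rough sketch, not part of the vendored source, of the raw triples
+ # ``tokeniter`` yields for the source "a {{ b }}" before ``wrap``
+ # turns them into Token objects:
+ #
+ #   (1, 'data', 'a '), (1, 'variable_begin', '{{'),
+ #   (1, 'whitespace', ' '), (1, 'name', 'b'),
+ #   (1, 'whitespace', ' '), (1, 'variable_end', '}}')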
diff --git a/lib/spack/external/_vendoring/jinja2/loaders.py b/lib/spack/external/_vendoring/jinja2/loaders.py
new file mode 100644
index 0000000000..4fac3a0966
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/loaders.py
@@ -0,0 +1,652 @@
+"""API and implementations for loading templates from different data
+sources.
+"""
+import importlib.util
+import os
+import sys
+import typing as t
+import weakref
+import zipimport
+from collections import abc
+from hashlib import sha1
+from importlib import import_module
+from types import ModuleType
+
+from .exceptions import TemplateNotFound
+from .utils import internalcode
+from .utils import open_if_exists
+
+if t.TYPE_CHECKING:
+ from .environment import Environment
+ from .environment import Template
+
+
+def split_template_path(template: str) -> t.List[str]:
+ """Split a path into segments and perform a sanity check. If it detects
+ '..' in the path it will raise a `TemplateNotFound` error.
+ """
+ pieces = []
+ for piece in template.split("/"):
+ if (
+ os.path.sep in piece
+ or (os.path.altsep and os.path.altsep in piece)
+ or piece == os.path.pardir
+ ):
+ raise TemplateNotFound(template)
+ elif piece and piece != ".":
+ pieces.append(piece)
+ return pieces
+
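+# Illustrative behavior of the helper above, not part of the vendored
+# source:
+#
+#   split_template_path("pages/./index.html")  -> ["pages", "index.html"]
+#   split_template_path("../secret")           -> raises TemplateNotFound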
+
+class BaseLoader:
+ """Baseclass for all loaders. Subclass this and override `get_source` to
+ implement a custom loading mechanism. The environment provides a
+ `get_template` method that calls the loader's `load` method to get the
+ :class:`Template` object.
+
+ A very basic example for a loader that looks up templates on the file
+ system could look like this::
+
+ from jinja2 import BaseLoader, TemplateNotFound
+ from os.path import join, exists, getmtime
+
+ class MyLoader(BaseLoader):
+
+ def __init__(self, path):
+ self.path = path
+
+ def get_source(self, environment, template):
+ path = join(self.path, template)
+ if not exists(path):
+ raise TemplateNotFound(template)
+ mtime = getmtime(path)
+ with open(path) as f:
+ source = f.read()
+ return source, path, lambda: mtime == getmtime(path)
+ """
+
+ #: if set to `False` it indicates that the loader cannot provide access
+ #: to the source of templates.
+ #:
+ #: .. versionadded:: 2.4
+ has_source_access = True
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
+ """Get the template source, filename and reload helper for a template.
+ It's passed the environment and template name and has to return a
+ tuple in the form ``(source, filename, uptodate)`` or raise a
+ `TemplateNotFound` error if it can't locate the template.
+
+ The source part of the returned tuple must be the source of the
+ template as a string. The filename should be the name of the
+ file on the filesystem if it was loaded from there, otherwise
+ ``None``. The filename is used by Python for the tracebacks
+ if no loader extension is used.
+
+ The last item in the tuple is the `uptodate` function. If auto
+ reloading is enabled it's always called to check if the template
+ changed. No arguments are passed so the function must store the
+ old state somewhere (for example in a closure). If it returns `False`
+ the template will be reloaded.
+ """
+ if not self.has_source_access:
+ raise RuntimeError(
+ f"{type(self).__name__} cannot provide access to the source"
+ )
+ raise TemplateNotFound(template)
+
+ def list_templates(self) -> t.List[str]:
+ """Iterates over all templates. If the loader does not support that
+ it should raise a :exc:`TypeError` which is the default behavior.
+ """
+ raise TypeError("this loader cannot iterate over all templates")
+
+ @internalcode
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ """Loads a template. This method looks up the template in the cache
+ or loads one by calling :meth:`get_source`. Subclasses should not
+ override this method as loaders working on collections of other
+ loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+ will not call this method but `get_source` directly.
+ """
+ code = None
+ if globals is None:
+ globals = {}
+
+ # first we try to get the source for this template together
+ # with the filename and the uptodate function.
+ source, filename, uptodate = self.get_source(environment, name)
+
+ # try to load the code from the bytecode cache if there is a
+ # bytecode cache configured.
+ bcc = environment.bytecode_cache
+ if bcc is not None:
+ bucket = bcc.get_bucket(environment, name, filename, source)
+ code = bucket.code
+
+ # if we don't have code so far (not cached, no longer up to
+ # date, etc.) we compile the template
+ if code is None:
+ code = environment.compile(source, name, filename)
+
+ # if the bytecode cache is available and the bucket doesn't
+ # have code so far, we give the bucket the new code and put
+ # it back into the bytecode cache.
+ if bcc is not None and bucket.code is None:
+ bucket.code = code
+ bcc.set_bucket(bucket)
+
+ return environment.template_class.from_code(
+ environment, code, globals, uptodate
+ )
+
+
+class FileSystemLoader(BaseLoader):
+ """Load templates from a directory in the file system.
+
+ The path can be relative or absolute. Relative paths are relative to
+ the current working directory.
+
+ .. code-block:: python
+
+ loader = FileSystemLoader("templates")
+
+ A list of paths can be given. The directories will be searched in
+ order, stopping at the first matching template.
+
+ .. code-block:: python
+
+ loader = FileSystemLoader(["/override/templates", "/default/templates"])
+
+ :param searchpath: A path, or list of paths, to the directory that
+ contains the templates.
+ :param encoding: Use this encoding to read the text from template
+ files.
+ :param followlinks: Follow symbolic links in the path.
+
+ .. versionchanged:: 2.8
+ Added the ``followlinks`` parameter.
+ """
+
+ def __init__(
+ self,
+ searchpath: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]],
+ encoding: str = "utf-8",
+ followlinks: bool = False,
+ ) -> None:
+ if not isinstance(searchpath, abc.Iterable) or isinstance(searchpath, str):
+ searchpath = [searchpath]
+
+ self.searchpath = [os.fspath(p) for p in searchpath]
+ self.encoding = encoding
+ self.followlinks = followlinks
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, str, t.Callable[[], bool]]:
+ pieces = split_template_path(template)
+ for searchpath in self.searchpath:
+ filename = os.path.join(searchpath, *pieces)
+ f = open_if_exists(filename)
+ if f is None:
+ continue
+ try:
+ contents = f.read().decode(self.encoding)
+ finally:
+ f.close()
+
+ mtime = os.path.getmtime(filename)
+
+ def uptodate() -> bool:
+ try:
+ return os.path.getmtime(filename) == mtime
+ except OSError:
+ return False
+
+ return contents, filename, uptodate
+ raise TemplateNotFound(template)
+
+ def list_templates(self) -> t.List[str]:
+ found = set()
+ for searchpath in self.searchpath:
+ walk_dir = os.walk(searchpath, followlinks=self.followlinks)
+ for dirpath, _, filenames in walk_dir:
+ for filename in filenames:
+ template = (
+ os.path.join(dirpath, filename)[len(searchpath) :]
+ .strip(os.path.sep)
+ .replace(os.path.sep, "/")
+ )
+ if template[:2] == "./":
+ template = template[2:]
+ if template not in found:
+ found.add(template)
+ return sorted(found)
+
+
+class PackageLoader(BaseLoader):
+ """Load templates from a directory in a Python package.
+
+ :param package_name: Import name of the package that contains the
+ template directory.
+ :param package_path: Directory within the imported package that
+ contains the templates.
+ :param encoding: Encoding of template files.
+
+ The following example looks up templates in the ``pages`` directory
+ within the ``project.ui`` package.
+
+ .. code-block:: python
+
+ loader = PackageLoader("project.ui", "pages")
+
+ Only packages installed as directories (standard pip behavior) or
+ zip/egg files (less common) are supported. The Python API for
+ introspecting data in packages is too limited to support other
+ installation methods the way this loader requires.
+
+ There is limited support for :pep:`420` namespace packages. The
+ template directory is assumed to only be in one namespace
+ contributor. Zip files contributing to a namespace are not
+ supported.
+
+ .. versionchanged:: 3.0
+ No longer uses ``setuptools`` as a dependency.
+
+ .. versionchanged:: 3.0
+ Limited PEP 420 namespace package support.
+ """
+
+ def __init__(
+ self,
+ package_name: str,
+ package_path: "str" = "templates",
+ encoding: str = "utf-8",
+ ) -> None:
+ package_path = os.path.normpath(package_path).rstrip(os.path.sep)
+
+ # normpath preserves ".", which isn't valid in zip paths.
+ if package_path == os.path.curdir:
+ package_path = ""
+ elif package_path[:2] == os.path.curdir + os.path.sep:
+ package_path = package_path[2:]
+
+ self.package_path = package_path
+ self.package_name = package_name
+ self.encoding = encoding
+
+ # Make sure the package exists. This also makes namespace
+ # packages work, otherwise get_loader returns None.
+ import_module(package_name)
+ spec = importlib.util.find_spec(package_name)
+ assert spec is not None, "An import spec was not found for the package."
+ loader = spec.loader
+ assert loader is not None, "A loader was not found for the package."
+ self._loader = loader
+ self._archive = None
+ template_root = None
+
+ if isinstance(loader, zipimport.zipimporter):
+ self._archive = loader.archive
+ pkgdir = next(iter(spec.submodule_search_locations)) # type: ignore
+ template_root = os.path.join(pkgdir, package_path)
+ else:
+ roots: t.List[str] = []
+
+ # One element for regular packages, multiple for namespace
+ # packages, or None for a single module file.
+ if spec.submodule_search_locations:
+ roots.extend(spec.submodule_search_locations)
+ # A single module file, use the parent directory instead.
+ elif spec.origin is not None:
+ roots.append(os.path.dirname(spec.origin))
+
+ for root in roots:
+ root = os.path.join(root, package_path)
+
+ if os.path.isdir(root):
+ template_root = root
+ break
+
+ if template_root is None:
+ raise ValueError(
+ f"The {package_name!r} package was not installed in a"
+ " way that PackageLoader understands."
+ )
+
+ self._template_root = template_root
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, str, t.Optional[t.Callable[[], bool]]]:
+ p = os.path.join(self._template_root, *split_template_path(template))
+ up_to_date: t.Optional[t.Callable[[], bool]]
+
+ if self._archive is None:
+ # Package is a directory.
+ if not os.path.isfile(p):
+ raise TemplateNotFound(template)
+
+ with open(p, "rb") as f:
+ source = f.read()
+
+ mtime = os.path.getmtime(p)
+
+ def up_to_date() -> bool:
+ return os.path.isfile(p) and os.path.getmtime(p) == mtime
+
+ else:
+ # Package is a zip file.
+ try:
+ source = self._loader.get_data(p) # type: ignore
+ except OSError as e:
+ raise TemplateNotFound(template) from e
+
+ # Could use the zip's mtime for all template mtimes, but
+ # would need to safely reload the module if it's out of
+ # date, so just report it as always current.
+ up_to_date = None
+
+ return source.decode(self.encoding), p, up_to_date
+
+ def list_templates(self) -> t.List[str]:
+ results: t.List[str] = []
+
+ if self._archive is None:
+ # Package is a directory.
+ offset = len(self._template_root)
+
+ for dirpath, _, filenames in os.walk(self._template_root):
+ dirpath = dirpath[offset:].lstrip(os.path.sep)
+ results.extend(
+ os.path.join(dirpath, name).replace(os.path.sep, "/")
+ for name in filenames
+ )
+ else:
+ if not hasattr(self._loader, "_files"):
+ raise TypeError(
+ "This zip import does not have the required"
+ " metadata to list templates."
+ )
+
+ # Package is a zip file.
+ prefix = (
+ self._template_root[len(self._archive) :].lstrip(os.path.sep)
+ + os.path.sep
+ )
+ offset = len(prefix)
+
+ for name in self._loader._files.keys(): # type: ignore
+ # Find names under the templates directory that aren't directories.
+ if name.startswith(prefix) and name[-1] != os.path.sep:
+ results.append(name[offset:].replace(os.path.sep, "/"))
+
+ results.sort()
+ return results
+
+
+class DictLoader(BaseLoader):
+ """Loads a template from a Python dict mapping template names to
+ template source. This loader is useful for unit testing:
+
+ >>> loader = DictLoader({'index.html': 'source here'})
+
+ Because auto reloading is rarely useful this is disabled by default.
+ """
+
+ def __init__(self, mapping: t.Mapping[str, str]) -> None:
+ self.mapping = mapping
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, None, t.Callable[[], bool]]:
+ if template in self.mapping:
+ source = self.mapping[template]
+ return source, None, lambda: source == self.mapping.get(template)
+ raise TemplateNotFound(template)
+
+ def list_templates(self) -> t.List[str]:
+ return sorted(self.mapping)
+
+
+class FunctionLoader(BaseLoader):
+ """A loader that is passed a function which does the loading. The
+ function receives the name of the template and has to return either
+ a string with the template source, a tuple in the form ``(source,
+ filename, uptodatefunc)`` or `None` if the template does not exist.
+
+ >>> def load_template(name):
+ ... if name == 'index.html':
+ ... return '...'
+ ...
+ >>> loader = FunctionLoader(load_template)
+
+ The `uptodatefunc` is a function that is called if autoreload is enabled
+ and has to return `True` if the template is still up to date. For more
+ details have a look at :meth:`BaseLoader.get_source` which has the same
+ return value.
+ """
+
+ def __init__(
+ self,
+ load_func: t.Callable[
+ [str],
+ t.Optional[
+ t.Union[
+ str, t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]
+ ]
+ ],
+ ],
+ ) -> None:
+ self.load_func = load_func
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
+ rv = self.load_func(template)
+
+ if rv is None:
+ raise TemplateNotFound(template)
+
+ if isinstance(rv, str):
+ return rv, None, None
+
+ return rv
+
+
+class PrefixLoader(BaseLoader):
+ """A loader that is passed a dict of loaders where each loader is bound
+ to a prefix. The prefix is delimited from the template by a slash by
+ default, which can be changed by setting the `delimiter` argument to
+ something else::
+
+ loader = PrefixLoader({
+ 'app1': PackageLoader('mypackage.app1'),
+ 'app2': PackageLoader('mypackage.app2')
+ })
+
+ Loading ``'app1/index.html'`` loads the file from the app1 package;
+ loading ``'app2/index.html'`` loads it from the second.
+ """
+
+ def __init__(
+ self, mapping: t.Mapping[str, BaseLoader], delimiter: str = "/"
+ ) -> None:
+ self.mapping = mapping
+ self.delimiter = delimiter
+
+ def get_loader(self, template: str) -> t.Tuple[BaseLoader, str]:
+ try:
+ prefix, name = template.split(self.delimiter, 1)
+ loader = self.mapping[prefix]
+ except (ValueError, KeyError) as e:
+ raise TemplateNotFound(template) from e
+ return loader, name
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
+ loader, name = self.get_loader(template)
+ try:
+ return loader.get_source(environment, name)
+ except TemplateNotFound as e:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(template) from e
+
+ @internalcode
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ loader, local_name = self.get_loader(name)
+ try:
+ return loader.load(environment, local_name, globals)
+ except TemplateNotFound as e:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(name) from e
+
+ def list_templates(self) -> t.List[str]:
+ result = []
+ for prefix, loader in self.mapping.items():
+ for template in loader.list_templates():
+ result.append(prefix + self.delimiter + template)
+ return result
+
+
+class ChoiceLoader(BaseLoader):
+ """This loader works like the `PrefixLoader` just that no prefix is
+ specified. If a template could not be found by one loader the next one
+ is tried.
+
+ >>> loader = ChoiceLoader([
+ ... FileSystemLoader('/path/to/user/templates'),
+ ... FileSystemLoader('/path/to/system/templates')
+ ... ])
+
+ This is useful if you want to allow users to override builtin templates
+ from a different location.
+ """
+
+ def __init__(self, loaders: t.Sequence[BaseLoader]) -> None:
+ self.loaders = loaders
+
+ def get_source(
+ self, environment: "Environment", template: str
+ ) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
+ for loader in self.loaders:
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ for loader in self.loaders:
+ try:
+ return loader.load(environment, name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(name)
+
+ def list_templates(self) -> t.List[str]:
+ found = set()
+ for loader in self.loaders:
+ found.update(loader.list_templates())
+ return sorted(found)
+
+
+class _TemplateModule(ModuleType):
+ """Like a normal module but with support for weak references"""
+
+
+class ModuleLoader(BaseLoader):
+ """This loader loads templates from precompiled templates.
+
+ Example usage:
+
+ >>> loader = ChoiceLoader([
+ ... ModuleLoader('/path/to/compiled/templates'),
+ ... FileSystemLoader('/path/to/templates')
+ ... ])
+
+ Templates can be precompiled with :meth:`Environment.compile_templates`.
+ """
+
+ has_source_access = False
+
+ def __init__(
+ self, path: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]]
+ ) -> None:
+ package_name = f"_jinja2_module_templates_{id(self):x}"
+
+ # create a fake module that looks for the templates in the
+ # path given.
+ mod = _TemplateModule(package_name)
+
+ if not isinstance(path, abc.Iterable) or isinstance(path, str):
+ path = [path]
+
+ mod.__path__ = [os.fspath(p) for p in path] # type: ignore
+
+ sys.modules[package_name] = weakref.proxy(
+ mod, lambda x: sys.modules.pop(package_name, None)
+ )
+
+ # the only strong reference, the sys.modules entry is weak
+ # so that the garbage collector can remove it once the
+ # loader that created it goes out of business.
+ self.module = mod
+ self.package_name = package_name
+
+ @staticmethod
+ def get_template_key(name: str) -> str:
+ return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
+
+ @staticmethod
+ def get_module_filename(name: str) -> str:
+ return ModuleLoader.get_template_key(name) + ".py"
+
+ @internalcode
+ def load(
+ self,
+ environment: "Environment",
+ name: str,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ) -> "Template":
+ key = self.get_template_key(name)
+ module = f"{self.package_name}.{key}"
+ mod = getattr(self.module, module, None)
+
+ if mod is None:
+ try:
+ mod = __import__(module, None, None, ["root"])
+ except ImportError as e:
+ raise TemplateNotFound(name) from e
+
+ # remove the entry from sys.modules, we only want the attribute
+ # on the module object we have stored on the loader.
+ sys.modules.pop(module, None)
+
+ if globals is None:
+ globals = {}
+
+ return environment.template_class.from_module_dict(
+ environment, mod.__dict__, globals
+ )
diff --git a/lib/spack/external/_vendoring/jinja2/meta.py b/lib/spack/external/_vendoring/jinja2/meta.py
new file mode 100644
index 0000000000..0057d6eaba
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/meta.py
@@ -0,0 +1,111 @@
+"""Functions that expose information about templates that might be
+interesting for introspection.
+"""
+import typing as t
+
+from . import nodes
+from .compiler import CodeGenerator
+from .compiler import Frame
+
+if t.TYPE_CHECKING:
+ from .environment import Environment
+
+
+class TrackingCodeGenerator(CodeGenerator):
+ """We abuse the code generator for introspection."""
+
+ def __init__(self, environment: "Environment") -> None:
+ super().__init__(environment, "<introspection>", "<introspection>")
+ self.undeclared_identifiers: t.Set[str] = set()
+
+ def write(self, x: str) -> None:
+ """Don't write."""
+
+ def enter_frame(self, frame: Frame) -> None:
+ """Remember all undeclared identifiers."""
+ super().enter_frame(frame)
+
+ for _, (action, param) in frame.symbols.loads.items():
+ if action == "resolve" and param not in self.environment.globals:
+ self.undeclared_identifiers.add(param)
+
+
+def find_undeclared_variables(ast: nodes.Template) -> t.Set[str]:
+ """Returns a set of all variables in the AST that will be looked up from
+ the context at runtime. Because it is not known at compile time which
+ variables will be used, depending on the path the execution takes at
+ runtime, all variables are returned.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
+ >>> meta.find_undeclared_variables(ast) == {'bar'}
+ True
+
+ .. admonition:: Implementation
+
+ Internally the code generator is used for finding undeclared variables.
+ This is good to know because the code generator might raise a
+ :exc:`TemplateAssertionError` during compilation and as a matter of
+ fact this function can currently raise that exception as well.
+ """
+ codegen = TrackingCodeGenerator(ast.environment) # type: ignore
+ codegen.visit(ast)
+ return codegen.undeclared_identifiers
+
+
+_ref_types = (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
+_RefType = t.Union[nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include]
+
+
+def find_referenced_templates(ast: nodes.Template) -> t.Iterator[t.Optional[str]]:
+ """Finds all the referenced templates from the AST. This will return an
+ iterator over all the hardcoded template extensions, inclusions and
+ imports. If dynamic inheritance or inclusion is used, `None` will be
+ yielded.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
+ >>> list(meta.find_referenced_templates(ast))
+ ['layout.html', None]
+
+ This function is useful for dependency tracking, for example if you want
+ to rebuild parts of the website after a layout template has changed.
+ """
+ template_name: t.Any
+
+ for node in ast.find_all(_ref_types):
+ template: nodes.Expr = node.template # type: ignore
+
+ if not isinstance(template, nodes.Const):
+ # a tuple with some non-consts in there
+ if isinstance(template, (nodes.Tuple, nodes.List)):
+ for template_name in template.items:
+ # something const, only yield the strings and ignore
+ # non-string consts that really just make no sense
+ if isinstance(template_name, nodes.Const):
+ if isinstance(template_name.value, str):
+ yield template_name.value
+ # something dynamic in there
+ else:
+ yield None
+ # something dynamic we don't know about here
+ else:
+ yield None
+ continue
+ # constant is a string, a direct template name
+ if isinstance(template.value, str):
+ yield template.value
+ # a tuple or list (latter *should* not happen) made of consts,
+ # yield the consts that are strings. We could warn here for
+ # non string values
+ elif isinstance(node, nodes.Include) and isinstance(
+ template.value, (tuple, list)
+ ):
+ for template_name in template.value:
+ if isinstance(template_name, str):
+ yield template_name
+ # something else we don't care about, we could warn here
+ else:
+ yield None
diff --git a/lib/spack/external/_vendoring/jinja2/nativetypes.py b/lib/spack/external/_vendoring/jinja2/nativetypes.py
new file mode 100644
index 0000000000..20597d50ab
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/nativetypes.py
@@ -0,0 +1,124 @@
+import typing as t
+from ast import literal_eval
+from ast import parse
+from itertools import chain
+from itertools import islice
+
+from . import nodes
+from .compiler import CodeGenerator
+from .compiler import Frame
+from .compiler import has_safe_repr
+from .environment import Environment
+from .environment import Template
+
+
+def native_concat(values: t.Iterable[t.Any]) -> t.Optional[t.Any]:
+ """Return a native Python type from the list of compiled nodes. If
+ the result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
+ the string is returned.
+
+ :param values: Iterable of outputs to concatenate.
+ """
+ head = list(islice(values, 2))
+
+ if not head:
+ return None
+
+ if len(head) == 1:
+ raw = head[0]
+ if not isinstance(raw, str):
+ return raw
+ else:
+ raw = "".join([str(v) for v in chain(head, values)])
+
+ try:
+ return literal_eval(
+ # In Python 3.10+ ast.literal_eval removes leading spaces/tabs
+ # from the given string. For backwards compatibility we need to
+ # parse the string ourselves without removing leading spaces/tabs.
+ parse(raw, mode="eval")
+ )
+ except (ValueError, SyntaxError, MemoryError):
+ return raw
+
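+# Illustrative results, not part of the vendored source, for the function
+# above:
+#
+#   native_concat(iter([]))              -> None
+#   native_concat(iter([42]))            -> 42       (single non-str value)
+#   native_concat(iter(["1", "2"]))      -> 12       (joined, literal_eval'd)
+#   native_concat(iter(["[1, ", "2]"]))  -> [1, 2]
+#   native_concat(iter(["hi", "!"]))     -> 'hi!'    (not parseable, kept str)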
+
+class NativeCodeGenerator(CodeGenerator):
+ """A code generator which renders Python types by not adding
+ ``str()`` around output nodes.
+ """
+
+ @staticmethod
+ def _default_finalize(value: t.Any) -> t.Any:
+ return value
+
+ def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
+ return repr("".join([str(v) for v in group]))
+
+ def _output_child_to_const(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> t.Any:
+ const = node.as_const(frame.eval_ctx)
+
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
+
+ if isinstance(node, nodes.TemplateData):
+ return const
+
+ return finalize.const(const) # type: ignore
+
+ def _output_child_pre(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> None:
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(
+ self, node: nodes.Expr, frame: Frame, finalize: CodeGenerator._FinalizeInfo
+ ) -> None:
+ if finalize.src is not None:
+ self.write(")")
+
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
+
+
+class NativeTemplate(Template):
+ environment_class = NativeEnvironment
+
+ def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ """Render the template to produce a native Python type. If the
+ result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed
+ with :func:`ast.literal_eval`, the parsed value is returned.
+ Otherwise, the string is returned.
+ """
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return native_concat(self.root_render_func(ctx)) # type: ignore
+ except Exception:
+ return self.environment.handle_exception()
+
+ async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ if not self.environment.is_async:
+ raise RuntimeError(
+ "The environment was not created with async mode enabled."
+ )
+
+ ctx = self.new_context(dict(*args, **kwargs))
+
+ try:
+ return native_concat(
+ [n async for n in self.root_render_func(ctx)] # type: ignore
+ )
+ except Exception:
+ return self.environment.handle_exception()
+
+
+NativeEnvironment.template_class = NativeTemplate
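+
+# Usage sketch, illustrative only: rendering returns native Python objects
+# instead of strings.
+#
+#   env = NativeEnvironment()
+#   env.from_string("{{ x + y }}").render(x=4, y=2)    # -> 6 (int)
+#   env.from_string("{{ [x, y] }}").render(x=4, y=2)   # -> [4, 2] (list)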
diff --git a/lib/spack/external/_vendoring/jinja2/nodes.py b/lib/spack/external/_vendoring/jinja2/nodes.py
new file mode 100644
index 0000000000..b2f88d9d9c
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/nodes.py
@@ -0,0 +1,1204 @@
+"""AST nodes generated by the parser for the compiler. Also provides
+some node tree helper functions used by the parser and compiler in order
+to normalize nodes.
+"""
+import inspect
+import operator
+import typing as t
+from collections import deque
+
+from markupsafe import Markup
+
+from .utils import _PassArg
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+_NodeBound = t.TypeVar("_NodeBound", bound="Node")
+
+_binop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ "+": operator.add,
+ "-": operator.sub,
+}
+
+_uaop_to_func: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
+ "not": operator.not_,
+ "+": operator.pos,
+ "-": operator.neg,
+}
+
+_cmpop_to_func: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
+ "eq": operator.eq,
+ "ne": operator.ne,
+ "gt": operator.gt,
+ "gteq": operator.ge,
+ "lt": operator.lt,
+ "lteq": operator.le,
+ "in": lambda a, b: a in b,
+ "notin": lambda a, b: a not in b,
+}
+
+
+class Impossible(Exception):
+ """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+ """A metaclass for nodes that handles the field and attribute
+ inheritance. Fields and attributes from the parent class are
+ automatically forwarded to the child."""
+
+ def __new__(mcs, name, bases, d): # type: ignore
+ for attr in "fields", "attributes":
+ storage = []
+ storage.extend(getattr(bases[0] if bases else object, attr, ()))
+ storage.extend(d.get(attr, ()))
+ assert len(bases) <= 1, "multiple inheritance not allowed"
+ assert len(storage) == len(set(storage)), "layout conflict"
+ d[attr] = tuple(storage)
+ d.setdefault("abstract", False)
+ return type.__new__(mcs, name, bases, d)
+
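+# Illustrative sketch, not part of the vendored source, of the field
+# inheritance the metaclass implements -- a child's ``fields`` are the
+# parent's plus its own (hypothetical classes):
+#
+#   class Base(Node):
+#       fields = ("a",)
+#
+#   class Child(Base):
+#       fields = ("b",)
+#
+#   Child.fields   # -> ("a", "b")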
+
+class EvalContext:
+ """Holds evaluation time information. Custom attributes can be attached
+ to it in extensions.
+ """
+
+ def __init__(
+ self, environment: "Environment", template_name: t.Optional[str] = None
+ ) -> None:
+ self.environment = environment
+ if callable(environment.autoescape):
+ self.autoescape = environment.autoescape(template_name)
+ else:
+ self.autoescape = environment.autoescape
+ self.volatile = False
+
+ def save(self) -> t.Mapping[str, t.Any]:
+ return self.__dict__.copy()
+
+ def revert(self, old: t.Mapping[str, t.Any]) -> None:
+ self.__dict__.clear()
+ self.__dict__.update(old)
+
+
+def get_eval_context(node: "Node", ctx: t.Optional[EvalContext]) -> EvalContext:
+ if ctx is None:
+ if node.environment is None:
+ raise RuntimeError(
+ "if no eval context is passed, the node must have an"
+ " attached environment."
+ )
+ return EvalContext(node.environment)
+ return ctx
+
+
+class Node(metaclass=NodeType):
+ """Baseclass for all Jinja nodes. There are a number of nodes available
+ of different types. There are four major types:
+
+ - :class:`Stmt`: statements
+ - :class:`Expr`: expressions
+ - :class:`Helper`: helper nodes
+ - :class:`Template`: the outermost wrapper node
+
+ All nodes have fields and attributes. Fields may be other nodes, lists,
+ or arbitrary values. Fields are passed to the constructor as regular
+ positional arguments, attributes as keyword arguments. Each node has
+ two attributes: `lineno` (the line number of the node) and `environment`.
+ The `environment` attribute is set at the end of the parsing process for
+ all nodes automatically.
+ """
+
+ fields: t.Tuple[str, ...] = ()
+ attributes: t.Tuple[str, ...] = ("lineno", "environment")
+ abstract = True
+
+ lineno: int
+ environment: t.Optional["Environment"]
+
+ def __init__(self, *fields: t.Any, **attributes: t.Any) -> None:
+ if self.abstract:
+ raise TypeError("abstract nodes are not instantiable")
+ if fields:
+ if len(fields) != len(self.fields):
+ if not self.fields:
+ raise TypeError(f"{type(self).__name__!r} takes 0 arguments")
+ raise TypeError(
+ f"{type(self).__name__!r} takes 0 or {len(self.fields)}"
+ f" argument{'s' if len(self.fields) != 1 else ''}"
+ )
+ for name, arg in zip(self.fields, fields):
+ setattr(self, name, arg)
+ for attr in self.attributes:
+ setattr(self, attr, attributes.pop(attr, None))
+ if attributes:
+ raise TypeError(f"unknown attribute {next(iter(attributes))!r}")
+
+ def iter_fields(
+ self,
+ exclude: t.Optional[t.Container[str]] = None,
+ only: t.Optional[t.Container[str]] = None,
+ ) -> t.Iterator[t.Tuple[str, t.Any]]:
+ """This method iterates over all fields that are defined and yields
+ ``(key, value)`` tuples. By default all fields are returned, but
+ it's possible to limit that to some fields by providing the `only`
+ parameter or to exclude some using the `exclude` parameter. Both
+ should be sets or tuples of field names.
+ """
+ for name in self.fields:
+ if (
+ (exclude is None and only is None)
+ or (exclude is not None and name not in exclude)
+ or (only is not None and name in only)
+ ):
+ try:
+ yield name, getattr(self, name)
+ except AttributeError:
+ pass
+
+ def iter_child_nodes(
+ self,
+ exclude: t.Optional[t.Container[str]] = None,
+ only: t.Optional[t.Container[str]] = None,
+ ) -> t.Iterator["Node"]:
+ """Iterates over all direct child nodes of the node. This iterates
+ over all fields and yields the values if they are nodes. If the value
+ of a field is a list, all the nodes in that list are returned.
+ """
+ for _, item in self.iter_fields(exclude, only):
+ if isinstance(item, list):
+ for n in item:
+ if isinstance(n, Node):
+ yield n
+ elif isinstance(item, Node):
+ yield item
+
+ def find(self, node_type: t.Type[_NodeBound]) -> t.Optional[_NodeBound]:
+ """Find the first node of a given type. If no such node exists the
+ return value is `None`.
+ """
+ for result in self.find_all(node_type):
+ return result
+
+ return None
+
+ def find_all(
+ self, node_type: t.Union[t.Type[_NodeBound], t.Tuple[t.Type[_NodeBound], ...]]
+ ) -> t.Iterator[_NodeBound]:
+ """Find all the nodes of a given type. If the type is a tuple,
+ the check is performed for any of the tuple items.
+ """
+ for child in self.iter_child_nodes():
+ if isinstance(child, node_type):
+ yield child # type: ignore
+ yield from child.find_all(node_type)
+
+ def set_ctx(self, ctx: str) -> "Node":
+ """Reset the context of a node and all child nodes. Per default the
+ parser will all generate nodes that have a 'load' context as it's the
+ most common one. This method is used in the parser to set assignment
+ targets and other nodes to a store context.
+ """
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if "ctx" in node.fields:
+ node.ctx = ctx # type: ignore
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_lineno(self, lineno: int, override: bool = False) -> "Node":
+ """Set the line numbers of the node and children."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if "lineno" in node.attributes:
+ if node.lineno is None or override:
+ node.lineno = lineno
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_environment(self, environment: "Environment") -> "Node":
+ """Set the environment for all nodes."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ node.environment = environment
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def __eq__(self, other: t.Any) -> bool:
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return tuple(self.iter_fields()) == tuple(other.iter_fields())
+
+ __hash__ = object.__hash__
+
+ def __repr__(self) -> str:
+ args_str = ", ".join(f"{a}={getattr(self, a, None)!r}" for a in self.fields)
+ return f"{type(self).__name__}({args_str})"
+
+ def dump(self) -> str:
+ def _dump(node: t.Union[Node, t.Any]) -> None:
+ if not isinstance(node, Node):
+ buf.append(repr(node))
+ return
+
+ buf.append(f"nodes.{type(node).__name__}(")
+ if not node.fields:
+ buf.append(")")
+ return
+ for idx, field in enumerate(node.fields):
+ if idx:
+ buf.append(", ")
+ value = getattr(node, field)
+ if isinstance(value, list):
+ buf.append("[")
+ for idx, item in enumerate(value):
+ if idx:
+ buf.append(", ")
+ _dump(item)
+ buf.append("]")
+ else:
+ _dump(value)
+ buf.append(")")
+
+ buf: t.List[str] = []
+ _dump(self)
+ return "".join(buf)
+
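+# A short sketch of the field/attribute protocol described above: fields
+# are positional, attributes are keyword-only.
+#
+#     >>> expr = Add(Const(1), Const(2), lineno=1)
+#     >>> expr.dump()
+#     'nodes.Add(nodes.Const(1), nodes.Const(2))'
+#     >>> [c.value for c in expr.find_all(Const)]
+#     [1, 2]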
+
+class Stmt(Node):
+ """Base node for all statements."""
+
+ abstract = True
+
+
+class Helper(Node):
+ """Nodes that exist in a specific context only."""
+
+ abstract = True
+
+
+class Template(Node):
+ """Node that represents a template. This must be the outermost node that
+ is passed to the compiler.
+ """
+
+ fields = ("body",)
+ body: t.List[Node]
+
+
+class Output(Stmt):
+ """A node that holds multiple expressions which are then printed out.
+ This is used both for the `print` statement and the regular template data.
+ """
+
+ fields = ("nodes",)
+ nodes: t.List["Expr"]
+
+
+class Extends(Stmt):
+ """Represents an extends statement."""
+
+ fields = ("template",)
+ template: "Expr"
+
+
+class For(Stmt):
+ """The for loop. `target` is the target for the iteration (usually a
+ :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
+ of nodes that are used as the loop body, and `else_` a list of nodes for the
+ `else` block. If no else block exists, it has to be an empty list.
+
+ For filtered nodes an expression can be stored as `test`, otherwise `None`.
+ """
+
+ fields = ("target", "iter", "body", "else_", "test", "recursive")
+ target: Node
+ iter: Node
+ body: t.List[Node]
+ else_: t.List[Node]
+ test: t.Optional[Node]
+ recursive: bool
+
+
+class If(Stmt):
+ """If `test` is true, `body` is rendered, else `else_`."""
+
+ fields = ("test", "body", "elif_", "else_")
+ test: Node
+ body: t.List[Node]
+ elif_: t.List["If"]
+ else_: t.List[Node]
+
+
+class Macro(Stmt):
+ """A macro definition. `name` is the name of the macro, `args` a list of
+ arguments and `defaults` a list of defaults if there are any. `body` is
+ a list of nodes for the macro body.
+ """
+
+ fields = ("name", "args", "defaults", "body")
+ name: str
+ args: t.List["Name"]
+ defaults: t.List["Expr"]
+ body: t.List[Node]
+
+
+class CallBlock(Stmt):
+ """Like a macro without a name but a call instead. `call` is called with
+ the unnamed macro as `caller` argument this node holds.
+ """
+
+ fields = ("call", "args", "defaults", "body")
+ call: "Call"
+ args: t.List["Name"]
+ defaults: t.List["Expr"]
+ body: t.List[Node]
+
+
+class FilterBlock(Stmt):
+ """Node for filter sections."""
+
+ fields = ("body", "filter")
+ body: t.List[Node]
+ filter: "Filter"
+
+
+class With(Stmt):
+ """Specific node for with statements. In older versions of Jinja the
+ with statement was implemented on the base of the `Scope` node instead.
+
+ .. versionadded:: 2.9.3
+ """
+
+ fields = ("targets", "values", "body")
+ targets: t.List["Expr"]
+ values: t.List["Expr"]
+ body: t.List[Node]
+
+
+class Block(Stmt):
+ """A node that represents a block.
+
+ .. versionchanged:: 3.0.0
+ the `required` field was added.
+ """
+
+ fields = ("name", "body", "scoped", "required")
+ name: str
+ body: t.List[Node]
+ scoped: bool
+ required: bool
+
+
+class Include(Stmt):
+ """A node that represents the include tag."""
+
+ fields = ("template", "with_context", "ignore_missing")
+ template: "Expr"
+ with_context: bool
+ ignore_missing: bool
+
+
+class Import(Stmt):
+ """A node that represents the import tag."""
+
+ fields = ("template", "target", "with_context")
+ template: "Expr"
+ target: str
+ with_context: bool
+
+
+class FromImport(Stmt):
+ """A node that represents the from import tag. It's important to not
+ pass unsafe names to the name attribute. The compiler translates the
+ attribute lookups directly into getattr calls and does *not* use the
+ subscript callback of the interface. As exported variables may not
+ start with double underscores (which the parser asserts), this is not a
+ problem for regular Jinja code, but if this node is used in an extension,
+ extra care must be taken.
+
+ The list of names may contain tuples if aliases are wanted.
+ """
+
+ fields = ("template", "names", "with_context")
+ template: "Expr"
+ names: t.List[t.Union[str, t.Tuple[str, str]]]
+ with_context: bool
+
+
+class ExprStmt(Stmt):
+ """A statement that evaluates an expression and discards the result."""
+
+ fields = ("node",)
+ node: Node
+
+
+class Assign(Stmt):
+ """Assigns an expression to a target."""
+
+ fields = ("target", "node")
+ target: "Expr"
+ node: Node
+
+
+class AssignBlock(Stmt):
+ """Assigns a block to a target."""
+
+ fields = ("target", "filter", "body")
+ target: "Expr"
+ filter: t.Optional["Filter"]
+ body: t.List[Node]
+
+
+class Expr(Node):
+ """Baseclass for all expressions."""
+
+ abstract = True
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ """Return the value of the expression as constant or raise
+ :exc:`Impossible` if this was not possible.
+
+ An :class:`EvalContext` can be provided, if none is given
+ a default context is created which requires the nodes to have
+ an attached environment.
+
+ .. versionchanged:: 2.4
+ the `eval_ctx` parameter was added.
+ """
+ raise Impossible()
+
+ def can_assign(self) -> bool:
+ """Check if it's possible to assign something to this node."""
+ return False
+
+
+class BinExpr(Expr):
+ """Baseclass for all binary expressions."""
+
+ fields = ("left", "right")
+ left: Expr
+ right: Expr
+ operator: str
+ abstract = True
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ # intercepted operators cannot be folded at compile time
+ if (
+ eval_ctx.environment.sandboxed
+ and self.operator in eval_ctx.environment.intercepted_binops # type: ignore
+ ):
+ raise Impossible()
+ f = _binop_to_func[self.operator]
+ try:
+ return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
+ except Exception as e:
+ raise Impossible() from e
+
+
+class UnaryExpr(Expr):
+ """Baseclass for all unary expressions."""
+
+ fields = ("node",)
+ node: Expr
+ operator: str
+ abstract = True
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ # intercepted operators cannot be folded at compile time
+ if (
+ eval_ctx.environment.sandboxed
+ and self.operator in eval_ctx.environment.intercepted_unops # type: ignore
+ ):
+ raise Impossible()
+ f = _uaop_to_func[self.operator]
+ try:
+ return f(self.node.as_const(eval_ctx))
+ except Exception as e:
+ raise Impossible() from e
+
+
+class Name(Expr):
+ """Looks up a name or stores a value in a name.
+ The `ctx` of the node can be one of the following values:
+
+ - `store`: store a value in the name
+ - `load`: load that name
+ - `param`: like `store` but for names defined as function parameters.
+ """
+
+ fields = ("name", "ctx")
+ name: str
+ ctx: str
+
+ def can_assign(self) -> bool:
+ return self.name not in {"true", "false", "none", "True", "False", "None"}
+
+
+class NSRef(Expr):
+ """Reference to a namespace value assignment"""
+
+ fields = ("name", "attr")
+ name: str
+ attr: str
+
+ def can_assign(self) -> bool:
+ # We don't need any special checks here; NSRef assignments have a
+ # runtime check to ensure the target is a namespace object which will
+ # have been checked already as it is created using a normal assignment
+ # which goes through a `Name` node.
+ return True
+
+
+class Literal(Expr):
+ """Baseclass for literals."""
+
+ abstract = True
+
+
+class Const(Literal):
+ """All constant values. The parser will return this node for simple
+ constants such as ``42`` or ``"foo"`` but it can be used to store more
+ complex values such as lists too. Only constants with a safe
+ representation (objects where ``eval(repr(x)) == x`` is true) are
+ supported.
+ """
+
+ fields = ("value",)
+ value: t.Any
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ return self.value
+
+ @classmethod
+ def from_untrusted(
+ cls,
+ value: t.Any,
+ lineno: t.Optional[int] = None,
+ environment: "t.Optional[Environment]" = None,
+ ) -> "Const":
+ """Return a const object if the value is representable as
+ constant value in the generated code, otherwise it will raise
+ an `Impossible` exception.
+ """
+ from .compiler import has_safe_repr
+
+ if not has_safe_repr(value):
+ raise Impossible()
+ return cls(value, lineno=lineno, environment=environment)
+
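+# Sketch: ``from_untrusted`` only accepts values whose repr round-trips
+# (assumes ``env`` is an Environment):
+#
+#     Const.from_untrusted([1, 2, 3], lineno=1, environment=env).value
+#     # -> [1, 2, 3]
+#     Const.from_untrusted(object())  # raises Impossible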
+
+class TemplateData(Literal):
+ """A constant template string."""
+
+ fields = ("data",)
+ data: str
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ if eval_ctx.autoescape:
+ return Markup(self.data)
+ return self.data
+
+
+class Tuple(Literal):
+ """For loop unpacking and some other things like multiple arguments
+ for subscripts. As with :class:`Name`, `ctx` specifies whether the tuple
+ is used for loading the names or storing them.
+ """
+
+ fields = ("items", "ctx")
+ items: t.List[Expr]
+ ctx: str
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[t.Any, ...]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return tuple(x.as_const(eval_ctx) for x in self.items)
+
+ def can_assign(self) -> bool:
+ for item in self.items:
+ if not item.can_assign():
+ return False
+ return True
+
+
+class List(Literal):
+ """Any list literal such as ``[1, 2, 3]``"""
+
+ fields = ("items",)
+ items: t.List[Expr]
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.List[t.Any]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return [x.as_const(eval_ctx) for x in self.items]
+
+
+class Dict(Literal):
+ """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
+ :class:`Pair` nodes.
+ """
+
+ fields = ("items",)
+ items: t.List["Pair"]
+
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Dict[t.Any, t.Any]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return dict(x.as_const(eval_ctx) for x in self.items)
+
+
+class Pair(Helper):
+ """A key, value pair for dicts."""
+
+ fields = ("key", "value")
+ key: Expr
+ value: Expr
+
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Tuple[t.Any, t.Any]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
+
+
+class Keyword(Helper):
+ """A key, value pair for keyword arguments where key is a string."""
+
+ fields = ("key", "value")
+ key: str
+ value: Expr
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Tuple[str, t.Any]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key, self.value.as_const(eval_ctx)
+
+
+class CondExpr(Expr):
+ """A conditional expression (inline if expression). (``{{
+ foo if bar else baz }}``)
+ """
+
+ fields = ("test", "expr1", "expr2")
+ test: Expr
+ expr1: Expr
+ expr2: t.Optional[Expr]
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.test.as_const(eval_ctx):
+ return self.expr1.as_const(eval_ctx)
+
+ # if we evaluate to an undefined object, we better do that at runtime
+ if self.expr2 is None:
+ raise Impossible()
+
+ return self.expr2.as_const(eval_ctx)
+
+
+def args_as_const(
+ node: t.Union["_FilterTestCommon", "Call"], eval_ctx: t.Optional[EvalContext]
+) -> t.Tuple[t.List[t.Any], t.Dict[t.Any, t.Any]]:
+ args = [x.as_const(eval_ctx) for x in node.args]
+ kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
+
+ if node.dyn_args is not None:
+ try:
+ args.extend(node.dyn_args.as_const(eval_ctx))
+ except Exception as e:
+ raise Impossible() from e
+
+ if node.dyn_kwargs is not None:
+ try:
+ kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
+ except Exception as e:
+ raise Impossible() from e
+
+ return args, kwargs
+
+
+class _FilterTestCommon(Expr):
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ node: Expr
+ name: str
+ args: t.List[Expr]
+ kwargs: t.List[Pair]
+ dyn_args: t.Optional[Expr]
+ dyn_kwargs: t.Optional[Expr]
+ abstract = True
+ _is_filter = True
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ if eval_ctx.volatile:
+ raise Impossible()
+
+ if self._is_filter:
+ env_map = eval_ctx.environment.filters
+ else:
+ env_map = eval_ctx.environment.tests
+
+ func = env_map.get(self.name)
+ pass_arg = _PassArg.from_obj(func) # type: ignore
+
+ if func is None or pass_arg is _PassArg.context:
+ raise Impossible()
+
+ if eval_ctx.environment.is_async and (
+ getattr(func, "jinja_async_variant", False) is True
+ or inspect.iscoroutinefunction(func)
+ ):
+ raise Impossible()
+
+ args, kwargs = args_as_const(self, eval_ctx)
+ args.insert(0, self.node.as_const(eval_ctx))
+
+ if pass_arg is _PassArg.eval_context:
+ args.insert(0, eval_ctx)
+ elif pass_arg is _PassArg.environment:
+ args.insert(0, eval_ctx.environment)
+
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ raise Impossible() from e
+
+
+class Filter(_FilterTestCommon):
+ """Apply a filter to an expression. ``name`` is the name of the
+ filter, the other fields are the same as :class:`Call`.
+
+ If ``node`` is ``None``, the filter is being used in a filter block
+ and is applied to the content of the block.
+ """
+
+ node: t.Optional[Expr] # type: ignore
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ if self.node is None:
+ raise Impossible()
+
+ return super().as_const(eval_ctx=eval_ctx)
+
+
+class Test(_FilterTestCommon):
+ """Apply a test to an expression. ``name`` is the name of the test,
+ the other fields are the same as :class:`Call`.
+
+ .. versionchanged:: 3.0
+ ``as_const`` shares the same logic for filters and tests. Tests
+ check for volatile, async, and ``@pass_context`` etc.
+ decorators.
+ """
+
+ _is_filter = False
+
+
+class Call(Expr):
+ """Calls an expression. `args` is a list of arguments, `kwargs` a list
+ of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+ and `dyn_kwargs` have to be either `None` or a node that is used as
+ the node for dynamic positional (``*args``) or keyword (``**kwargs``)
+ arguments.
+ """
+
+ fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
+ node: Expr
+ args: t.List[Expr]
+ kwargs: t.List[Keyword]
+ dyn_args: t.Optional[Expr]
+ dyn_kwargs: t.Optional[Expr]
+
+
+class Getitem(Expr):
+ """Get an attribute or item from an expression and prefer the item."""
+
+ fields = ("node", "arg", "ctx")
+ node: Expr
+ arg: Expr
+ ctx: str
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ if self.ctx != "load":
+ raise Impossible()
+
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ try:
+ return eval_ctx.environment.getitem(
+ self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
+ )
+ except Exception as e:
+ raise Impossible() from e
+
+
+class Getattr(Expr):
+ """Get an attribute or item from an expression that is a ascii-only
+ bytestring and prefer the attribute.
+ """
+
+ fields = ("node", "attr", "ctx")
+ node: Expr
+ attr: str
+ ctx: str
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ if self.ctx != "load":
+ raise Impossible()
+
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ try:
+ return eval_ctx.environment.getattr(self.node.as_const(eval_ctx), self.attr)
+ except Exception as e:
+ raise Impossible() from e
+
+
+class Slice(Expr):
+ """Represents a slice object. This must only be used as argument for
+ :class:`Subscript`.
+ """
+
+ fields = ("start", "stop", "step")
+ start: t.Optional[Expr]
+ stop: t.Optional[Expr]
+ step: t.Optional[Expr]
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> slice:
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ def const(obj: t.Optional[Expr]) -> t.Optional[t.Any]:
+ if obj is None:
+ return None
+ return obj.as_const(eval_ctx)
+
+ return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+ """Concatenates the list of expressions provided after converting
+ them to strings.
+ """
+
+ fields = ("nodes",)
+ nodes: t.List[Expr]
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> str:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return "".join(str(x.as_const(eval_ctx)) for x in self.nodes)
+
+
+class Compare(Expr):
+ """Compares an expression with some other expressions. `ops` must be a
+ list of :class:`Operand`\\s.
+ """
+
+ fields = ("expr", "ops")
+ expr: Expr
+ ops: t.List["Operand"]
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ result = value = self.expr.as_const(eval_ctx)
+
+ try:
+ for op in self.ops:
+ new_value = op.expr.as_const(eval_ctx)
+ result = _cmpop_to_func[op.op](value, new_value)
+
+ if not result:
+ return False
+
+ value = new_value
+ except Exception as e:
+ raise Impossible() from e
+
+ return result
+
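+# Chained comparisons fold the way Python evaluates them, e.g. ``1 < 2 < 3``
+# (a sketch; assumes ``env`` is an Environment):
+#
+#     cmp = Compare(Const(1), [Operand("lt", Const(2)), Operand("lt", Const(3))])
+#     cmp.as_const(EvalContext(env))  # -> True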
+
+class Operand(Helper):
+ """Holds an operator and an expression."""
+
+ fields = ("op", "expr")
+ op: str
+ expr: Expr
+
+
+class Mul(BinExpr):
+ """Multiplies the left with the right node."""
+
+ operator = "*"
+
+
+class Div(BinExpr):
+ """Divides the left by the right node."""
+
+ operator = "/"
+
+
+class FloorDiv(BinExpr):
+ """Divides the left by the right node and converts the
+ result into an integer by truncating.
+ """
+
+ operator = "//"
+
+
+class Add(BinExpr):
+ """Add the left to the right node."""
+
+ operator = "+"
+
+
+class Sub(BinExpr):
+ """Subtract the right from the left node."""
+
+ operator = "-"
+
+
+class Mod(BinExpr):
+ """Left modulo right."""
+
+ operator = "%"
+
+
+class Pow(BinExpr):
+ """Left to the power of right."""
+
+ operator = "**"
+
+
+class And(BinExpr):
+ """Short circuited AND."""
+
+ operator = "and"
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
+
+
+class Or(BinExpr):
+ """Short circuited OR."""
+
+ operator = "or"
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> t.Any:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
+
+
+class Not(UnaryExpr):
+ """Negate the expression."""
+
+ operator = "not"
+
+
+class Neg(UnaryExpr):
+ """Make the expression negative."""
+
+ operator = "-"
+
+
+class Pos(UnaryExpr):
+ """Make the expression positive (noop for most expressions)"""
+
+ operator = "+"
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+ """Loads an attribute from the environment object. This is useful for
+ extensions that want to call a callback stored on the environment.
+ """
+
+ fields = ("name",)
+ name: str
+
+
+class ExtensionAttribute(Expr):
+ """Returns the attribute of an extension bound to the environment.
+ The identifier is the identifier of the :class:`Extension`.
+
+ This node is usually constructed by calling the
+ :meth:`~jinja2.ext.Extension.attr` method on an extension.
+ """
+
+ fields = ("identifier", "name")
+ identifier: str
+ name: str
+
+
+class ImportedName(Expr):
+ """If created with an import name the import name is returned on node
+ access. For example ``ImportedName('cgi.escape')`` returns the `escape`
+ function from the cgi module on evaluation. Imports are optimized by the
+ compiler so there is no need to assign them to local variables.
+ """
+
+ fields = ("importname",)
+ importname: str
+
+
+class InternalName(Expr):
+ """An internal name in the compiler. You cannot create these nodes
+ yourself but the parser provides a
+ :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+ a new identifier for you. This identifier is not available from the
+ template and is not treated specially by the compiler.
+ """
+
+ fields = ("name",)
+ name: str
+
+ def __init__(self) -> None:
+ raise TypeError(
+ "Can't create internal names. Use the "
+ "`free_identifier` method on a parser."
+ )
+
+
+class MarkSafe(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+
+ fields = ("expr",)
+ expr: Expr
+
+ def as_const(self, eval_ctx: t.Optional[EvalContext] = None) -> Markup:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return Markup(self.expr.as_const(eval_ctx))
+
+
+class MarkSafeIfAutoescape(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`) but
+ only if autoescaping is active.
+
+ .. versionadded:: 2.5
+ """
+
+ fields = ("expr",)
+ expr: Expr
+
+ def as_const(
+ self, eval_ctx: t.Optional[EvalContext] = None
+ ) -> t.Union[Markup, t.Any]:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ expr = self.expr.as_const(eval_ctx)
+ if eval_ctx.autoescape:
+ return Markup(expr)
+ return expr
+
+
+class ContextReference(Expr):
+ """Returns the current template context. It can be used like a
+ :class:`Name` node, with a ``'load'`` ctx and will return the
+ current :class:`~jinja2.runtime.Context` object.
+
+ Here is an example that assigns the current template name to a
+ variable named `foo`::
+
+ Assign(Name('foo', ctx='store'),
+ Getattr(ContextReference(), 'name'))
+
+ This is basically equivalent to using the
+ :func:`~jinja2.pass_context` decorator when using the high-level
+ API, which causes a reference to the context to be passed as the
+ first argument to a function.
+ """
+
+
+class DerivedContextReference(Expr):
+ """Return the current template context including locals. Behaves
+ exactly like :class:`ContextReference`, but includes local
+ variables, such as from a ``for`` loop.
+
+ .. versionadded:: 2.11
+ """
+
+
+class Continue(Stmt):
+ """Continue a loop."""
+
+
+class Break(Stmt):
+ """Break a loop."""
+
+
+class Scope(Stmt):
+ """An artificial scope."""
+
+ fields = ("body",)
+ body: t.List[Node]
+
+
+class OverlayScope(Stmt):
+ """An overlay scope for extensions. This is a largely unoptimized scope
+ that can, however, be used to introduce completely arbitrary variables into
+ a sub-scope from a dictionary or dictionary-like object. The `context`
+ field has to evaluate to a dictionary object.
+
+ Example usage::
+
+ OverlayScope(context=self.call_method('get_context'),
+ body=[...])
+
+ .. versionadded:: 2.10
+ """
+
+ fields = ("context", "body")
+ context: Expr
+ body: t.List[Node]
+
+
+class EvalContextModifier(Stmt):
+ """Modifies the eval context. For each option that should be modified,
+ a :class:`Keyword` has to be added to the :attr:`options` list.
+
+ Example to change the `autoescape` setting::
+
+ EvalContextModifier(options=[Keyword('autoescape', Const(True))])
+ """
+
+ fields = ("options",)
+ options: t.List[Keyword]
+
+
+class ScopedEvalContextModifier(EvalContextModifier):
+ """Modifies the eval context and reverts it later. Works exactly like
+ :class:`EvalContextModifier` but will only modify the
+ :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
+ """
+
+ fields = ("body",)
+ body: t.List[Node]
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
+ raise TypeError("can't create custom node types")
+
+
+NodeType.__new__ = staticmethod(_failing_new) # type: ignore
+del _failing_new
diff --git a/lib/spack/external/_vendoring/jinja2/optimizer.py b/lib/spack/external/_vendoring/jinja2/optimizer.py
new file mode 100644
index 0000000000..fe1010705e
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/optimizer.py
@@ -0,0 +1,47 @@
+"""The optimizer tries to constant fold expressions and modify the AST
+in place so that it should be faster to evaluate.
+
+Because the AST does not contain all the scoping information and the
+compiler has to find that out, we cannot do all the optimizations we
+want. For example, loop unrolling doesn't work because unrolled loops
+would have a different scope. The solution would be a second syntax tree
+that stored the scoping rules.
+"""
+import typing as t
+
+from . import nodes
+from .visitor import NodeTransformer
+
+if t.TYPE_CHECKING:
+ from .environment import Environment
+
+
+def optimize(node: nodes.Node, environment: "Environment") -> nodes.Node:
+ """The context hint can be used to perform an static optimization
+ based on the context given."""
+ optimizer = Optimizer(environment)
+ return t.cast(nodes.Node, optimizer.visit(node))
+
+
+class Optimizer(NodeTransformer):
+ def __init__(self, environment: "t.Optional[Environment]") -> None:
+ self.environment = environment
+
+ def generic_visit(
+ self, node: nodes.Node, *args: t.Any, **kwargs: t.Any
+ ) -> nodes.Node:
+ node = super().generic_visit(node, *args, **kwargs)
+
+ # Do constant folding. Some other nodes besides Expr have
+ # as_const, but folding them causes errors later on.
+ if isinstance(node, nodes.Expr):
+ try:
+ return nodes.Const.from_untrusted(
+ node.as_const(args[0] if args else None),
+ lineno=node.lineno,
+ environment=self.environment,
+ )
+ except nodes.Impossible:
+ pass
+
+ return node
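+
+
+# A sketch of the optimizer in action (assumes ``env`` is an Environment):
+# constant subexpressions in a parsed template are folded into Const nodes.
+#
+#     ast = env.parse("{{ 1 + 2 }}")
+#     ast = optimize(ast, env)  # the Add(Const(1), Const(2)) becomes Const(3)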
diff --git a/lib/spack/external/_vendoring/jinja2/parser.py b/lib/spack/external/_vendoring/jinja2/parser.py
new file mode 100644
index 0000000000..7ad73fcc0f
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/parser.py
@@ -0,0 +1,1040 @@
+"""Parse tokens from the lexer into nodes for the compiler."""
+import typing
+import typing as t
+
+from . import nodes
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .lexer import describe_token
+from .lexer import describe_token_expr
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from .environment import Environment
+
+_ImportInclude = t.TypeVar("_ImportInclude", nodes.Import, nodes.Include)
+_MacroCall = t.TypeVar("_MacroCall", nodes.Macro, nodes.CallBlock)
+
+_statement_keywords = frozenset(
+ [
+ "for",
+ "if",
+ "block",
+ "extends",
+ "print",
+ "macro",
+ "include",
+ "from",
+ "import",
+ "set",
+ "with",
+ "autoescape",
+ ]
+)
+_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
+
+_math_nodes: t.Dict[str, t.Type[nodes.Expr]] = {
+ "add": nodes.Add,
+ "sub": nodes.Sub,
+ "mul": nodes.Mul,
+ "div": nodes.Div,
+ "floordiv": nodes.FloorDiv,
+ "mod": nodes.Mod,
+}
+
+
+class Parser:
+ """This is the central parsing class Jinja uses. It's passed to
+ extensions and can be used to parse expressions or statements.
+ """
+
+ def __init__(
+ self,
+ environment: "Environment",
+ source: str,
+ name: t.Optional[str] = None,
+ filename: t.Optional[str] = None,
+ state: t.Optional[str] = None,
+ ) -> None:
+ self.environment = environment
+ self.stream = environment._tokenize(source, name, filename, state)
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.extensions: t.Dict[
+ str, t.Callable[["Parser"], t.Union[nodes.Node, t.List[nodes.Node]]]
+ ] = {}
+ for extension in environment.iter_extensions():
+ for tag in extension.tags:
+ self.extensions[tag] = extension.parse
+ self._last_identifier = 0
+ self._tag_stack: t.List[str] = []
+ self._end_token_stack: t.List[t.Tuple[str, ...]] = []
+
+ def fail(
+ self,
+ msg: str,
+ lineno: t.Optional[int] = None,
+ exc: t.Type[TemplateSyntaxError] = TemplateSyntaxError,
+ ) -> "te.NoReturn":
+ """Convenience method that raises `exc` with the message, passed
+ line number or last line number as well as the current name and
+ filename.
+ """
+ if lineno is None:
+ lineno = self.stream.current.lineno
+ raise exc(msg, lineno, self.name, self.filename)
+
+ def _fail_ut_eof(
+ self,
+ name: t.Optional[str],
+ end_token_stack: t.List[t.Tuple[str, ...]],
+ lineno: t.Optional[int],
+ ) -> "te.NoReturn":
+ expected: t.Set[str] = set()
+ for exprs in end_token_stack:
+ expected.update(map(describe_token_expr, exprs))
+ if end_token_stack:
+ currently_looking: t.Optional[str] = " or ".join(
+ map(repr, map(describe_token_expr, end_token_stack[-1]))
+ )
+ else:
+ currently_looking = None
+
+ if name is None:
+ message = ["Unexpected end of template."]
+ else:
+ message = [f"Encountered unknown tag {name!r}."]
+
+ if currently_looking:
+ if name is not None and name in expected:
+ message.append(
+ "You probably made a nesting mistake. Jinja is expecting this tag,"
+ f" but currently looking for {currently_looking}."
+ )
+ else:
+ message.append(
+ f"Jinja was looking for the following tags: {currently_looking}."
+ )
+
+ if self._tag_stack:
+ message.append(
+ "The innermost block that needs to be closed is"
+ f" {self._tag_stack[-1]!r}."
+ )
+
+ self.fail(" ".join(message), lineno)
+
+ def fail_unknown_tag(
+ self, name: str, lineno: t.Optional[int] = None
+ ) -> "te.NoReturn":
+ """Called if the parser encounters an unknown tag. Tries to fail
+ with a human-readable error message that could help to identify
+ the problem.
+ """
+ self._fail_ut_eof(name, self._end_token_stack, lineno)
+
+ def fail_eof(
+ self,
+ end_tokens: t.Optional[t.Tuple[str, ...]] = None,
+ lineno: t.Optional[int] = None,
+ ) -> "te.NoReturn":
+ """Like fail_unknown_tag but for end of template situations."""
+ stack = list(self._end_token_stack)
+ if end_tokens is not None:
+ stack.append(end_tokens)
+ self._fail_ut_eof(None, stack, lineno)
+
+ def is_tuple_end(
+ self, extra_end_rules: t.Optional[t.Tuple[str, ...]] = None
+ ) -> bool:
+ """Are we at the end of a tuple?"""
+ if self.stream.current.type in ("variable_end", "block_end", "rparen"):
+ return True
+ elif extra_end_rules is not None:
+ return self.stream.current.test_any(extra_end_rules) # type: ignore
+ return False
+
+ def free_identifier(self, lineno: t.Optional[int] = None) -> nodes.InternalName:
+ """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+ self._last_identifier += 1
+ rv = object.__new__(nodes.InternalName)
+ nodes.Node.__init__(rv, f"fi{self._last_identifier}", lineno=lineno)
+ return rv # type: ignore
+
+ def parse_statement(self) -> t.Union[nodes.Node, t.List[nodes.Node]]:
+ """Parse a single statement."""
+ token = self.stream.current
+ if token.type != "name":
+ self.fail("tag name expected", token.lineno)
+ self._tag_stack.append(token.value)
+ pop_tag = True
+ try:
+ if token.value in _statement_keywords:
+ f = getattr(self, f"parse_{self.stream.current.value}")
+ return f() # type: ignore
+ if token.value == "call":
+ return self.parse_call_block()
+ if token.value == "filter":
+ return self.parse_filter_block()
+ ext = self.extensions.get(token.value)
+ if ext is not None:
+ return ext(self)
+
+ # did not work out, remove the token we pushed by accident
+ # from the stack so that the unknown tag fail function can
+ # produce a proper error message.
+ self._tag_stack.pop()
+ pop_tag = False
+ self.fail_unknown_tag(token.value, token.lineno)
+ finally:
+ if pop_tag:
+ self._tag_stack.pop()
+
+ def parse_statements(
+ self, end_tokens: t.Tuple[str, ...], drop_needle: bool = False
+ ) -> t.List[nodes.Node]:
+ """Parse multiple statements into a list until one of the end tokens
+ is reached. This is used to parse the body of statements as it also
+ parses template data if appropriate. The parser checks first if the
+ current token is a colon and skips it if there is one. Then it checks
+ for the block end and parses until one of the `end_tokens` is
+ reached. By default the active token in the stream at the end of
+ the call is the matched end token. If this is not wanted, `drop_needle`
+ can be set to `True` and the end token is removed.
+ """
+ # the first token may be a colon for python compatibility
+ self.stream.skip_if("colon")
+
+ # in the future it would be possible to add whole code sections
+ # by adding some sort of end of statement token and parsing those here.
+ self.stream.expect("block_end")
+ result = self.subparse(end_tokens)
+
+ # we reached the end of the template too early, the subparser
+ # does not check for this, so we do that now
+ if self.stream.current.type == "eof":
+ self.fail_eof(end_tokens)
+
+ if drop_needle:
+ next(self.stream)
+ return result
+
+ def parse_set(self) -> t.Union[nodes.Assign, nodes.AssignBlock]:
+ """Parse an assign statement."""
+ lineno = next(self.stream).lineno
+ target = self.parse_assign_target(with_namespace=True)
+ if self.stream.skip_if("assign"):
+ expr = self.parse_tuple()
+ return nodes.Assign(target, expr, lineno=lineno)
+ filter_node = self.parse_filter(None)
+ body = self.parse_statements(("name:endset",), drop_needle=True)
+ return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
+
+ def parse_for(self) -> nodes.For:
+ """Parse a for loop."""
+ lineno = self.stream.expect("name:for").lineno
+ target = self.parse_assign_target(extra_end_rules=("name:in",))
+ self.stream.expect("name:in")
+ iter = self.parse_tuple(
+ with_condexpr=False, extra_end_rules=("name:recursive",)
+ )
+ test = None
+ if self.stream.skip_if("name:if"):
+ test = self.parse_expression()
+ recursive = self.stream.skip_if("name:recursive")
+ body = self.parse_statements(("name:endfor", "name:else"))
+ if next(self.stream).value == "endfor":
+ else_ = []
+ else:
+ else_ = self.parse_statements(("name:endfor",), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
+
+ def parse_if(self) -> nodes.If:
+ """Parse an if construct."""
+ node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
+ while True:
+ node.test = self.parse_tuple(with_condexpr=False)
+ node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
+ node.elif_ = []
+ node.else_ = []
+ token = next(self.stream)
+ if token.test("name:elif"):
+ node = nodes.If(lineno=self.stream.current.lineno)
+ result.elif_.append(node)
+ continue
+ elif token.test("name:else"):
+ result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
+ break
+ return result
+
+ def parse_with(self) -> nodes.With:
+ node = nodes.With(lineno=next(self.stream).lineno)
+ targets: t.List[nodes.Expr] = []
+ values: t.List[nodes.Expr] = []
+ while self.stream.current.type != "block_end":
+ if targets:
+ self.stream.expect("comma")
+ target = self.parse_assign_target()
+ target.set_ctx("param")
+ targets.append(target)
+ self.stream.expect("assign")
+ values.append(self.parse_expression())
+ node.targets = targets
+ node.values = values
+ node.body = self.parse_statements(("name:endwith",), drop_needle=True)
+ return node
+
+ def parse_autoescape(self) -> nodes.Scope:
+ node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
+ node.options = [nodes.Keyword("autoescape", self.parse_expression())]
+ node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
+ return nodes.Scope([node])
+
+ def parse_block(self) -> nodes.Block:
+ node = nodes.Block(lineno=next(self.stream).lineno)
+ node.name = self.stream.expect("name").value
+ node.scoped = self.stream.skip_if("name:scoped")
+ node.required = self.stream.skip_if("name:required")
+
+ # common problem people encounter when switching from django
+ # to jinja. we do not support hyphens in block names, so let's
+ # raise a nicer error message in that case.
+ if self.stream.current.type == "sub":
+ self.fail(
+ "Block names in Jinja have to be valid Python identifiers and may not"
+ " contain hyphens, use an underscore instead."
+ )
+
+ node.body = self.parse_statements(("name:endblock",), drop_needle=True)
+
+ # enforce that required blocks only contain whitespace or comments
+ # by asserting that the body, if not empty, is just TemplateData nodes
+ # with whitespace data
+ if node.required and not all(
+ isinstance(child, nodes.TemplateData) and child.data.isspace()
+ for body in node.body
+ for child in body.nodes # type: ignore
+ ):
+ self.fail("Required blocks can only contain comments or whitespace")
+
+ self.stream.skip_if("name:" + node.name)
+ return node
+
+ def parse_extends(self) -> nodes.Extends:
+ node = nodes.Extends(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ return node
+
+ def parse_import_context(
+ self, node: _ImportInclude, default: bool
+ ) -> _ImportInclude:
+ if self.stream.current.test_any(
+ "name:with", "name:without"
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
+ self.stream.skip()
+ else:
+ node.with_context = default
+ return node
+
+ def parse_include(self) -> nodes.Include:
+ node = nodes.Include(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ if self.stream.current.test("name:ignore") and self.stream.look().test(
+ "name:missing"
+ ):
+ node.ignore_missing = True
+ self.stream.skip(2)
+ else:
+ node.ignore_missing = False
+ return self.parse_import_context(node, True)
+
+ def parse_import(self) -> nodes.Import:
+ node = nodes.Import(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect("name:as")
+ node.target = self.parse_assign_target(name_only=True).name
+ return self.parse_import_context(node, False)
+
+ def parse_from(self) -> nodes.FromImport:
+ node = nodes.FromImport(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect("name:import")
+ node.names = []
+
+ def parse_context() -> bool:
+ if (
+ self.stream.current.value
+ in {
+ "with",
+ "without",
+ }
+ and self.stream.look().test("name:context")
+ ):
+ node.with_context = next(self.stream).value == "with"
+ self.stream.skip()
+ return True
+ return False
+
+ while True:
+ if node.names:
+ self.stream.expect("comma")
+ if self.stream.current.type == "name":
+ if parse_context():
+ break
+ target = self.parse_assign_target(name_only=True)
+ if target.name.startswith("_"):
+ self.fail(
+ "names starting with an underline can not be imported",
+ target.lineno,
+ exc=TemplateAssertionError,
+ )
+ if self.stream.skip_if("name:as"):
+ alias = self.parse_assign_target(name_only=True)
+ node.names.append((target.name, alias.name))
+ else:
+ node.names.append(target.name)
+ if parse_context() or self.stream.current.type != "comma":
+ break
+ else:
+ self.stream.expect("name")
+ if not hasattr(node, "with_context"):
+ node.with_context = False
+ return node
+
+ def parse_signature(self, node: _MacroCall) -> None:
+ args = node.args = []
+ defaults = node.defaults = []
+ self.stream.expect("lparen")
+ while self.stream.current.type != "rparen":
+ if args:
+ self.stream.expect("comma")
+ arg = self.parse_assign_target(name_only=True)
+ arg.set_ctx("param")
+ if self.stream.skip_if("assign"):
+ defaults.append(self.parse_expression())
+ elif defaults:
+ self.fail("non-default argument follows default argument")
+ args.append(arg)
+ self.stream.expect("rparen")
+
+ def parse_call_block(self) -> nodes.CallBlock:
+ node = nodes.CallBlock(lineno=next(self.stream).lineno)
+ if self.stream.current.type == "lparen":
+ self.parse_signature(node)
+ else:
+ node.args = []
+ node.defaults = []
+
+ call_node = self.parse_expression()
+ if not isinstance(call_node, nodes.Call):
+ self.fail("expected call", node.lineno)
+ node.call = call_node
+ node.body = self.parse_statements(("name:endcall",), drop_needle=True)
+ return node
+
+ def parse_filter_block(self) -> nodes.FilterBlock:
+ node = nodes.FilterBlock(lineno=next(self.stream).lineno)
+ node.filter = self.parse_filter(None, start_inline=True) # type: ignore
+ node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
+ return node
+
+ def parse_macro(self) -> nodes.Macro:
+ node = nodes.Macro(lineno=next(self.stream).lineno)
+ node.name = self.parse_assign_target(name_only=True).name
+ self.parse_signature(node)
+ node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
+ return node
+
+ def parse_print(self) -> nodes.Output:
+ node = nodes.Output(lineno=next(self.stream).lineno)
+ node.nodes = []
+ while self.stream.current.type != "block_end":
+ if node.nodes:
+ self.stream.expect("comma")
+ node.nodes.append(self.parse_expression())
+ return node
+
+ @typing.overload
+ def parse_assign_target(
+ self, with_tuple: bool = ..., name_only: "te.Literal[True]" = ...
+ ) -> nodes.Name:
+ ...
+
+ @typing.overload
+ def parse_assign_target(
+ self,
+ with_tuple: bool = True,
+ name_only: bool = False,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ with_namespace: bool = False,
+ ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
+ ...
+
+ def parse_assign_target(
+ self,
+ with_tuple: bool = True,
+ name_only: bool = False,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ with_namespace: bool = False,
+ ) -> t.Union[nodes.NSRef, nodes.Name, nodes.Tuple]:
+ """Parse an assignment target. As Jinja allows assignments to
+ tuples, this function can parse all allowed assignment targets. By
+ default assignments to tuples are parsed; that can be disabled, however,
+ by setting `with_tuple` to `False`. If only assignments to names are
+ wanted, `name_only` can be set to `True`. The `extra_end_rules`
+ parameter is forwarded to the tuple parsing function. If
+ `with_namespace` is enabled, a namespace assignment may be parsed.
+ """
+ target: nodes.Expr
+
+ if with_namespace and self.stream.look().type == "dot":
+ token = self.stream.expect("name")
+ next(self.stream) # dot
+ attr = self.stream.expect("name")
+ target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
+ elif name_only:
+ token = self.stream.expect("name")
+ target = nodes.Name(token.value, "store", lineno=token.lineno)
+ else:
+ if with_tuple:
+ target = self.parse_tuple(
+ simplified=True, extra_end_rules=extra_end_rules
+ )
+ else:
+ target = self.parse_primary()
+
+ target.set_ctx("store")
+
+ if not target.can_assign():
+ self.fail(
+ f"can't assign to {type(target).__name__.lower()!r}", target.lineno
+ )
+
+ return target # type: ignore
+
+ def parse_expression(self, with_condexpr: bool = True) -> nodes.Expr:
+ """Parse an expression. Per default all expressions are parsed, if
+ the optional `with_condexpr` parameter is set to `False` conditional
+ expressions are not parsed.
+ """
+ if with_condexpr:
+ return self.parse_condexpr()
+ return self.parse_or()
+
+ def parse_condexpr(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ expr1 = self.parse_or()
+ expr3: t.Optional[nodes.Expr]
+
+ while self.stream.skip_if("name:if"):
+ expr2 = self.parse_or()
+ if self.stream.skip_if("name:else"):
+ expr3 = self.parse_condexpr()
+ else:
+ expr3 = None
+ expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return expr1
+
+ def parse_or(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ left = self.parse_and()
+ while self.stream.skip_if("name:or"):
+ right = self.parse_and()
+ left = nodes.Or(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_and(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ left = self.parse_not()
+ while self.stream.skip_if("name:and"):
+ right = self.parse_not()
+ left = nodes.And(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_not(self) -> nodes.Expr:
+ if self.stream.current.test("name:not"):
+ lineno = next(self.stream).lineno
+ return nodes.Not(self.parse_not(), lineno=lineno)
+ return self.parse_compare()
+
+ def parse_compare(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ expr = self.parse_math1()
+ ops = []
+ while True:
+ token_type = self.stream.current.type
+ if token_type in _compare_operators:
+ next(self.stream)
+ ops.append(nodes.Operand(token_type, self.parse_math1()))
+ elif self.stream.skip_if("name:in"):
+ ops.append(nodes.Operand("in", self.parse_math1()))
+ elif self.stream.current.test("name:not") and self.stream.look().test(
+ "name:in"
+ ):
+ self.stream.skip(2)
+ ops.append(nodes.Operand("notin", self.parse_math1()))
+ else:
+ break
+ lineno = self.stream.current.lineno
+ if not ops:
+ return expr
+ return nodes.Compare(expr, ops, lineno=lineno)
+
+ def parse_math1(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ left = self.parse_concat()
+ while self.stream.current.type in ("add", "sub"):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_concat()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_concat(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ args = [self.parse_math2()]
+ while self.stream.current.type == "tilde":
+ next(self.stream)
+ args.append(self.parse_math2())
+ if len(args) == 1:
+ return args[0]
+ return nodes.Concat(args, lineno=lineno)
+
+ def parse_math2(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ left = self.parse_pow()
+ while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_pow()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_pow(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ left = self.parse_unary()
+ while self.stream.current.type == "pow":
+ next(self.stream)
+ right = self.parse_unary()
+ left = nodes.Pow(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_unary(self, with_filter: bool = True) -> nodes.Expr:
+ token_type = self.stream.current.type
+ lineno = self.stream.current.lineno
+ node: nodes.Expr
+
+ if token_type == "sub":
+ next(self.stream)
+ node = nodes.Neg(self.parse_unary(False), lineno=lineno)
+ elif token_type == "add":
+ next(self.stream)
+ node = nodes.Pos(self.parse_unary(False), lineno=lineno)
+ else:
+ node = self.parse_primary()
+ node = self.parse_postfix(node)
+ if with_filter:
+ node = self.parse_filter_expr(node)
+ return node
+
+ def parse_primary(self) -> nodes.Expr:
+ token = self.stream.current
+ node: nodes.Expr
+ if token.type == "name":
+ if token.value in ("true", "false", "True", "False"):
+ node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
+ elif token.value in ("none", "None"):
+ node = nodes.Const(None, lineno=token.lineno)
+ else:
+ node = nodes.Name(token.value, "load", lineno=token.lineno)
+ next(self.stream)
+ elif token.type == "string":
+ next(self.stream)
+ buf = [token.value]
+ lineno = token.lineno
+ while self.stream.current.type == "string":
+ buf.append(self.stream.current.value)
+ next(self.stream)
+ node = nodes.Const("".join(buf), lineno=lineno)
+ elif token.type in ("integer", "float"):
+ next(self.stream)
+ node = nodes.Const(token.value, lineno=token.lineno)
+ elif token.type == "lparen":
+ next(self.stream)
+ node = self.parse_tuple(explicit_parentheses=True)
+ self.stream.expect("rparen")
+ elif token.type == "lbracket":
+ node = self.parse_list()
+ elif token.type == "lbrace":
+ node = self.parse_dict()
+ else:
+ self.fail(f"unexpected {describe_token(token)!r}", token.lineno)
+ return node
+
+ def parse_tuple(
+ self,
+ simplified: bool = False,
+ with_condexpr: bool = True,
+ extra_end_rules: t.Optional[t.Tuple[str, ...]] = None,
+ explicit_parentheses: bool = False,
+ ) -> t.Union[nodes.Tuple, nodes.Expr]:
+ """Works like `parse_expression` but if multiple expressions are
+ delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+ This method could also return a regular expression instead of a tuple
+ if no commas where found.
+
+ The default parsing mode is a full tuple. If `simplified` is `True`
+ only names and literals are parsed. The `no_condexpr` parameter is
+ forwarded to :meth:`parse_expression`.
+
+ Because tuples do not require delimiters and may end in a bogus comma
+ an extra hint is needed that marks the end of a tuple. For example
+ for loops support tuples between `for` and `in`. In that case the
+ `extra_end_rules` is set to ``['name:in']``.
+
+ `explicit_parentheses` is true if the parsing was triggered by an
+ expression in parentheses. This is used to figure out if an empty
+ tuple is a valid expression or not.
+ """
+ lineno = self.stream.current.lineno
+ if simplified:
+ parse = self.parse_primary
+ elif with_condexpr:
+ parse = self.parse_expression
+ else:
+
+ def parse() -> nodes.Expr:
+ return self.parse_expression(with_condexpr=False)
+
+ args: t.List[nodes.Expr] = []
+ is_tuple = False
+
+ while True:
+ if args:
+ self.stream.expect("comma")
+ if self.is_tuple_end(extra_end_rules):
+ break
+ args.append(parse())
+ if self.stream.current.type == "comma":
+ is_tuple = True
+ else:
+ break
+ lineno = self.stream.current.lineno
+
+ if not is_tuple:
+ if args:
+ return args[0]
+
+ # if we don't have explicit parentheses, an empty tuple is
+ # not a valid expression. This would mean nothing (literally
+ # nothing) in the spot of an expression would be an empty
+ # tuple.
+ if not explicit_parentheses:
+ self.fail(
+ "Expected an expression,"
+ f" got {describe_token(self.stream.current)!r}"
+ )
+
+ return nodes.Tuple(args, "load", lineno=lineno)
+
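+ # A sketch of what this produces (assumes ``env`` is an Environment):
+ # ``1, 2`` yields a Tuple node, while a single expression is returned bare.
+ #
+ #     Parser(env, "{{ 1, 2 }}").parse().body[0].nodes[0]
+ #     # -> Tuple(items=[Const(value=1), Const(value=2)], ctx='load')
+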
+ def parse_list(self) -> nodes.List:
+ token = self.stream.expect("lbracket")
+ items: t.List[nodes.Expr] = []
+ while self.stream.current.type != "rbracket":
+ if items:
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbracket":
+ break
+ items.append(self.parse_expression())
+ self.stream.expect("rbracket")
+ return nodes.List(items, lineno=token.lineno)
+
+ def parse_dict(self) -> nodes.Dict:
+ token = self.stream.expect("lbrace")
+ items: t.List[nodes.Pair] = []
+ while self.stream.current.type != "rbrace":
+ if items:
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbrace":
+ break
+ key = self.parse_expression()
+ self.stream.expect("colon")
+ value = self.parse_expression()
+ items.append(nodes.Pair(key, value, lineno=key.lineno))
+ self.stream.expect("rbrace")
+ return nodes.Dict(items, lineno=token.lineno)
+
+ def parse_postfix(self, node: nodes.Expr) -> nodes.Expr:
+ while True:
+ token_type = self.stream.current.type
+ if token_type == "dot" or token_type == "lbracket":
+ node = self.parse_subscript(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == "lparen":
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_filter_expr(self, node: nodes.Expr) -> nodes.Expr:
+ while True:
+ token_type = self.stream.current.type
+ if token_type == "pipe":
+ node = self.parse_filter(node) # type: ignore
+ elif token_type == "name" and self.stream.current.value == "is":
+ node = self.parse_test(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == "lparen":
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_subscript(
+ self, node: nodes.Expr
+ ) -> t.Union[nodes.Getattr, nodes.Getitem]:
+ token = next(self.stream)
+ arg: nodes.Expr
+
+ if token.type == "dot":
+ attr_token = self.stream.current
+ next(self.stream)
+ if attr_token.type == "name":
+ return nodes.Getattr(
+ node, attr_token.value, "load", lineno=token.lineno
+ )
+ elif attr_token.type != "integer":
+ self.fail("expected name or number", attr_token.lineno)
+ arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ if token.type == "lbracket":
+ args: t.List[nodes.Expr] = []
+ while self.stream.current.type != "rbracket":
+ if args:
+ self.stream.expect("comma")
+ args.append(self.parse_subscribed())
+ self.stream.expect("rbracket")
+ if len(args) == 1:
+ arg = args[0]
+ else:
+ arg = nodes.Tuple(args, "load", lineno=token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ self.fail("expected subscript expression", token.lineno)
+
+ def parse_subscribed(self) -> nodes.Expr:
+ lineno = self.stream.current.lineno
+ args: t.List[t.Optional[nodes.Expr]]
+
+ if self.stream.current.type == "colon":
+ next(self.stream)
+ args = [None]
+ else:
+ node = self.parse_expression()
+ if self.stream.current.type != "colon":
+ return node
+ next(self.stream)
+ args = [node]
+
+ if self.stream.current.type == "colon":
+ args.append(None)
+ elif self.stream.current.type not in ("rbracket", "comma"):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+
+ if self.stream.current.type == "colon":
+ next(self.stream)
+ if self.stream.current.type not in ("rbracket", "comma"):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+ else:
+ args.append(None)
+
+ return nodes.Slice(lineno=lineno, *args)
+
+ def parse_call_args(self) -> t.Tuple:
+ token = self.stream.expect("lparen")
+ args = []
+ kwargs = []
+ dyn_args = None
+ dyn_kwargs = None
+ require_comma = False
+
+ def ensure(expr: bool) -> None:
+ if not expr:
+ self.fail("invalid syntax for function call expression", token.lineno)
+
+ while self.stream.current.type != "rparen":
+ if require_comma:
+ self.stream.expect("comma")
+
+ # support for trailing comma
+ if self.stream.current.type == "rparen":
+ break
+
+ if self.stream.current.type == "mul":
+ ensure(dyn_args is None and dyn_kwargs is None)
+ next(self.stream)
+ dyn_args = self.parse_expression()
+ elif self.stream.current.type == "pow":
+ ensure(dyn_kwargs is None)
+ next(self.stream)
+ dyn_kwargs = self.parse_expression()
+ else:
+ if (
+ self.stream.current.type == "name"
+ and self.stream.look().type == "assign"
+ ):
+ # Parsing a kwarg
+ ensure(dyn_kwargs is None)
+ key = self.stream.current.value
+ self.stream.skip(2)
+ value = self.parse_expression()
+ kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
+ else:
+ # Parsing an arg
+ ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
+ args.append(self.parse_expression())
+
+ require_comma = True
+
+ self.stream.expect("rparen")
+ return args, kwargs, dyn_args, dyn_kwargs
+
+ def parse_call(self, node: nodes.Expr) -> nodes.Call:
+ # The lparen will be expected in parse_call_args, but the lineno
+ # needs to be recorded before the stream is advanced.
+ token = self.stream.current
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
+
+ def parse_filter(
+ self, node: t.Optional[nodes.Expr], start_inline: bool = False
+ ) -> t.Optional[nodes.Expr]:
+ while self.stream.current.type == "pipe" or start_inline:
+ if not start_inline:
+ next(self.stream)
+ token = self.stream.expect("name")
+ name = token.value
+ while self.stream.current.type == "dot":
+ next(self.stream)
+ name += "." + self.stream.expect("name").value
+ if self.stream.current.type == "lparen":
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
+ else:
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ node = nodes.Filter(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
+ start_inline = False
+ return node
+
+ def parse_test(self, node: nodes.Expr) -> nodes.Expr:
+ token = next(self.stream)
+ if self.stream.current.test("name:not"):
+ next(self.stream)
+ negated = True
+ else:
+ negated = False
+ name = self.stream.expect("name").value
+ while self.stream.current.type == "dot":
+ next(self.stream)
+ name += "." + self.stream.expect("name").value
+ dyn_args = dyn_kwargs = None
+ kwargs = []
+ if self.stream.current.type == "lparen":
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call_args()
+ elif (
+ self.stream.current.type
+ in {
+ "name",
+ "string",
+ "integer",
+ "float",
+ "lparen",
+ "lbracket",
+ "lbrace",
+ }
+ and not self.stream.current.test_any("name:else", "name:or", "name:and")
+ ):
+ if self.stream.current.test("name:is"):
+ self.fail("You cannot chain multiple tests with is")
+ arg_node = self.parse_primary()
+ arg_node = self.parse_postfix(arg_node)
+ args = [arg_node]
+ else:
+ args = []
+ node = nodes.Test(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
+ if negated:
+ node = nodes.Not(node, lineno=token.lineno)
+ return node
+
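+    # A hedged illustration (not executed) of the syntax handled by
+    # parse_filter/parse_test, as template snippets:
+    #
+    #   {{ name | default("guest") | upper }}  -> chained Filter nodes
+    #   {{ 42 is divisibleby(7) }}             -> a Test node
+    #   {{ x is not none }}                    -> a Not wrapping a Test
+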
+ def subparse(
+ self, end_tokens: t.Optional[t.Tuple[str, ...]] = None
+ ) -> t.List[nodes.Node]:
+ body: t.List[nodes.Node] = []
+ data_buffer: t.List[nodes.Node] = []
+ add_data = data_buffer.append
+
+ if end_tokens is not None:
+ self._end_token_stack.append(end_tokens)
+
+ def flush_data() -> None:
+ if data_buffer:
+ lineno = data_buffer[0].lineno
+ body.append(nodes.Output(data_buffer[:], lineno=lineno))
+ del data_buffer[:]
+
+ try:
+ while self.stream:
+ token = self.stream.current
+ if token.type == "data":
+ if token.value:
+ add_data(nodes.TemplateData(token.value, lineno=token.lineno))
+ next(self.stream)
+ elif token.type == "variable_begin":
+ next(self.stream)
+ add_data(self.parse_tuple(with_condexpr=True))
+ self.stream.expect("variable_end")
+ elif token.type == "block_begin":
+ flush_data()
+ next(self.stream)
+ if end_tokens is not None and self.stream.current.test_any(
+ *end_tokens
+ ):
+ return body
+ rv = self.parse_statement()
+ if isinstance(rv, list):
+ body.extend(rv)
+ else:
+ body.append(rv)
+ self.stream.expect("block_end")
+ else:
+ raise AssertionError("internal parsing error")
+
+ flush_data()
+ finally:
+ if end_tokens is not None:
+ self._end_token_stack.pop()
+ return body
+
+ def parse(self) -> nodes.Template:
+ """Parse the whole template into a `Template` node."""
+ result = nodes.Template(self.subparse(), lineno=1)
+ result.set_environment(self.environment)
+ return result
diff --git a/lib/spack/external/_vendoring/jinja2/py.typed b/lib/spack/external/_vendoring/jinja2/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/py.typed
diff --git a/lib/spack/external/_vendoring/jinja2/runtime.py b/lib/spack/external/_vendoring/jinja2/runtime.py
new file mode 100644
index 0000000000..2346cf2bac
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/runtime.py
@@ -0,0 +1,1104 @@
+"""The runtime functions and state used by compiled templates."""
+import functools
+import sys
+import typing as t
+from collections import abc
+from itertools import chain
+
+from markupsafe import escape # noqa: F401
+from markupsafe import Markup
+from markupsafe import soft_str
+
+from .async_utils import auto_aiter
+from .async_utils import auto_await # noqa: F401
+from .exceptions import TemplateNotFound # noqa: F401
+from .exceptions import TemplateRuntimeError # noqa: F401
+from .exceptions import UndefinedError
+from .nodes import EvalContext
+from .utils import _PassArg
+from .utils import concat
+from .utils import internalcode
+from .utils import missing
+from .utils import Namespace # noqa: F401
+from .utils import object_type_repr
+from .utils import pass_eval_context
+
+V = t.TypeVar("V")
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+if t.TYPE_CHECKING:
+ import logging
+ import typing_extensions as te
+ from .environment import Environment
+
+ class LoopRenderFunc(te.Protocol):
+ def __call__(
+ self,
+ reciter: t.Iterable[V],
+ loop_render_func: "LoopRenderFunc",
+ depth: int = 0,
+ ) -> str:
+ ...
+
+
+# these variables are exported to the template runtime
+exported = [
+ "LoopContext",
+ "TemplateReference",
+ "Macro",
+ "Markup",
+ "TemplateRuntimeError",
+ "missing",
+ "concat",
+ "escape",
+ "markup_join",
+ "str_join",
+ "identity",
+ "TemplateNotFound",
+ "Namespace",
+ "Undefined",
+ "internalcode",
+]
+async_exported = [
+ "AsyncLoopContext",
+ "auto_aiter",
+ "auto_await",
+]
+
+
+def identity(x: V) -> V:
+    """Returns its argument unchanged. Useful where the runtime expects
+    a callable but no transformation is wanted.
+ """
+ return x
+
+
+def markup_join(seq: t.Iterable[t.Any]) -> str:
+ """Concatenation that escapes if necessary and converts to string."""
+ buf = []
+ iterator = map(soft_str, seq)
+ for arg in iterator:
+ buf.append(arg)
+ if hasattr(arg, "__html__"):
+ return Markup("").join(chain(buf, iterator))
+ return concat(buf)
+
+
+def str_join(seq: t.Iterable[t.Any]) -> str:
+ """Simple args to string conversion and concatenation."""
+ return concat(map(str, seq))
+
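+# A minimal sketch of the difference between the two join helpers,
+# assuming markupsafe's Markup.join escapes plain strings:
+#
+#   str_join(["<b>", Markup("<i>")])     -> '<b><i>'
+#   markup_join(["<b>", Markup("<i>")])  -> Markup('&lt;b&gt;<i>')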
+
+def unicode_join(seq: t.Iterable[t.Any]) -> str:
+ import warnings
+
+ warnings.warn(
+ "This template must be recompiled with at least Jinja 3.0, or"
+ " it will fail in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return str_join(seq)
+
+
+def new_context(
+ environment: "Environment",
+ template_name: t.Optional[str],
+ blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
+ vars: t.Optional[t.Dict[str, t.Any]] = None,
+ shared: bool = False,
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ locals: t.Optional[t.Mapping[str, t.Any]] = None,
+) -> "Context":
+ """Internal helper for context creation."""
+ if vars is None:
+ vars = {}
+ if shared:
+ parent = vars
+ else:
+ parent = dict(globals or (), **vars)
+ if locals:
+ # if the parent is shared a copy should be created because
+ # we don't want to modify the dict passed
+ if shared:
+ parent = dict(parent)
+ for key, value in locals.items():
+ if value is not missing:
+ parent[key] = value
+ return environment.context_class(
+ environment, parent, template_name, blocks, globals=globals
+ )
+
+
+class TemplateReference:
+ """The `self` in templates."""
+
+ def __init__(self, context: "Context") -> None:
+ self.__context = context
+
+ def __getitem__(self, name: str) -> t.Any:
+ blocks = self.__context.blocks[name]
+ return BlockReference(name, self.__context, blocks, 0)
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.__context.name!r}>"
+
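+# In a template, ``self`` is a TemplateReference: ``{{ self.title() }}``,
+# for example, re-renders the ``title`` block of the current template.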
+
+def _dict_method_all(dict_method: F) -> F:
+ @functools.wraps(dict_method)
+ def f_all(self: "Context") -> t.Any:
+ return dict_method(self.get_all())
+
+ return t.cast(F, f_all)
+
+
+@abc.Mapping.register
+class Context:
+ """The template context holds the variables of a template. It stores the
+ values passed to the template and also the names the template exports.
+    Creating instances is neither supported nor useful: contexts are created
+    automatically at various stages of template evaluation and should not be
+    created by hand.
+
+ The context is immutable. Modifications on :attr:`parent` **must not**
+ happen and modifications on :attr:`vars` are allowed from generated
+ template code only. Template filters and global functions marked as
+ :func:`pass_context` get the active context passed as first argument
+ and are allowed to access the context read-only.
+
+    The template context supports read-only dict operations (`get`, `keys`,
+    `values`, `items`, `__getitem__`, `__contains__`). Additionally there
+    is a :meth:`resolve`
+ method that doesn't fail with a `KeyError` but returns an
+ :class:`Undefined` object for missing variables.
+ """
+
+ _legacy_resolve_mode: t.ClassVar[bool] = False
+
+ def __init_subclass__(cls) -> None:
+ if "resolve_or_missing" in cls.__dict__:
+ # If the subclass overrides resolve_or_missing it opts in to
+ # modern mode no matter what.
+ cls._legacy_resolve_mode = False
+ elif "resolve" in cls.__dict__ or cls._legacy_resolve_mode:
+ # If the subclass overrides resolve, or if its base is
+ # already in legacy mode, warn about legacy behavior.
+ import warnings
+
+ warnings.warn(
+ "Overriding 'resolve' is deprecated and will not have"
+ " the expected behavior in Jinja 3.1. Override"
+                " 'resolve_or_missing' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ cls._legacy_resolve_mode = True
+
+ def __init__(
+ self,
+ environment: "Environment",
+ parent: t.Dict[str, t.Any],
+ name: t.Optional[str],
+ blocks: t.Dict[str, t.Callable[["Context"], t.Iterator[str]]],
+ globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
+ ):
+ self.parent = parent
+ self.vars: t.Dict[str, t.Any] = {}
+ self.environment: "Environment" = environment
+ self.eval_ctx = EvalContext(self.environment, name)
+ self.exported_vars: t.Set[str] = set()
+ self.name = name
+ self.globals_keys = set() if globals is None else set(globals)
+
+ # create the initial mapping of blocks. Whenever template inheritance
+ # takes place the runtime will update this mapping with the new blocks
+ # from the template.
+ self.blocks = {k: [v] for k, v in blocks.items()}
+
+ def super(
+ self, name: str, current: t.Callable[["Context"], t.Iterator[str]]
+ ) -> t.Union["BlockReference", "Undefined"]:
+ """Render a parent block."""
+ try:
+ blocks = self.blocks[name]
+ index = blocks.index(current) + 1
+ blocks[index]
+ except LookupError:
+ return self.environment.undefined(
+ f"there is no parent block called {name!r}.", name="super"
+ )
+ return BlockReference(name, self, blocks, index)
+
+ def get(self, key: str, default: t.Any = None) -> t.Any:
+ """Look up a variable by name, or return a default if the key is
+ not found.
+
+ :param key: The variable name to look up.
+ :param default: The value to return if the key is not found.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def resolve(self, key: str) -> t.Union[t.Any, "Undefined"]:
+ """Look up a variable by name, or return an :class:`Undefined`
+ object if the key is not found.
+
+ If you need to add custom behavior, override
+ :meth:`resolve_or_missing`, not this method. The various lookup
+ functions use that method, not this one.
+
+ :param key: The variable name to look up.
+ """
+ if self._legacy_resolve_mode:
+ if key in self.vars:
+ return self.vars[key]
+
+ if key in self.parent:
+ return self.parent[key]
+
+ return self.environment.undefined(name=key)
+
+ rv = self.resolve_or_missing(key)
+
+ if rv is missing:
+ return self.environment.undefined(name=key)
+
+ return rv
+
+ def resolve_or_missing(self, key: str) -> t.Any:
+ """Look up a variable by name, or return a ``missing`` sentinel
+ if the key is not found.
+
+ Override this method to add custom lookup behavior.
+ :meth:`resolve`, :meth:`get`, and :meth:`__getitem__` use this
+ method. Don't call this method directly.
+
+ :param key: The variable name to look up.
+ """
+ if self._legacy_resolve_mode:
+ rv = self.resolve(key)
+
+ if isinstance(rv, Undefined):
+ return missing
+
+ return rv
+
+ if key in self.vars:
+ return self.vars[key]
+
+ if key in self.parent:
+ return self.parent[key]
+
+ return missing
+
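+    # A minimal sketch of custom lookup behavior via the override point
+    # documented above (``DEFAULTS`` is a hypothetical module-level dict):
+    #
+    #   class DefaultingContext(Context):
+    #       def resolve_or_missing(self, key: str) -> t.Any:
+    #           rv = super().resolve_or_missing(key)
+    #           return DEFAULTS.get(key, missing) if rv is missing else rv
+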
+ def get_exported(self) -> t.Dict[str, t.Any]:
+ """Get a new dict with the exported variables."""
+ return {k: self.vars[k] for k in self.exported_vars}
+
+ def get_all(self) -> t.Dict[str, t.Any]:
+        """Return the complete context as dict including the exported
+        variables. For optimization reasons this might not return an
+        actual copy, so be careful with using it.
+ """
+ if not self.vars:
+ return self.parent
+ if not self.parent:
+ return self.vars
+ return dict(self.parent, **self.vars)
+
+ @internalcode
+ def call(
+ __self, __obj: t.Callable, *args: t.Any, **kwargs: t.Any # noqa: B902
+ ) -> t.Union[t.Any, "Undefined"]:
+ """Call the callable with the arguments and keyword arguments
+ provided but inject the active context or environment as first
+ argument if the callable has :func:`pass_context` or
+ :func:`pass_environment`.
+ """
+ if __debug__:
+ __traceback_hide__ = True # noqa
+
+ # Allow callable classes to take a context
+ if (
+ hasattr(__obj, "__call__") # noqa: B004
+ and _PassArg.from_obj(__obj.__call__) is not None # type: ignore
+ ):
+ __obj = __obj.__call__ # type: ignore
+
+ pass_arg = _PassArg.from_obj(__obj)
+
+ if pass_arg is _PassArg.context:
+ # the active context should have access to variables set in
+ # loops and blocks without mutating the context itself
+ if kwargs.get("_loop_vars"):
+ __self = __self.derived(kwargs["_loop_vars"])
+ if kwargs.get("_block_vars"):
+ __self = __self.derived(kwargs["_block_vars"])
+ args = (__self,) + args
+ elif pass_arg is _PassArg.eval_context:
+ args = (__self.eval_ctx,) + args
+ elif pass_arg is _PassArg.environment:
+ args = (__self.environment,) + args
+
+ kwargs.pop("_block_vars", None)
+ kwargs.pop("_loop_vars", None)
+
+ try:
+ return __obj(*args, **kwargs)
+ except StopIteration:
+ return __self.environment.undefined(
+ "value was undefined because a callable raised a"
+ " StopIteration exception"
+ )
+
+ def derived(self, locals: t.Optional[t.Dict[str, t.Any]] = None) -> "Context":
+ """Internal helper function to create a derived context. This is
+ used in situations where the system needs a new context in the same
+ template that is independent.
+ """
+ context = new_context(
+ self.environment, self.name, {}, self.get_all(), True, None, locals
+ )
+ context.eval_ctx = self.eval_ctx
+ context.blocks.update((k, list(v)) for k, v in self.blocks.items())
+ return context
+
+ keys = _dict_method_all(dict.keys)
+ values = _dict_method_all(dict.values)
+ items = _dict_method_all(dict.items)
+
+ def __contains__(self, name: str) -> bool:
+ return name in self.vars or name in self.parent
+
+ def __getitem__(self, key: str) -> t.Any:
+ """Look up a variable by name with ``[]`` syntax, or raise a
+ ``KeyError`` if the key is not found.
+ """
+ item = self.resolve_or_missing(key)
+
+ if item is missing:
+ raise KeyError(key)
+
+ return item
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.get_all()!r} of {self.name!r}>"
+
+
+class BlockReference:
+ """One block on a template reference."""
+
+ def __init__(
+ self,
+ name: str,
+ context: "Context",
+ stack: t.List[t.Callable[["Context"], t.Iterator[str]]],
+ depth: int,
+ ) -> None:
+ self.name = name
+ self._context = context
+ self._stack = stack
+ self._depth = depth
+
+ @property
+ def super(self) -> t.Union["BlockReference", "Undefined"]:
+        """Return a reference to the parent block."""
+ if self._depth + 1 >= len(self._stack):
+ return self._context.environment.undefined(
+ f"there is no parent block called {self.name!r}.", name="super"
+ )
+ return BlockReference(self.name, self._context, self._stack, self._depth + 1)
+
+ @internalcode
+ async def _async_call(self) -> str:
+ rv = concat(
+ [x async for x in self._stack[self._depth](self._context)] # type: ignore
+ )
+
+ if self._context.eval_ctx.autoescape:
+ return Markup(rv)
+
+ return rv
+
+ @internalcode
+ def __call__(self) -> str:
+ if self._context.environment.is_async:
+ return self._async_call() # type: ignore
+
+ rv = concat(self._stack[self._depth](self._context))
+
+ if self._context.eval_ctx.autoescape:
+ return Markup(rv)
+
+ return rv
+
+
+class LoopContext:
+ """A wrapper iterable for dynamic ``for`` loops, with information
+ about the loop and iteration.
+ """
+
+ #: Current iteration of the loop, starting at 0.
+ index0 = -1
+
+ _length: t.Optional[int] = None
+ _after: t.Any = missing
+ _current: t.Any = missing
+ _before: t.Any = missing
+ _last_changed_value: t.Any = missing
+
+ def __init__(
+ self,
+ iterable: t.Iterable[V],
+ undefined: t.Type["Undefined"],
+ recurse: t.Optional["LoopRenderFunc"] = None,
+ depth0: int = 0,
+ ) -> None:
+ """
+ :param iterable: Iterable to wrap.
+ :param undefined: :class:`Undefined` class to use for next and
+ previous items.
+ :param recurse: The function to render the loop body when the
+ loop is marked recursive.
+ :param depth0: Incremented when looping recursively.
+ """
+ self._iterable = iterable
+ self._iterator = self._to_iterator(iterable)
+ self._undefined = undefined
+ self._recurse = recurse
+ #: How many levels deep a recursive loop currently is, starting at 0.
+ self.depth0 = depth0
+
+ @staticmethod
+ def _to_iterator(iterable: t.Iterable[V]) -> t.Iterator[V]:
+ return iter(iterable)
+
+ @property
+ def length(self) -> int:
+ """Length of the iterable.
+
+ If the iterable is a generator or otherwise does not have a
+ size, it is eagerly evaluated to get a size.
+ """
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable) # type: ignore
+ except TypeError:
+ iterable = list(self._iterator)
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
+ return self._length
+
+ def __len__(self) -> int:
+ return self.length
+
+ @property
+ def depth(self) -> int:
+ """How many levels deep a recursive loop currently is, starting at 1."""
+ return self.depth0 + 1
+
+ @property
+ def index(self) -> int:
+ """Current iteration of the loop, starting at 1."""
+ return self.index0 + 1
+
+ @property
+ def revindex0(self) -> int:
+ """Number of iterations from the end of the loop, ending at 0.
+
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index
+
+ @property
+ def revindex(self) -> int:
+ """Number of iterations from the end of the loop, ending at 1.
+
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index0
+
+ @property
+ def first(self) -> bool:
+ """Whether this is the first iteration of the loop."""
+ return self.index0 == 0
+
+ def _peek_next(self) -> t.Any:
+ """Return the next element in the iterable, or :data:`missing`
+ if the iterable is exhausted. Only peeks one item ahead, caching
+        the result in :attr:`_after` for use in subsequent checks. The
+ cache is reset when :meth:`__next__` is called.
+ """
+ if self._after is not missing:
+ return self._after
+
+ self._after = next(self._iterator, missing)
+ return self._after
+
+ @property
+ def last(self) -> bool:
+ """Whether this is the last iteration of the loop.
+
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+        The :func:`jinja-filters.groupby` filter avoids that issue.
+ """
+ return self._peek_next() is missing
+
+ @property
+ def previtem(self) -> t.Union[t.Any, "Undefined"]:
+ """The item in the previous iteration. Undefined during the
+ first iteration.
+ """
+ if self.first:
+ return self._undefined("there is no previous item")
+
+ return self._before
+
+ @property
+ def nextitem(self) -> t.Union[t.Any, "Undefined"]:
+ """The item in the next iteration. Undefined during the last
+ iteration.
+
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`jinja-filters.groupby` filter avoids that issue.
+ """
+ rv = self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def cycle(self, *args: V) -> V:
+ """Return a value from the given args, cycling through based on
+ the current :attr:`index0`.
+
+ :param args: One or more values to cycle through.
+ """
+ if not args:
+ raise TypeError("no items for cycling given")
+
+ return args[self.index0 % len(args)]
+
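+    # Template usage sketch (not executed here):
+    #
+    #   {% for row in rows %}
+    #     <li class="{{ loop.cycle('odd', 'even') }}">{{ row }}</li>
+    #   {% endfor %}
+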
+ def changed(self, *value: t.Any) -> bool:
+ """Return ``True`` if previously called with a different value
+ (including when called for the first time).
+
+ :param value: One or more values to compare to the last call.
+ """
+ if self._last_changed_value != value:
+ self._last_changed_value = value
+ return True
+
+ return False
+
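+    # Template usage sketch (not executed here):
+    #
+    #   {% for entry in entries %}
+    #     {% if loop.changed(entry.category) %}
+    #       <h2>{{ entry.category }}</h2>
+    #     {% endif %}
+    #     {{ entry.title }}
+    #   {% endfor %}
+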
+ def __iter__(self) -> "LoopContext":
+ return self
+
+ def __next__(self) -> t.Tuple[t.Any, "LoopContext"]:
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = next(self._iterator)
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+ @internalcode
+ def __call__(self, iterable: t.Iterable[V]) -> str:
+ """When iterating over nested data, render the body of the loop
+ recursively with the given inner iterable data.
+
+ The loop must have the ``recursive`` marker for this to work.
+ """
+ if self._recurse is None:
+ raise TypeError(
+ "The loop must have the 'recursive' marker to be called recursively."
+ )
+
+ return self._recurse(iterable, self._recurse, depth=self.depth)
+
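+    # Template usage sketch for a recursive loop (not executed here):
+    #
+    #   {% for item in tree recursive %}
+    #     <li>{{ item.name }}
+    #       {% if item.children %}<ul>{{ loop(item.children) }}</ul>{% endif %}
+    #     </li>
+    #   {% endfor %}
+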
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.index}/{self.length}>"
+
+
+class AsyncLoopContext(LoopContext):
+ _iterator: t.AsyncIterator[t.Any] # type: ignore
+
+ @staticmethod
+ def _to_iterator( # type: ignore
+ iterable: t.Union[t.Iterable[V], t.AsyncIterable[V]]
+ ) -> t.AsyncIterator[V]:
+ return auto_aiter(iterable)
+
+ @property
+ async def length(self) -> int: # type: ignore
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable) # type: ignore
+ except TypeError:
+ iterable = [x async for x in self._iterator]
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
+ return self._length
+
+ @property
+ async def revindex0(self) -> int: # type: ignore
+ return await self.length - self.index
+
+ @property
+ async def revindex(self) -> int: # type: ignore
+ return await self.length - self.index0
+
+ async def _peek_next(self) -> t.Any:
+ if self._after is not missing:
+ return self._after
+
+ try:
+ self._after = await self._iterator.__anext__()
+ except StopAsyncIteration:
+ self._after = missing
+
+ return self._after
+
+ @property
+ async def last(self) -> bool: # type: ignore
+ return await self._peek_next() is missing
+
+ @property
+ async def nextitem(self) -> t.Union[t.Any, "Undefined"]:
+ rv = await self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def __aiter__(self) -> "AsyncLoopContext":
+ return self
+
+ async def __anext__(self) -> t.Tuple[t.Any, "AsyncLoopContext"]:
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = await self._iterator.__anext__()
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+
+class Macro:
+ """Wraps a macro function."""
+
+ def __init__(
+ self,
+ environment: "Environment",
+ func: t.Callable[..., str],
+ name: str,
+ arguments: t.List[str],
+ catch_kwargs: bool,
+ catch_varargs: bool,
+ caller: bool,
+ default_autoescape: t.Optional[bool] = None,
+ ):
+ self._environment = environment
+ self._func = func
+ self._argument_count = len(arguments)
+ self.name = name
+ self.arguments = arguments
+ self.catch_kwargs = catch_kwargs
+ self.catch_varargs = catch_varargs
+ self.caller = caller
+ self.explicit_caller = "caller" in arguments
+
+ if default_autoescape is None:
+ if callable(environment.autoescape):
+ default_autoescape = environment.autoescape(None)
+ else:
+ default_autoescape = environment.autoescape
+
+ self._default_autoescape = default_autoescape
+
+ @internalcode
+ @pass_eval_context
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> str:
+        # This requires a bit of explanation. In the past we used to
+        # decide, largely based on compile-time information, whether a
+        # macro is safe or unsafe. While there was a volatile mode, it was
+        # largely unused for deciding on escaping. This turns out to be
+        # problematic for macros because whether a macro is safe depends
+        # not on the escape mode when it was defined, but rather on the
+        # mode when it is used.
+ #
+        # However, because we export macros from the module system and
+        # there are historic callers that do not pass an eval context (and
+        # will continue not to pass one), we need to perform an instance
+ # check here.
+ #
+ # This is considered safe because an eval context is not a valid
+ # argument to callables otherwise anyway. Worst case here is
+ # that if no eval context is passed we fall back to the compile
+ # time autoescape flag.
+ if args and isinstance(args[0], EvalContext):
+ autoescape = args[0].autoescape
+ args = args[1:]
+ else:
+ autoescape = self._default_autoescape
+
+ # try to consume the positional arguments
+ arguments = list(args[: self._argument_count])
+ off = len(arguments)
+
+ # For information why this is necessary refer to the handling
+ # of caller in the `macro_body` handler in the compiler.
+ found_caller = False
+
+ # if the number of arguments consumed is not the number of
+ # arguments expected we start filling in keyword arguments
+ # and defaults.
+ if off != self._argument_count:
+ for name in self.arguments[len(arguments) :]:
+ try:
+ value = kwargs.pop(name)
+ except KeyError:
+ value = missing
+ if name == "caller":
+ found_caller = True
+ arguments.append(value)
+ else:
+ found_caller = self.explicit_caller
+
+        # it's important that the order of these arguments does not change
+        # unless it is also changed in the compiler's `function_scoping`
+        # method. the order is caller, keyword arguments, positional
+        # arguments!
+ if self.caller and not found_caller:
+ caller = kwargs.pop("caller", None)
+ if caller is None:
+ caller = self._environment.undefined("No caller defined", name="caller")
+ arguments.append(caller)
+
+ if self.catch_kwargs:
+ arguments.append(kwargs)
+ elif kwargs:
+ if "caller" in kwargs:
+ raise TypeError(
+ f"macro {self.name!r} was invoked with two values for the special"
+ " caller argument. This is most likely a bug."
+ )
+ raise TypeError(
+ f"macro {self.name!r} takes no keyword argument {next(iter(kwargs))!r}"
+ )
+ if self.catch_varargs:
+ arguments.append(args[self._argument_count :])
+ elif len(args) > self._argument_count:
+ raise TypeError(
+                f"macro {self.name!r} takes no more than"
+ f" {len(self.arguments)} argument(s)"
+ )
+
+ return self._invoke(arguments, autoescape)
+
+ async def _async_invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
+ rv = await self._func(*arguments) # type: ignore
+
+ if autoescape:
+ return Markup(rv)
+
+ return rv # type: ignore
+
+ def _invoke(self, arguments: t.List[t.Any], autoescape: bool) -> str:
+ if self._environment.is_async:
+ return self._async_invoke(arguments, autoescape) # type: ignore
+
+ rv = self._func(*arguments)
+
+ if autoescape:
+ rv = Markup(rv)
+
+ return rv
+
+ def __repr__(self) -> str:
+ name = "anonymous" if self.name is None else repr(self.name)
+ return f"<{type(self).__name__} {name}>"
+
+
+class Undefined:
+ """The default undefined type. This undefined type can be printed and
+ iterated over, but every other access will raise an :exc:`UndefinedError`:
+
+ >>> foo = Undefined(name='foo')
+ >>> str(foo)
+ ''
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = (
+ "_undefined_hint",
+ "_undefined_obj",
+ "_undefined_name",
+ "_undefined_exception",
+ )
+
+ def __init__(
+ self,
+ hint: t.Optional[str] = None,
+ obj: t.Any = missing,
+ name: t.Optional[str] = None,
+ exc: t.Type[TemplateRuntimeError] = UndefinedError,
+ ) -> None:
+ self._undefined_hint = hint
+ self._undefined_obj = obj
+ self._undefined_name = name
+ self._undefined_exception = exc
+
+ @property
+ def _undefined_message(self) -> str:
+ """Build a message about the undefined value based on how it was
+ accessed.
+ """
+ if self._undefined_hint:
+ return self._undefined_hint
+
+ if self._undefined_obj is missing:
+ return f"{self._undefined_name!r} is undefined"
+
+ if not isinstance(self._undefined_name, str):
+ return (
+ f"{object_type_repr(self._undefined_obj)} has no"
+ f" element {self._undefined_name!r}"
+ )
+
+ return (
+ f"{object_type_repr(self._undefined_obj)!r} has no"
+ f" attribute {self._undefined_name!r}"
+ )
+
+ @internalcode
+ def _fail_with_undefined_error(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> "te.NoReturn":
+ """Raise an :exc:`UndefinedError` when operations are performed
+ on the undefined value.
+ """
+ raise self._undefined_exception(self._undefined_message)
+
+ @internalcode
+ def __getattr__(self, name: str) -> t.Any:
+ if name[:2] == "__":
+ raise AttributeError(name)
+
+ return self._fail_with_undefined_error()
+
+ __add__ = __radd__ = __sub__ = __rsub__ = _fail_with_undefined_error
+ __mul__ = __rmul__ = __div__ = __rdiv__ = _fail_with_undefined_error
+ __truediv__ = __rtruediv__ = _fail_with_undefined_error
+ __floordiv__ = __rfloordiv__ = _fail_with_undefined_error
+ __mod__ = __rmod__ = _fail_with_undefined_error
+ __pos__ = __neg__ = _fail_with_undefined_error
+ __call__ = __getitem__ = _fail_with_undefined_error
+ __lt__ = __le__ = __gt__ = __ge__ = _fail_with_undefined_error
+ __int__ = __float__ = __complex__ = _fail_with_undefined_error
+ __pow__ = __rpow__ = _fail_with_undefined_error
+
+ def __eq__(self, other: t.Any) -> bool:
+ return type(self) is type(other)
+
+ def __ne__(self, other: t.Any) -> bool:
+ return not self.__eq__(other)
+
+ def __hash__(self) -> int:
+ return id(type(self))
+
+ def __str__(self) -> str:
+ return ""
+
+ def __len__(self) -> int:
+ return 0
+
+ def __iter__(self) -> t.Iterator[t.Any]:
+ yield from ()
+
+ async def __aiter__(self) -> t.AsyncIterator[t.Any]:
+ for _ in ():
+ yield
+
+ def __bool__(self) -> bool:
+ return False
+
+ def __repr__(self) -> str:
+ return "Undefined"
+
+
+def make_logging_undefined(
+ logger: t.Optional["logging.Logger"] = None, base: t.Type[Undefined] = Undefined
+) -> t.Type[Undefined]:
+ """Given a logger object this returns a new undefined class that will
+ log certain failures. It will log iterations and printing. If no
+ logger is given a default logger is created.
+
+ Example::
+
+ logger = logging.getLogger(__name__)
+ LoggingUndefined = make_logging_undefined(
+ logger=logger,
+ base=Undefined
+ )
+
+ .. versionadded:: 2.8
+
+ :param logger: the logger to use. If not provided, a default logger
+ is created.
+ :param base: the base class to add logging functionality to. This
+ defaults to :class:`Undefined`.
+ """
+ if logger is None:
+ import logging
+
+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.StreamHandler(sys.stderr))
+
+ def _log_message(undef: Undefined) -> None:
+ logger.warning( # type: ignore
+ "Template variable warning: %s", undef._undefined_message
+ )
+
+ class LoggingUndefined(base): # type: ignore
+ __slots__ = ()
+
+ def _fail_with_undefined_error( # type: ignore
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> "te.NoReturn":
+ try:
+ super()._fail_with_undefined_error(*args, **kwargs)
+ except self._undefined_exception as e:
+ logger.error("Template variable error: %s", e) # type: ignore
+ raise e
+
+ def __str__(self) -> str:
+ _log_message(self)
+ return super().__str__() # type: ignore
+
+ def __iter__(self) -> t.Iterator[t.Any]:
+ _log_message(self)
+ return super().__iter__() # type: ignore
+
+ def __bool__(self) -> bool:
+ _log_message(self)
+ return super().__bool__() # type: ignore
+
+ return LoggingUndefined
+
+
+class ChainableUndefined(Undefined):
+ """An undefined that is chainable, where both ``__getattr__`` and
+ ``__getitem__`` return itself rather than raising an
+ :exc:`UndefinedError`.
+
+ >>> foo = ChainableUndefined(name='foo')
+ >>> str(foo.bar['baz'])
+ ''
+ >>> foo.bar['baz'] + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+
+ .. versionadded:: 2.11.0
+ """
+
+ __slots__ = ()
+
+ def __html__(self) -> str:
+ return str(self)
+
+ def __getattr__(self, _: str) -> "ChainableUndefined":
+ return self
+
+ __getitem__ = __getattr__ # type: ignore
+
+
+class DebugUndefined(Undefined):
+ """An undefined that returns the debug info when printed.
+
+ >>> foo = DebugUndefined(name='foo')
+ >>> str(foo)
+ '{{ foo }}'
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = ()
+
+ def __str__(self) -> str:
+ if self._undefined_hint:
+ message = f"undefined value printed: {self._undefined_hint}"
+
+ elif self._undefined_obj is missing:
+ message = self._undefined_name # type: ignore
+
+ else:
+ message = (
+ f"no such element: {object_type_repr(self._undefined_obj)}"
+ f"[{self._undefined_name!r}]"
+ )
+
+ return f"{{{{ {message} }}}}"
+
+
+class StrictUndefined(Undefined):
+ """An undefined that barks on print and iteration as well as boolean
+ tests and all kinds of comparisons. In other words: you can do nothing
+ with it except checking if it's defined using the `defined` test.
+
+ >>> foo = StrictUndefined(name='foo')
+ >>> str(foo)
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> not foo
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = ()
+ __iter__ = __str__ = __len__ = Undefined._fail_with_undefined_error
+ __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
+ __contains__ = Undefined._fail_with_undefined_error
+
+
+# Remove slots attributes, after the metaclass is applied they are
+# unneeded and contain wrong data for subclasses.
+del (
+ Undefined.__slots__,
+ ChainableUndefined.__slots__,
+ DebugUndefined.__slots__,
+ StrictUndefined.__slots__,
+)
diff --git a/lib/spack/external/_vendoring/jinja2/sandbox.py b/lib/spack/external/_vendoring/jinja2/sandbox.py
new file mode 100644
index 0000000000..4294884776
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/sandbox.py
@@ -0,0 +1,428 @@
+"""A sandbox layer that ensures unsafe operations cannot be performed.
+Useful when the template itself comes from an untrusted source.
+"""
+import operator
+import types
+import typing as t
+from _string import formatter_field_name_split # type: ignore
+from collections import abc
+from collections import deque
+from string import Formatter
+
+from markupsafe import EscapeFormatter
+from markupsafe import Markup
+
+from .environment import Environment
+from .exceptions import SecurityError
+from .runtime import Context
+from .runtime import Undefined
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: Unsafe function attributes.
+UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
+
+#: Unsafe method attributes. Function attributes are unsafe for methods too.
+UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
+
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
+
+#: unsafe attributes on coroutines
+UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
+
+#: unsafe attributes on async generators
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
+
+_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
+ (
+ abc.MutableSet,
+ frozenset(
+ [
+ "add",
+ "clear",
+ "difference_update",
+ "discard",
+ "pop",
+ "remove",
+ "symmetric_difference_update",
+ "update",
+ ]
+ ),
+ ),
+ (
+ abc.MutableMapping,
+ frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
+ ),
+ (
+ abc.MutableSequence,
+ frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
+ ),
+ (
+ deque,
+ frozenset(
+ [
+ "append",
+ "appendleft",
+ "clear",
+ "extend",
+ "extendleft",
+ "pop",
+ "popleft",
+ "remove",
+ "rotate",
+ ]
+ ),
+ ),
+)
+
+
+def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
+ if not isinstance(
+ callable, (types.MethodType, types.BuiltinMethodType)
+ ) or callable.__name__ not in ("format", "format_map"):
+ return None
+
+ obj = callable.__self__
+
+ if isinstance(obj, str):
+ return obj
+
+ return None
+
+
+def safe_range(*args: int) -> range:
+    """A range that refuses to produce more than MAX_RANGE items.
+ """
+ rng = range(*args)
+
+ if len(rng) > MAX_RANGE:
+ raise OverflowError(
+ "Range too big. The sandbox blocks ranges larger than"
+ f" MAX_RANGE ({MAX_RANGE})."
+ )
+
+ return rng
+
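+# A hedged sketch of the guard in action:
+#
+#   safe_range(5)              -> range(0, 5)
+#   safe_range(MAX_RANGE + 1)  -> raises OverflowError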
+
+def unsafe(f: F) -> F:
+ """Marks a function or method as unsafe.
+
+    .. code-block:: python
+
+ @unsafe
+ def delete(self):
+ pass
+ """
+ f.unsafe_callable = True # type: ignore
+ return f
+
+
+def is_internal_attribute(obj: t.Any, attr: str) -> bool:
+ """Test if the attribute given is an internal python attribute. For
+ example this function returns `True` for the `func_code` attribute of
+ python objects. This is useful if the environment method
+ :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+ >>> from jinja2.sandbox import is_internal_attribute
+ >>> is_internal_attribute(str, "mro")
+ True
+ >>> is_internal_attribute(str, "upper")
+ False
+ """
+ if isinstance(obj, types.FunctionType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+ return True
+ elif isinstance(obj, types.MethodType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
+ return True
+ elif isinstance(obj, type):
+ if attr == "mro":
+ return True
+ elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
+ return True
+ elif isinstance(obj, types.GeneratorType):
+ if attr in UNSAFE_GENERATOR_ATTRIBUTES:
+ return True
+ elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
+ if attr in UNSAFE_COROUTINE_ATTRIBUTES:
+ return True
+ elif hasattr(types, "AsyncGeneratorType") and isinstance(
+ obj, types.AsyncGeneratorType
+ ):
+ if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
+ return True
+ return attr.startswith("__")
+
+
+def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
+ """This function checks if an attribute on a builtin mutable object
+ (list, dict, set or deque) or the corresponding ABCs would modify it
+ if called.
+
+ >>> modifies_known_mutable({}, "clear")
+ True
+ >>> modifies_known_mutable({}, "keys")
+ False
+ >>> modifies_known_mutable([], "append")
+ True
+ >>> modifies_known_mutable([], "index")
+ False
+
+ If called with an unsupported object, ``False`` is returned.
+
+ >>> modifies_known_mutable("foo", "upper")
+ False
+ """
+ for typespec, unsafe in _mutable_spec:
+ if isinstance(obj, typespec):
+ return attr in unsafe
+ return False
+
+
+class SandboxedEnvironment(Environment):
+ """The sandboxed environment. It works like the regular environment but
+ tells the compiler to generate sandboxed code. Additionally subclasses of
+ this environment may override the methods that tell the runtime what
+ attributes or functions are safe to access.
+
+    If the template tries to access insecure code, a :exc:`SecurityError`
+    is raised. However, other exceptions may also occur during rendering,
+    so the caller has to ensure that all exceptions are caught.
+ """
+
+ sandboxed = True
+
+ #: default callback table for the binary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`binop_table`
+ default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
+ "+": operator.add,
+ "-": operator.sub,
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ }
+
+ #: default callback table for the unary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`unop_table`
+ default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
+ "+": operator.pos,
+ "-": operator.neg,
+ }
+
+ #: a set of binary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_binop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`binop_table`.
+ #:
+ #: The following binary operators are interceptable:
+ #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_binops: t.FrozenSet[str] = frozenset()
+
+ #: a set of unary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_unop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`unop_table`.
+ #:
+ #: The following unary operators are interceptable: ``+``, ``-``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_unops: t.FrozenSet[str] = frozenset()
+
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.globals["range"] = safe_range
+ self.binop_table = self.default_binop_table.copy()
+ self.unop_table = self.default_unop_table.copy()
+
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
+ """The sandboxed environment will call this method to check if the
+        attribute of an object is safe to access. By default all attributes
+        starting with an underscore are considered private, as are the
+        special attributes of internal Python objects as reported by the
+        :func:`is_internal_attribute` function.
+ """
+ return not (attr.startswith("_") or is_internal_attribute(obj, attr))
+
+ def is_safe_callable(self, obj: t.Any) -> bool:
+ """Check if an object is safely callable. By default callables
+ are considered safe unless decorated with :func:`unsafe`.
+
+ This also recognizes the Django convention of setting
+ ``func.alters_data = True``.
+ """
+ return not (
+ getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
+ )
+
+ def call_binop(
+ self, context: Context, operator: str, left: t.Any, right: t.Any
+ ) -> t.Any:
+ """For intercepted binary operator calls (:meth:`intercepted_binops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.binop_table[operator](left, right)
+
+ def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
+ """For intercepted unary operator calls (:meth:`intercepted_unops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.unop_table[operator](arg)
+
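+    # A minimal sketch of operator interception (a subclass an embedding
+    # application might define; not part of the vendored API):
+    #
+    #   class NoPowEnvironment(SandboxedEnvironment):
+    #       intercepted_binops = frozenset(["**"])
+    #
+    #       def call_binop(self, context, operator, left, right):
+    #           if operator == "**":
+    #               raise SecurityError("the power operator is unavailable")
+    #           return super().call_binop(context, operator, left, right)
+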
+ def getitem(
+ self, obj: t.Any, argument: t.Union[str, t.Any]
+ ) -> t.Union[t.Any, Undefined]:
+        """Subscript an object from sandboxed code."""
+ try:
+ return obj[argument]
+ except (TypeError, LookupError):
+ if isinstance(argument, str):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ value = getattr(obj, attr)
+ except AttributeError:
+ pass
+ else:
+ if self.is_safe_attribute(obj, argument, value):
+ return value
+ return self.unsafe_undefined(obj, argument)
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
+        """Subscript an object from sandboxed code and prefer the
+        attribute. The attribute passed *must* be a string.
+ """
+ try:
+ value = getattr(obj, attribute)
+ except AttributeError:
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError):
+ pass
+ else:
+ if self.is_safe_attribute(obj, attribute, value):
+ return value
+ return self.unsafe_undefined(obj, attribute)
+ return self.undefined(obj=obj, name=attribute)
+
+ def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
+ """Return an undefined object for unsafe attributes."""
+ return self.undefined(
+ f"access to attribute {attribute!r} of"
+ f" {type(obj).__name__!r} object is unsafe.",
+ name=attribute,
+ obj=obj,
+ exc=SecurityError,
+ )
+
+ def format_string(
+ self,
+ s: str,
+ args: t.Tuple[t.Any, ...],
+ kwargs: t.Dict[str, t.Any],
+ format_func: t.Optional[t.Callable] = None,
+ ) -> str:
+ """If a format call is detected, then this is routed through this
+ method so that our safety sandbox can be used for it.
+ """
+ formatter: SandboxedFormatter
+ if isinstance(s, Markup):
+ formatter = SandboxedEscapeFormatter(self, escape=s.escape)
+ else:
+ formatter = SandboxedFormatter(self)
+
+ if format_func is not None and format_func.__name__ == "format_map":
+ if len(args) != 1 or kwargs:
+ raise TypeError(
+                    "format_map() takes exactly one argument,"
+                    f" {len(args) + (kwargs is not None)} given"
+ )
+
+ kwargs = args[0]
+ args = ()
+
+ rv = formatter.vformat(s, args, kwargs)
+ return type(s)(rv)
+
+ def call(
+ __self, # noqa: B902
+ __context: Context,
+ __obj: t.Any,
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> t.Any:
+ """Call an object from sandboxed code."""
+ fmt = inspect_format_method(__obj)
+ if fmt is not None:
+ return __self.format_string(fmt, args, kwargs, __obj)
+
+ # the double prefixes are to avoid double keyword argument
+ # errors when proxying the call.
+ if not __self.is_safe_callable(__obj):
+ raise SecurityError(f"{__obj!r} is not safely callable")
+ return __context.call(__obj, *args, **kwargs)
+
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+ """Works exactly like the regular `SandboxedEnvironment` but does not
+ permit modifications on the builtin mutable objects `list`, `set`, and
+ `dict` by using the :func:`modifies_known_mutable` function.
+ """
+
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
+ if not super().is_safe_attribute(obj, attr, value):
+ return False
+
+ return not modifies_known_mutable(obj, attr)
+
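+# A hedged sketch: with ImmutableSandboxedEnvironment, templates that try
+# to mutate builtin containers fail with a SecurityError:
+#
+#   env = ImmutableSandboxedEnvironment()
+#   env.from_string("{{ [1, 2].append(3) }}").render()  # SecurityError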
+
+class SandboxedFormatter(Formatter):
+ def __init__(self, env: Environment, **kwargs: t.Any) -> None:
+ self._env = env
+ super().__init__(**kwargs) # type: ignore
+
+ def get_field(
+ self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
+ ) -> t.Tuple[t.Any, str]:
+ first, rest = formatter_field_name_split(field_name)
+ obj = self.get_value(first, args, kwargs)
+ for is_attr, i in rest:
+ if is_attr:
+ obj = self._env.getattr(obj, i)
+ else:
+ obj = self._env.getitem(obj, i)
+ return obj, first
+
+
+class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
+ pass
diff --git a/lib/spack/external/_vendoring/jinja2/tests.py b/lib/spack/external/_vendoring/jinja2/tests.py
new file mode 100644
index 0000000000..a467cf08b5
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/tests.py
@@ -0,0 +1,255 @@
+"""Built-in template tests used with the ``is`` operator."""
+import operator
+import typing as t
+from collections import abc
+from numbers import Number
+
+from .runtime import Undefined
+from .utils import pass_environment
+
+if t.TYPE_CHECKING:
+ from .environment import Environment
+
+
+def test_odd(value: int) -> bool:
+ """Return true if the variable is odd."""
+ return value % 2 == 1
+
+
+def test_even(value: int) -> bool:
+ """Return true if the variable is even."""
+ return value % 2 == 0
+
+
+def test_divisibleby(value: int, num: int) -> bool:
+ """Check if a variable is divisible by a number."""
+ return value % num == 0
+
+
+def test_defined(value: t.Any) -> bool:
+ """Return true if the variable is defined:
+
+ .. sourcecode:: jinja
+
+ {% if variable is defined %}
+ value of variable: {{ variable }}
+ {% else %}
+ variable is not defined
+ {% endif %}
+
+ See the :func:`default` filter for a simple way to set undefined
+ variables.
+ """
+ return not isinstance(value, Undefined)
+
+
+def test_undefined(value: t.Any) -> bool:
+ """Like :func:`defined` but the other way round."""
+ return isinstance(value, Undefined)
+
+
+@pass_environment
+def test_filter(env: "Environment", value: str) -> bool:
+ """Check if a filter exists by name. Useful if a filter may be
+ optionally available.
+
+ .. code-block:: jinja
+
+ {% if 'markdown' is filter %}
+ {{ value | markdown }}
+ {% else %}
+ {{ value }}
+ {% endif %}
+
+ .. versionadded:: 3.0
+ """
+ return value in env.filters
+
+
+@pass_environment
+def test_test(env: "Environment", value: str) -> bool:
+ """Check if a test exists by name. Useful if a test may be
+ optionally available.
+
+ .. code-block:: jinja
+
+ {% if 'loud' is test %}
+ {% if value is loud %}
+ {{ value|upper }}
+ {% else %}
+ {{ value|lower }}
+ {% endif %}
+ {% else %}
+ {{ value }}
+ {% endif %}
+
+ .. versionadded:: 3.0
+ """
+ return value in env.tests
+
+
+def test_none(value: t.Any) -> bool:
+ """Return true if the variable is none."""
+ return value is None
+
+
+def test_boolean(value: t.Any) -> bool:
+ """Return true if the object is a boolean value.
+
+ .. versionadded:: 2.11
+ """
+ return value is True or value is False
+
+
+def test_false(value: t.Any) -> bool:
+ """Return true if the object is False.
+
+ .. versionadded:: 2.11
+ """
+ return value is False
+
+
+def test_true(value: t.Any) -> bool:
+ """Return true if the object is True.
+
+ .. versionadded:: 2.11
+ """
+ return value is True
+
+
+# NOTE: The existing 'number' test matches booleans and floats
+def test_integer(value: t.Any) -> bool:
+ """Return true if the object is an integer.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, int) and value is not True and value is not False
+
+
+# NOTE: The existing 'number' test matches booleans and integers
+def test_float(value: t.Any) -> bool:
+ """Return true if the object is a float.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, float)
+
+
+def test_lower(value: str) -> bool:
+ """Return true if the variable is lowercased."""
+ return str(value).islower()
+
+
+def test_upper(value: str) -> bool:
+ """Return true if the variable is uppercased."""
+ return str(value).isupper()
+
+
+def test_string(value: t.Any) -> bool:
+ """Return true if the object is a string."""
+ return isinstance(value, str)
+
+
+def test_mapping(value: t.Any) -> bool:
+ """Return true if the object is a mapping (dict etc.).
+
+ .. versionadded:: 2.6
+ """
+ return isinstance(value, abc.Mapping)
+
+
+def test_number(value: t.Any) -> bool:
+ """Return true if the variable is a number."""
+ return isinstance(value, Number)
+
+
+def test_sequence(value: t.Any) -> bool:
+    """Return true if the variable is a sequence. Sequences are variables
+    that have a length and support item access.
+ """
+ try:
+ len(value)
+ value.__getitem__
+ except Exception:
+ return False
+
+ return True
+
+
+def test_sameas(value: t.Any, other: t.Any) -> bool:
+    """Check if an object points to the same memory address as another
+ object:
+
+ .. sourcecode:: jinja
+
+ {% if foo.attribute is sameas false %}
+ the foo attribute really is the `False` singleton
+ {% endif %}
+ """
+ return value is other
+
+
+def test_iterable(value: t.Any) -> bool:
+ """Check if it's possible to iterate over an object."""
+ try:
+ iter(value)
+ except TypeError:
+ return False
+
+ return True
+
+
+def test_escaped(value: t.Any) -> bool:
+ """Check if the value is escaped."""
+ return hasattr(value, "__html__")
+
+
+def test_in(value: t.Any, seq: t.Container) -> bool:
+ """Check if value is in seq.
+
+ .. versionadded:: 2.10
+ """
+ return value in seq
+
+
+TESTS = {
+ "odd": test_odd,
+ "even": test_even,
+ "divisibleby": test_divisibleby,
+ "defined": test_defined,
+ "undefined": test_undefined,
+ "filter": test_filter,
+ "test": test_test,
+ "none": test_none,
+ "boolean": test_boolean,
+ "false": test_false,
+ "true": test_true,
+ "integer": test_integer,
+ "float": test_float,
+ "lower": test_lower,
+ "upper": test_upper,
+ "string": test_string,
+ "mapping": test_mapping,
+ "number": test_number,
+ "sequence": test_sequence,
+ "iterable": test_iterable,
+ "callable": callable,
+ "sameas": test_sameas,
+ "escaped": test_escaped,
+ "in": test_in,
+ "==": operator.eq,
+ "eq": operator.eq,
+ "equalto": operator.eq,
+ "!=": operator.ne,
+ "ne": operator.ne,
+ ">": operator.gt,
+ "gt": operator.gt,
+ "greaterthan": operator.gt,
+ "ge": operator.ge,
+ ">=": operator.ge,
+ "<": operator.lt,
+ "lt": operator.lt,
+ "lessthan": operator.lt,
+ "<=": operator.le,
+ "le": operator.le,
+}
diff --git a/lib/spack/external/_vendoring/jinja2/utils.py b/lib/spack/external/_vendoring/jinja2/utils.py
new file mode 100644
index 0000000000..567185f41f
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/utils.py
@@ -0,0 +1,854 @@
+import enum
+import json
+import os
+import re
+import typing as t
+import warnings
+from collections import abc
+from collections import deque
+from random import choice
+from random import randrange
+from threading import Lock
+from types import CodeType
+from urllib.parse import quote_from_bytes
+
+import markupsafe
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+# special singleton representing missing values for the runtime
+missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})()
+
+internal_code: t.MutableSet[CodeType] = set()
+
+concat = "".join
+
+
+def pass_context(f: F) -> F:
+ """Pass the :class:`~jinja2.runtime.Context` as the first argument
+ to the decorated function when called while rendering a template.
+
+ Can be used on functions, filters, and tests.
+
+ If only ``Context.eval_context`` is needed, use
+ :func:`pass_eval_context`. If only ``Context.environment`` is
+ needed, use :func:`pass_environment`.
+
+ .. versionadded:: 3.0.0
+ Replaces ``contextfunction`` and ``contextfilter``.
+ """
+ f.jinja_pass_arg = _PassArg.context # type: ignore
+ return f
+
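+# A minimal sketch of a context-aware filter an application might register
+# (``resolve_or_default`` is hypothetical, not part of this module):
+#
+#   @pass_context
+#   def resolve_or_default(context, name, default=""):
+#       value = context.resolve(name)
+#       return default if is_undefined(value) else value
+#
+#   env.filters["resolve_or_default"] = resolve_or_default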
+
+def pass_eval_context(f: F) -> F:
+ """Pass the :class:`~jinja2.nodes.EvalContext` as the first argument
+ to the decorated function when called while rendering a template.
+ See :ref:`eval-context`.
+
+ Can be used on functions, filters, and tests.
+
+ If only ``EvalContext.environment`` is needed, use
+ :func:`pass_environment`.
+
+ .. versionadded:: 3.0.0
+ Replaces ``evalcontextfunction`` and ``evalcontextfilter``.
+ """
+ f.jinja_pass_arg = _PassArg.eval_context # type: ignore
+ return f
+
+
+def pass_environment(f: F) -> F:
+ """Pass the :class:`~jinja2.Environment` as the first argument to
+ the decorated function when called while rendering a template.
+
+ Can be used on functions, filters, and tests.
+
+ .. versionadded:: 3.0.0
+ Replaces ``environmentfunction`` and ``environmentfilter``.
+ """
+ f.jinja_pass_arg = _PassArg.environment # type: ignore
+ return f
+
+
+class _PassArg(enum.Enum):
+ context = enum.auto()
+ eval_context = enum.auto()
+ environment = enum.auto()
+
+ @classmethod
+ def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
+ if hasattr(obj, "jinja_pass_arg"):
+ return obj.jinja_pass_arg # type: ignore
+
+ for prefix in "context", "eval_context", "environment":
+ squashed = prefix.replace("_", "")
+
+ for name in f"{squashed}function", f"{squashed}filter":
+ if getattr(obj, name, False) is True:
+ warnings.warn(
+ f"{name!r} is deprecated and will stop working"
+ f" in Jinja 3.1. Use 'pass_{prefix}' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return cls[prefix]
+
+ return None
+
+
+def contextfunction(f: F) -> F:
+ """Pass the context as the first argument to the decorated function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use :func:`~jinja2.pass_context`
+ instead.
+ """
+ warnings.warn(
+ "'contextfunction' is renamed to 'pass_context', the old name"
+ " will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_context(f)
+
+
+def evalcontextfunction(f: F) -> F:
+ """Pass the eval context as the first argument to the decorated
+ function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use
+ :func:`~jinja2.pass_eval_context` instead.
+
+ .. versionadded:: 2.4
+ """
+ warnings.warn(
+ "'evalcontextfunction' is renamed to 'pass_eval_context', the"
+ " old name will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_eval_context(f)
+
+
+def environmentfunction(f: F) -> F:
+ """Pass the environment as the first argument to the decorated
+ function.
+
+ .. deprecated:: 3.0
+ Will be removed in Jinja 3.1. Use
+ :func:`~jinja2.pass_environment` instead.
+ """
+ warnings.warn(
+ "'environmentfunction' is renamed to 'pass_environment', the"
+ " old name will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return pass_environment(f)
+
+
+def internalcode(f: F) -> F:
+ """Marks the function as internally used"""
+ internal_code.add(f.__code__)
+ return f
+
+
+def is_undefined(obj: t.Any) -> bool:
+ """Check if the object passed is undefined. This does nothing more than
+ performing an instance check against :class:`Undefined` but looks nicer.
+ This can be used for custom filters or tests that want to react to
+ undefined variables. For example a custom default filter can look like
+ this::
+
+ def default(var, default=''):
+ if is_undefined(var):
+ return default
+ return var
+ """
+ from .runtime import Undefined
+
+ return isinstance(obj, Undefined)
+
+
+def consume(iterable: t.Iterable[t.Any]) -> None:
+ """Consumes an iterable without doing anything with it."""
+ for _ in iterable:
+ pass
+
+
+def clear_caches() -> None:
+ """Jinja keeps internal caches for environments and lexers. These are
+ used so that Jinja doesn't have to recreate environments and lexers all
+ the time. Normally you don't have to care about that, but if you are
+ measuring memory consumption you may want to clean the caches.
+ """
+ from .environment import get_spontaneous_environment
+ from .lexer import _lexer_cache
+
+ get_spontaneous_environment.cache_clear()
+ _lexer_cache.clear()
+
+
+def import_string(import_name: str, silent: bool = False) -> t.Any:
+ """Imports an object based on a string. This is useful if you want to
+ use import paths as endpoints or something similar. An import path can
+ be specified either in dotted notation (``xml.sax.saxutils.escape``)
+ or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+ If `silent` is True the return value will be `None` if the import
+ fails.
+
+ :return: imported object
+ """
+ try:
+ if ":" in import_name:
+ module, obj = import_name.split(":", 1)
+ elif "." in import_name:
+ module, _, obj = import_name.rpartition(".")
+ else:
+ return __import__(import_name)
+ return getattr(__import__(module, None, None, [obj]), obj)
+ except (ImportError, AttributeError):
+ if not silent:
+ raise
+
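+# For example, the two notations described above name the same object:
+#
+#   escape_a = import_string("xml.sax.saxutils.escape")
+#   escape_b = import_string("xml.sax.saxutils:escape")
+#   escape_a is escape_b  # -> True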
+
+def open_if_exists(filename: str, mode: str = "rb") -> t.Optional[t.IO]:
+ """Returns a file descriptor for the filename if that file exists,
+ otherwise ``None``.
+ """
+ if not os.path.isfile(filename):
+ return None
+
+ return open(filename, mode)
+
+
+def object_type_repr(obj: t.Any) -> str:
+ """Returns the name of the object's type. For some recognized
+ singletons the name of the object is returned instead (for
+ example, `None` and `Ellipsis`).
+ """
+ if obj is None:
+ return "None"
+ elif obj is Ellipsis:
+ return "Ellipsis"
+
+ cls = type(obj)
+
+ if cls.__module__ == "builtins":
+ return f"{cls.__name__} object"
+
+ return f"{cls.__module__}.{cls.__name__} object"
+
+
+def pformat(obj: t.Any) -> str:
+ """Format an object using :func:`pprint.pformat`."""
+ from pprint import pformat # type: ignore
+
+ return pformat(obj)
+
+
+_http_re = re.compile(
+ r"""
+ ^
+ (
+ (https?://|www\.) # scheme or www
+ (([\w%-]+\.)+)? # subdomain
+ (
+ [a-z]{2,63} # basic tld
+ |
+ xn--[\w%]{2,59} # idna tld
+ )
+ |
+ ([\w%-]{2,63}\.)+ # basic domain
+ (com|net|int|edu|gov|org|info|mil) # basic tld
+ |
+ (https?://) # scheme
+ (
+ (([\d]{1,3})(\.[\d]{1,3}){3}) # IPv4
+ |
+ (\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}]) # IPv6
+ )
+ )
+ (?::[\d]{1,5})? # port
+ (?:[/?#]\S*)? # path, query, and fragment
+ $
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
+_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
+
+
+def urlize(
+ text: str,
+ trim_url_limit: t.Optional[int] = None,
+ rel: t.Optional[str] = None,
+ target: t.Optional[str] = None,
+ extra_schemes: t.Optional[t.Iterable[str]] = None,
+) -> str:
+ """Convert URLs in text into clickable links.
+
+ This may not recognize links in some situations. Usually, a more
+ comprehensive formatter, such as a Markdown library, is a better
+ choice.
+
+ Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
+ addresses. Links with trailing punctuation (periods, commas, closing
+ parentheses) and leading punctuation (opening parentheses) are
+ recognized excluding the punctuation. Email addresses that include
+ header fields are not recognized (for example,
+ ``mailto:address@example.com?cc=copy@example.com``).
+
+ :param text: Original text containing URLs to link.
+ :param trim_url_limit: Shorten displayed URL values to this length.
+ :param target: Add the ``target`` attribute to links.
+ :param rel: Add the ``rel`` attribute to links.
+ :param extra_schemes: Recognize URLs that start with these schemes
+ in addition to the default behavior.
+
+ .. versionchanged:: 3.0
+ The ``extra_schemes`` parameter was added.
+
+ .. versionchanged:: 3.0
+ Generate ``https://`` links for URLs without a scheme.
+
+ .. versionchanged:: 3.0
+ The parsing rules were updated. Recognize email addresses with
+ or without the ``mailto:`` scheme. Validate IP addresses. Ignore
+ parentheses and brackets in more cases.
+ """
+ if trim_url_limit is not None:
+
+ def trim_url(x: str) -> str:
+ if len(x) > trim_url_limit: # type: ignore
+ return f"{x[:trim_url_limit]}..."
+
+ return x
+
+ else:
+
+ def trim_url(x: str) -> str:
+ return x
+
+ words = re.split(r"(\s+)", str(markupsafe.escape(text)))
+ rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
+ target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
+
+ for i, word in enumerate(words):
+ head, middle, tail = "", word, ""
+ match = re.match(r"^([(<]|&lt;)+", middle)
+
+ if match:
+ head = match.group()
+ middle = middle[match.end() :]
+
+ # Unlike head, which is anchored to the start of the string, we
+ # need to check that the string ends with any of the characters
+ # before trying to match all of them, to avoid backtracking.
+ if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
+ match = re.search(r"([)>.,\n]|&gt;)+$", middle)
+
+ if match:
+ tail = match.group()
+ middle = middle[: match.start()]
+
+ # Prefer balancing parentheses in URLs instead of ignoring a
+ # trailing character.
+ for start_char, end_char in ("(", ")"), ("<", ">"), ("&lt;", "&gt;"):
+ start_count = middle.count(start_char)
+
+ if start_count <= middle.count(end_char):
+ # Balanced, or lighter on the left
+ continue
+
+ # Move as many as possible from the tail to balance
+ for _ in range(min(start_count, tail.count(end_char))):
+ end_index = tail.index(end_char) + len(end_char)
+ # Move anything in the tail before the end char too
+ middle += tail[:end_index]
+ tail = tail[end_index:]
+
+ if _http_re.match(middle):
+ if middle.startswith("https://") or middle.startswith("http://"):
+ middle = (
+ f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
+ )
+ else:
+ middle = (
+ f'<a href="https://{middle}"{rel_attr}{target_attr}>'
+ f"{trim_url(middle)}</a>"
+ )
+
+ elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
+ middle = f'<a href="{middle}">{middle[7:]}</a>'
+
+ elif (
+ "@" in middle
+ and not middle.startswith("www.")
+ and ":" not in middle
+ and _email_re.match(middle)
+ ):
+ middle = f'<a href="mailto:{middle}">{middle}</a>'
+
+ elif extra_schemes is not None:
+ for scheme in extra_schemes:
+ if middle != scheme and middle.startswith(scheme):
+ middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
+
+ words[i] = f"{head}{middle}{tail}"
+
+ return "".join(words)
+
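+# A small sketch of the behavior documented above (trailing punctuation is
+# kept outside the link, and schemeless URLs get https://):
+#
+#   urlize("visit www.example.com.")
+#   # -> 'visit <a href="https://www.example.com">www.example.com</a>.'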
+
+def generate_lorem_ipsum(
+ n: int = 5, html: bool = True, min: int = 20, max: int = 100
+) -> str:
+ """Generate some lorem ipsum for the template."""
+ from .constants import LOREM_IPSUM_WORDS
+
+ words = LOREM_IPSUM_WORDS.split()
+ result = []
+
+ for _ in range(n):
+ next_capitalized = True
+ last_comma = last_fullstop = 0
+ word = None
+ last = None
+ p = []
+
+ # each paragraph contains 20 to 100 words.
+ for idx, _ in enumerate(range(randrange(min, max))):
+ while True:
+ word = choice(words)
+ if word != last:
+ last = word
+ break
+ if next_capitalized:
+ word = word.capitalize()
+ next_capitalized = False
+ # add commas
+ if idx - randrange(3, 8) > last_comma:
+ last_comma = idx
+ last_fullstop += 2
+ word += ","
+ # add end of sentences
+ if idx - randrange(10, 20) > last_fullstop:
+ last_comma = last_fullstop = idx
+ word += "."
+ next_capitalized = True
+ p.append(word)
+
+ # ensure that the paragraph ends with a dot.
+ p_str = " ".join(p)
+
+ if p_str.endswith(","):
+ p_str = p_str[:-1] + "."
+ elif not p_str.endswith("."):
+ p_str += "."
+
+ result.append(p_str)
+
+ if not html:
+ return "\n\n".join(result)
+ return markupsafe.Markup(
+ "\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
+ )
+
+
+def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
+ """Quote a string for use in a URL using the given charset.
+
+ :param obj: String or bytes to quote. Other types are converted to
+ string then encoded to bytes using the given charset.
+ :param charset: Encode text to bytes using this charset.
+ :param for_qs: Quote "/" and use "+" for spaces.
+ """
+ if not isinstance(obj, bytes):
+ if not isinstance(obj, str):
+ obj = str(obj)
+
+ obj = obj.encode(charset)
+
+ safe = b"" if for_qs else b"/"
+ rv = quote_from_bytes(obj, safe)
+
+ if for_qs:
+ rv = rv.replace("%20", "+")
+
+ return rv
+
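+# For example, the for_qs flag switches to query-string quoting:
+#
+#   url_quote("a b/c")               # -> 'a%20b/c'
+#   url_quote("a b/c", for_qs=True)  # -> 'a+b%2Fc'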
+
+def unicode_urlencode(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
+ import warnings
+
+ warnings.warn(
+ "'unicode_urlencode' has been renamed to 'url_quote'. The old"
+ " name will be removed in Jinja 3.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return url_quote(obj, charset=charset, for_qs=for_qs)
+
+
+@abc.MutableMapping.register
+class LRUCache:
+ """A simple LRU Cache implementation."""
+
+ # this is fast for small capacities (something below 1000) but doesn't
+ # scale. But as long as it's only used as storage for templates this
+ # won't do any harm.
+
+ def __init__(self, capacity: int) -> None:
+ self.capacity = capacity
+ self._mapping: t.Dict[t.Any, t.Any] = {}
+ self._queue: "te.Deque[t.Any]" = deque()
+ self._postinit()
+
+ def _postinit(self) -> None:
+ # alias all queue methods for faster lookup
+ self._popleft = self._queue.popleft
+ self._pop = self._queue.pop
+ self._remove = self._queue.remove
+ self._wlock = Lock()
+ self._append = self._queue.append
+
+ def __getstate__(self) -> t.Mapping[str, t.Any]:
+ return {
+ "capacity": self.capacity,
+ "_mapping": self._mapping,
+ "_queue": self._queue,
+ }
+
+ def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
+ self.__dict__.update(d)
+ self._postinit()
+
+ def __getnewargs__(self) -> t.Tuple:
+ return (self.capacity,)
+
+ def copy(self) -> "LRUCache":
+ """Return a shallow copy of the instance."""
+ rv = self.__class__(self.capacity)
+ rv._mapping.update(self._mapping)
+ rv._queue.extend(self._queue)
+ return rv
+
+ def get(self, key: t.Any, default: t.Any = None) -> t.Any:
+ """Return an item from the cache dict or `default`"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
+ """Set `default` if the key is not in the cache otherwise
+ leave unchanged. Return the value of this key.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def clear(self) -> None:
+ """Clear the cache."""
+ with self._wlock:
+ self._mapping.clear()
+ self._queue.clear()
+
+ def __contains__(self, key: t.Any) -> bool:
+ """Check if a key exists in this cache."""
+ return key in self._mapping
+
+ def __len__(self) -> int:
+ """Return the current size of the cache."""
+ return len(self._mapping)
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self._mapping!r}>"
+
+ def __getitem__(self, key: t.Any) -> t.Any:
+ """Get an item from the cache. Moves the item up so that it has the
+ highest priority then.
+
+ Raise a `KeyError` if it does not exist.
+ """
+ with self._wlock:
+ rv = self._mapping[key]
+
+ if self._queue[-1] != key:
+ try:
+ self._remove(key)
+ except ValueError:
+ # if something removed the key from the container
+ # when we read, ignore the ValueError that we would
+ # get otherwise.
+ pass
+
+ self._append(key)
+
+ return rv
+
+ def __setitem__(self, key: t.Any, value: t.Any) -> None:
+ """Sets the value for an item. Moves the item up so that it
+ has the highest priority then.
+ """
+ with self._wlock:
+ if key in self._mapping:
+ self._remove(key)
+ elif len(self._mapping) == self.capacity:
+ del self._mapping[self._popleft()]
+
+ self._append(key)
+ self._mapping[key] = value
+
+ def __delitem__(self, key: t.Any) -> None:
+ """Remove an item from the cache dict.
+ Raise a `KeyError` if it does not exist.
+ """
+ with self._wlock:
+ del self._mapping[key]
+
+ try:
+ self._remove(key)
+ except ValueError:
+ pass
+
+ def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
+ """Return a list of items."""
+ result = [(key, self._mapping[key]) for key in list(self._queue)]
+ result.reverse()
+ return result
+
+ def values(self) -> t.Iterable[t.Any]:
+ """Return a list of all values."""
+ return [x[1] for x in self.items()]
+
+ def keys(self) -> t.Iterable[t.Any]:
+ """Return a list of all keys ordered by most recent usage."""
+ return list(self)
+
+ def __iter__(self) -> t.Iterator[t.Any]:
+ return reversed(tuple(self._queue))
+
+ def __reversed__(self) -> t.Iterator[t.Any]:
+ """Iterate over the keys in the cache dict, oldest items
+ coming first.
+ """
+ return iter(tuple(self._queue))
+
+ __copy__ = copy
+
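+# A behavior sketch for the cache above: the least recently used key is
+# evicted once capacity is reached.
+#
+#   cache = LRUCache(2)
+#   cache["a"] = 1
+#   cache["b"] = 2
+#   cache["a"]      # touching "a" makes "b" the oldest entry
+#   cache["c"] = 3  # evicts "b"
+#   "b" in cache    # -> False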
+
+def select_autoescape(
+ enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
+ disabled_extensions: t.Collection[str] = (),
+ default_for_string: bool = True,
+ default: bool = False,
+) -> t.Callable[[t.Optional[str]], bool]:
+ """Intelligently sets the initial value of autoescaping based on the
+ filename of the template. This is the recommended way to configure
+ autoescaping if you do not want to write a custom function yourself.
+
+ If you want to enable it for all templates created from strings or
+ for all templates with `.html` and `.xml` extensions::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ enabled_extensions=('html', 'xml'),
+ default_for_string=True,
+ ))
+
+ Example configuration to turn it on at all times except if the template
+ ends with `.txt`::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ disabled_extensions=('txt',),
+ default_for_string=True,
+ default=True,
+ ))
+
+ The `enabled_extensions` is an iterable of all the extensions that
+ autoescaping should be enabled for. Likewise `disabled_extensions` is
+ a list of all templates it should be disabled for. If a template is
+ loaded from a string then the default from `default_for_string` is used.
+ If nothing matches then the initial value of autoescaping is set to the
+ value of `default`.
+
+ For security reasons this function operates case-insensitively.
+
+ .. versionadded:: 2.9
+ """
+ enabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in enabled_extensions)
+ disabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in disabled_extensions)
+
+ def autoescape(template_name: t.Optional[str]) -> bool:
+ if template_name is None:
+ return default_for_string
+ template_name = template_name.lower()
+ if template_name.endswith(enabled_patterns):
+ return True
+ if template_name.endswith(disabled_patterns):
+ return False
+ return default
+
+ return autoescape
+
+
+def htmlsafe_json_dumps(
+ obj: t.Any, dumps: t.Optional[t.Callable[..., str]] = None, **kwargs: t.Any
+) -> markupsafe.Markup:
+ """Serialize an object to a string of JSON with :func:`json.dumps`,
+ then replace HTML-unsafe characters with Unicode escapes and mark
+ the result safe with :class:`~markupsafe.Markup`.
+
+ This is available in templates as the ``|tojson`` filter.
+
+ The following characters are escaped: ``<``, ``>``, ``&``, ``'``.
+
+ The returned string is safe to render in HTML documents and
+ ``<script>`` tags. The exception is in HTML attributes that are
+ double quoted; either use single quotes or the ``|forceescape``
+ filter.
+
+ :param obj: The object to serialize to JSON.
+ :param dumps: The ``dumps`` function to use. Defaults to
+ ``env.policies["json.dumps_function"]``, which defaults to
+ :func:`json.dumps`.
+ :param kwargs: Extra arguments to pass to ``dumps``. Merged onto
+ ``env.policies["json.dumps_kwargs"]``.
+
+ .. versionchanged:: 3.0
+ The ``dumper`` parameter is renamed to ``dumps``.
+
+ .. versionadded:: 2.9
+ """
+ if dumps is None:
+ dumps = json.dumps
+
+ return markupsafe.Markup(
+ dumps(obj, **kwargs)
+ .replace("<", "\\u003c")
+ .replace(">", "\\u003e")
+ .replace("&", "\\u0026")
+ .replace("'", "\\u0027")
+ )
+
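+# For example, the escaping described above keeps script content inert:
+#
+#   htmlsafe_json_dumps("</script>")
+#   # -> Markup('"\\u003c/script\\u003e"')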
+
+class Cycler:
+ """Cycle through values by yield them one at a time, then restarting
+ once the end is reached. Available as ``cycler`` in templates.
+
+ Similar to ``loop.cycle``, but can be used outside loops or across
+ multiple loops. For example, render a list of folders and files in a
+ list, alternately giving them "odd" and "even" classes.
+
+ .. code-block:: html+jinja
+
+ {% set row_class = cycler("odd", "even") %}
+ <ul class="browser">
+ {% for folder in folders %}
+ <li class="folder {{ row_class.next() }}">{{ folder }}
+ {% endfor %}
+ {% for file in files %}
+ <li class="file {{ row_class.next() }}">{{ file }}
+ {% endfor %}
+ </ul>
+
+ :param items: Each positional argument will be yielded in the order
+ given for each cycle.
+
+ .. versionadded:: 2.1
+ """
+
+ def __init__(self, *items: t.Any) -> None:
+ if not items:
+ raise RuntimeError("at least one item has to be provided")
+ self.items = items
+ self.pos = 0
+
+ def reset(self) -> None:
+ """Resets the current item to the first item."""
+ self.pos = 0
+
+ @property
+ def current(self) -> t.Any:
+ """Return the current item. Equivalent to the item that will be
+ returned next time :meth:`next` is called.
+ """
+ return self.items[self.pos]
+
+ def next(self) -> t.Any:
+ """Return the current item, then advance :attr:`current` to the
+ next item.
+ """
+ rv = self.current
+ self.pos = (self.pos + 1) % len(self.items)
+ return rv
+
+ __next__ = next
+
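+# The same cycling shown in the template example above, from Python:
+#
+#   row_class = Cycler("odd", "even")
+#   [row_class.next(), row_class.next(), row_class.next()]
+#   # -> ['odd', 'even', 'odd']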
+
+class Joiner:
+ """A joining helper for templates."""
+
+ def __init__(self, sep: str = ", ") -> None:
+ self.sep = sep
+ self.used = False
+
+ def __call__(self) -> str:
+ if not self.used:
+ self.used = True
+ return ""
+ return self.sep
+
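+# For example, the first call emits nothing and later calls emit the
+# separator, which is handy when joining inside a template loop:
+#
+#   j = Joiner(", ")
+#   j() + "a" + j() + "b"  # -> 'a, b'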
+
+class Namespace:
+ """A namespace object that can hold arbitrary attributes. It may be
+ initialized from a dictionary or with keyword arguments."""
+
+ def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
+ self, args = args[0], args[1:]
+ self.__attrs = dict(*args, **kwargs)
+
+ def __getattribute__(self, name: str) -> t.Any:
+ # __class__ is needed for the awaitable check in async mode
+ if name in {"_Namespace__attrs", "__class__"}:
+ return object.__getattribute__(self, name)
+ try:
+ return self.__attrs[name]
+ except KeyError:
+ raise AttributeError(name) from None
+
+ def __setitem__(self, name: str, value: t.Any) -> None:
+ self.__attrs[name] = value
+
+ def __repr__(self) -> str:
+ return f"<Namespace {self.__attrs!r}>"
+
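+# A quick sketch of the attribute/item duality above:
+#
+#   ns = Namespace(a=1)
+#   ns["b"] = 2
+#   (ns.a, ns.b)  # -> (1, 2)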
+
+class Markup(markupsafe.Markup):
+ def __new__(cls, base="", encoding=None, errors="strict"): # type: ignore
+ warnings.warn(
+ "'jinja2.Markup' is deprecated and will be removed in Jinja"
+ " 3.1. Import 'markupsafe.Markup' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return super().__new__(cls, base, encoding, errors)
+
+
+def escape(s: t.Any) -> str:
+ warnings.warn(
+ "'jinja2.escape' is deprecated and will be removed in Jinja"
+ " 3.1. Import 'markupsafe.escape' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return markupsafe.escape(s)
diff --git a/lib/spack/external/_vendoring/jinja2/visitor.py b/lib/spack/external/_vendoring/jinja2/visitor.py
new file mode 100644
index 0000000000..b150e578a8
--- /dev/null
+++ b/lib/spack/external/_vendoring/jinja2/visitor.py
@@ -0,0 +1,92 @@
+"""API for traversing the AST nodes. Implemented by the compiler and
+meta introspection.
+"""
+import typing as t
+
+from .nodes import Node
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ class VisitCallable(te.Protocol):
+ def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ ...
+
+
+class NodeVisitor:
+ """Walks the abstract syntax tree and call visitor functions for every
+ node found. The visitor functions may return values which will be
+ forwarded by the `visit` method.
+
+ By default the visitor functions for the nodes are ``'visit_'`` +
+ class name of the node. So a `TryFinally` node visit function would
+ be `visit_TryFinally`. This behavior can be changed by overriding
+ the `get_visitor` function. If no visitor function exists for a node
+ (return value `None`) the `generic_visit` visitor is used instead.
+ """
+
+ def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
+ """Return the visitor function for this node or `None` if no visitor
+ exists for this node. In that case the generic visit function is
+ used instead.
+ """
+ return getattr(self, f"visit_{type(node).__name__}", None) # type: ignore
+
+ def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ """Visit a node."""
+ f = self.get_visitor(node)
+
+ if f is not None:
+ return f(node, *args, **kwargs)
+
+ return self.generic_visit(node, *args, **kwargs)
+
+ def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ """Called if no explicit visitor function exists for a node."""
+ for node in node.iter_child_nodes():
+ self.visit(node, *args, **kwargs)
+
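+# A minimal sketch of the visit_* dispatch described above (assumes a
+# jinja2 Environment named `env` for parsing; `visit_Name` follows the
+# naming rule, so it receives every Name node):
+#
+#   class NameCounter(NodeVisitor):
+#       def __init__(self):
+#           self.count = 0
+#
+#       def visit_Name(self, node, *args, **kwargs):
+#           self.count += 1
+#           self.generic_visit(node, *args, **kwargs)
+#
+#   counter = NameCounter()
+#   counter.visit(env.parse("{{ a + b }}"))
+#   counter.count  # -> 2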
+
+class NodeTransformer(NodeVisitor):
+ """Walks the abstract syntax tree and allows modifications of nodes.
+
+ The `NodeTransformer` will walk the AST and use the return value of the
+ visitor functions to replace or remove the old node. If the return
+ value of the visitor function is `None` the node will be removed
+ from the previous location otherwise it's replaced with the return
+ value. The return value may be the original node in which case no
+ replacement takes place.
+ """
+
+ def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
+ for field, old_value in node.iter_fields():
+ if isinstance(old_value, list):
+ new_values = []
+ for value in old_value:
+ if isinstance(value, Node):
+ value = self.visit(value, *args, **kwargs)
+ if value is None:
+ continue
+ elif not isinstance(value, Node):
+ new_values.extend(value)
+ continue
+ new_values.append(value)
+ old_value[:] = new_values
+ elif isinstance(old_value, Node):
+ new_node = self.visit(old_value, *args, **kwargs)
+ if new_node is None:
+ delattr(node, field)
+ else:
+ setattr(node, field, new_node)
+ return node
+
+ def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
+ """As transformers may return lists in some places this method
+ can be used to enforce a list as the return value.
+ """
+ rv = self.visit(node, *args, **kwargs)
+
+ if not isinstance(rv, list):
+ return [rv]
+
+ return rv
diff --git a/lib/spack/external/_vendoring/jsonschema.pyi b/lib/spack/external/_vendoring/jsonschema.pyi
new file mode 100644
index 0000000000..85b509b63f
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema.pyi
@@ -0,0 +1 @@
+from jsonschema import *
\ No newline at end of file
diff --git a/lib/spack/external/_vendoring/jsonschema/COPYING b/lib/spack/external/_vendoring/jsonschema/COPYING
new file mode 100644
index 0000000000..af9cfbdb13
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/lib/spack/external/_vendoring/jsonschema/__init__.py b/lib/spack/external/_vendoring/jsonschema/__init__.py
new file mode 100644
index 0000000000..1791fe7fbf
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/__init__.py
@@ -0,0 +1,31 @@
+"""
+An implementation of JSON Schema for Python
+
+The main functionality is provided by the validator classes for each of the
+supported JSON Schema versions.
+
+Most commonly, `validate` is the quickest way to simply validate a given
+instance under a schema, and will create a validator for you.
+"""
+
+from jsonschema.exceptions import (
+ ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
+)
+from jsonschema._format import (
+ FormatChecker,
+ draft3_format_checker,
+ draft4_format_checker,
+ draft6_format_checker,
+ draft7_format_checker,
+)
+from jsonschema._types import TypeChecker
+from jsonschema.validators import (
+ Draft3Validator,
+ Draft4Validator,
+ Draft6Validator,
+ Draft7Validator,
+ RefResolver,
+ validate,
+)
+
+__version__ = "3.2.0"
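+
+# A quick-start sketch for the `validate` shortcut mentioned above:
+#
+#   from jsonschema import validate, ValidationError
+#   validate(instance=3, schema={"type": "integer"})  # passes silently
+#   try:
+#       validate(instance="3", schema={"type": "integer"})
+#   except ValidationError as err:
+#       print(err.message)  # -> '3' is not of type 'integer'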
diff --git a/lib/spack/external/_vendoring/jsonschema/__main__.py b/lib/spack/external/_vendoring/jsonschema/__main__.py
new file mode 100644
index 0000000000..82c29fd39e
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/__main__.py
@@ -0,0 +1,2 @@
+from jsonschema.cli import main
+main()
diff --git a/lib/spack/external/_vendoring/jsonschema/_format.py b/lib/spack/external/_vendoring/jsonschema/_format.py
new file mode 100644
index 0000000000..281a7cfcff
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_format.py
@@ -0,0 +1,425 @@
+import datetime
+import re
+import socket
+import struct
+
+from jsonschema.compat import str_types
+from jsonschema.exceptions import FormatError
+
+
+class FormatChecker(object):
+ """
+ A ``format`` property checker.
+
+ JSON Schema does not mandate that the ``format`` property actually do any
+ validation. If validation is desired, however, instances of this class can
+ be hooked into validators to enable format validation.
+
+ `FormatChecker` objects always return ``True`` when asked about
+ formats that they do not know how to validate.
+
+ To check a custom format using a function that takes an instance and
+ returns a ``bool``, use the `FormatChecker.checks` or
+ `FormatChecker.cls_checks` decorators.
+
+ Arguments:
+
+ formats (~collections.Iterable):
+
+ The known formats to validate. This argument can be used to
+ limit which formats will be used during validation.
+ """
+
+ checkers = {}
+
+ def __init__(self, formats=None):
+ if formats is None:
+ self.checkers = self.checkers.copy()
+ else:
+ self.checkers = dict((k, self.checkers[k]) for k in formats)
+
+ def __repr__(self):
+ return "<FormatChecker checkers={}>".format(sorted(self.checkers))
+
+ def checks(self, format, raises=()):
+ """
+ Register a decorated function as validating a new format.
+
+ Arguments:
+
+ format (str):
+
+ The format that the decorated function will check.
+
+ raises (Exception):
+
+ The exception(s) raised by the decorated function when an
+ invalid instance is found.
+
+ The exception object will be accessible as the
+ `jsonschema.exceptions.ValidationError.cause` attribute of the
+ resulting validation error.
+ """
+
+ def _checks(func):
+ self.checkers[format] = (func, raises)
+ return func
+ return _checks
+
+ cls_checks = classmethod(checks)
+
+ def check(self, instance, format):
+ """
+ Check whether the instance conforms to the given format.
+
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+
+ format (str):
+
+ The format that instance should conform to
+
+
+ Raises:
+
+ FormatError: if the instance does not conform to ``format``
+ """
+
+ if format not in self.checkers:
+ return
+
+ func, raises = self.checkers[format]
+ result, cause = None, None
+ try:
+ result = func(instance)
+ except raises as e:
+ cause = e
+ if not result:
+ raise FormatError(
+ "%r is not a %r" % (instance, format), cause=cause,
+ )
+
+ def conforms(self, instance, format):
+ """
+ Check whether the instance conforms to the given format.
+
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+
+ format (str):
+
+ The format that instance should conform to
+
+ Returns:
+
+ bool: whether it conformed
+ """
+
+ try:
+ self.check(instance, format)
+ except FormatError:
+ return False
+ else:
+ return True
+
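+# A usage sketch for the checks/conforms pair documented above:
+#
+#   checker = FormatChecker()
+#
+#   @checker.checks("even", raises=ValueError)
+#   def is_even(value):
+#       return int(value) % 2 == 0
+#
+#   checker.conforms(2, "even")  # -> True
+#   checker.conforms(3, "even")  # -> False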
+
+draft3_format_checker = FormatChecker()
+draft4_format_checker = FormatChecker()
+draft6_format_checker = FormatChecker()
+draft7_format_checker = FormatChecker()
+
+
+_draft_checkers = dict(
+ draft3=draft3_format_checker,
+ draft4=draft4_format_checker,
+ draft6=draft6_format_checker,
+ draft7=draft7_format_checker,
+)
+
+
+def _checks_drafts(
+ name=None,
+ draft3=None,
+ draft4=None,
+ draft6=None,
+ draft7=None,
+ raises=(),
+):
+ draft3 = draft3 or name
+ draft4 = draft4 or name
+ draft6 = draft6 or name
+ draft7 = draft7 or name
+
+ def wrap(func):
+ if draft3:
+ func = _draft_checkers["draft3"].checks(draft3, raises)(func)
+ if draft4:
+ func = _draft_checkers["draft4"].checks(draft4, raises)(func)
+ if draft6:
+ func = _draft_checkers["draft6"].checks(draft6, raises)(func)
+ if draft7:
+ func = _draft_checkers["draft7"].checks(draft7, raises)(func)
+
+ # Oy. This is bad global state, but relied upon for now, until
+ # deprecation. See https://github.com/Julian/jsonschema/issues/519
+ # and test_format_checkers_come_with_defaults
+ FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)(
+ func,
+ )
+ return func
+ return wrap
+
+
+@_checks_drafts(name="idn-email")
+@_checks_drafts(name="email")
+def is_email(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return "@" in instance
+
+
+_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+
+
+@_checks_drafts(
+ draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
+)
+def is_ipv4(instance):
+ if not isinstance(instance, str_types):
+ return True
+ if not _ipv4_re.match(instance):
+ return False
+ return all(0 <= int(component) <= 255 for component in instance.split("."))
+
+
+if hasattr(socket, "inet_pton"):
+ # FIXME: Really this only should raise struct.error, but see the sadness
+ # that is https://twistedmatrix.com/trac/ticket/9409
+ @_checks_drafts(
+ name="ipv6", raises=(socket.error, struct.error, ValueError),
+ )
+ def is_ipv6(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return socket.inet_pton(socket.AF_INET6, instance)
+
+
+_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
+
+
+@_checks_drafts(
+ draft3="host-name",
+ draft4="hostname",
+ draft6="hostname",
+ draft7="hostname",
+)
+def is_host_name(instance):
+ if not isinstance(instance, str_types):
+ return True
+ if not _host_name_re.match(instance):
+ return False
+ components = instance.split(".")
+ for component in components:
+ if len(component) > 63:
+ return False
+ return True
+
+
+try:
+ # The built-in `idna` codec only implements RFC 3490, so we go elsewhere.
+ import idna
+except ImportError:
+ pass
+else:
+ @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
+ def is_idn_host_name(instance):
+ if not isinstance(instance, str_types):
+ return True
+ idna.encode(instance)
+ return True
+
+
+try:
+ import rfc3987
+except ImportError:
+ try:
+ from rfc3986_validator import validate_rfc3986
+ except ImportError:
+ pass
+ else:
+ @_checks_drafts(name="uri")
+ def is_uri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return validate_rfc3986(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return validate_rfc3986(instance, rule="URI_reference")
+
+else:
+ @_checks_drafts(draft7="iri", raises=ValueError)
+ def is_iri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="IRI")
+
+ @_checks_drafts(draft7="iri-reference", raises=ValueError)
+ def is_iri_reference(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="IRI_reference")
+
+ @_checks_drafts(name="uri", raises=ValueError)
+ def is_uri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="URI_reference")
+
+
+try:
+ from strict_rfc3339 import validate_rfc3339
+except ImportError:
+ try:
+ from rfc3339_validator import validate_rfc3339
+ except ImportError:
+ validate_rfc3339 = None
+
+if validate_rfc3339:
+ @_checks_drafts(name="date-time")
+ def is_datetime(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return validate_rfc3339(instance)
+
+ @_checks_drafts(draft7="time")
+ def is_time(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return is_datetime("1970-01-01T" + instance)
+
+
+@_checks_drafts(name="regex", raises=re.error)
+def is_regex(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return re.compile(instance)
+
+
+@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
+def is_date(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return datetime.datetime.strptime(instance, "%Y-%m-%d")
+
+
+@_checks_drafts(draft3="time", raises=ValueError)
+def is_draft3_time(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return datetime.datetime.strptime(instance, "%H:%M:%S")
+
+
+try:
+ import webcolors
+except ImportError:
+ pass
+else:
+ def is_css_color_code(instance):
+ return webcolors.normalize_hex(instance)
+
+ @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
+ def is_css21_color(instance):
+ if (
+ not isinstance(instance, str_types) or
+ instance.lower() in webcolors.css21_names_to_hex
+ ):
+ return True
+ return is_css_color_code(instance)
+
+ def is_css3_color(instance):
+ if instance.lower() in webcolors.css3_names_to_hex:
+ return True
+ return is_css_color_code(instance)
+
+
+try:
+ import jsonpointer
+except ImportError:
+ pass
+else:
+ @_checks_drafts(
+ draft6="json-pointer",
+ draft7="json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_json_pointer(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return jsonpointer.JsonPointer(instance)
+
+ # TODO: I don't want to maintain this, so it
+ # needs to go either into jsonpointer (pending
+ # https://github.com/stefankoegl/python-json-pointer/issues/34) or
+ # into a new external library.
+ @_checks_drafts(
+ draft7="relative-json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_relative_json_pointer(instance):
+ # Definition taken from:
+ # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
+ if not isinstance(instance, str_types):
+ return True
+ non_negative_integer, rest = [], ""
+ for i, character in enumerate(instance):
+ if character.isdigit():
+ non_negative_integer.append(character)
+ continue
+
+ if not non_negative_integer:
+ return False
+
+ rest = instance[i:]
+ break
+ return (rest == "#") or jsonpointer.JsonPointer(rest)
+
+
+try:
+ import uritemplate.exceptions
+except ImportError:
+ pass
+else:
+ @_checks_drafts(
+ draft6="uri-template",
+ draft7="uri-template",
+ raises=uritemplate.exceptions.InvalidTemplate,
+ )
+ def is_uri_template(
+ instance,
+ template_validator=uritemplate.Validator().force_balanced_braces(),
+ ):
+ template = uritemplate.URITemplate(instance)
+ return template_validator.validate(template)
diff --git a/lib/spack/external/_vendoring/jsonschema/_legacy_validators.py b/lib/spack/external/_vendoring/jsonschema/_legacy_validators.py
new file mode 100644
index 0000000000..264ff7d713
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_legacy_validators.py
@@ -0,0 +1,141 @@
+from jsonschema import _utils
+from jsonschema.compat import iteritems
+from jsonschema.exceptions import ValidationError
+
+
+def dependencies_draft3(validator, dependencies, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in iteritems(dependencies):
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "object"):
+ for error in validator.descend(
+ instance, dependency, schema_path=property,
+ ):
+ yield error
+ elif validator.is_type(dependency, "string"):
+ if dependency not in instance:
+ yield ValidationError(
+ "%r is a dependency of %r" % (dependency, property)
+ )
+ else:
+ for each in dependency:
+ if each not in instance:
+ message = "%r is a dependency of %r"
+ yield ValidationError(message % (each, property))
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+ for disallowed in _utils.ensure_list(disallow):
+ if validator.is_valid(instance, {"type": [disallowed]}):
+ yield ValidationError(
+ "%r is disallowed for %r" % (disallowed, instance)
+ )
+
+
+def extends_draft3(validator, extends, instance, schema):
+ if validator.is_type(extends, "object"):
+ for error in validator.descend(instance, extends):
+ yield error
+ return
+ for index, subschema in enumerate(extends):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def items_draft3_draft4(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "object"):
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
+ else:
+ for (index, item), subschema in zip(enumerate(instance), items):
+ for error in validator.descend(
+ item, subschema, path=index, schema_path=index,
+ ):
+ yield error
+
+
+def minimum_draft3_draft4(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMinimum", False):
+ failed = instance <= minimum
+ cmp = "less than or equal to"
+ else:
+ failed = instance < minimum
+ cmp = "less than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the minimum of %r" % (instance, cmp, minimum)
+ )
+
+
+def maximum_draft3_draft4(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMaximum", False):
+ failed = instance >= maximum
+ cmp = "greater than or equal to"
+ else:
+ failed = instance > maximum
+ cmp = "greater than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the maximum of %r" % (instance, cmp, maximum)
+ )
+
+
+def properties_draft3(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+ elif subschema.get("required", False):
+ error = ValidationError("%r is a required property" % property)
+ error._set(
+ validator="required",
+ validator_value=subschema["required"],
+ instance=instance,
+ schema=schema,
+ )
+ error.path.appendleft(property)
+ error.schema_path.extend([property, "required"])
+ yield error
+
+
+def type_draft3(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ all_errors = []
+ for index, type in enumerate(types):
+ if validator.is_type(type, "object"):
+ errors = list(validator.descend(instance, type, schema_path=index))
+ if not errors:
+ return
+ all_errors.extend(errors)
+ else:
+ if validator.is_type(instance, type):
+ return
+ else:
+ yield ValidationError(
+ _utils.types_msg(instance, types), context=all_errors,
+ )
diff --git a/lib/spack/external/_vendoring/jsonschema/_reflect.py b/lib/spack/external/_vendoring/jsonschema/_reflect.py
new file mode 100644
index 0000000000..d09e38fbdc
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_reflect.py
@@ -0,0 +1,155 @@
+# -*- test-case-name: twisted.test.test_reflect -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Standardized versions of various cool and/or strange things that you can do
+with Python's reflection capabilities.
+"""
+
+import sys
+
+from jsonschema.compat import PY3
+
+
+class _NoModuleFound(Exception):
+ """
+ No module was found because none exists.
+ """
+
+
+
+class InvalidName(ValueError):
+ """
+ The given name is not a dot-separated list of Python objects.
+ """
+
+
+
+class ModuleNotFound(InvalidName):
+ """
+ The module associated with the given name doesn't exist and it can't be
+ imported.
+ """
+
+
+
+class ObjectNotFound(InvalidName):
+ """
+ The object associated with the given name doesn't exist and it can't be
+ imported.
+ """
+
+
+
+if PY3:
+ def reraise(exception, traceback):
+ raise exception.with_traceback(traceback)
+else:
+ exec("""def reraise(exception, traceback):
+ raise exception.__class__, exception, traceback""")
+
+reraise.__doc__ = """
+Re-raise an exception, with an optional traceback, in a way that is compatible
+with both Python 2 and Python 3.
+
+Note that on Python 3, re-raised exceptions will be mutated, with their
+C{__traceback__} attribute being set.
+
+@param exception: The exception instance.
+@param traceback: The traceback to use, or C{None} indicating a new traceback.
+"""
+
+
+def _importAndCheckStack(importName):
+ """
+ Import the given name as a module, then walk the stack to determine whether
+ the failure was the module not existing, or some code in the module (for
+ example a dependent import) failing. This can be helpful to determine
+ whether any actual application code was run. For example, to distinguish
+ administrative error (entering the wrong module name) from programmer
+ error (writing buggy code in a module that fails to import).
+
+ @param importName: The name of the module to import.
+ @type importName: C{str}
+ @raise Exception: if something bad happens. This can be any type of
+ exception, since nobody knows what loading some arbitrary code might
+ do.
+ @raise _NoModuleFound: if no module was found.
+ """
+ try:
+ return __import__(importName)
+ except ImportError:
+ excType, excValue, excTraceback = sys.exc_info()
+ while excTraceback:
+ execName = excTraceback.tb_frame.f_globals["__name__"]
+ # in Python 2 execName is None when an ImportError is encountered,
+ # whereas in Python 3 execName is equal to the importName.
+ if execName is None or execName == importName:
+ reraise(excValue, excTraceback)
+ excTraceback = excTraceback.tb_next
+ raise _NoModuleFound()
+
+
+
+def namedAny(name):
+ """
+ Retrieve a Python object by its fully qualified name from the global Python
+ module namespace. The first part of the name, that describes a module,
+ will be discovered and imported. Each subsequent part of the name is
+ treated as the name of an attribute of the object specified by all of the
+ name which came before it. For example, the fully-qualified name of this
+ object is 'twisted.python.reflect.namedAny'.
+
+ @type name: L{str}
+ @param name: The name of the object to return.
+
+ @raise InvalidName: If the name is an empty string, starts or ends with
+ a '.', or is otherwise syntactically incorrect.
+
+ @raise ModuleNotFound: If the name is syntactically correct but the
+ module it specifies cannot be imported because it does not appear to
+ exist.
+
+ @raise ObjectNotFound: If the name is syntactically correct, includes at
+ least one '.', but the module it specifies cannot be imported because
+ it does not appear to exist.
+
+ @raise AttributeError: If an attribute of an object along the way cannot be
+ accessed, or a module along the way is not found.
+
+ @return: the Python object identified by 'name'.
+ """
+ if not name:
+ raise InvalidName('Empty module name')
+
+ names = name.split('.')
+
+ # if the name starts or ends with a '.' or contains '..', the __import__
+ # will raise an 'Empty module name' error. This will provide a better error
+ # message.
+ if '' in names:
+ raise InvalidName(
+ "name must be a string giving a '.'-separated list of Python "
+ "identifiers, not %r" % (name,))
+
+ topLevelPackage = None
+ moduleNames = names[:]
+ while not topLevelPackage:
+ if moduleNames:
+ trialname = '.'.join(moduleNames)
+ try:
+ topLevelPackage = _importAndCheckStack(trialname)
+ except _NoModuleFound:
+ moduleNames.pop()
+ else:
+ if len(names) == 1:
+ raise ModuleNotFound("No module named %r" % (name,))
+ else:
+ raise ObjectNotFound('%r does not name an object' % (name,))
+
+ obj = topLevelPackage
+ for n in names[1:]:
+ obj = getattr(obj, n)
+
+ return obj
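+
+# For example, the fully qualified lookup described above:
+#
+#   import os.path
+#   namedAny("os.path.join") is os.path.join  # -> True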
diff --git a/lib/spack/external/_vendoring/jsonschema/_types.py b/lib/spack/external/_vendoring/jsonschema/_types.py
new file mode 100644
index 0000000000..a71a4e34bd
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_types.py
@@ -0,0 +1,188 @@
+import numbers
+
+from pyrsistent import pmap
+import attr
+
+from jsonschema.compat import int_types, str_types
+from jsonschema.exceptions import UndefinedTypeCheck
+
+
+def is_array(checker, instance):
+ return isinstance(instance, list)
+
+
+def is_bool(checker, instance):
+ return isinstance(instance, bool)
+
+
+def is_integer(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, int_types)
+
+
+def is_null(checker, instance):
+ return instance is None
+
+
+def is_number(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as numbers
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, numbers.Number)
+
+
+def is_object(checker, instance):
+ return isinstance(instance, dict)
+
+
+def is_string(checker, instance):
+ return isinstance(instance, str_types)
+
+
+def is_any(checker, instance):
+ return True
+
+
+@attr.s(frozen=True)
+class TypeChecker(object):
+ """
+ A ``type`` property checker.
+
+ A `TypeChecker` performs type checking for an `IValidator`. Type
+ checks to perform are updated using `TypeChecker.redefine` or
+ `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
+ Each of these return a new `TypeChecker` object.
+
+ Arguments:
+
+ type_checkers (dict):
+
+ The initial mapping of types to their checking functions.
+ """
+ _type_checkers = attr.ib(default=pmap(), converter=pmap)
+
+ def is_type(self, instance, type):
+ """
+ Check if the instance is of the appropriate type.
+
+ Arguments:
+
+ instance (object):
+
+ The instance to check
+
+ type (str):
+
+ The name of the type that is expected.
+
+ Returns:
+
+ bool: Whether it conformed.
+
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+ if type is unknown to this object.
+ """
+ try:
+ fn = self._type_checkers[type]
+ except KeyError:
+ raise UndefinedTypeCheck(type)
+
+ return fn(self, instance)
+
+ def redefine(self, type, fn):
+ """
+ Produce a new checker with the given type redefined.
+
+ Arguments:
+
+ type (str):
+
+ The name of the type to check.
+
+ fn (collections.Callable):
+
+ A function taking exactly two parameters - the type
+ checker calling the function and the instance to check.
+ The function should return true if instance is of this
+ type and false otherwise.
+
+ Returns:
+
+ A new `TypeChecker` instance.
+ """
+ return self.redefine_many({type: fn})
+
+ def redefine_many(self, definitions=()):
+ """
+ Produce a new checker with the given types redefined.
+
+ Arguments:
+
+ definitions (dict):
+
+ A dictionary mapping types to their checking functions.
+
+ Returns:
+
+ A new `TypeChecker` instance.
+ """
+ return attr.evolve(
+ self, type_checkers=self._type_checkers.update(definitions),
+ )
+
+ def remove(self, *types):
+ """
+ Produce a new checker with the given types forgotten.
+
+ Arguments:
+
+ types (~collections.Iterable):
+
+ the names of the types to remove.
+
+ Returns:
+
+ A new `TypeChecker` instance
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+
+ if any given type is unknown to this object
+ """
+
+ checkers = self._type_checkers
+ for each in types:
+ try:
+ checkers = checkers.remove(each)
+ except KeyError:
+ raise UndefinedTypeCheck(each)
+ return attr.evolve(self, type_checkers=checkers)
+
+
+draft3_type_checker = TypeChecker(
+ {
+ u"any": is_any,
+ u"array": is_array,
+ u"boolean": is_bool,
+ u"integer": is_integer,
+ u"object": is_object,
+ u"null": is_null,
+ u"number": is_number,
+ u"string": is_string,
+ },
+)
+draft4_type_checker = draft3_type_checker.remove(u"any")
+draft6_type_checker = draft4_type_checker.redefine(
+ u"integer",
+ lambda checker, instance: (
+ is_integer(checker, instance) or
+ isinstance(instance, float) and instance.is_integer()
+ ),
+)
+draft7_type_checker = draft6_type_checker
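+
+# A sketch of extending one of the checkers above so that Python tuples
+# also count as JSON arrays:
+#
+#   def is_array_or_tuple(checker, instance):
+#       return isinstance(instance, (list, tuple))
+#
+#   checker = draft7_type_checker.redefine("array", is_array_or_tuple)
+#   checker.is_type((1, 2), "array")  # -> True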
diff --git a/lib/spack/external/_vendoring/jsonschema/_utils.py b/lib/spack/external/_vendoring/jsonschema/_utils.py
new file mode 100644
index 0000000000..ceb880198d
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_utils.py
@@ -0,0 +1,212 @@
+import itertools
+import json
+import pkgutil
+import re
+
+from jsonschema.compat import MutableMapping, str_types, urlsplit
+
+
+class URIDict(MutableMapping):
+ """
+ Dictionary which uses normalized URIs as keys.
+ """
+
+ def normalize(self, uri):
+ return urlsplit(uri).geturl()
+
+ def __init__(self, *args, **kwargs):
+ self.store = dict()
+ self.store.update(*args, **kwargs)
+
+ def __getitem__(self, uri):
+ return self.store[self.normalize(uri)]
+
+ def __setitem__(self, uri, value):
+ self.store[self.normalize(uri)] = value
+
+ def __delitem__(self, uri):
+ del self.store[self.normalize(uri)]
+
+ def __iter__(self):
+ return iter(self.store)
+
+ def __len__(self):
+ return len(self.store)
+
+ def __repr__(self):
+ return repr(self.store)
+
+
+class Unset(object):
+ """
+ An as-of-yet unset attribute or unprovided default parameter.
+ """
+
+ def __repr__(self):
+ return "<unset>"
+
+
+def load_schema(name):
+ """
+ Load a schema from ./schemas/``name``.json and return it.
+ """
+
+ data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
+ return json.loads(data.decode("utf-8"))
+
+
+def indent(string, times=1):
+ """
+ A dumb version of `textwrap.indent` from Python 3.3.
+ """
+
+ return "\n".join(" " * (4 * times) + line for line in string.splitlines())
+
+
+def format_as_index(indices):
+ """
+ Construct a single string containing indexing operations for the indices.
+
+ For example, [1, 2, "foo"] -> [1][2]["foo"]
+
+ Arguments:
+
+ indices (sequence):
+
+ The indices to format.
+ """
+
+ if not indices:
+ return ""
+ return "[%s]" % "][".join(repr(index) for index in indices)
+
+
+def find_additional_properties(instance, schema):
+ """
+ Return the set of additional properties for the given ``instance``.
+
+ Weeds out properties that should have been validated by ``properties`` and
+ / or ``patternProperties``.
+
+ Assumes ``instance`` is dict-like already.
+ """
+
+ properties = schema.get("properties", {})
+ patterns = "|".join(schema.get("patternProperties", {}))
+ for property in instance:
+ if property not in properties:
+ if patterns and re.search(patterns, property):
+ continue
+ yield property
+
+
+def extras_msg(extras):
+ """
+ Create an error message for extra items or properties.
+ """
+
+ if len(extras) == 1:
+ verb = "was"
+ else:
+ verb = "were"
+ return ", ".join(repr(extra) for extra in extras), verb
+
+
+def types_msg(instance, types):
+ """
+ Create an error message for a failure to match the given types.
+
+ If the ``instance`` is an object and contains a ``name`` property, it will
+ be considered to be a description of that object and used as its type.
+
+ Otherwise the message is simply the reprs of the given ``types``.
+ """
+
+ reprs = []
+ for type in types:
+ try:
+ reprs.append(repr(type["name"]))
+ except Exception:
+ reprs.append(repr(type))
+ return "%r is not of type %s" % (instance, ", ".join(reprs))
+
+
+def flatten(suitable_for_isinstance):
+ """
+ isinstance() can accept a bunch of really annoying different types:
+ * a single type
+ * a tuple of types
+ * an arbitrary nested tree of tuples
+
+ Return a flattened tuple of the given argument.
+ """
+
+ types = set()
+
+ if not isinstance(suitable_for_isinstance, tuple):
+ suitable_for_isinstance = (suitable_for_isinstance,)
+ for thing in suitable_for_isinstance:
+ if isinstance(thing, tuple):
+ types.update(flatten(thing))
+ else:
+ types.add(thing)
+ return tuple(types)
+
+
+def ensure_list(thing):
+ """
+ Wrap ``thing`` in a list if it's a single str.
+
+ Otherwise, return it unchanged.
+ """
+
+ if isinstance(thing, str_types):
+ return [thing]
+ return thing
+
+
+def equal(one, two):
+ """
+ Check if two things are equal, avoiding booleans and ints being treated as equal.
+ """
+ return unbool(one) == unbool(two)
+
+
+def unbool(element, true=object(), false=object()):
+ """
+ A hack to make True and 1 and False and 0 unique for ``uniq``.
+ """
+
+ if element is True:
+ return true
+ elif element is False:
+ return false
+ return element
+
+
+def uniq(container):
+ """
+ Check if all of a container's elements are unique.
+
+ Successively tries first to rely on the elements being hashable, then
+ falls back on them being sortable, and finally falls back on brute
+ force.
+ """
+
+ try:
+ return len(set(unbool(i) for i in container)) == len(container)
+ except TypeError:
+ try:
+ sort = sorted(unbool(i) for i in container)
+ sliced = itertools.islice(sort, 1, None)
+ for i, j in zip(sort, sliced):
+ if i == j:
+ return False
+ except (NotImplementedError, TypeError):
+ seen = []
+ for e in container:
+ e = unbool(e)
+ if e in seen:
+ return False
+ seen.append(e)
+ return True
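+
+# For example, the bool/int distinction these helpers preserve:
+#
+#   uniq([1, True])  # -> True (1 and True stay distinct)
+#   uniq([1, 1.0])   # -> False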
diff --git a/lib/spack/external/_vendoring/jsonschema/_validators.py b/lib/spack/external/_vendoring/jsonschema/_validators.py
new file mode 100644
index 0000000000..179fec09a9
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/_validators.py
@@ -0,0 +1,373 @@
+import re
+
+from jsonschema._utils import (
+ ensure_list,
+ equal,
+ extras_msg,
+ find_additional_properties,
+ types_msg,
+ unbool,
+ uniq,
+)
+from jsonschema.exceptions import FormatError, ValidationError
+from jsonschema.compat import iteritems
+
+
+def patternProperties(validator, patternProperties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for pattern, subschema in iteritems(patternProperties):
+ for k, v in iteritems(instance):
+ if re.search(pattern, k):
+ for error in validator.descend(
+ v, subschema, path=k, schema_path=pattern,
+ ):
+ yield error
+
+
+def propertyNames(validator, propertyNames, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property in instance:
+ for error in validator.descend(
+ instance=property,
+ schema=propertyNames,
+ ):
+ yield error
+
+
+def additionalProperties(validator, aP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ extras = set(find_additional_properties(instance, schema))
+
+ if validator.is_type(aP, "object"):
+ for extra in extras:
+ for error in validator.descend(instance[extra], aP, path=extra):
+ yield error
+ elif not aP and extras:
+ if "patternProperties" in schema:
+ patterns = sorted(schema["patternProperties"])
+ if len(extras) == 1:
+ verb = "does"
+ else:
+ verb = "do"
+ error = "%s %s not match any of the regexes: %s" % (
+ ", ".join(map(repr, sorted(extras))),
+ verb,
+ ", ".join(map(repr, patterns)),
+ )
+ yield ValidationError(error)
+ else:
+ error = "Additional properties are not allowed (%s %s unexpected)"
+ yield ValidationError(error % extras_msg(extras))
+
+
+def items(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "array"):
+ for (index, item), subschema in zip(enumerate(instance), items):
+ for error in validator.descend(
+ item, subschema, path=index, schema_path=index,
+ ):
+ yield error
+ else:
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
+
+
+def additionalItems(validator, aI, instance, schema):
+ if (
+ not validator.is_type(instance, "array") or
+ validator.is_type(schema.get("items", {}), "object")
+ ):
+ return
+
+ len_items = len(schema.get("items", []))
+ if validator.is_type(aI, "object"):
+ for index, item in enumerate(instance[len_items:], start=len_items):
+ for error in validator.descend(item, aI, path=index):
+ yield error
+ elif not aI and len(instance) > len(schema.get("items", [])):
+ error = "Additional items are not allowed (%s %s unexpected)"
+ yield ValidationError(
+ error %
+ extras_msg(instance[len(schema.get("items", [])):])
+ )
+
+
+def const(validator, const, instance, schema):
+ if not equal(instance, const):
+ yield ValidationError("%r was expected" % (const,))
+
+
+def contains(validator, contains, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if not any(validator.is_valid(element, contains) for element in instance):
+ yield ValidationError(
+ "None of %r are valid under the given schema" % (instance,)
+ )
+
+
+def exclusiveMinimum(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance <= minimum:
+ yield ValidationError(
+ "%r is less than or equal to the minimum of %r" % (
+ instance, minimum,
+ ),
+ )
+
+
+def exclusiveMaximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance >= maximum:
+ yield ValidationError(
+ "%r is greater than or equal to the maximum of %r" % (
+ instance, maximum,
+ ),
+ )
+
+
+def minimum(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance < minimum:
+ yield ValidationError(
+ "%r is less than the minimum of %r" % (instance, minimum)
+ )
+
+
+def maximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance > maximum:
+ yield ValidationError(
+ "%r is greater than the maximum of %r" % (instance, maximum)
+ )
+
+
+def multipleOf(validator, dB, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if isinstance(dB, float):
+ quotient = instance / dB
+ failed = int(quotient) != quotient
+ else:
+ failed = instance % dB
+
+ if failed:
+ yield ValidationError("%r is not a multiple of %r" % (instance, dB))
+
+
+def minItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) < mI:
+ yield ValidationError("%r is too short" % (instance,))
+
+
+def maxItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) > mI:
+ yield ValidationError("%r is too long" % (instance,))
+
+
+def uniqueItems(validator, uI, instance, schema):
+ if (
+ uI and
+ validator.is_type(instance, "array") and
+ not uniq(instance)
+ ):
+ yield ValidationError("%r has non-unique elements" % (instance,))
+
+
+def pattern(validator, patrn, instance, schema):
+ if (
+ validator.is_type(instance, "string") and
+ not re.search(patrn, instance)
+ ):
+ yield ValidationError("%r does not match %r" % (instance, patrn))
+
+
+def format(validator, format, instance, schema):
+ if validator.format_checker is not None:
+ try:
+ validator.format_checker.check(instance, format)
+ except FormatError as error:
+ yield ValidationError(error.message, cause=error.cause)
+
+
+def minLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) < mL:
+ yield ValidationError("%r is too short" % (instance,))
+
+
+def maxLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) > mL:
+ yield ValidationError("%r is too long" % (instance,))
+
+
+def dependencies(validator, dependencies, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in iteritems(dependencies):
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "array"):
+ for each in dependency:
+ if each not in instance:
+ message = "%r is a dependency of %r"
+ yield ValidationError(message % (each, property))
+ else:
+ for error in validator.descend(
+ instance, dependency, schema_path=property,
+ ):
+ yield error
+
+
+def enum(validator, enums, instance, schema):
+ if instance == 0 or instance == 1:
+ unbooled = unbool(instance)
+ if all(unbooled != unbool(each) for each in enums):
+ yield ValidationError("%r is not one of %r" % (instance, enums))
+ elif instance not in enums:
+ yield ValidationError("%r is not one of %r" % (instance, enums))
+
+
+def ref(validator, ref, instance, schema):
+ resolve = getattr(validator.resolver, "resolve", None)
+ if resolve is None:
+ with validator.resolver.resolving(ref) as resolved:
+ for error in validator.descend(instance, resolved):
+ yield error
+ else:
+ scope, resolved = validator.resolver.resolve(ref)
+ validator.resolver.push_scope(scope)
+
+ try:
+ for error in validator.descend(instance, resolved):
+ yield error
+ finally:
+ validator.resolver.pop_scope()
+
+
+def type(validator, types, instance, schema):
+ types = ensure_list(types)
+
+ if not any(validator.is_type(instance, type) for type in types):
+ yield ValidationError(types_msg(instance, types))
+
+
+def properties(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+
+
+def required(validator, required, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+ for property in required:
+ if property not in instance:
+ yield ValidationError("%r is a required property" % property)
+
+
+def minProperties(validator, mP, instance, schema):
+ if validator.is_type(instance, "object") and len(instance) < mP:
+ yield ValidationError(
+ "%r does not have enough properties" % (instance,)
+ )
+
+
+def maxProperties(validator, mP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+    if len(instance) > mP:
+ yield ValidationError("%r has too many properties" % (instance,))
+
+
+def allOf(validator, allOf, instance, schema):
+ for index, subschema in enumerate(allOf):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def anyOf(validator, anyOf, instance, schema):
+ all_errors = []
+ for index, subschema in enumerate(anyOf):
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+
+
+def oneOf(validator, oneOf, instance, schema):
+ subschemas = enumerate(oneOf)
+ all_errors = []
+ for index, subschema in subschemas:
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ first_valid = subschema
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+
+ more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(
+ "%r is valid under each of %s" % (instance, reprs)
+ )
+
+
+def not_(validator, not_schema, instance, schema):
+ if validator.is_valid(instance, not_schema):
+ yield ValidationError(
+ "%r is not allowed for %r" % (not_schema, instance)
+ )
+
+
+def if_(validator, if_schema, instance, schema):
+ if validator.is_valid(instance, if_schema):
+ if u"then" in schema:
+ then = schema[u"then"]
+ for error in validator.descend(instance, then, schema_path="then"):
+ yield error
+ elif u"else" in schema:
+ else_ = schema[u"else"]
+ for error in validator.descend(instance, else_, schema_path="else"):
+ yield error
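Every function above implements a single schema keyword under the same contract: it is called as ``fn(validator, keyword_value, instance, schema)`` and yields ``ValidationError`` objects, yielding nothing when the instance passes. A minimal sketch of how they surface through the public API (using names from this vendored copy):

    import jsonschema

    validator = jsonschema.Draft7Validator({"minimum": 5, "multipleOf": 2})
    for error in sorted(validator.iter_errors(3), key=str):
        print(error.message)
    # 3 is less than the minimum of 5
    # 3 is not a multiple of 2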
diff --git a/lib/spack/external/_vendoring/jsonschema/benchmarks/__init__.py b/lib/spack/external/_vendoring/jsonschema/benchmarks/__init__.py
new file mode 100644
index 0000000000..e3dcc68993
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/benchmarks/__init__.py
@@ -0,0 +1,5 @@
+"""
+Benchmarks for validation.
+
+This package is *not* public API.
+"""
diff --git a/lib/spack/external/_vendoring/jsonschema/benchmarks/issue232.py b/lib/spack/external/_vendoring/jsonschema/benchmarks/issue232.py
new file mode 100644
index 0000000000..65e3aedf79
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/benchmarks/issue232.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+A performance benchmark using the example from issue #232.
+
+See https://github.com/Julian/jsonschema/pull/232.
+"""
+from twisted.python.filepath import FilePath
+from pyperf import Runner
+from pyrsistent import m
+
+from jsonschema.tests._suite import Version
+import jsonschema
+
+
+issue232 = Version(
+ path=FilePath(__file__).sibling("issue232"),
+ remotes=m(),
+ name="issue232",
+)
+
+
+if __name__ == "__main__":
+ issue232.benchmark(
+ runner=Runner(),
+ Validator=jsonschema.Draft4Validator,
+ )
diff --git a/lib/spack/external/_vendoring/jsonschema/benchmarks/json_schema_test_suite.py b/lib/spack/external/_vendoring/jsonschema/benchmarks/json_schema_test_suite.py
new file mode 100644
index 0000000000..5add5051df
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/benchmarks/json_schema_test_suite.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+"""
+A performance benchmark using the official test suite.
+
+This benchmarks jsonschema using every valid example in the
+JSON-Schema-Test-Suite. It will take some time to complete.
+"""
+from pyperf import Runner
+
+from jsonschema.tests._suite import Suite
+
+
+if __name__ == "__main__":
+ Suite().benchmark(runner=Runner())
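Both benchmark scripts hand argument parsing to pyperf's ``Runner``, so they are meant to be run directly as scripts, e.g. ``python json_schema_test_suite.py -o results.json`` (``-o`` is pyperf's flag for writing timing results to a JSON file; a JSON-Schema-Test-Suite checkout must be discoverable as described in ``jsonschema/tests/_suite.py``).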
diff --git a/lib/spack/external/_vendoring/jsonschema/cli.py b/lib/spack/external/_vendoring/jsonschema/cli.py
new file mode 100644
index 0000000000..ab3335b27c
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/cli.py
@@ -0,0 +1,90 @@
+"""
+The ``jsonschema`` command line.
+"""
+from __future__ import absolute_import
+import argparse
+import json
+import sys
+
+from jsonschema import __version__
+from jsonschema._reflect import namedAny
+from jsonschema.validators import validator_for
+
+
+def _namedAnyWithDefault(name):
+ if "." not in name:
+ name = "jsonschema." + name
+ return namedAny(name)
+
+
+def _json_file(path):
+ with open(path) as file:
+ return json.load(file)
+
+
+parser = argparse.ArgumentParser(
+ description="JSON Schema Validation CLI",
+)
+parser.add_argument(
+ "-i", "--instance",
+ action="append",
+ dest="instances",
+ type=_json_file,
+ help=(
+        "a path to a JSON instance (e.g. filename.json) "
+ "to validate (may be specified multiple times)"
+ ),
+)
+parser.add_argument(
+ "-F", "--error-format",
+ default="{error.instance}: {error.message}\n",
+ help=(
+ "the format to use for each error output message, specified in "
+ "a form suitable for passing to str.format, which will be called "
+ "with 'error' for each error"
+ ),
+)
+parser.add_argument(
+ "-V", "--validator",
+ type=_namedAnyWithDefault,
+ help=(
+ "the fully qualified object name of a validator to use, or, for "
+ "validators that are registered with jsonschema, simply the name "
+ "of the class."
+ ),
+)
+parser.add_argument(
+ "--version",
+ action="version",
+ version=__version__,
+)
+parser.add_argument(
+ "schema",
+    help="the JSON Schema to validate with (e.g. schema.json)",
+ type=_json_file,
+)
+
+
+def parse_args(args):
+ arguments = vars(parser.parse_args(args=args or ["--help"]))
+ if arguments["validator"] is None:
+ arguments["validator"] = validator_for(arguments["schema"])
+ return arguments
+
+
+def main(args=sys.argv[1:]):
+ sys.exit(run(arguments=parse_args(args=args)))
+
+
+def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
+ error_format = arguments["error_format"]
+ validator = arguments["validator"](schema=arguments["schema"])
+
+ validator.check_schema(arguments["schema"])
+
+ errored = False
+ for instance in arguments["instances"] or ():
+ for error in validator.iter_errors(instance):
+ stderr.write(error_format.format(error=error))
+ errored = True
+ return errored
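``run`` returns a truthy value when any instance fails, which ``main`` turns into the process exit status. A minimal sketch of driving the CLI programmatically (``sample.json`` and ``schema.json`` are hypothetical files that must exist on disk):

    import sys

    from jsonschema import cli

    # parse_args() loads both JSON files and falls back to
    # validator_for(schema) when --validator is not given.
    arguments = cli.parse_args(["--instance", "sample.json", "schema.json"])
    sys.exit(cli.run(arguments))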
diff --git a/lib/spack/external/_vendoring/jsonschema/compat.py b/lib/spack/external/_vendoring/jsonschema/compat.py
new file mode 100644
index 0000000000..47e0980455
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/compat.py
@@ -0,0 +1,55 @@
+"""
+Python 2/3 compatibility helpers.
+
+Note: This module is *not* public API.
+"""
+import contextlib
+import operator
+import sys
+
+
+try:
+ from collections.abc import MutableMapping, Sequence # noqa
+except ImportError:
+ from collections import MutableMapping, Sequence # noqa
+
+PY3 = sys.version_info[0] >= 3
+
+if PY3:
+ zip = zip
+ from functools import lru_cache
+ from io import StringIO as NativeIO
+ from urllib.parse import (
+ unquote, urljoin, urlunsplit, SplitResult, urlsplit
+ )
+ from urllib.request import pathname2url, urlopen
+ str_types = str,
+ int_types = int,
+ iteritems = operator.methodcaller("items")
+else:
+ from itertools import izip as zip # noqa
+ from io import BytesIO as NativeIO
+ from urlparse import urljoin, urlunsplit, SplitResult, urlsplit
+ from urllib import pathname2url, unquote # noqa
+ import urllib2 # noqa
+ def urlopen(*args, **kwargs):
+ return contextlib.closing(urllib2.urlopen(*args, **kwargs))
+
+ str_types = basestring
+ int_types = int, long
+ iteritems = operator.methodcaller("iteritems")
+
+ from functools32 import lru_cache
+
+
+def urldefrag(url):
+ if "#" in url:
+ s, n, p, q, frag = urlsplit(url)
+ defrag = urlunsplit((s, n, p, q, ""))
+ else:
+ defrag = url
+ frag = ""
+ return defrag, frag
+
+
+# flake8: noqa
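A small usage sketch of ``urldefrag``, which re-implements stdlib fragment splitting on top of ``urlsplit`` so Python 2 and 3 behave identically:

    from jsonschema.compat import urldefrag

    assert urldefrag("http://example.com/schema.json#/definitions/foo") == (
        "http://example.com/schema.json",
        "/definitions/foo",
    )
    assert urldefrag("http://example.com/schema.json") == (
        "http://example.com/schema.json",
        "",
    )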
diff --git a/lib/spack/external/_vendoring/jsonschema/exceptions.py b/lib/spack/external/_vendoring/jsonschema/exceptions.py
new file mode 100644
index 0000000000..691dcffe6c
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/exceptions.py
@@ -0,0 +1,374 @@
+"""
+Validation errors, and some surrounding helpers.
+"""
+from collections import defaultdict, deque
+import itertools
+import pprint
+import textwrap
+
+import attr
+
+from jsonschema import _utils
+from jsonschema.compat import PY3, iteritems
+
+
+WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
+STRONG_MATCHES = frozenset()
+
+_unset = _utils.Unset()
+
+
+class _Error(Exception):
+ def __init__(
+ self,
+ message,
+ validator=_unset,
+ path=(),
+ cause=None,
+ context=(),
+ validator_value=_unset,
+ instance=_unset,
+ schema=_unset,
+ schema_path=(),
+ parent=None,
+ ):
+ super(_Error, self).__init__(
+ message,
+ validator,
+ path,
+ cause,
+ context,
+ validator_value,
+ instance,
+ schema,
+ schema_path,
+ parent,
+ )
+ self.message = message
+ self.path = self.relative_path = deque(path)
+ self.schema_path = self.relative_schema_path = deque(schema_path)
+ self.context = list(context)
+ self.cause = self.__cause__ = cause
+ self.validator = validator
+ self.validator_value = validator_value
+ self.instance = instance
+ self.schema = schema
+ self.parent = parent
+
+ for error in context:
+ error.parent = self
+
+ def __repr__(self):
+ return "<%s: %r>" % (self.__class__.__name__, self.message)
+
+ def __unicode__(self):
+ essential_for_verbose = (
+ self.validator, self.validator_value, self.instance, self.schema,
+ )
+ if any(m is _unset for m in essential_for_verbose):
+ return self.message
+
+ pschema = pprint.pformat(self.schema, width=72)
+ pinstance = pprint.pformat(self.instance, width=72)
+ return self.message + textwrap.dedent("""
+
+ Failed validating %r in %s%s:
+ %s
+
+ On %s%s:
+ %s
+ """.rstrip()
+ ) % (
+ self.validator,
+ self._word_for_schema_in_error_message,
+ _utils.format_as_index(list(self.relative_schema_path)[:-1]),
+ _utils.indent(pschema),
+ self._word_for_instance_in_error_message,
+ _utils.format_as_index(self.relative_path),
+ _utils.indent(pinstance),
+ )
+
+ if PY3:
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
+
+ @classmethod
+ def create_from(cls, other):
+ return cls(**other._contents())
+
+ @property
+ def absolute_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_path
+
+ path = deque(self.relative_path)
+ path.extendleft(reversed(parent.absolute_path))
+ return path
+
+ @property
+ def absolute_schema_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_schema_path
+
+ path = deque(self.relative_schema_path)
+ path.extendleft(reversed(parent.absolute_schema_path))
+ return path
+
+ def _set(self, **kwargs):
+ for k, v in iteritems(kwargs):
+ if getattr(self, k) is _unset:
+ setattr(self, k, v)
+
+ def _contents(self):
+ attrs = (
+ "message", "cause", "context", "validator", "validator_value",
+ "path", "schema_path", "instance", "schema", "parent",
+ )
+ return dict((attr, getattr(self, attr)) for attr in attrs)
+
+
+class ValidationError(_Error):
+ """
+ An instance was invalid under a provided schema.
+ """
+
+ _word_for_schema_in_error_message = "schema"
+ _word_for_instance_in_error_message = "instance"
+
+
+class SchemaError(_Error):
+ """
+ A schema was invalid under its corresponding metaschema.
+ """
+
+ _word_for_schema_in_error_message = "metaschema"
+ _word_for_instance_in_error_message = "schema"
+
+
+@attr.s(hash=True)
+class RefResolutionError(Exception):
+ """
+ A ref could not be resolved.
+ """
+
+ _cause = attr.ib()
+
+ def __str__(self):
+ return str(self._cause)
+
+
+class UndefinedTypeCheck(Exception):
+ """
+ A type checker was asked to check a type it did not have registered.
+ """
+
+ def __init__(self, type):
+ self.type = type
+
+ def __unicode__(self):
+ return "Type %r is unknown to this type checker" % self.type
+
+ if PY3:
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
+
+
+class UnknownType(Exception):
+ """
+ A validator was asked to validate an instance against an unknown type.
+ """
+
+ def __init__(self, type, instance, schema):
+ self.type = type
+ self.instance = instance
+ self.schema = schema
+
+ def __unicode__(self):
+ pschema = pprint.pformat(self.schema, width=72)
+ pinstance = pprint.pformat(self.instance, width=72)
+ return textwrap.dedent("""
+ Unknown type %r for validator with schema:
+ %s
+
+ While checking instance:
+ %s
+ """.rstrip()
+ ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
+
+ if PY3:
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
+
+
+class FormatError(Exception):
+ """
+ Validating a format failed.
+ """
+
+ def __init__(self, message, cause=None):
+ super(FormatError, self).__init__(message, cause)
+ self.message = message
+ self.cause = self.__cause__ = cause
+
+ def __unicode__(self):
+ return self.message
+
+ if PY3:
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return self.message.encode("utf-8")
+
+
+class ErrorTree(object):
+ """
+ ErrorTrees make it easier to check which validations failed.
+ """
+
+ _instance = _unset
+
+ def __init__(self, errors=()):
+ self.errors = {}
+ self._contents = defaultdict(self.__class__)
+
+ for error in errors:
+ container = self
+ for element in error.path:
+ container = container[element]
+ container.errors[error.validator] = error
+
+ container._instance = error.instance
+
+ def __contains__(self, index):
+ """
+ Check whether ``instance[index]`` has any errors.
+ """
+
+ return index in self._contents
+
+ def __getitem__(self, index):
+ """
+ Retrieve the child tree one level down at the given ``index``.
+
+ If the index is not in the instance that this tree corresponds to and
+ is not known by this tree, whatever error would be raised by
+ ``instance.__getitem__`` will be propagated (usually this is some
+        subclass of `exceptions.LookupError`).
+ """
+
+ if self._instance is not _unset and index not in self:
+ self._instance[index]
+ return self._contents[index]
+
+ def __setitem__(self, index, value):
+ """
+ Add an error to the tree at the given ``index``.
+ """
+ self._contents[index] = value
+
+ def __iter__(self):
+ """
+ Iterate (non-recursively) over the indices in the instance with errors.
+ """
+
+ return iter(self._contents)
+
+ def __len__(self):
+ """
+ Return the `total_errors`.
+ """
+ return self.total_errors
+
+ def __repr__(self):
+ return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
+
+ @property
+ def total_errors(self):
+ """
+ The total number of errors in the entire tree, including children.
+ """
+
+ child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
+ return len(self.errors) + child_errors
+
+
+def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
+ """
+ Create a key function that can be used to sort errors by relevance.
+
+ Arguments:
+ weak (set):
+ a collection of validator names to consider to be "weak".
+ If there are two errors at the same level of the instance
+ and one is in the set of weak validator names, the other
+ error will take priority. By default, :validator:`anyOf` and
+ :validator:`oneOf` are considered weak validators and will
+ be superseded by other same-level validation errors.
+
+ strong (set):
+ a collection of validator names to consider to be "strong"
+ """
+ def relevance(error):
+ validator = error.validator
+ return -len(error.path), validator not in weak, validator in strong
+ return relevance
+
+
+relevance = by_relevance()
+
+
+def best_match(errors, key=relevance):
+ """
+ Try to find an error that appears to be the best match among given errors.
+
+ In general, errors that are higher up in the instance (i.e. for which
+ `ValidationError.path` is shorter) are considered better matches,
+ since they indicate "more" is wrong with the instance.
+
+ If the resulting match is either :validator:`oneOf` or :validator:`anyOf`,
+ the *opposite* assumption is made -- i.e. the deepest error is picked,
+ since these validators only need to match once, and any other errors may
+ not be relevant.
+
+ Arguments:
+ errors (collections.Iterable):
+
+ the errors to select from. Do not provide a mixture of
+ errors from different validation attempts (i.e. from
+ different instances or schemas), since it won't produce
+            sensible output.
+
+ key (collections.Callable):
+
+ the key to use when sorting errors. See `relevance` and
+ transitively `by_relevance` for more details (the default is
+ to sort with the defaults of that function). Changing the
+ default is only useful if you want to change the function
+ that rates errors but still want the error context descent
+ done by this function.
+
+ Returns:
+ the best matching error, or ``None`` if the iterable was empty
+
+ .. note::
+
+ This function is a heuristic. Its return value may change for a given
+ set of inputs from version to version if better heuristics are added.
+ """
+ errors = iter(errors)
+ best = next(errors, None)
+ if best is None:
+ return
+ best = max(itertools.chain([best], errors), key=key)
+
+ while best.context:
+ best = min(best.context, key=key)
+ return best
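A brief sketch of the relevance machinery above: with one error at the instance root and one nested under a property, ``best_match`` prefers the shallower error (assuming this vendored copy is importable as ``jsonschema``):

    from jsonschema import Draft7Validator
    from jsonschema.exceptions import best_match

    validator = Draft7Validator(
        {
            "properties": {"foo": {"type": "string"}},
            "required": ["bar"],
        },
    )
    error = best_match(validator.iter_errors({"foo": 7}))
    # The 'required' error has an empty path, so it outranks the
    # 'type' error sitting at path ['foo'].
    print(error.validator)  # required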
diff --git a/lib/spack/external/_vendoring/jsonschema/schemas/draft3.json b/lib/spack/external/_vendoring/jsonschema/schemas/draft3.json
new file mode 100644
index 0000000000..f8a09c563b
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/schemas/draft3.json
@@ -0,0 +1,199 @@
+{
+ "$schema": "http://json-schema.org/draft-03/schema#",
+ "dependencies": {
+ "exclusiveMaximum": "maximum",
+ "exclusiveMinimum": "minimum"
+ },
+ "id": "http://json-schema.org/draft-03/schema#",
+ "properties": {
+ "$ref": {
+ "format": "uri",
+ "type": "string"
+ },
+ "$schema": {
+ "format": "uri",
+ "type": "string"
+ },
+ "additionalItems": {
+ "default": {},
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "boolean"
+ ]
+ },
+ "additionalProperties": {
+ "default": {},
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "boolean"
+ ]
+ },
+ "default": {
+ "type": "any"
+ },
+ "dependencies": {
+ "additionalProperties": {
+ "items": {
+ "type": "string"
+ },
+ "type": [
+ "string",
+ "array",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "default": {},
+ "type": [
+ "string",
+ "array",
+ "object"
+ ]
+ },
+ "description": {
+ "type": "string"
+ },
+ "disallow": {
+ "items": {
+ "type": [
+ "string",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "type": [
+ "string",
+ "array"
+ ],
+ "uniqueItems": true
+ },
+ "divisibleBy": {
+ "default": 1,
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "type": "number"
+ },
+ "enum": {
+ "type": "array"
+ },
+ "exclusiveMaximum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "exclusiveMinimum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "extends": {
+ "default": {},
+ "items": {
+ "$ref": "#"
+ },
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "id": {
+ "format": "uri",
+ "type": "string"
+ },
+ "items": {
+ "default": {},
+ "items": {
+ "$ref": "#"
+ },
+ "type": [
+ {
+ "$ref": "#"
+ },
+ "array"
+ ]
+ },
+ "maxDecimal": {
+ "minimum": 0,
+ "type": "number"
+ },
+ "maxItems": {
+ "minimum": 0,
+ "type": "integer"
+ },
+ "maxLength": {
+ "type": "integer"
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "minItems": {
+ "default": 0,
+ "minimum": 0,
+ "type": "integer"
+ },
+ "minLength": {
+ "default": 0,
+ "minimum": 0,
+ "type": "integer"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "pattern": {
+ "format": "regex",
+ "type": "string"
+ },
+ "patternProperties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "properties": {
+ "additionalProperties": {
+ "$ref": "#",
+ "type": "object"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "required": {
+ "default": false,
+ "type": "boolean"
+ },
+ "title": {
+ "type": "string"
+ },
+ "type": {
+ "default": "any",
+ "items": {
+ "type": [
+ "string",
+ {
+ "$ref": "#"
+ }
+ ]
+ },
+ "type": [
+ "string",
+ "array"
+ ],
+ "uniqueItems": true
+ },
+ "uniqueItems": {
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+}
diff --git a/lib/spack/external/_vendoring/jsonschema/schemas/draft4.json b/lib/spack/external/_vendoring/jsonschema/schemas/draft4.json
new file mode 100644
index 0000000000..9b666cff88
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/schemas/draft4.json
@@ -0,0 +1,222 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "default": {},
+ "definitions": {
+ "positiveInteger": {
+ "minimum": 0,
+ "type": "integer"
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [
+ {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ {
+ "default": 0
+ }
+ ]
+ },
+ "schemaArray": {
+ "items": {
+ "$ref": "#"
+ },
+ "minItems": 1,
+ "type": "array"
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [
+ "maximum"
+ ],
+ "exclusiveMinimum": [
+ "minimum"
+ ]
+ },
+ "description": "Core schema meta-schema",
+ "id": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "$schema": {
+ "format": "uri",
+ "type": "string"
+ },
+ "additionalItems": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "$ref": "#"
+ }
+ ],
+ "default": {}
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "$ref": "#"
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "anyOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "default": {},
+ "definitions": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "dependencies": {
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#"
+ },
+ {
+ "$ref": "#/definitions/stringArray"
+ }
+ ]
+ },
+ "type": "object"
+ },
+ "description": {
+ "type": "string"
+ },
+ "enum": {
+ "type": "array"
+ },
+ "exclusiveMaximum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "exclusiveMinimum": {
+ "default": false,
+ "type": "boolean"
+ },
+ "format": {
+ "type": "string"
+ },
+ "id": {
+ "format": "uri",
+ "type": "string"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#"
+ },
+ {
+ "$ref": "#/definitions/schemaArray"
+ }
+ ],
+ "default": {}
+ },
+ "maxItems": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maxProperties": {
+ "$ref": "#/definitions/positiveInteger"
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "minItems": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minLength": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minProperties": {
+ "$ref": "#/definitions/positiveIntegerDefault0"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "multipleOf": {
+ "exclusiveMinimum": true,
+ "minimum": 0,
+ "type": "number"
+ },
+ "not": {
+ "$ref": "#"
+ },
+ "oneOf": {
+ "$ref": "#/definitions/schemaArray"
+ },
+ "pattern": {
+ "format": "regex",
+ "type": "string"
+ },
+ "patternProperties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "properties": {
+ "additionalProperties": {
+ "$ref": "#"
+ },
+ "default": {},
+ "type": "object"
+ },
+ "required": {
+ "$ref": "#/definitions/stringArray"
+ },
+ "title": {
+ "type": "string"
+ },
+ "type": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/simpleTypes"
+ },
+ {
+ "items": {
+ "$ref": "#/definitions/simpleTypes"
+ },
+ "minItems": 1,
+ "type": "array",
+ "uniqueItems": true
+ }
+ ]
+ },
+ "uniqueItems": {
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+}
diff --git a/lib/spack/external/_vendoring/jsonschema/schemas/draft6.json b/lib/spack/external/_vendoring/jsonschema/schemas/draft6.json
new file mode 100644
index 0000000000..a0d2bf7896
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/schemas/draft6.json
@@ -0,0 +1,153 @@
+{
+ "$schema": "http://json-schema.org/draft-06/schema#",
+ "$id": "http://json-schema.org/draft-06/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "examples": {
+ "type": "array",
+ "items": {}
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": {},
+ "enum": {
+ "type": "array"
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": {}
+}
diff --git a/lib/spack/external/_vendoring/jsonschema/schemas/draft7.json b/lib/spack/external/_vendoring/jsonschema/schemas/draft7.json
new file mode 100644
index 0000000000..746cde9690
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/schemas/draft7.json
@@ -0,0 +1,166 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://json-schema.org/draft-07/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": true
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "if": {"$ref": "#"},
+ "then": {"$ref": "#"},
+ "else": {"$ref": "#"},
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": true
+}
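These bundled meta-schemas are what ``check_schema`` validates user schemas against; each draft's validator class raises ``SchemaError`` when the schema itself is malformed. A brief sketch:

    from jsonschema import Draft7Validator
    from jsonschema.exceptions import SchemaError

    Draft7Validator.check_schema({"type": "string"})  # passes silently
    try:
        Draft7Validator.check_schema({"type": "nonsense"})
    except SchemaError as error:
        # 'nonsense' is not one of the draft's simple types, nor an array
        # of them, so the meta-schema's anyOf for "type" fails.
        print(error.message)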
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/__init__.py b/lib/spack/external/_vendoring/jsonschema/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/__init__.py
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/_helpers.py b/lib/spack/external/_vendoring/jsonschema/tests/_helpers.py
new file mode 100644
index 0000000000..70f291fe2a
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/_helpers.py
@@ -0,0 +1,5 @@
+def bug(issue=None):
+ message = "A known bug."
+ if issue is not None:
+ message += " See issue #{issue}.".format(issue=issue)
+ return message
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/_suite.py b/lib/spack/external/_vendoring/jsonschema/tests/_suite.py
new file mode 100644
index 0000000000..b68a7b668c
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/_suite.py
@@ -0,0 +1,239 @@
+"""
+Python representations of the JSON Schema Test Suite tests.
+"""
+
+from functools import partial
+import json
+import os
+import re
+import subprocess
+import sys
+import unittest
+
+from twisted.python.filepath import FilePath
+import attr
+
+from jsonschema.compat import PY3
+from jsonschema.validators import validators
+import jsonschema
+
+
+def _find_suite():
+ root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
+ if root is not None:
+ return FilePath(root)
+
+ root = FilePath(jsonschema.__file__).parent().sibling("json")
+ if not root.isdir(): # pragma: no cover
+ raise ValueError(
+ (
+ "Can't find the JSON-Schema-Test-Suite directory. "
+ "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
+ "variable or run the tests from alongside a checkout "
+ "of the suite."
+ ),
+ )
+ return root
+
+
+@attr.s(hash=True)
+class Suite(object):
+
+ _root = attr.ib(default=attr.Factory(_find_suite))
+
+ def _remotes(self):
+ jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"])
+ remotes = subprocess.check_output(
+ [sys.executable, jsonschema_suite.path, "remotes"],
+ )
+ return {
+ "http://localhost:1234/" + name: schema
+ for name, schema in json.loads(remotes.decode("utf-8")).items()
+ }
+
+ def benchmark(self, runner): # pragma: no cover
+ for name in validators:
+ self.version(name=name).benchmark(runner=runner)
+
+ def version(self, name):
+ return Version(
+ name=name,
+ path=self._root.descendant(["tests", name]),
+ remotes=self._remotes(),
+ )
+
+
+@attr.s(hash=True)
+class Version(object):
+
+ _path = attr.ib()
+ _remotes = attr.ib()
+
+ name = attr.ib()
+
+ def benchmark(self, runner, **kwargs): # pragma: no cover
+ for suite in self.tests():
+ for test in suite:
+ runner.bench_func(
+ test.fully_qualified_name,
+ partial(test.validate_ignoring_errors, **kwargs),
+ )
+
+ def tests(self):
+ return (
+ test
+ for child in self._path.globChildren("*.json")
+ for test in self._tests_in(
+ subject=child.basename()[:-5],
+ path=child,
+ )
+ )
+
+ def format_tests(self):
+ path = self._path.descendant(["optional", "format"])
+ return (
+ test
+ for child in path.globChildren("*.json")
+ for test in self._tests_in(
+ subject=child.basename()[:-5],
+ path=child,
+ )
+ )
+
+ def tests_of(self, name):
+ return self._tests_in(
+ subject=name,
+ path=self._path.child(name + ".json"),
+ )
+
+ def optional_tests_of(self, name):
+ return self._tests_in(
+ subject=name,
+ path=self._path.descendant(["optional", name + ".json"]),
+ )
+
+ def to_unittest_testcase(self, *suites, **kwargs):
+ name = kwargs.pop("name", "Test" + self.name.title())
+ methods = {
+ test.method_name: test.to_unittest_method(**kwargs)
+ for suite in suites
+ for tests in suite
+ for test in tests
+ }
+ cls = type(name, (unittest.TestCase,), methods)
+
+ try:
+ cls.__module__ = _someone_save_us_the_module_of_the_caller()
+ except Exception: # pragma: no cover
+ # We're doing crazy things, so if they go wrong, like a function
+ # behaving differently on some other interpreter, just make them
+ # not happen.
+ pass
+
+ return cls
+
+ def _tests_in(self, subject, path):
+ for each in json.loads(path.getContent().decode("utf-8")):
+ yield (
+ _Test(
+ version=self,
+ subject=subject,
+ case_description=each["description"],
+ schema=each["schema"],
+ remotes=self._remotes,
+ **test
+ ) for test in each["tests"]
+ )
+
+
+@attr.s(hash=True, repr=False)
+class _Test(object):
+
+ version = attr.ib()
+
+ subject = attr.ib()
+ case_description = attr.ib()
+ description = attr.ib()
+
+ data = attr.ib()
+ schema = attr.ib(repr=False)
+
+ valid = attr.ib()
+
+ _remotes = attr.ib()
+
+ def __repr__(self): # pragma: no cover
+ return "<Test {}>".format(self.fully_qualified_name)
+
+ @property
+ def fully_qualified_name(self): # pragma: no cover
+ return " > ".join(
+ [
+ self.version.name,
+ self.subject,
+ self.case_description,
+ self.description,
+ ]
+ )
+
+ @property
+ def method_name(self):
+ delimiters = r"[\W\- ]+"
+ name = "test_%s_%s_%s" % (
+ re.sub(delimiters, "_", self.subject),
+ re.sub(delimiters, "_", self.case_description),
+ re.sub(delimiters, "_", self.description),
+ )
+
+ if not PY3: # pragma: no cover
+ name = name.encode("utf-8")
+ return name
+
+ def to_unittest_method(self, skip=lambda test: None, **kwargs):
+ if self.valid:
+ def fn(this):
+ self.validate(**kwargs)
+ else:
+ def fn(this):
+ with this.assertRaises(jsonschema.ValidationError):
+ self.validate(**kwargs)
+
+ fn.__name__ = self.method_name
+ reason = skip(self)
+ return unittest.skipIf(reason is not None, reason)(fn)
+
+ def validate(self, Validator, **kwargs):
+ resolver = jsonschema.RefResolver.from_schema(
+ schema=self.schema,
+ store=self._remotes,
+ id_of=Validator.ID_OF,
+ )
+ jsonschema.validate(
+ instance=self.data,
+ schema=self.schema,
+ cls=Validator,
+ resolver=resolver,
+ **kwargs
+ )
+
+ def validate_ignoring_errors(self, Validator): # pragma: no cover
+ try:
+ self.validate(Validator=Validator)
+ except jsonschema.ValidationError:
+ pass
+
+
+def _someone_save_us_the_module_of_the_caller():
+ """
+    The FQON (fully qualified object name) of the module two stack frames up.
+
+    This is intended to allow us to dynamically return test case classes that
+    are indistinguishable from being defined in the module that wants them.
+
+    Otherwise, trial will mis-print the FQON, and copy-pasting it won't re-run
+    the class that really is running.
+
+ Save us all, this is all so so so so so terrible.
+ """
+
+ return sys._getframe(2).f_globals["__name__"]
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_cli.py b/lib/spack/external/_vendoring/jsonschema/tests/test_cli.py
new file mode 100644
index 0000000000..ed820ba3f8
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_cli.py
@@ -0,0 +1,151 @@
+from unittest import TestCase
+import json
+import subprocess
+import sys
+
+from jsonschema import Draft4Validator, ValidationError, cli, __version__
+from jsonschema.compat import NativeIO
+from jsonschema.exceptions import SchemaError
+
+
+def fake_validator(*errors):
+ errors = list(reversed(errors))
+
+ class FakeValidator(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def iter_errors(self, instance):
+ if errors:
+ return errors.pop()
+ return []
+
+ def check_schema(self, schema):
+ pass
+
+ return FakeValidator
+
+
+class TestParser(TestCase):
+
+ FakeValidator = fake_validator()
+ instance_file = "foo.json"
+ schema_file = "schema.json"
+
+ def setUp(self):
+ cli.open = self.fake_open
+ self.addCleanup(delattr, cli, "open")
+
+ def fake_open(self, path):
+ if path == self.instance_file:
+ contents = ""
+ elif path == self.schema_file:
+ contents = {}
+ else: # pragma: no cover
+ self.fail("What is {!r}".format(path))
+ return NativeIO(json.dumps(contents))
+
+ def test_find_validator_by_fully_qualified_object_name(self):
+ arguments = cli.parse_args(
+ [
+ "--validator",
+ "jsonschema.tests.test_cli.TestParser.FakeValidator",
+ "--instance", self.instance_file,
+ self.schema_file,
+ ]
+ )
+ self.assertIs(arguments["validator"], self.FakeValidator)
+
+ def test_find_validator_in_jsonschema(self):
+ arguments = cli.parse_args(
+ [
+ "--validator", "Draft4Validator",
+ "--instance", self.instance_file,
+ self.schema_file,
+ ]
+ )
+ self.assertIs(arguments["validator"], Draft4Validator)
+
+
+class TestCLI(TestCase):
+ def test_draft3_schema_draft4_validator(self):
+ stdout, stderr = NativeIO(), NativeIO()
+ with self.assertRaises(SchemaError):
+ cli.run(
+ {
+ "validator": Draft4Validator,
+ "schema": {
+ "anyOf": [
+ {"minimum": 20},
+ {"type": "string"},
+ {"required": True},
+ ],
+ },
+ "instances": [1],
+ "error_format": "{error.message}",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+
+ def test_successful_validation(self):
+ stdout, stderr = NativeIO(), NativeIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator(),
+ "schema": {},
+ "instances": [1],
+ "error_format": "{error.message}",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertFalse(stderr.getvalue())
+ self.assertEqual(exit_code, 0)
+
+ def test_unsuccessful_validation(self):
+ error = ValidationError("I am an error!", instance=1)
+ stdout, stderr = NativeIO(), NativeIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator([error]),
+ "schema": {},
+ "instances": [1],
+ "error_format": "{error.instance} - {error.message}",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertEqual(stderr.getvalue(), "1 - I am an error!")
+ self.assertEqual(exit_code, 1)
+
+ def test_unsuccessful_validation_multiple_instances(self):
+ first_errors = [
+ ValidationError("9", instance=1),
+ ValidationError("8", instance=1),
+ ]
+ second_errors = [ValidationError("7", instance=2)]
+ stdout, stderr = NativeIO(), NativeIO()
+ exit_code = cli.run(
+ {
+ "validator": fake_validator(first_errors, second_errors),
+ "schema": {},
+ "instances": [1, 2],
+ "error_format": "{error.instance} - {error.message}\t",
+ },
+ stdout=stdout,
+ stderr=stderr,
+ )
+ self.assertFalse(stdout.getvalue())
+ self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
+ self.assertEqual(exit_code, 1)
+
+ def test_version(self):
+ version = subprocess.check_output(
+ [sys.executable, "-m", "jsonschema", "--version"],
+ stderr=subprocess.STDOUT,
+ )
+ version = version.decode("utf-8").strip()
+ self.assertEqual(version, __version__)
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_exceptions.py b/lib/spack/external/_vendoring/jsonschema/tests/test_exceptions.py
new file mode 100644
index 0000000000..eae00d76d7
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_exceptions.py
@@ -0,0 +1,462 @@
+from unittest import TestCase
+import textwrap
+
+from jsonschema import Draft4Validator, exceptions
+from jsonschema.compat import PY3
+
+
+class TestBestMatch(TestCase):
+ def best_match(self, errors):
+ errors = list(errors)
+ best = exceptions.best_match(errors)
+ reversed_best = exceptions.best_match(reversed(errors))
+ msg = "Didn't return a consistent best match!\nGot: {0}\n\nThen: {1}"
+ self.assertEqual(
+ best._contents(), reversed_best._contents(),
+ msg=msg.format(best, reversed_best),
+ )
+ return best
+
+ def test_shallower_errors_are_better_matches(self):
+ validator = Draft4Validator(
+ {
+ "properties": {
+ "foo": {
+ "minProperties": 2,
+ "properties": {"bar": {"type": "object"}},
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo": {"bar": []}}))
+ self.assertEqual(best.validator, "minProperties")
+
+ def test_oneOf_and_anyOf_are_weak_matches(self):
+ """
+ A property you *must* match is probably better than one you have to
+ match a part of.
+ """
+
+ validator = Draft4Validator(
+ {
+ "minProperties": 2,
+ "anyOf": [{"type": "string"}, {"type": "number"}],
+ "oneOf": [{"type": "string"}, {"type": "number"}],
+ }
+ )
+ best = self.best_match(validator.iter_errors({}))
+ self.assertEqual(best.validator, "minProperties")
+
+ def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
+ """
+ If the most relevant error is an anyOf, then we traverse its context
+ and select the otherwise *least* relevant error, since in this case
+ that means the most specific, deep, error inside the instance.
+
+ I.e. since only one of the schemas must match, we look for the most
+ relevant one.
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties": {
+ "foo": {
+ "anyOf": [
+ {"type": "string"},
+ {"properties": {"bar": {"type": "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
+ """
+ If the most relevant error is an oneOf, then we traverse its context
+ and select the otherwise *least* relevant error, since in this case
+ that means the most specific, deep, error inside the instance.
+
+ I.e. since only one of the schemas must match, we look for the most
+ relevant one.
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties": {
+ "foo": {
+ "oneOf": [
+ {"type": "string"},
+ {"properties": {"bar": {"type": "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
+ """
+ Now, if the error is allOf, we traverse but select the *most* relevant
+ error from the context, because all schemas here must match anyways.
+ """
+
+ validator = Draft4Validator(
+ {
+ "properties": {
+ "foo": {
+ "allOf": [
+ {"type": "string"},
+ {"properties": {"bar": {"type": "array"}}},
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
+ self.assertEqual(best.validator_value, "string")
+
+ def test_nested_context_for_oneOf(self):
+ validator = Draft4Validator(
+ {
+ "properties": {
+ "foo": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "properties": {
+ "bar": {"type": "array"},
+ },
+ },
+ ],
+ },
+ ],
+ },
+ },
+ },
+ )
+ best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
+ self.assertEqual(best.validator_value, "array")
+
+ def test_one_error(self):
+ validator = Draft4Validator({"minProperties": 2})
+ error, = validator.iter_errors({})
+ self.assertEqual(
+ exceptions.best_match(validator.iter_errors({})).validator,
+ "minProperties",
+ )
+
+ def test_no_errors(self):
+ validator = Draft4Validator({})
+ self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
+
+
+class TestByRelevance(TestCase):
+ def test_short_paths_are_better_matches(self):
+ shallow = exceptions.ValidationError("Oh no!", path=["baz"])
+ deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
+ match = max([shallow, deep], key=exceptions.relevance)
+ self.assertIs(match, shallow)
+
+ match = max([deep, shallow], key=exceptions.relevance)
+ self.assertIs(match, shallow)
+
+ def test_global_errors_are_even_better_matches(self):
+ shallow = exceptions.ValidationError("Oh no!", path=[])
+ deep = exceptions.ValidationError("Oh yes!", path=["foo"])
+
+ errors = sorted([shallow, deep], key=exceptions.relevance)
+ self.assertEqual(
+ [list(error.path) for error in errors],
+ [["foo"], []],
+ )
+
+ errors = sorted([deep, shallow], key=exceptions.relevance)
+ self.assertEqual(
+ [list(error.path) for error in errors],
+ [["foo"], []],
+ )
+
+ def test_weak_validators_are_lower_priority(self):
+ weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+ normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+
+ best_match = exceptions.by_relevance(weak="a")
+
+ match = max([weak, normal], key=best_match)
+ self.assertIs(match, normal)
+
+ match = max([normal, weak], key=best_match)
+ self.assertIs(match, normal)
+
+ def test_strong_validators_are_higher_priority(self):
+ weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+ normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+ strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
+
+ best_match = exceptions.by_relevance(weak="a", strong="c")
+
+ match = max([weak, normal, strong], key=best_match)
+ self.assertIs(match, strong)
+
+ match = max([strong, normal, weak], key=best_match)
+ self.assertIs(match, strong)
+
+
+class TestErrorTree(TestCase):
+ def test_it_knows_how_many_total_errors_it_contains(self):
+ # FIXME: https://github.com/Julian/jsonschema/issues/442
+ errors = [
+ exceptions.ValidationError("Something", validator=i)
+ for i in range(8)
+ ]
+ tree = exceptions.ErrorTree(errors)
+ self.assertEqual(tree.total_errors, 8)
+
+ def test_it_contains_an_item_if_the_item_had_an_error(self):
+ errors = [exceptions.ValidationError("a message", path=["bar"])]
+ tree = exceptions.ErrorTree(errors)
+ self.assertIn("bar", tree)
+
+ def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
+ errors = [exceptions.ValidationError("a message", path=["bar"])]
+ tree = exceptions.ErrorTree(errors)
+ self.assertNotIn("foo", tree)
+
+ def test_validators_that_failed_appear_in_errors_dict(self):
+ error = exceptions.ValidationError("a message", validator="foo")
+ tree = exceptions.ErrorTree([error])
+ self.assertEqual(tree.errors, {"foo": error})
+
+ def test_it_creates_a_child_tree_for_each_nested_path(self):
+ errors = [
+ exceptions.ValidationError("a bar message", path=["bar"]),
+ exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
+ ]
+ tree = exceptions.ErrorTree(errors)
+ self.assertIn(0, tree["bar"])
+ self.assertNotIn(1, tree["bar"])
+
+ def test_children_have_their_errors_dicts_built(self):
+ e1, e2 = (
+ exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
+ exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
+ )
+ tree = exceptions.ErrorTree([e1, e2])
+ self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2})
+
+ def test_multiple_errors_with_instance(self):
+ e1, e2 = (
+ exceptions.ValidationError(
+ "1",
+ validator="foo",
+ path=["bar", "bar2"],
+ instance="i1"),
+ exceptions.ValidationError(
+ "2",
+ validator="quux",
+ path=["foobar", 2],
+ instance="i2"),
+ )
+ exceptions.ErrorTree([e1, e2])
+
+ def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
+ error = exceptions.ValidationError("123", validator="foo", instance=[])
+ tree = exceptions.ErrorTree([error])
+
+ with self.assertRaises(IndexError):
+ tree[0]
+
+ def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
+ """
+ If a validator is dumb (like :validator:`required` in draft 3) and
+ refers to a path that isn't in the instance, the tree still properly
+ returns a subtree for that path.
+ """
+
+ error = exceptions.ValidationError(
+ "a message", validator="foo", instance={}, path=["foo"],
+ )
+ tree = exceptions.ErrorTree([error])
+ self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
+
+
+class TestErrorInitReprStr(TestCase):
+ def make_error(self, **kwargs):
+ defaults = dict(
+ message=u"hello",
+ validator=u"type",
+ validator_value=u"string",
+ instance=5,
+ schema={u"type": u"string"},
+ )
+ defaults.update(kwargs)
+ return exceptions.ValidationError(**defaults)
+
+ def assertShows(self, expected, **kwargs):
+ if PY3: # pragma: no cover
+ expected = expected.replace("u'", "'")
+ expected = textwrap.dedent(expected).rstrip("\n")
+
+ error = self.make_error(**kwargs)
+ message_line, _, rest = str(error).partition("\n")
+ self.assertEqual(message_line, error.message)
+ self.assertEqual(rest, expected)
+
+ def test_it_calls_super_and_sets_args(self):
+ error = self.make_error()
+ self.assertGreater(len(error.args), 1)
+
+ def test_repr(self):
+ self.assertEqual(
+ repr(exceptions.ValidationError(message="Hello!")),
+ "<ValidationError: %r>" % "Hello!",
+ )
+
+ def test_unset_error(self):
+ error = exceptions.ValidationError("message")
+ self.assertEqual(str(error), "message")
+
+ kwargs = {
+ "validator": "type",
+ "validator_value": "string",
+ "instance": 5,
+ "schema": {"type": "string"},
+ }
+ # Just the message should show if any of the attributes are unset
+ for attr in kwargs:
+ k = dict(kwargs)
+ del k[attr]
+ error = exceptions.ValidationError("message", **k)
+ self.assertEqual(str(error), "message")
+
+ def test_empty_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema:
+ {u'type': u'string'}
+
+ On instance:
+ 5
+ """,
+ path=[],
+ schema_path=[],
+ )
+
+ def test_one_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema:
+ {u'type': u'string'}
+
+ On instance[0]:
+ 5
+ """,
+ path=[0],
+ schema_path=["items"],
+ )
+
+ def test_multiple_item_paths(self):
+ self.assertShows(
+ """
+ Failed validating u'type' in schema[u'items'][0]:
+ {u'type': u'string'}
+
+ On instance[0][u'a']:
+ 5
+ """,
+ path=[0, u"a"],
+ schema_path=[u"items", 0, 1],
+ )
+
+ def test_uses_pprint(self):
+ self.assertShows(
+ """
+ Failed validating u'maxLength' in schema:
+ {0: 0,
+ 1: 1,
+ 2: 2,
+ 3: 3,
+ 4: 4,
+ 5: 5,
+ 6: 6,
+ 7: 7,
+ 8: 8,
+ 9: 9,
+ 10: 10,
+ 11: 11,
+ 12: 12,
+ 13: 13,
+ 14: 14,
+ 15: 15,
+ 16: 16,
+ 17: 17,
+ 18: 18,
+ 19: 19}
+
+ On instance:
+ [0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24]
+ """,
+ instance=list(range(25)),
+ schema=dict(zip(range(20), range(20))),
+ validator=u"maxLength",
+ )
+
+ def test_str_works_with_instances_having_overridden_eq_operator(self):
+ """
+ Check for https://github.com/Julian/jsonschema/issues/164 which
+ rendered exceptions unusable when a `ValidationError` involved
+ instances with an `__eq__` method that returned truthy values.
+ """
+
+ class DontEQMeBro(object):
+ def __eq__(this, other): # pragma: no cover
+ self.fail("Don't!")
+
+ def __ne__(this, other): # pragma: no cover
+ self.fail("Don't!")
+
+ instance = DontEQMeBro()
+ error = exceptions.ValidationError(
+ "a message",
+ validator="foo",
+ instance=instance,
+ validator_value="some",
+ schema="schema",
+ )
+ self.assertIn(repr(instance), str(error))
+
+
+class TestHashable(TestCase):
+ def test_hashable(self):
+ set([exceptions.ValidationError("")])
+ set([exceptions.SchemaError("")])
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_format.py b/lib/spack/external/_vendoring/jsonschema/tests/test_format.py
new file mode 100644
index 0000000000..254985f615
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_format.py
@@ -0,0 +1,89 @@
+"""
+Tests for the parts of jsonschema related to the :validator:`format` property.
+"""
+
+from unittest import TestCase
+
+from jsonschema import FormatError, ValidationError, FormatChecker
+from jsonschema.validators import Draft4Validator
+
+
+BOOM = ValueError("Boom!")
+BANG = ZeroDivisionError("Bang!")
+
+
+def boom(thing):
+ if thing == "bang":
+ raise BANG
+ raise BOOM
+
+
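+# FormatChecker.checks (and the class-level cls_checks) registers a callable
+# per format name; "raises" names the exception types the checker converts
+# into FormatError, preserving the original as .cause/__cause__, while any
+# other exception propagates unchanged.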
+class TestFormatChecker(TestCase):
+ def test_it_can_validate_no_formats(self):
+ checker = FormatChecker(formats=())
+ self.assertFalse(checker.checkers)
+
+ def test_it_raises_a_key_error_for_unknown_formats(self):
+ with self.assertRaises(KeyError):
+ FormatChecker(formats=["o noes"])
+
+ def test_it_can_register_cls_checkers(self):
+ original = dict(FormatChecker.checkers)
+ self.addCleanup(FormatChecker.checkers.pop, "boom")
+ FormatChecker.cls_checks("boom")(boom)
+ self.assertEqual(
+ FormatChecker.checkers,
+ dict(original, boom=(boom, ())),
+ )
+
+ def test_it_can_register_checkers(self):
+ checker = FormatChecker()
+ checker.checks("boom")(boom)
+ self.assertEqual(
+ checker.checkers,
+ dict(FormatChecker.checkers, boom=(boom, ()))
+ )
+
+ def test_it_catches_registered_errors(self):
+ checker = FormatChecker()
+ checker.checks("boom", raises=type(BOOM))(boom)
+
+ with self.assertRaises(FormatError) as cm:
+ checker.check(instance=12, format="boom")
+
+ self.assertIs(cm.exception.cause, BOOM)
+ self.assertIs(cm.exception.__cause__, BOOM)
+
+ # Unregistered errors should not be caught
+ with self.assertRaises(type(BANG)):
+ checker.check(instance="bang", format="boom")
+
+ def test_format_error_causes_become_validation_error_causes(self):
+ checker = FormatChecker()
+ checker.checks("boom", raises=ValueError)(boom)
+ validator = Draft4Validator({"format": "boom"}, format_checker=checker)
+
+ with self.assertRaises(ValidationError) as cm:
+ validator.validate("BOOM")
+
+ self.assertIs(cm.exception.cause, BOOM)
+ self.assertIs(cm.exception.__cause__, BOOM)
+
+ def test_format_checkers_come_with_defaults(self):
+ # This is bad :/ but relied upon.
+ # The docs for quite a while recommended that people do things like
+ # validate(..., format_checker=FormatChecker())
+ # We should change that, but we can't without deprecation...
+ checker = FormatChecker()
+ with self.assertRaises(FormatError):
+ checker.check(instance="not-an-ipv4", format="ipv4")
+
+ def test_repr(self):
+ checker = FormatChecker(formats=())
+ checker.checks("foo")(lambda thing: True)
+ checker.checks("bar")(lambda thing: True)
+ checker.checks("baz")(lambda thing: True)
+ self.assertEqual(
+ repr(checker),
+ "<FormatChecker checkers=['bar', 'baz', 'foo']>",
+ )
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_jsonschema_test_suite.py b/lib/spack/external/_vendoring/jsonschema/tests/test_jsonschema_test_suite.py
new file mode 100644
index 0000000000..ebccf29735
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_jsonschema_test_suite.py
@@ -0,0 +1,277 @@
+"""
+Test runner for the official JSON Schema test suite.
+
+Tests comprehensive correctness of each draft's validator.
+
+See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
+"""
+
+import sys
+import warnings
+
+from jsonschema import (
+ Draft3Validator,
+ Draft4Validator,
+ Draft6Validator,
+ Draft7Validator,
+ draft3_format_checker,
+ draft4_format_checker,
+ draft6_format_checker,
+ draft7_format_checker,
+)
+from jsonschema.tests._helpers import bug
+from jsonschema.tests._suite import Suite
+from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create
+
+
+SUITE = Suite()
+DRAFT3 = SUITE.version(name="draft3")
+DRAFT4 = SUITE.version(name="draft4")
+DRAFT6 = SUITE.version(name="draft6")
+DRAFT7 = SUITE.version(name="draft7")
+
+
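+# skip() builds a predicate that returns the skip message when every given
+# attribute matches the test (and None otherwise); missing_format() likewise
+# skips format tests whose checker has no entry for the schema's "format".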
+def skip(message, **kwargs):
+ def skipper(test):
+ if all(value == getattr(test, attr) for attr, value in kwargs.items()):
+ return message
+ return skipper
+
+
+def missing_format(checker):
+ def missing_format(test):
+ schema = test.schema
+ if schema is True or schema is False or "format" not in schema:
+ return
+
+ if schema["format"] not in checker.checkers:
+ return "Format checker {0!r} not found.".format(schema["format"])
+ return missing_format
+
+
+is_narrow_build = sys.maxunicode == 2 ** 16 - 1
+if is_narrow_build: # pragma: no cover
+ message = "Not running surrogate Unicode case; this Python build is narrow."
+
+ def narrow_unicode_build(test): # pragma: no cover
+ return skip(
+ message=message,
+ description="one supplementary Unicode code point is not long enough",
+ )(test) or skip(
+ message=message,
+ description="two supplementary Unicode code points is long enough",
+ )(test)
+else:
+ def narrow_unicode_build(test): # pragma: no cover
+ return
+
+
+TestDraft3 = DRAFT3.to_unittest_testcase(
+ DRAFT3.tests(),
+ DRAFT3.optional_tests_of(name="bignum"),
+ DRAFT3.optional_tests_of(name="format"),
+ DRAFT3.optional_tests_of(name="zeroTerminatedFloats"),
+ Validator=Draft3Validator,
+ format_checker=draft3_format_checker,
+ skip=lambda test: (
+ narrow_unicode_build(test)
+ or missing_format(draft3_format_checker)(test)
+ or skip(
+ message="Upstream bug in strict_rfc3339",
+ subject="format",
+ description="case-insensitive T and Z",
+ )(test)
+ ),
+)
+
+
+TestDraft4 = DRAFT4.to_unittest_testcase(
+ DRAFT4.tests(),
+ DRAFT4.optional_tests_of(name="bignum"),
+ DRAFT4.optional_tests_of(name="format"),
+ DRAFT4.optional_tests_of(name="zeroTerminatedFloats"),
+ Validator=Draft4Validator,
+ format_checker=draft4_format_checker,
+ skip=lambda test: (
+ narrow_unicode_build(test)
+ or missing_format(draft4_format_checker)(test)
+ or skip(
+ message=bug(),
+ subject="ref",
+ case_description="Recursive references between schemas",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description="Location-independent identifier",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with absolute URI"
+ ),
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with base URI change in subschema"
+ ),
+ )(test)
+ or skip(
+ message=bug(),
+ subject="refRemote",
+ case_description="base URI change - change folder in subschema",
+ )(test)
+ or skip(
+ message="Upstream bug in strict_rfc3339",
+ subject="format",
+ description="case-insensitive T and Z",
+ )(test)
+ ),
+)
+
+
+TestDraft6 = DRAFT6.to_unittest_testcase(
+ DRAFT6.tests(),
+ DRAFT6.optional_tests_of(name="bignum"),
+ DRAFT6.optional_tests_of(name="format"),
+ DRAFT6.optional_tests_of(name="zeroTerminatedFloats"),
+ Validator=Draft6Validator,
+ format_checker=draft6_format_checker,
+ skip=lambda test: (
+ narrow_unicode_build(test)
+ or missing_format(draft6_format_checker)(test)
+ or skip(
+ message=bug(),
+ subject="ref",
+ case_description="Recursive references between schemas",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description="Location-independent identifier",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with absolute URI"
+ ),
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with base URI change in subschema"
+ ),
+ )(test)
+ or skip(
+ message=bug(),
+ subject="refRemote",
+ case_description="base URI change - change folder in subschema",
+ )(test)
+ or skip(
+ message="Upstream bug in strict_rfc3339",
+ subject="format",
+ description="case-insensitive T and Z",
+ )(test)
+ ),
+)
+
+
+TestDraft7 = DRAFT7.to_unittest_testcase(
+ DRAFT7.tests(),
+ DRAFT7.format_tests(),
+ DRAFT7.optional_tests_of(name="bignum"),
+ DRAFT7.optional_tests_of(name="content"),
+ DRAFT7.optional_tests_of(name="zeroTerminatedFloats"),
+ Validator=Draft7Validator,
+ format_checker=draft7_format_checker,
+ skip=lambda test: (
+ narrow_unicode_build(test)
+ or missing_format(draft7_format_checker)(test)
+ or skip(
+ message=bug(),
+ subject="ref",
+ case_description="Recursive references between schemas",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description="Location-independent identifier",
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with absolute URI"
+ ),
+ )(test)
+ or skip(
+ message=bug(371),
+ subject="ref",
+ case_description=(
+ "Location-independent identifier with base URI change in subschema"
+ ),
+ )(test)
+ or skip(
+ message=bug(),
+ subject="refRemote",
+ case_description="base URI change - change folder in subschema",
+ )(test)
+ or skip(
+ message="Upstream bug in strict_rfc3339",
+ subject="date-time",
+ description="case-insensitive T and Z",
+ )(test)
+ or skip(
+ message=bug(593),
+ subject="content",
+ case_description=(
+ "validation of string-encoded content based on media type"
+ ),
+ )(test)
+ or skip(
+ message=bug(593),
+ subject="content",
+ case_description="validation of binary string-encoding",
+ )(test)
+ or skip(
+ message=bug(593),
+ subject="content",
+ case_description=(
+ "validation of binary-encoded media type documents"
+ ),
+ )(test)
+ ),
+)
+
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+
+ TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase(
+ # Interestingly, the "any" type part couldn't really be done with the old API.
+ (
+ (test for test in each if test.schema != {"type": "any"})
+ for each in DRAFT3.tests_of(name="type")
+ ),
+ name="TestDraft3LegacyTypeCheck",
+ Validator=create(
+ meta_schema=Draft3Validator.META_SCHEMA,
+ validators=Draft3Validator.VALIDATORS,
+ default_types=_DEPRECATED_DEFAULT_TYPES,
+ ),
+ )
+
+ TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase(
+ DRAFT4.tests_of(name="type"),
+ name="TestDraft4LegacyTypeCheck",
+ Validator=create(
+ meta_schema=Draft4Validator.META_SCHEMA,
+ validators=Draft4Validator.VALIDATORS,
+ default_types=_DEPRECATED_DEFAULT_TYPES,
+ ),
+ )
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_types.py b/lib/spack/external/_vendoring/jsonschema/tests/test_types.py
new file mode 100644
index 0000000000..2280cc395b
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_types.py
@@ -0,0 +1,190 @@
+"""
+Tests on the new type interface. The actual correctness of the type checking
+is handled in test_jsonschema_test_suite; these tests check that TypeChecker
+functions correctly and can facilitate extensions to type checking.
+"""
+from collections import namedtuple
+from unittest import TestCase
+
+from jsonschema import ValidationError, _validators
+from jsonschema._types import TypeChecker
+from jsonschema.exceptions import UndefinedTypeCheck
+from jsonschema.validators import Draft4Validator, extend
+
+
+def equals_2(checker, instance):
+ return instance == 2
+
+
+def is_namedtuple(instance):
+ return isinstance(instance, tuple) and getattr(instance, "_fields", None)
+
+
+def is_object_or_named_tuple(checker, instance):
+ if Draft4Validator.TYPE_CHECKER.is_type(instance, "object"):
+ return True
+ return is_namedtuple(instance)
+
+
+def coerce_named_tuple(fn):
+ def coerced(validator, value, instance, schema):
+ if is_namedtuple(instance):
+ instance = instance._asdict()
+ return fn(validator, value, instance, schema)
+ return coerced
+
+
+required = coerce_named_tuple(_validators.required)
+properties = coerce_named_tuple(_validators.properties)
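+# The wrapped callables above convert a namedtuple to a dict before delegating
+# to the stock draft-4 validator functions, so "required" and "properties" can
+# be reused by the object-extension tests further down.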
+
+
+class TestTypeChecker(TestCase):
+ def test_is_type(self):
+ checker = TypeChecker({"two": equals_2})
+ self.assertEqual(
+ (
+ checker.is_type(instance=2, type="two"),
+ checker.is_type(instance="bar", type="two"),
+ ),
+ (True, False),
+ )
+
+ def test_is_unknown_type(self):
+ with self.assertRaises(UndefinedTypeCheck) as context:
+ TypeChecker().is_type(4, "foobar")
+ self.assertIn("foobar", str(context.exception))
+
+ def test_checks_can_be_added_at_init(self):
+ checker = TypeChecker({"two": equals_2})
+ self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
+
+ def test_redefine_existing_type(self):
+ self.assertEqual(
+ TypeChecker().redefine("two", object()).redefine("two", equals_2),
+ TypeChecker().redefine("two", equals_2),
+ )
+
+ def test_remove(self):
+ self.assertEqual(
+ TypeChecker({"two": equals_2}).remove("two"),
+ TypeChecker(),
+ )
+
+ def test_remove_unknown_type(self):
+ with self.assertRaises(UndefinedTypeCheck) as context:
+ TypeChecker().remove("foobar")
+ self.assertIn("foobar", str(context.exception))
+
+ def test_redefine_many(self):
+ self.assertEqual(
+ TypeChecker().redefine_many({"foo": int, "bar": str}),
+ TypeChecker().redefine("foo", int).redefine("bar", str),
+ )
+
+ def test_remove_multiple(self):
+ self.assertEqual(
+ TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
+ TypeChecker(),
+ )
+
+ def test_type_check_can_raise_key_error(self):
+ """
+ Make sure no one writes:
+
+ try:
+ self._type_checkers[type](...)
+ except KeyError:
+
+ ignoring the fact that the function itself can raise that.
+ """
+
+ error = KeyError("Stuff")
+
+ def raises_keyerror(checker, instance):
+ raise error
+
+ with self.assertRaises(KeyError) as context:
+ TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
+
+ self.assertIs(context.exception, error)
+
+
+class TestCustomTypes(TestCase):
+ def test_simple_type_can_be_extended(self):
+ def int_or_str_int(checker, instance):
+ if not isinstance(instance, (int, str)):
+ return False
+ try:
+ int(instance)
+ except ValueError:
+ return False
+ return True
+
+ CustomValidator = extend(
+ Draft4Validator,
+ type_checker=Draft4Validator.TYPE_CHECKER.redefine(
+ "integer", int_or_str_int,
+ ),
+ )
+ validator = CustomValidator({"type": "integer"})
+
+ validator.validate(4)
+ validator.validate("4")
+
+ with self.assertRaises(ValidationError):
+ validator.validate(4.4)
+
+ def test_object_can_be_extended(self):
+ schema = {"type": "object"}
+
+ Point = namedtuple("Point", ["x", "y"])
+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ u"object", is_object_or_named_tuple,
+ )
+
+ CustomValidator = extend(Draft4Validator, type_checker=type_checker)
+ validator = CustomValidator(schema)
+
+ validator.validate(Point(x=4, y=5))
+
+ def test_object_extensions_require_custom_validators(self):
+ schema = {"type": "object", "required": ["x"]}
+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ u"object", is_object_or_named_tuple,
+ )
+
+ CustomValidator = extend(Draft4Validator, type_checker=type_checker)
+ validator = CustomValidator(schema)
+
+ Point = namedtuple("Point", ["x", "y"])
+ # Cannot handle required
+ with self.assertRaises(ValidationError):
+ validator.validate(Point(x=4, y=5))
+
+ def test_object_extensions_can_handle_custom_validators(self):
+ schema = {
+ "type": "object",
+ "required": ["x"],
+ "properties": {"x": {"type": "integer"}},
+ }
+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ u"object", is_object_or_named_tuple,
+ )
+
+ CustomValidator = extend(
+ Draft4Validator,
+ type_checker=type_checker,
+ validators={"required": required, "properties": properties},
+ )
+
+ validator = CustomValidator(schema)
+
+ Point = namedtuple("Point", ["x", "y"])
+ # Can now process required and properties
+ validator.validate(Point(x=4, y=5))
+
+ with self.assertRaises(ValidationError):
+ validator.validate(Point(x="not an integer", y=5))
diff --git a/lib/spack/external/_vendoring/jsonschema/tests/test_validators.py b/lib/spack/external/_vendoring/jsonschema/tests/test_validators.py
new file mode 100644
index 0000000000..07be4f08bc
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/tests/test_validators.py
@@ -0,0 +1,1762 @@
+from collections import deque
+from contextlib import contextmanager
+from decimal import Decimal
+from io import BytesIO
+from unittest import TestCase
+import json
+import os
+import sys
+import tempfile
+import unittest
+
+from twisted.trial.unittest import SynchronousTestCase
+import attr
+
+from jsonschema import FormatChecker, TypeChecker, exceptions, validators
+from jsonschema.compat import PY3, pathname2url
+from jsonschema.tests._helpers import bug
+
+
+def startswith(validator, startswith, instance, schema):
+ if not instance.startswith(startswith):
+ yield exceptions.ValidationError(u"Whoops!")
+
+
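+# startswith() above is a minimal validator callable: it receives the
+# validator instance, the schema value, the instance under validation and the
+# schema, and yields one ValidationError per failure. validators.create builds
+# a new Validator class from a meta schema, a mapping of such callables and a
+# TypeChecker; validators.extend derives one from an existing class.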
+class TestCreateAndExtend(SynchronousTestCase):
+ def setUp(self):
+ self.addCleanup(
+ self.assertEqual,
+ validators.meta_schemas,
+ dict(validators.meta_schemas),
+ )
+
+ self.meta_schema = {u"$id": "some://meta/schema"}
+ self.validators = {u"startswith": startswith}
+ self.type_checker = TypeChecker()
+ self.Validator = validators.create(
+ meta_schema=self.meta_schema,
+ validators=self.validators,
+ type_checker=self.type_checker,
+ )
+
+ def test_attrs(self):
+ self.assertEqual(
+ (
+ self.Validator.VALIDATORS,
+ self.Validator.META_SCHEMA,
+ self.Validator.TYPE_CHECKER,
+ ), (
+ self.validators,
+ self.meta_schema,
+ self.type_checker,
+ ),
+ )
+
+ def test_init(self):
+ schema = {u"startswith": u"foo"}
+ self.assertEqual(self.Validator(schema).schema, schema)
+
+ def test_iter_errors(self):
+ schema = {u"startswith": u"hel"}
+ iter_errors = self.Validator(schema).iter_errors
+
+ errors = list(iter_errors(u"hello"))
+ self.assertEqual(errors, [])
+
+ expected_error = exceptions.ValidationError(
+ u"Whoops!",
+ instance=u"goodbye",
+ schema=schema,
+ validator=u"startswith",
+ validator_value=u"hel",
+ schema_path=deque([u"startswith"]),
+ )
+
+ errors = list(iter_errors(u"goodbye"))
+ self.assertEqual(len(errors), 1)
+ self.assertEqual(errors[0]._contents(), expected_error._contents())
+
+ def test_if_a_version_is_provided_it_is_registered(self):
+ Validator = validators.create(
+ meta_schema={u"$id": "something"},
+ version="my version",
+ )
+ self.addCleanup(validators.meta_schemas.pop, "something")
+ self.assertEqual(Validator.__name__, "MyVersionValidator")
+
+ def test_if_a_version_is_not_provided_it_is_not_registered(self):
+ original = dict(validators.meta_schemas)
+ validators.create(meta_schema={u"id": "id"})
+ self.assertEqual(validators.meta_schemas, original)
+
+ def test_validates_registers_meta_schema_id(self):
+ meta_schema_key = "meta schema id"
+ my_meta_schema = {u"id": meta_schema_key}
+
+ validators.create(
+ meta_schema=my_meta_schema,
+ version="my version",
+ id_of=lambda s: s.get("id", ""),
+ )
+ self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
+
+ self.assertIn(meta_schema_key, validators.meta_schemas)
+
+ def test_validates_registers_meta_schema_draft6_id(self):
+ meta_schema_key = "meta schema $id"
+ my_meta_schema = {u"$id": meta_schema_key}
+
+ validators.create(
+ meta_schema=my_meta_schema,
+ version="my version",
+ )
+ self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
+
+ self.assertIn(meta_schema_key, validators.meta_schemas)
+
+ def test_create_default_types(self):
+ Validator = validators.create(meta_schema={}, validators=())
+ self.assertTrue(
+ all(
+ Validator({}).is_type(instance=instance, type=type)
+ for type, instance in [
+ (u"array", []),
+ (u"boolean", True),
+ (u"integer", 12),
+ (u"null", None),
+ (u"number", 12.0),
+ (u"object", {}),
+ (u"string", u"foo"),
+ ]
+ ),
+ )
+
+ def test_extend(self):
+ original = dict(self.Validator.VALIDATORS)
+ new = object()
+
+ Extended = validators.extend(
+ self.Validator,
+ validators={u"new": new},
+ )
+ self.assertEqual(
+ (
+ Extended.VALIDATORS,
+ Extended.META_SCHEMA,
+ Extended.TYPE_CHECKER,
+ self.Validator.VALIDATORS,
+ ), (
+ dict(original, new=new),
+ self.Validator.META_SCHEMA,
+ self.Validator.TYPE_CHECKER,
+ original,
+ ),
+ )
+
+ def test_extend_idof(self):
+ """
+ Extending a validator preserves its notion of schema IDs.
+ """
+ def id_of(schema):
+ return schema.get(u"__test__", self.Validator.ID_OF(schema))
+ correct_id = "the://correct/id/"
+ meta_schema = {
+ u"$id": "the://wrong/id/",
+ u"__test__": correct_id,
+ }
+ Original = validators.create(
+ meta_schema=meta_schema,
+ validators=self.validators,
+ type_checker=self.type_checker,
+ id_of=id_of,
+ )
+ self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id)
+
+ Derived = validators.extend(Original)
+ self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
+
+
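+# default_types/types predate TypeChecker and are deprecated; the tests below
+# pin down both the legacy behavior and the DeprecationWarnings it emits,
+# hence the recurring flushWarnings() bookkeeping.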
+class TestLegacyTypeChecking(SynchronousTestCase):
+ def test_create_default_types(self):
+ Validator = validators.create(meta_schema={}, validators=())
+ self.assertEqual(
+ set(Validator.DEFAULT_TYPES), {
+ u"array",
+ u"boolean",
+ u"integer",
+ u"null",
+ u"number",
+ u"object", u"string",
+ },
+ )
+ self.flushWarnings()
+
+ def test_extend(self):
+ Validator = validators.create(meta_schema={}, validators=())
+ original = dict(Validator.VALIDATORS)
+ new = object()
+
+ Extended = validators.extend(
+ Validator,
+ validators={u"new": new},
+ )
+ self.assertEqual(
+ (
+ Extended.VALIDATORS,
+ Extended.META_SCHEMA,
+ Extended.TYPE_CHECKER,
+ Validator.VALIDATORS,
+
+ Extended.DEFAULT_TYPES,
+ Extended({}).DEFAULT_TYPES,
+ self.flushWarnings()[0]["message"],
+ ), (
+ dict(original, new=new),
+ Validator.META_SCHEMA,
+ Validator.TYPE_CHECKER,
+ original,
+
+ Validator.DEFAULT_TYPES,
+ Validator.DEFAULT_TYPES,
+ self.flushWarnings()[0]["message"],
+ ),
+ )
+
+ def test_types_redefines_the_validators_type_checker(self):
+ schema = {"type": "string"}
+ self.assertFalse(validators.Draft7Validator(schema).is_valid(12))
+
+ validator = validators.Draft7Validator(
+ schema,
+ types={"string": (str, int)},
+ )
+ self.assertTrue(validator.is_valid(12))
+ self.flushWarnings()
+
+ def test_providing_default_types_warns(self):
+ self.assertWarns(
+ category=DeprecationWarning,
+ message=(
+ "The default_types argument is deprecated. "
+ "Use the type_checker argument instead."
+ ),
+ # https://tm.tl/9363 :'(
+ filename=sys.modules[self.assertWarns.__module__].__file__,
+
+ f=validators.create,
+ meta_schema={},
+ validators={},
+ default_types={"foo": object},
+ )
+
+ def test_cannot_ask_for_default_types_with_non_default_type_checker(self):
+ """
+ We raise an error when you ask a validator with a non-default
+ type checker for its DEFAULT_TYPES.
+
+ The type checker argument is new, so no one but this library
+ itself should be trying to use it, and doing so while then
+ asking for DEFAULT_TYPES makes no sense (not to mention it is
+ deprecated), since type checkers are not strictly about Python
+ types.
+ """
+ Validator = validators.create(
+ meta_schema={},
+ validators={},
+ type_checker=TypeChecker(),
+ )
+ with self.assertRaises(validators._DontDoThat) as e:
+ Validator.DEFAULT_TYPES
+
+ self.assertIn(
+ "DEFAULT_TYPES cannot be used on Validators using TypeCheckers",
+ str(e.exception),
+ )
+ with self.assertRaises(validators._DontDoThat):
+ Validator({}).DEFAULT_TYPES
+
+ self.assertFalse(self.flushWarnings())
+
+ def test_providing_explicit_type_checker_does_not_warn(self):
+ Validator = validators.create(
+ meta_schema={},
+ validators={},
+ type_checker=TypeChecker(),
+ )
+ self.assertFalse(self.flushWarnings())
+
+ Validator({})
+ self.assertFalse(self.flushWarnings())
+
+ def test_providing_neither_does_not_warn(self):
+ Validator = validators.create(meta_schema={}, validators={})
+ self.assertFalse(self.flushWarnings())
+
+ Validator({})
+ self.assertFalse(self.flushWarnings())
+
+ def test_providing_default_types_with_type_checker_errors(self):
+ with self.assertRaises(TypeError) as e:
+ validators.create(
+ meta_schema={},
+ validators={},
+ default_types={"foo": object},
+ type_checker=TypeChecker(),
+ )
+
+ self.assertIn(
+ "Do not specify default_types when providing a type checker",
+ str(e.exception),
+ )
+ self.assertFalse(self.flushWarnings())
+
+ def test_extending_a_legacy_validator_with_a_type_checker_errors(self):
+ Validator = validators.create(
+ meta_schema={},
+ validators={},
+ default_types={u"array": list}
+ )
+ with self.assertRaises(TypeError) as e:
+ validators.extend(
+ Validator,
+ validators={},
+ type_checker=TypeChecker(),
+ )
+
+ self.assertIn(
+ (
+ "Cannot extend a validator created with default_types "
+ "with a type_checker. Update the validator to use a "
+ "type_checker when created."
+ ),
+ str(e.exception),
+ )
+ self.flushWarnings()
+
+ def test_extending_a_legacy_validator_does_not_rewarn(self):
+ Validator = validators.create(meta_schema={}, default_types={})
+ self.assertTrue(self.flushWarnings())
+
+ validators.extend(Validator)
+ self.assertFalse(self.flushWarnings())
+
+ def test_accessing_default_types_warns(self):
+ Validator = validators.create(meta_schema={}, validators={})
+ self.assertFalse(self.flushWarnings())
+
+ self.assertWarns(
+ DeprecationWarning,
+ (
+ "The DEFAULT_TYPES attribute is deprecated. "
+ "See the type checker attached to this validator instead."
+ ),
+ # https://tm.tl/9363 :'(
+ sys.modules[self.assertWarns.__module__].__file__,
+
+ getattr,
+ Validator,
+ "DEFAULT_TYPES",
+ )
+
+ def test_accessing_default_types_on_the_instance_warns(self):
+ Validator = validators.create(meta_schema={}, validators={})
+ self.assertFalse(self.flushWarnings())
+
+ self.assertWarns(
+ DeprecationWarning,
+ (
+ "The DEFAULT_TYPES attribute is deprecated. "
+ "See the type checker attached to this validator instead."
+ ),
+ # https://tm.tl/9363 :'(
+ sys.modules[self.assertWarns.__module__].__file__,
+
+ getattr,
+ Validator({}),
+ "DEFAULT_TYPES",
+ )
+
+ def test_providing_types_to_init_warns(self):
+ Validator = validators.create(meta_schema={}, validators={})
+ self.assertFalse(self.flushWarnings())
+
+ self.assertWarns(
+ category=DeprecationWarning,
+ message=(
+ "The types argument is deprecated. "
+ "Provide a type_checker to jsonschema.validators.extend "
+ "instead."
+ ),
+ # https://tm.tl/9363 :'(
+ filename=sys.modules[self.assertWarns.__module__].__file__,
+
+ f=Validator,
+ schema={},
+ types={"bar": object},
+ )
+
+
+class TestIterErrors(TestCase):
+ def setUp(self):
+ self.validator = validators.Draft3Validator({})
+
+ def test_iter_errors(self):
+ instance = [1, 2]
+ schema = {
+ u"disallow": u"array",
+ u"enum": [["a", "b", "c"], ["d", "e", "f"]],
+ u"minItems": 3,
+ }
+
+ got = (e.message for e in self.validator.iter_errors(instance, schema))
+ expected = [
+ "%r is disallowed for [1, 2]" % (schema["disallow"],),
+ "[1, 2] is too short",
+ "[1, 2] is not one of %r" % (schema["enum"],),
+ ]
+ self.assertEqual(sorted(got), sorted(expected))
+
+ def test_iter_errors_multiple_failures_one_validator(self):
+ instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
+ schema = {
+ u"properties": {
+ "foo": {u"type": "string"},
+ "bar": {u"minItems": 2},
+ "baz": {u"maximum": 10, u"enum": [2, 4, 6, 8]},
+ },
+ }
+
+ errors = list(self.validator.iter_errors(instance, schema))
+ self.assertEqual(len(errors), 4)
+
+
+class TestValidationErrorMessages(TestCase):
+ def message_for(self, instance, schema, *args, **kwargs):
+ kwargs.setdefault("cls", validators.Draft3Validator)
+ with self.assertRaises(exceptions.ValidationError) as e:
+ validators.validate(instance, schema, *args, **kwargs)
+ return e.exception.message
+
+ def test_single_type_failure(self):
+ message = self.message_for(instance=1, schema={u"type": u"string"})
+ self.assertEqual(message, "1 is not of type %r" % u"string")
+
+ def test_single_type_list_failure(self):
+ message = self.message_for(instance=1, schema={u"type": [u"string"]})
+ self.assertEqual(message, "1 is not of type %r" % u"string")
+
+ def test_multiple_type_failure(self):
+ types = u"string", u"object"
+ message = self.message_for(instance=1, schema={u"type": list(types)})
+ self.assertEqual(message, "1 is not of type %r, %r" % types)
+
+ def test_object_without_title_type_failure(self):
+ type = {u"type": [{u"minimum": 3}]}
+ message = self.message_for(instance=1, schema={u"type": [type]})
+ self.assertEqual(message, "1 is less than the minimum of 3")
+
+ def test_object_with_named_type_failure(self):
+ schema = {u"type": [{u"name": "Foo", u"minimum": 3}]}
+ message = self.message_for(instance=1, schema=schema)
+ self.assertEqual(message, "1 is less than the minimum of 3")
+
+ def test_minimum(self):
+ message = self.message_for(instance=1, schema={"minimum": 2})
+ self.assertEqual(message, "1 is less than the minimum of 2")
+
+ def test_maximum(self):
+ message = self.message_for(instance=1, schema={"maximum": 0})
+ self.assertEqual(message, "1 is greater than the maximum of 0")
+
+ def test_dependencies_single_element(self):
+ depend, on = "bar", "foo"
+ schema = {u"dependencies": {depend: on}}
+ message = self.message_for(
+ instance={"bar": 2},
+ schema=schema,
+ cls=validators.Draft3Validator,
+ )
+ self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+
+ def test_dependencies_list_draft3(self):
+ depend, on = "bar", "foo"
+ schema = {u"dependencies": {depend: [on]}}
+ message = self.message_for(
+ instance={"bar": 2},
+ schema=schema,
+ cls=validators.Draft3Validator,
+ )
+ self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+
+ def test_dependencies_list_draft7(self):
+ depend, on = "bar", "foo"
+ schema = {u"dependencies": {depend: [on]}}
+ message = self.message_for(
+ instance={"bar": 2},
+ schema=schema,
+ cls=validators.Draft7Validator,
+ )
+ self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+
+ def test_additionalItems_single_failure(self):
+ message = self.message_for(
+ instance=[2],
+ schema={u"items": [], u"additionalItems": False},
+ )
+ self.assertIn("(2 was unexpected)", message)
+
+ def test_additionalItems_multiple_failures(self):
+ message = self.message_for(
+ instance=[1, 2, 3],
+ schema={u"items": [], u"additionalItems": False}
+ )
+ self.assertIn("(1, 2, 3 were unexpected)", message)
+
+ def test_additionalProperties_single_failure(self):
+ additional = "foo"
+ schema = {u"additionalProperties": False}
+ message = self.message_for(instance={additional: 2}, schema=schema)
+ self.assertIn("(%r was unexpected)" % (additional,), message)
+
+ def test_additionalProperties_multiple_failures(self):
+ schema = {u"additionalProperties": False}
+ message = self.message_for(
+ instance=dict.fromkeys(["foo", "bar"]),
+ schema=schema,
+ )
+
+ self.assertIn(repr("foo"), message)
+ self.assertIn(repr("bar"), message)
+ self.assertIn("were unexpected)", message)
+
+ def test_const(self):
+ schema = {u"const": 12}
+ message = self.message_for(
+ instance={"foo": "bar"},
+ schema=schema,
+ cls=validators.Draft6Validator,
+ )
+ self.assertIn("12 was expected", message)
+
+ def test_contains(self):
+ schema = {u"contains": {u"const": 12}}
+ message = self.message_for(
+ instance=[2, {}, []],
+ schema=schema,
+ cls=validators.Draft6Validator,
+ )
+ self.assertIn(
+ "None of [2, {}, []] are valid under the given schema",
+ message,
+ )
+
+ def test_invalid_format_default_message(self):
+ checker = FormatChecker(formats=())
+ checker.checks(u"thing")(lambda value: False)
+
+ schema = {u"format": u"thing"}
+ message = self.message_for(
+ instance="bla",
+ schema=schema,
+ format_checker=checker,
+ )
+
+ self.assertIn(repr("bla"), message)
+ self.assertIn(repr("thing"), message)
+ self.assertIn("is not a", message)
+
+ def test_additionalProperties_false_patternProperties(self):
+ schema = {u"type": u"object",
+ u"additionalProperties": False,
+ u"patternProperties": {
+ u"^abc$": {u"type": u"string"},
+ u"^def$": {u"type": u"string"},
+ }}
+ message = self.message_for(
+ instance={u"zebra": 123},
+ schema=schema,
+ cls=validators.Draft4Validator,
+ )
+ self.assertEqual(
+ message,
+ "{} does not match any of the regexes: {}, {}".format(
+ repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"),
+ ),
+ )
+ message = self.message_for(
+ instance={u"zebra": 123, u"fish": 456},
+ schema=schema,
+ cls=validators.Draft4Validator,
+ )
+ self.assertEqual(
+ message,
+ "{}, {} do not match any of the regexes: {}, {}".format(
+ repr(u"fish"), repr(u"zebra"), repr(u"^abc$"), repr(u"^def$")
+ ),
+ )
+
+ def test_False_schema(self):
+ message = self.message_for(
+ instance="something",
+ schema=False,
+ cls=validators.Draft7Validator,
+ )
+ self.assertIn("False schema does not allow 'something'", message)
+
+
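+# For combinators like anyOf, the reported error carries its child errors in
+# .context, each with relative/absolute paths into both the instance and the
+# schema; the tests below walk those attributes exhaustively.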
+class TestValidationErrorDetails(TestCase):
+ # TODO: These really need unit tests for each individual validator, rather
+ # than just these higher level tests.
+ def test_anyOf(self):
+ instance = 5
+ schema = {
+ "anyOf": [
+ {"minimum": 20},
+ {"type": "string"},
+ ],
+ }
+
+ validator = validators.Draft4Validator(schema)
+ errors = list(validator.iter_errors(instance))
+ self.assertEqual(len(errors), 1)
+ e = errors[0]
+
+ self.assertEqual(e.validator, "anyOf")
+ self.assertEqual(e.validator_value, schema["anyOf"])
+ self.assertEqual(e.instance, instance)
+ self.assertEqual(e.schema, schema)
+ self.assertIsNone(e.parent)
+
+ self.assertEqual(e.path, deque([]))
+ self.assertEqual(e.relative_path, deque([]))
+ self.assertEqual(e.absolute_path, deque([]))
+
+ self.assertEqual(e.schema_path, deque(["anyOf"]))
+ self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
+ self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
+
+ self.assertEqual(len(e.context), 2)
+
+ e1, e2 = sorted_errors(e.context)
+
+ self.assertEqual(e1.validator, "minimum")
+ self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
+ self.assertEqual(e1.instance, instance)
+ self.assertEqual(e1.schema, schema["anyOf"][0])
+ self.assertIs(e1.parent, e)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e1.absolute_path, deque([]))
+ self.assertEqual(e1.relative_path, deque([]))
+
+ self.assertEqual(e1.schema_path, deque([0, "minimum"]))
+ self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
+ self.assertEqual(
+ e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
+ )
+
+ self.assertFalse(e1.context)
+
+ self.assertEqual(e2.validator, "type")
+ self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
+ self.assertEqual(e2.instance, instance)
+ self.assertEqual(e2.schema, schema["anyOf"][1])
+ self.assertIs(e2.parent, e)
+
+ self.assertEqual(e2.path, deque([]))
+ self.assertEqual(e2.relative_path, deque([]))
+ self.assertEqual(e2.absolute_path, deque([]))
+
+ self.assertEqual(e2.schema_path, deque([1, "type"]))
+ self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
+ self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
+
+ self.assertEqual(len(e2.context), 0)
+
+ def test_type(self):
+ instance = {"foo": 1}
+ schema = {
+ "type": [
+ {"type": "integer"},
+ {
+ "type": "object",
+ "properties": {"foo": {"enum": [2]}},
+ },
+ ],
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = list(validator.iter_errors(instance))
+ self.assertEqual(len(errors), 1)
+ e = errors[0]
+
+ self.assertEqual(e.validator, "type")
+ self.assertEqual(e.validator_value, schema["type"])
+ self.assertEqual(e.instance, instance)
+ self.assertEqual(e.schema, schema)
+ self.assertIsNone(e.parent)
+
+ self.assertEqual(e.path, deque([]))
+ self.assertEqual(e.relative_path, deque([]))
+ self.assertEqual(e.absolute_path, deque([]))
+
+ self.assertEqual(e.schema_path, deque(["type"]))
+ self.assertEqual(e.relative_schema_path, deque(["type"]))
+ self.assertEqual(e.absolute_schema_path, deque(["type"]))
+
+ self.assertEqual(len(e.context), 2)
+
+ e1, e2 = sorted_errors(e.context)
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e1.validator_value, schema["type"][0]["type"])
+ self.assertEqual(e1.instance, instance)
+ self.assertEqual(e1.schema, schema["type"][0])
+ self.assertIs(e1.parent, e)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e1.relative_path, deque([]))
+ self.assertEqual(e1.absolute_path, deque([]))
+
+ self.assertEqual(e1.schema_path, deque([0, "type"]))
+ self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
+ self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
+
+ self.assertFalse(e1.context)
+
+ self.assertEqual(e2.validator, "enum")
+ self.assertEqual(e2.validator_value, [2])
+ self.assertEqual(e2.instance, 1)
+ self.assertEqual(e2.schema, {u"enum": [2]})
+ self.assertIs(e2.parent, e)
+
+ self.assertEqual(e2.path, deque(["foo"]))
+ self.assertEqual(e2.relative_path, deque(["foo"]))
+ self.assertEqual(e2.absolute_path, deque(["foo"]))
+
+ self.assertEqual(
+ e2.schema_path, deque([1, "properties", "foo", "enum"]),
+ )
+ self.assertEqual(
+ e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
+ )
+ self.assertEqual(
+ e2.absolute_schema_path,
+ deque(["type", 1, "properties", "foo", "enum"]),
+ )
+
+ self.assertFalse(e2.context)
+
+ def test_single_nesting(self):
+ instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
+ schema = {
+ "properties": {
+ "foo": {"type": "string"},
+ "bar": {"minItems": 2},
+ "baz": {"maximum": 10, "enum": [2, 4, 6, 8]},
+ },
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2, e3, e4 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["baz"]))
+ self.assertEqual(e3.path, deque(["baz"]))
+ self.assertEqual(e4.path, deque(["foo"]))
+
+ self.assertEqual(e1.relative_path, deque(["bar"]))
+ self.assertEqual(e2.relative_path, deque(["baz"]))
+ self.assertEqual(e3.relative_path, deque(["baz"]))
+ self.assertEqual(e4.relative_path, deque(["foo"]))
+
+ self.assertEqual(e1.absolute_path, deque(["bar"]))
+ self.assertEqual(e2.absolute_path, deque(["baz"]))
+ self.assertEqual(e3.absolute_path, deque(["baz"]))
+ self.assertEqual(e4.absolute_path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "minItems")
+ self.assertEqual(e2.validator, "enum")
+ self.assertEqual(e3.validator, "maximum")
+ self.assertEqual(e4.validator, "type")
+
+ def test_multiple_nesting(self):
+ instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"]
+ schema = {
+ "type": "string",
+ "items": {
+ "type": ["string", "object"],
+ "properties": {
+ "foo": {"enum": [1, 3]},
+ "bar": {
+ "type": "array",
+ "properties": {
+ "bar": {"required": True},
+ "baz": {"minItems": 2},
+ },
+ },
+ },
+ },
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([]))
+ self.assertEqual(e2.path, deque([0]))
+ self.assertEqual(e3.path, deque([1, "bar"]))
+ self.assertEqual(e4.path, deque([1, "bar", "bar"]))
+ self.assertEqual(e5.path, deque([1, "bar", "baz"]))
+ self.assertEqual(e6.path, deque([1, "foo"]))
+
+ self.assertEqual(e1.schema_path, deque(["type"]))
+ self.assertEqual(e2.schema_path, deque(["items", "type"]))
+ self.assertEqual(
+ list(e3.schema_path), ["items", "properties", "bar", "type"],
+ )
+ self.assertEqual(
+ list(e4.schema_path),
+ ["items", "properties", "bar", "properties", "bar", "required"],
+ )
+ self.assertEqual(
+ list(e5.schema_path),
+ ["items", "properties", "bar", "properties", "baz", "minItems"]
+ )
+ self.assertEqual(
+ list(e6.schema_path), ["items", "properties", "foo", "enum"],
+ )
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "type")
+ self.assertEqual(e3.validator, "type")
+ self.assertEqual(e4.validator, "required")
+ self.assertEqual(e5.validator, "minItems")
+ self.assertEqual(e6.validator, "enum")
+
+ def test_recursive(self):
+ schema = {
+ "definitions": {
+ "node": {
+ "anyOf": [{
+ "type": "object",
+ "required": ["name", "children"],
+ "properties": {
+ "name": {
+ "type": "string",
+ },
+ "children": {
+ "type": "object",
+ "patternProperties": {
+ "^.*$": {
+ "$ref": "#/definitions/node",
+ },
+ },
+ },
+ },
+ }],
+ },
+ },
+ "type": "object",
+ "required": ["root"],
+ "properties": {"root": {"$ref": "#/definitions/node"}},
+ }
+
+ instance = {
+ "root": {
+ "name": "root",
+ "children": {
+ "a": {
+ "name": "a",
+ "children": {
+ "ab": {
+ "name": "ab",
+ # missing "children"
+ },
+ },
+ },
+ },
+ },
+ }
+ validator = validators.Draft4Validator(schema)
+
+ e, = validator.iter_errors(instance)
+ self.assertEqual(e.absolute_path, deque(["root"]))
+ self.assertEqual(
+ e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
+ )
+
+ e1, = e.context
+ self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
+ self.assertEqual(
+ e1.absolute_schema_path, deque(
+ [
+ "properties",
+ "root",
+ "anyOf",
+ 0,
+ "properties",
+ "children",
+ "patternProperties",
+ "^.*$",
+ "anyOf",
+ ],
+ ),
+ )
+
+ e2, = e1.context
+ self.assertEqual(
+ e2.absolute_path, deque(
+ ["root", "children", "a", "children", "ab"],
+ ),
+ )
+ self.assertEqual(
+ e2.absolute_schema_path, deque(
+ [
+ "properties",
+ "root",
+ "anyOf",
+ 0,
+ "properties",
+ "children",
+ "patternProperties",
+ "^.*$",
+ "anyOf",
+ 0,
+ "properties",
+ "children",
+ "patternProperties",
+ "^.*$",
+ "anyOf",
+ ],
+ ),
+ )
+
+ def test_additionalProperties(self):
+ instance = {"bar": "bar", "foo": 2}
+ schema = {"additionalProperties": {"type": "integer", "minimum": 5}}
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_patternProperties(self):
+ instance = {"bar": 1, "foo": 2}
+ schema = {
+ "patternProperties": {
+ "bar": {"type": "string"},
+ "foo": {"minimum": 5},
+ },
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque(["bar"]))
+ self.assertEqual(e2.path, deque(["foo"]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_additionalItems(self):
+ instance = ["foo", 1]
+ schema = {
+ "items": [],
+ "additionalItems": {"type": "integer", "minimum": 5},
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([0]))
+ self.assertEqual(e2.path, deque([1]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_additionalItems_with_items(self):
+ instance = ["foo", "bar", 1]
+ schema = {
+ "items": [{}],
+ "additionalItems": {"type": "integer", "minimum": 5},
+ }
+
+ validator = validators.Draft3Validator(schema)
+ errors = validator.iter_errors(instance)
+ e1, e2 = sorted_errors(errors)
+
+ self.assertEqual(e1.path, deque([1]))
+ self.assertEqual(e2.path, deque([2]))
+
+ self.assertEqual(e1.validator, "type")
+ self.assertEqual(e2.validator, "minimum")
+
+ def test_propertyNames(self):
+ instance = {"foo": 12}
+ schema = {"propertyNames": {"not": {"const": "foo"}}}
+
+ validator = validators.Draft7Validator(schema)
+ error, = validator.iter_errors(instance)
+
+ self.assertEqual(error.validator, "not")
+ self.assertEqual(
+ error.message,
+ "%r is not allowed for %r" % ({"const": "foo"}, "foo"),
+ )
+ self.assertEqual(error.path, deque([]))
+ self.assertEqual(error.schema_path, deque(["propertyNames", "not"]))
+
+ def test_if_then(self):
+ schema = {
+ "if": {"const": 12},
+ "then": {"const": 13},
+ }
+
+ validator = validators.Draft7Validator(schema)
+ error, = validator.iter_errors(12)
+
+ self.assertEqual(error.validator, "const")
+ self.assertEqual(error.message, "13 was expected")
+ self.assertEqual(error.path, deque([]))
+ self.assertEqual(error.schema_path, deque(["if", "then", "const"]))
+
+ def test_if_else(self):
+ schema = {
+ "if": {"const": 12},
+ "else": {"const": 13},
+ }
+
+ validator = validators.Draft7Validator(schema)
+ error, = validator.iter_errors(15)
+
+ self.assertEqual(error.validator, "const")
+ self.assertEqual(error.message, "13 was expected")
+ self.assertEqual(error.path, deque([]))
+ self.assertEqual(error.schema_path, deque(["if", "else", "const"]))
+
+ def test_boolean_schema_False(self):
+ validator = validators.Draft7Validator(False)
+ error, = validator.iter_errors(12)
+
+ self.assertEqual(
+ (
+ error.message,
+ error.validator,
+ error.validator_value,
+ error.instance,
+ error.schema,
+ error.schema_path,
+ ),
+ (
+ "False schema does not allow 12",
+ None,
+ None,
+ 12,
+ False,
+ deque([]),
+ ),
+ )
+
+ def test_ref(self):
+ ref, schema = "someRef", {"additionalProperties": {"type": "integer"}}
+ validator = validators.Draft7Validator(
+ {"$ref": ref},
+ resolver=validators.RefResolver("", {}, store={ref: schema}),
+ )
+ error, = validator.iter_errors({"foo": "notAnInteger"})
+
+ self.assertEqual(
+ (
+ error.message,
+ error.validator,
+ error.validator_value,
+ error.instance,
+ error.absolute_path,
+ error.schema,
+ error.schema_path,
+ ),
+ (
+ "'notAnInteger' is not of type 'integer'",
+ "type",
+ "integer",
+ "notAnInteger",
+ deque(["foo"]),
+ {"type": "integer"},
+ deque(["additionalProperties", "type"]),
+ ),
+ )
+
+
+class MetaSchemaTestsMixin(object):
+ # TODO: These all belong upstream
+ def test_invalid_properties(self):
+ with self.assertRaises(exceptions.SchemaError):
+ self.Validator.check_schema({"properties": {"test": object()}})
+
+ def test_minItems_invalid_string(self):
+ with self.assertRaises(exceptions.SchemaError):
+ # needs to be an integer
+ self.Validator.check_schema({"minItems": "1"})
+
+ def test_enum_allows_empty_arrays(self):
+ """
+ Technically, all the spec says is they SHOULD have elements, not MUST.
+
+ See https://github.com/Julian/jsonschema/issues/529.
+ """
+ self.Validator.check_schema({"enum": []})
+
+ def test_enum_allows_non_unique_items(self):
+ """
+ Technically, all the spec says is they SHOULD be unique, not MUST.
+
+ See https://github.com/Julian/jsonschema/issues/529.
+ """
+ self.Validator.check_schema({"enum": [12, 12]})
+
+
+class ValidatorTestMixin(MetaSchemaTestsMixin, object):
+ def test_valid_instances_are_valid(self):
+ schema, instance = self.valid
+ self.assertTrue(self.Validator(schema).is_valid(instance))
+
+ def test_invalid_instances_are_not_valid(self):
+ schema, instance = self.invalid
+ self.assertFalse(self.Validator(schema).is_valid(instance))
+
+ def test_non_existent_properties_are_ignored(self):
+ self.Validator({object(): object()}).validate(instance=object())
+
+ def test_it_creates_a_ref_resolver_if_not_provided(self):
+ self.assertIsInstance(
+ self.Validator({}).resolver,
+ validators.RefResolver,
+ )
+
+ def test_it_delegates_to_a_ref_resolver(self):
+ ref, schema = "someCoolRef", {"type": "integer"}
+ resolver = validators.RefResolver("", {}, store={ref: schema})
+ validator = self.Validator({"$ref": ref}, resolver=resolver)
+
+ with self.assertRaises(exceptions.ValidationError):
+ validator.validate(None)
+
+ def test_it_delegates_to_a_legacy_ref_resolver(self):
+ """
+ Legacy RefResolvers support only the context manager form of
+ resolution.
+ """
+
+ class LegacyRefResolver(object):
+ @contextmanager
+ def resolving(this, ref):
+ self.assertEqual(ref, "the ref")
+ yield {"type": "integer"}
+
+ resolver = LegacyRefResolver()
+ schema = {"$ref": "the ref"}
+
+ with self.assertRaises(exceptions.ValidationError):
+ self.Validator(schema, resolver=resolver).validate(None)
+
+ def test_is_type_is_true_for_valid_type(self):
+ self.assertTrue(self.Validator({}).is_type("foo", "string"))
+
+ def test_is_type_is_false_for_invalid_type(self):
+ self.assertFalse(self.Validator({}).is_type("foo", "array"))
+
+ def test_is_type_evades_bool_inheriting_from_int(self):
+ self.assertFalse(self.Validator({}).is_type(True, "integer"))
+ self.assertFalse(self.Validator({}).is_type(True, "number"))
+
+ @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
+ def test_string_a_bytestring_is_a_string(self):
+ self.Validator({"type": "string"}).validate(b"foo")
+
+ def test_patterns_can_be_native_strings(self):
+ """
+ See https://github.com/Julian/jsonschema/issues/611.
+ """
+ self.Validator({"pattern": "foo"}).validate("foo")
+
+ def test_it_can_validate_with_decimals(self):
+ schema = {"items": {"type": "number"}}
+ Validator = validators.extend(
+ self.Validator,
+ type_checker=self.Validator.TYPE_CHECKER.redefine(
+ "number",
+ lambda checker, thing: isinstance(
+ thing, (int, float, Decimal),
+ ) and not isinstance(thing, bool),
+ )
+ )
+
+ validator = Validator(schema)
+ validator.validate([1, 1.1, Decimal(1) / Decimal(8)])
+
+ invalid = ["foo", {}, [], True, None]
+ self.assertEqual(
+ [error.instance for error in validator.iter_errors(invalid)],
+ invalid,
+ )
+
+ def test_it_returns_true_for_formats_it_does_not_know_about(self):
+ validator = self.Validator(
+ {"format": "carrot"}, format_checker=FormatChecker(),
+ )
+ validator.validate("bugs")
+
+ def test_it_does_not_validate_formats_by_default(self):
+ validator = self.Validator({})
+ self.assertIsNone(validator.format_checker)
+
+ def test_it_validates_formats_if_a_checker_is_provided(self):
+ checker = FormatChecker()
+ bad = ValueError("Bad!")
+
+ @checker.checks("foo", raises=ValueError)
+ def check(value):
+ if value == "good":
+ return True
+ elif value == "bad":
+ raise bad
+ else: # pragma: no cover
+ self.fail("What is {}? [Baby Don't Hurt Me]".format(value))
+
+ validator = self.Validator(
+ {"format": "foo"}, format_checker=checker,
+ )
+
+ validator.validate("good")
+ with self.assertRaises(exceptions.ValidationError) as cm:
+ validator.validate("bad")
+
+ # Make sure original cause is attached
+ self.assertIs(cm.exception.cause, bad)
+
+ def test_non_string_custom_type(self):
+ non_string_type = object()
+ schema = {"type": [non_string_type]}
+ Crazy = validators.extend(
+ self.Validator,
+ type_checker=self.Validator.TYPE_CHECKER.redefine(
+ non_string_type,
+ lambda checker, thing: isinstance(thing, int),
+ )
+ )
+ Crazy(schema).validate(15)
+
+ def test_it_properly_formats_tuples_in_errors(self):
+ """
+ A tuple instance properly formats validation errors for uniqueItems.
+
+ See https://github.com/Julian/jsonschema/pull/224
+ """
+ TupleValidator = validators.extend(
+ self.Validator,
+ type_checker=self.Validator.TYPE_CHECKER.redefine(
+ "array",
+ lambda checker, thing: isinstance(thing, tuple),
+ )
+ )
+ with self.assertRaises(exceptions.ValidationError) as e:
+ TupleValidator({"uniqueItems": True}).validate((1, 1))
+ self.assertIn("(1, 1) has non-unique elements", str(e.exception))
+
+
+class AntiDraft6LeakMixin(object):
+ """
+ Make sure functionality from draft 6 doesn't leak backwards in time.
+ """
+
+ def test_True_is_not_a_schema(self):
+ with self.assertRaises(exceptions.SchemaError) as e:
+ self.Validator.check_schema(True)
+ self.assertIn("True is not of type", str(e.exception))
+
+ def test_False_is_not_a_schema(self):
+ with self.assertRaises(exceptions.SchemaError) as e:
+ self.Validator.check_schema(False)
+ self.assertIn("False is not of type", str(e.exception))
+
+ @unittest.skip(bug(523))
+ def test_True_is_not_a_schema_even_if_you_forget_to_check(self):
+ resolver = validators.RefResolver("", {})
+ with self.assertRaises(Exception) as e:
+ self.Validator(True, resolver=resolver).validate(12)
+ self.assertNotIsInstance(e.exception, exceptions.ValidationError)
+
+ @unittest.skip(bug(523))
+ def test_False_is_not_a_schema_even_if_you_forget_to_check(self):
+ resolver = validators.RefResolver("", {})
+ with self.assertRaises(Exception) as e:
+ self.Validator(False, resolver=resolver).validate(12)
+ self.assertNotIsInstance(e.exception, exceptions.ValidationError)
+
+
+class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
+ Validator = validators.Draft3Validator
+ valid = {}, {}
+ invalid = {"type": "integer"}, "foo"
+
+ def test_any_type_is_valid_for_type_any(self):
+ validator = self.Validator({"type": "any"})
+ validator.validate(object())
+
+ def test_any_type_is_redefinable(self):
+ """
+ Sigh, because why not.
+ """
+ Crazy = validators.extend(
+ self.Validator,
+ type_checker=self.Validator.TYPE_CHECKER.redefine(
+ "any", lambda checker, thing: isinstance(thing, int),
+ )
+ )
+ validator = Crazy({"type": "any"})
+ validator.validate(12)
+ with self.assertRaises(exceptions.ValidationError):
+ validator.validate("foo")
+
+ def test_is_type_is_true_for_any_type(self):
+ self.assertTrue(self.Validator({}).is_valid(object(), {"type": "any"}))
+
+ def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
+ self.assertTrue(self.Validator({}).is_type(True, "boolean"))
+ self.assertTrue(self.Validator({}).is_valid(True, {"type": "any"}))
+
+
+class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
+ Validator = validators.Draft4Validator
+ valid = {}, {}
+ invalid = {"type": "integer"}, "foo"
+
+
+class TestDraft6Validator(ValidatorTestMixin, TestCase):
+ Validator = validators.Draft6Validator
+ valid = {}, {}
+ invalid = {"type": "integer"}, "foo"
+
+
+class TestDraft7Validator(ValidatorTestMixin, TestCase):
+ Validator = validators.Draft7Validator
+ valid = {}, {}
+ invalid = {"type": "integer"}, "foo"
+
+
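+# validator_for dispatches on the schema's $schema URI, tolerating a trailing
+# empty fragment ("#"); an unknown URI falls back to the latest draft with a
+# DeprecationWarning, as tested below.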
+class TestValidatorFor(SynchronousTestCase):
+ def test_draft_3(self):
+ schema = {"$schema": "http://json-schema.org/draft-03/schema"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft3Validator,
+ )
+
+ schema = {"$schema": "http://json-schema.org/draft-03/schema#"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft3Validator,
+ )
+
+ def test_draft_4(self):
+ schema = {"$schema": "http://json-schema.org/draft-04/schema"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft4Validator,
+ )
+
+ schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft4Validator,
+ )
+
+ def test_draft_6(self):
+ schema = {"$schema": "http://json-schema.org/draft-06/schema"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft6Validator,
+ )
+
+ schema = {"$schema": "http://json-schema.org/draft-06/schema#"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft6Validator,
+ )
+
+ def test_draft_7(self):
+ schema = {"$schema": "http://json-schema.org/draft-07/schema"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft7Validator,
+ )
+
+ schema = {"$schema": "http://json-schema.org/draft-07/schema#"}
+ self.assertIs(
+ validators.validator_for(schema),
+ validators.Draft7Validator,
+ )
+
+ def test_True(self):
+ self.assertIs(
+ validators.validator_for(True),
+ validators._LATEST_VERSION,
+ )
+
+ def test_False(self):
+ self.assertIs(
+ validators.validator_for(False),
+ validators._LATEST_VERSION,
+ )
+
+ def test_custom_validator(self):
+ Validator = validators.create(
+ meta_schema={"id": "meta schema id"},
+ version="12",
+ id_of=lambda s: s.get("id", ""),
+ )
+ schema = {"$schema": "meta schema id"}
+ self.assertIs(
+ validators.validator_for(schema),
+ Validator,
+ )
+
+ def test_custom_validator_draft6(self):
+ Validator = validators.create(
+ meta_schema={"$id": "meta schema $id"},
+ version="13",
+ )
+ schema = {"$schema": "meta schema $id"}
+ self.assertIs(
+ validators.validator_for(schema),
+ Validator,
+ )
+
+ def test_validator_for_jsonschema_default(self):
+ self.assertIs(validators.validator_for({}), validators._LATEST_VERSION)
+
+ def test_validator_for_custom_default(self):
+ self.assertIs(validators.validator_for({}, default=None), None)
+
+ def test_warns_if_meta_schema_specified_was_not_found(self):
+ self.assertWarns(
+ category=DeprecationWarning,
+ message=(
+ "The metaschema specified by $schema was not found. "
+ "Using the latest draft to validate, but this will raise "
+ "an error in the future."
+ ),
+ # https://tm.tl/9363 :'(
+ filename=sys.modules[self.assertWarns.__module__].__file__,
+
+ f=validators.validator_for,
+ schema={u"$schema": "unknownSchema"},
+ default={},
+ )
+
+ def test_does_not_warn_if_meta_schema_is_unspecified(self):
+ validators.validator_for(schema={}, default={})
+ self.assertFalse(self.flushWarnings())
+
+
+class TestValidate(SynchronousTestCase):
+ def assertUses(self, schema, Validator):
+ result = []
+ self.patch(Validator, "check_schema", result.append)
+ validators.validate({}, schema)
+ self.assertEqual(result, [schema])
+
+ def test_draft3_validator_is_chosen(self):
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-03/schema#"},
+ Validator=validators.Draft3Validator,
+ )
+ # Make sure it works without the empty fragment
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-03/schema"},
+ Validator=validators.Draft3Validator,
+ )
+
+ def test_draft4_validator_is_chosen(self):
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-04/schema#"},
+ Validator=validators.Draft4Validator,
+ )
+ # Make sure it works without the empty fragment
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-04/schema"},
+ Validator=validators.Draft4Validator,
+ )
+
+ def test_draft6_validator_is_chosen(self):
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-06/schema#"},
+ Validator=validators.Draft6Validator,
+ )
+ # Make sure it works without the empty fragment
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-06/schema"},
+ Validator=validators.Draft6Validator,
+ )
+
+ def test_draft7_validator_is_chosen(self):
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-07/schema#"},
+ Validator=validators.Draft7Validator,
+ )
+ # Make sure it works without the empty fragment
+ self.assertUses(
+ schema={"$schema": "http://json-schema.org/draft-07/schema"},
+ Validator=validators.Draft7Validator,
+ )
+
+ def test_draft7_validator_is_the_default(self):
+ self.assertUses(schema={}, Validator=validators.Draft7Validator)
+
+ def test_validation_error_message(self):
+ with self.assertRaises(exceptions.ValidationError) as e:
+ validators.validate(12, {"type": "string"})
+ self.assertRegexpMatches(
+ str(e.exception),
+ "(?s)Failed validating u?'.*' in schema.*On instance",
+ )
+
+ def test_schema_error_message(self):
+ with self.assertRaises(exceptions.SchemaError) as e:
+ validators.validate(12, {"type": 12})
+ self.assertRegexpMatches(
+ str(e.exception),
+ "(?s)Failed validating u?'.*' in metaschema.*On schema",
+ )
+
+ def test_it_uses_best_match(self):
+ # This is a schema that best_match will recurse into
+ schema = {"oneOf": [{"type": "string"}, {"type": "array"}]}
+ with self.assertRaises(exceptions.ValidationError) as e:
+ validators.validate(12, schema)
+ self.assertIn("12 is not of type", str(e.exception))
+
+
+class TestRefResolver(SynchronousTestCase):
+
+ base_uri = ""
+ stored_uri = "foo://stored"
+ stored_schema = {"stored": "schema"}
+
+ def setUp(self):
+ self.referrer = {}
+ self.store = {self.stored_uri: self.stored_schema}
+ self.resolver = validators.RefResolver(
+ self.base_uri, self.referrer, self.store,
+ )
+
+ def test_it_does_not_retrieve_schema_urls_from_the_network(self):
+ ref = validators.Draft3Validator.META_SCHEMA["id"]
+ self.patch(
+ self.resolver,
+ "resolve_remote",
+ lambda *args, **kwargs: self.fail("Should not have been called!"),
+ )
+ with self.resolver.resolving(ref) as resolved:
+ pass
+ self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA)
+
+ def test_it_resolves_local_refs(self):
+ ref = "#/properties/foo"
+ self.referrer["properties"] = {"foo": object()}
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, self.referrer["properties"]["foo"])
+
+ def test_it_resolves_local_refs_with_id(self):
+ schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}}
+ resolver = validators.RefResolver.from_schema(
+ schema,
+ id_of=lambda schema: schema.get(u"id", u""),
+ )
+ with resolver.resolving("#/a") as resolved:
+ self.assertEqual(resolved, schema["a"])
+ with resolver.resolving("http://bar/schema#/a") as resolved:
+ self.assertEqual(resolved, schema["a"])
+
+ def test_it_retrieves_stored_refs(self):
+ with self.resolver.resolving(self.stored_uri) as resolved:
+ self.assertIs(resolved, self.stored_schema)
+
+ self.resolver.store["cached_ref"] = {"foo": 12}
+ with self.resolver.resolving("cached_ref#/foo") as resolved:
+ self.assertEqual(resolved, 12)
+
+ def test_it_retrieves_unstored_refs_via_requests(self):
+ ref = "http://bar#baz"
+ schema = {"baz": 12}
+
+ if "requests" in sys.modules:
+ self.addCleanup(
+ sys.modules.__setitem__, "requests", sys.modules["requests"],
+ )
+ sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema})
+
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, 12)
+
+ def test_it_retrieves_unstored_refs_via_urlopen(self):
+ ref = "http://bar#baz"
+ schema = {"baz": 12}
+
+ if "requests" in sys.modules:
+ self.addCleanup(
+ sys.modules.__setitem__, "requests", sys.modules["requests"],
+ )
+ sys.modules["requests"] = None
+
+ @contextmanager
+ def fake_urlopen(url):
+ self.assertEqual(url, "http://bar")
+ yield BytesIO(json.dumps(schema).encode("utf8"))
+
+ self.addCleanup(setattr, validators, "urlopen", validators.urlopen)
+ validators.urlopen = fake_urlopen
+
+ with self.resolver.resolving(ref) as resolved:
+ pass
+ self.assertEqual(resolved, 12)
+
+ def test_it_retrieves_local_refs_via_urlopen(self):
+ with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf:
+ self.addCleanup(os.remove, tempf.name)
+ json.dump({"foo": "bar"}, tempf)
+
+ ref = "file://{}#foo".format(pathname2url(tempf.name))
+ with self.resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, "bar")
+
+ def test_it_can_construct_a_base_uri_from_a_schema(self):
+ schema = {"id": "foo"}
+ resolver = validators.RefResolver.from_schema(
+ schema,
+ id_of=lambda schema: schema.get(u"id", u""),
+ )
+ self.assertEqual(resolver.base_uri, "foo")
+ self.assertEqual(resolver.resolution_scope, "foo")
+ with resolver.resolving("") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("#") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("foo") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("foo#") as resolved:
+ self.assertEqual(resolved, schema)
+
+ def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
+ schema = {}
+ resolver = validators.RefResolver.from_schema(schema)
+ self.assertEqual(resolver.base_uri, "")
+ self.assertEqual(resolver.resolution_scope, "")
+ with resolver.resolving("") as resolved:
+ self.assertEqual(resolved, schema)
+ with resolver.resolving("#") as resolved:
+ self.assertEqual(resolved, schema)
+
+ def test_custom_uri_scheme_handlers(self):
+ def handler(url):
+ self.assertEqual(url, ref)
+ return schema
+
+ schema = {"foo": "bar"}
+ ref = "foo://bar"
+ resolver = validators.RefResolver("", {}, handlers={"foo": handler})
+ with resolver.resolving(ref) as resolved:
+ self.assertEqual(resolved, schema)
+
+ def test_cache_remote_on(self):
+ response = [object()]
+
+ def handler(url):
+ try:
+ return response.pop()
+ except IndexError: # pragma: no cover
+ self.fail("Response must not have been cached!")
+
+ ref = "foo://bar"
+ resolver = validators.RefResolver(
+ "", {}, cache_remote=True, handlers={"foo": handler},
+ )
+ with resolver.resolving(ref):
+ pass
+ with resolver.resolving(ref):
+ pass
+
+ def test_cache_remote_off(self):
+ response = [object()]
+
+ def handler(url):
+ try:
+ return response.pop()
+ except IndexError: # pragma: no cover
+ self.fail("Handler called twice!")
+
+ ref = "foo://bar"
+ resolver = validators.RefResolver(
+ "", {}, cache_remote=False, handlers={"foo": handler},
+ )
+ with resolver.resolving(ref):
+ pass
+
+ def test_if_you_give_it_junk_you_get_a_resolution_error(self):
+ error = ValueError("Oh no! What's this?")
+
+ def handler(url):
+ raise error
+
+ ref = "foo://bar"
+ resolver = validators.RefResolver("", {}, handlers={"foo": handler})
+ with self.assertRaises(exceptions.RefResolutionError) as err:
+ with resolver.resolving(ref):
+ self.fail("Shouldn't get this far!") # pragma: no cover
+ self.assertEqual(err.exception, exceptions.RefResolutionError(error))
+
+ def test_helpful_error_message_on_failed_pop_scope(self):
+ resolver = validators.RefResolver("", {})
+ resolver.pop_scope()
+ with self.assertRaises(exceptions.RefResolutionError) as exc:
+ resolver.pop_scope()
+ self.assertIn("Failed to pop the scope", str(exc.exception))
+
+
+def sorted_errors(errors):
+ def key(error):
+ return (
+ [str(e) for e in error.path],
+ [str(e) for e in error.schema_path],
+ )
+ return sorted(errors, key=key)
+
+
+@attr.s
+class ReallyFakeRequests(object):
+
+ _responses = attr.ib()
+
+ def get(self, url):
+ response = self._responses.get(url)
+ if response is None:  # pragma: no cover
+ raise ValueError("Unknown URL: " + repr(url))
+ return _ReallyFakeJSONResponse(json.dumps(response))
+
+
+@attr.s
+class _ReallyFakeJSONResponse(object):
+
+ _response = attr.ib()
+
+ def json(self):
+ return json.loads(self._response)
diff --git a/lib/spack/external/_vendoring/jsonschema/validators.py b/lib/spack/external/_vendoring/jsonschema/validators.py
new file mode 100644
index 0000000000..1dc420c70d
--- /dev/null
+++ b/lib/spack/external/_vendoring/jsonschema/validators.py
@@ -0,0 +1,970 @@
+"""
+Creation and extension of validators, with implementations for existing drafts.
+"""
+from __future__ import division
+
+from warnings import warn
+import contextlib
+import json
+import numbers
+
+from six import add_metaclass
+
+from jsonschema import (
+ _legacy_validators,
+ _types,
+ _utils,
+ _validators,
+ exceptions,
+)
+from jsonschema.compat import (
+ Sequence,
+ int_types,
+ iteritems,
+ lru_cache,
+ str_types,
+ unquote,
+ urldefrag,
+ urljoin,
+ urlopen,
+ urlsplit,
+)
+
+# Sigh. https://gitlab.com/pycqa/flake8/issues/280
+# https://github.com/pyga/ebb-lint/issues/7
+# Imported for backwards compatibility.
+from jsonschema.exceptions import ErrorTree
+ErrorTree
+
+
+class _DontDoThat(Exception):
+ """
+ Raised when a Validator with a non-default type checker is misused.
+
+ Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
+ exist precisely for the cases that a DEFAULT_TYPES mapping cannot
+ represent.
+ """
+
+ def __str__(self):
+ return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
+
+
+validators = {}
+meta_schemas = _utils.URIDict()
+
+
+def _generate_legacy_type_checks(types=()):
+ """
+ Generate newer-style type checks out of JSON-type-name-to-type mappings.
+
+ Arguments:
+
+ types (dict):
+
+ A mapping of type names to their Python types
+
+ Returns:
+
+ A dictionary of definitions to pass to `TypeChecker`
+ """
+ types = dict(types)
+
+ def gen_type_check(pytypes):
+ pytypes = _utils.flatten(pytypes)
+
+ def type_check(checker, instance):
+ if isinstance(instance, bool):
+ if bool not in pytypes:
+ return False
+ return isinstance(instance, pytypes)
+
+ return type_check
+
+ definitions = {}
+ for typename, pytypes in iteritems(types):
+ definitions[typename] = gen_type_check(pytypes)
+
+ return definitions
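+
+# Illustrative sketch, not part of the vendored upstream source: because
+# bool is a subclass of int in Python, the generated checks deliberately
+# reject bools unless bool is listed explicitly in the mapping.
+#
+#     checks = _generate_legacy_type_checks({u"integer": int})
+#     checks[u"integer"](None, 12)    # True
+#     checks[u"integer"](None, True)  # False: bool was not in the mapping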
+
+
+_DEPRECATED_DEFAULT_TYPES = {
+ u"array": list,
+ u"boolean": bool,
+ u"integer": int_types,
+ u"null": type(None),
+ u"number": numbers.Number,
+ u"object": dict,
+ u"string": str_types,
+}
+_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
+ type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
+)
+
+
+def validates(version):
+ """
+ Register the decorated validator for a ``version`` of the specification.
+
+ Registered validators and their meta schemas will be considered when
+ parsing ``$schema`` properties' URIs.
+
+ Arguments:
+
+ version (str):
+
+ An identifier to use as the version's name
+
+ Returns:
+
+ collections.Callable:
+
+ a class decorator to decorate the validator with the version
+ """
+
+ def _validates(cls):
+ validators[version] = cls
+ meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
+ if meta_schema_id:
+ meta_schemas[meta_schema_id] = cls
+ return cls
+ return _validates
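+
+# Illustrative sketch, not part of the vendored upstream source: `validates`
+# returns a class decorator; the draft classes below register themselves by
+# passing ``version=`` to `create`. Manual registration of a hypothetical
+# draft would look like:
+#
+#     @validates("my-draft")
+#     class MyValidator(object):
+#         META_SCHEMA = {u"$id": u"urn:example:meta"}  # hypothetical id
+#         ID_OF = staticmethod(_id_of)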
+
+
+def _DEFAULT_TYPES(self):
+ if self._CREATED_WITH_DEFAULT_TYPES is None:
+ raise _DontDoThat()
+
+ warn(
+ (
+ "The DEFAULT_TYPES attribute is deprecated. "
+ "See the type checker attached to this validator instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self._DEFAULT_TYPES
+
+
+class _DefaultTypesDeprecatingMetaClass(type):
+ DEFAULT_TYPES = property(_DEFAULT_TYPES)
+
+
+def _id_of(schema):
+ if schema is True or schema is False:
+ return u""
+ return schema.get(u"$id", u"")
+
+
+def create(
+ meta_schema,
+ validators=(),
+ version=None,
+ default_types=None,
+ type_checker=None,
+ id_of=_id_of,
+):
+ """
+ Create a new validator class.
+
+ Arguments:
+
+ meta_schema (collections.Mapping):
+
+ the meta schema for the new validator class
+
+ validators (collections.Mapping):
+
+ a mapping from names to callables, where each callable will
+ validate the schema property with the given name.
+
+ Each callable should take 4 arguments:
+
+ 1. a validator instance,
+ 2. the value of the property being validated within the
+ instance
+ 3. the instance
+ 4. the schema
+
+ version (str):
+
+ an identifier for the version that this validator class will
+ validate. If provided, the returned validator class will
+ have its ``__name__`` set to include the version, and also
+ will have `jsonschema.validators.validates` automatically
+ called for the given version.
+
+ type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :validator:`type` validator.
+
+ If unprovided, a `jsonschema.TypeChecker` will be created
+ with a set of default types typical of JSON Schema drafts.
+
+ default_types (collections.Mapping):
+
+ .. deprecated:: 3.0.0
+
+ Please use the type_checker argument instead.
+
+ If set, it provides mappings of JSON types to Python types
+ that will be converted to functions and redefined in this
+ object's `jsonschema.TypeChecker`.
+
+ id_of (collections.Callable):
+
+ A function that given a schema, returns its ID.
+
+ Returns:
+
+ a new `jsonschema.IValidator` class
+ """
+
+ if default_types is not None:
+ if type_checker is not None:
+ raise TypeError(
+ "Do not specify default_types when providing a type checker.",
+ )
+ _created_with_default_types = True
+ warn(
+ (
+ "The default_types argument is deprecated. "
+ "Use the type_checker argument instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ type_checker = _types.TypeChecker(
+ type_checkers=_generate_legacy_type_checks(default_types),
+ )
+ else:
+ default_types = _DEPRECATED_DEFAULT_TYPES
+ if type_checker is None:
+ _created_with_default_types = False
+ type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
+ elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
+ _created_with_default_types = False
+ else:
+ _created_with_default_types = None
+
+ @add_metaclass(_DefaultTypesDeprecatingMetaClass)
+ class Validator(object):
+
+ VALIDATORS = dict(validators)
+ META_SCHEMA = dict(meta_schema)
+ TYPE_CHECKER = type_checker
+ ID_OF = staticmethod(id_of)
+
+ DEFAULT_TYPES = property(_DEFAULT_TYPES)
+ _DEFAULT_TYPES = dict(default_types)
+ _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
+
+ def __init__(
+ self,
+ schema,
+ types=(),
+ resolver=None,
+ format_checker=None,
+ ):
+ if types:
+ warn(
+ (
+ "The types argument is deprecated. Provide "
+ "a type_checker to jsonschema.validators.extend "
+ "instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
+ _generate_legacy_type_checks(types),
+ )
+
+ if resolver is None:
+ resolver = RefResolver.from_schema(schema, id_of=id_of)
+
+ self.resolver = resolver
+ self.format_checker = format_checker
+ self.schema = schema
+
+ @classmethod
+ def check_schema(cls, schema):
+ for error in cls(cls.META_SCHEMA).iter_errors(schema):
+ raise exceptions.SchemaError.create_from(error)
+
+ def iter_errors(self, instance, _schema=None):
+ if _schema is None:
+ _schema = self.schema
+
+ if _schema is True:
+ return
+ elif _schema is False:
+ yield exceptions.ValidationError(
+ "False schema does not allow %r" % (instance,),
+ validator=None,
+ validator_value=None,
+ instance=instance,
+ schema=_schema,
+ )
+ return
+
+ scope = id_of(_schema)
+ if scope:
+ self.resolver.push_scope(scope)
+ try:
+ ref = _schema.get(u"$ref")
+ if ref is not None:
+ validators = [(u"$ref", ref)]
+ else:
+ validators = iteritems(_schema)
+
+ for k, v in validators:
+ validator = self.VALIDATORS.get(k)
+ if validator is None:
+ continue
+
+ errors = validator(self, v, instance, _schema) or ()
+ for error in errors:
+ # set details if not already set by the called fn
+ error._set(
+ validator=k,
+ validator_value=v,
+ instance=instance,
+ schema=_schema,
+ )
+ if k != u"$ref":
+ error.schema_path.appendleft(k)
+ yield error
+ finally:
+ if scope:
+ self.resolver.pop_scope()
+
+ def descend(self, instance, schema, path=None, schema_path=None):
+ for error in self.iter_errors(instance, schema):
+ if path is not None:
+ error.path.appendleft(path)
+ if schema_path is not None:
+ error.schema_path.appendleft(schema_path)
+ yield error
+
+ def validate(self, *args, **kwargs):
+ for error in self.iter_errors(*args, **kwargs):
+ raise error
+
+ def is_type(self, instance, type):
+ try:
+ return self.TYPE_CHECKER.is_type(instance, type)
+ except exceptions.UndefinedTypeCheck:
+ raise exceptions.UnknownType(type, instance, self.schema)
+
+ def is_valid(self, instance, _schema=None):
+ error = next(self.iter_errors(instance, _schema), None)
+ return error is None
+
+ if version is not None:
+ Validator = validates(version)(Validator)
+ Validator.__name__ = version.title().replace(" ", "") + "Validator"
+
+ return Validator
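+
+# Illustrative sketch, not part of the vendored upstream source: a minimal
+# validator class built with `create`, assuming the hypothetical meta schema
+# id below. The per-keyword callables come from `_validators`, as in the
+# draft classes defined later in this module.
+#
+#     Minimal = create(
+#         meta_schema={u"$id": u"urn:example:meta"},
+#         validators={u"type": _validators.type},
+#         version="example",
+#     )
+#     Minimal({"type": "string"}).validate(u"ok")  # no error raised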
+
+
+def extend(validator, validators=(), version=None, type_checker=None):
+ """
+ Create a new validator class by extending an existing one.
+
+ Arguments:
+
+ validator (jsonschema.IValidator):
+
+ an existing validator class
+
+ validators (collections.Mapping):
+
+ a mapping of new validator callables to extend with, whose
+ structure is as in `create`.
+
+ .. note::
+
+ Any validator callables with the same name as an
+ existing one will (silently) replace the old validator
+ callable entirely, effectively overriding any validation
+ done in the "parent" validator class.
+
+ If you wish to instead extend the behavior of a parent's
+ validator callable, delegate and call it directly in
+ the new validator function by retrieving it using
+ ``OldValidator.VALIDATORS["validator_name"]``.
+
+ version (str):
+
+ a version for the new validator class
+
+ type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :validator:`type` validator.
+
+ If unprovided, the type checker of the extended
+ `jsonschema.IValidator` will be carried along.
+
+ Returns:
+
+ a new `jsonschema.IValidator` class extending the one provided
+
+ .. note:: Meta Schemas
+
+ The new validator class will have its parent's meta schema.
+
+ If you wish to change or extend the meta schema in the new
+ validator class, modify ``META_SCHEMA`` directly on the returned
+ class. Note that no implicit copying is done, so a copy should
+ likely be made before modifying it, in order to not affect the
+ old validator.
+ """
+
+ all_validators = dict(validator.VALIDATORS)
+ all_validators.update(validators)
+
+ if type_checker is None:
+ type_checker = validator.TYPE_CHECKER
+ elif validator._CREATED_WITH_DEFAULT_TYPES:
+ raise TypeError(
+ "Cannot extend a validator created with default_types "
+ "with a type_checker. Update the validator to use a "
+ "type_checker when created."
+ )
+ return create(
+ meta_schema=validator.META_SCHEMA,
+ validators=all_validators,
+ version=version,
+ type_checker=type_checker,
+ id_of=validator.ID_OF,
+ )
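+
+# Illustrative sketch, not part of the vendored upstream source: extending
+# Draft7Validator with a hypothetical "even" keyword. Validator callables
+# receive (validator, value, instance, schema) and yield errors.
+#
+#     def even(validator, value, instance, schema):
+#         if value and isinstance(instance, int) and instance % 2:
+#             yield exceptions.ValidationError("%r is not even" % (instance,))
+#
+#     EvenValidator = extend(Draft7Validator, validators={u"even": even})
+#     EvenValidator({u"even": True}).validate(12)  # passes; 13 would raise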
+
+
+Draft3Validator = create(
+ meta_schema=_utils.load_schema("draft3"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"dependencies": _legacy_validators.dependencies_draft3,
+ u"disallow": _legacy_validators.disallow_draft3,
+ u"divisibleBy": _validators.multipleOf,
+ u"enum": _validators.enum,
+ u"extends": _legacy_validators.extends_draft3,
+ u"format": _validators.format,
+ u"items": _legacy_validators.items_draft3_draft4,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maximum": _legacy_validators.maximum_draft3_draft4,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minimum": _legacy_validators.minimum_draft3_draft4,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _legacy_validators.properties_draft3,
+ u"type": _legacy_validators.type_draft3,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft3_type_checker,
+ version="draft3",
+ id_of=lambda schema: schema.get(u"id", ""),
+)
+
+Draft4Validator = create(
+ meta_schema=_utils.load_schema("draft4"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"format": _validators.format,
+ u"items": _legacy_validators.items_draft3_draft4,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _legacy_validators.maximum_draft3_draft4,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _legacy_validators.minimum_draft3_draft4,
+ u"multipleOf": _validators.multipleOf,
+ u"not": _validators.not_,
+ u"oneOf": _validators.oneOf,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft4_type_checker,
+ version="draft4",
+ id_of=lambda schema: schema.get(u"id", ""),
+)
+
+Draft6Validator = create(
+ meta_schema=_utils.load_schema("draft6"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"const": _validators.const,
+ u"contains": _validators.contains,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"exclusiveMaximum": _validators.exclusiveMaximum,
+ u"exclusiveMinimum": _validators.exclusiveMinimum,
+ u"format": _validators.format,
+ u"items": _validators.items,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _validators.maximum,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _validators.minimum,
+ u"multipleOf": _validators.multipleOf,
+ u"not": _validators.not_,
+ u"oneOf": _validators.oneOf,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"propertyNames": _validators.propertyNames,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft6_type_checker,
+ version="draft6",
+)
+
+Draft7Validator = create(
+ meta_schema=_utils.load_schema("draft7"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"const": _validators.const,
+ u"contains": _validators.contains,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"exclusiveMaximum": _validators.exclusiveMaximum,
+ u"exclusiveMinimum": _validators.exclusiveMinimum,
+ u"format": _validators.format,
+ u"if": _validators.if_,
+ u"items": _validators.items,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _validators.maximum,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _validators.minimum,
+ u"multipleOf": _validators.multipleOf,
+ u"oneOf": _validators.oneOf,
+ u"not": _validators.not_,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"propertyNames": _validators.propertyNames,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft7_type_checker,
+ version="draft7",
+)
+
+_LATEST_VERSION = Draft7Validator
+
+
+class RefResolver(object):
+ """
+ Resolve JSON References.
+
+ Arguments:
+
+ base_uri (str):
+
+ The URI of the referring document
+
+ referrer:
+
+ The actual referring document
+
+ store (dict):
+
+ A mapping from URIs to documents to cache
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
+
+ handlers (dict):
+
+ A mapping from URI schemes to functions that should be used
+ to retrieve them
+
+ urljoin_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of joining
+ the resolution scope to subscopes.
+
+ remote_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of
+ resolved remote URLs.
+
+ Attributes:
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
+ """
+
+ def __init__(
+ self,
+ base_uri,
+ referrer,
+ store=(),
+ cache_remote=True,
+ handlers=(),
+ urljoin_cache=None,
+ remote_cache=None,
+ ):
+ if urljoin_cache is None:
+ urljoin_cache = lru_cache(1024)(urljoin)
+ if remote_cache is None:
+ remote_cache = lru_cache(1024)(self.resolve_from_url)
+
+ self.referrer = referrer
+ self.cache_remote = cache_remote
+ self.handlers = dict(handlers)
+
+ self._scopes_stack = [base_uri]
+ self.store = _utils.URIDict(
+ (id, validator.META_SCHEMA)
+ for id, validator in iteritems(meta_schemas)
+ )
+ self.store.update(store)
+ self.store[base_uri] = referrer
+
+ self._urljoin_cache = urljoin_cache
+ self._remote_cache = remote_cache
+
+ @classmethod
+ def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
+ """
+ Construct a resolver from a JSON schema object.
+
+ Arguments:
+
+ schema:
+
+ the referring schema
+
+ Returns:
+
+ `RefResolver`
+ """
+
+ return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
+
+ def push_scope(self, scope):
+ """
+ Enter a given sub-scope.
+
+ Treats further dereferences as being performed underneath the
+ given scope.
+ """
+ self._scopes_stack.append(
+ self._urljoin_cache(self.resolution_scope, scope),
+ )
+
+ def pop_scope(self):
+ """
+ Exit the most recently entered scope.
+
+ Treats further dereferences as being performed underneath the
+ original scope.
+
+ Don't call this method more times than `push_scope` has been
+ called.
+ """
+ try:
+ self._scopes_stack.pop()
+ except IndexError:
+ raise exceptions.RefResolutionError(
+ "Failed to pop the scope from an empty stack. "
+ "`pop_scope()` should only be called once for every "
+ "`push_scope()`"
+ )
+
+ @property
+ def resolution_scope(self):
+ """
+ Retrieve the current resolution scope.
+ """
+ return self._scopes_stack[-1]
+
+ @property
+ def base_uri(self):
+ """
+ Retrieve the current base URI, not including any fragment.
+ """
+ uri, _ = urldefrag(self.resolution_scope)
+ return uri
+
+ @contextlib.contextmanager
+ def in_scope(self, scope):
+ """
+ Temporarily enter the given scope for the duration of the context.
+ """
+ self.push_scope(scope)
+ try:
+ yield
+ finally:
+ self.pop_scope()
+
+ @contextlib.contextmanager
+ def resolving(self, ref):
+ """
+ Resolve the given ``ref`` and enter its resolution scope.
+
+ Exits the scope on exit of this context manager.
+
+ Arguments:
+
+ ref (str):
+
+ The reference to resolve
+ """
+
+ url, resolved = self.resolve(ref)
+ self.push_scope(url)
+ try:
+ yield resolved
+ finally:
+ self.pop_scope()
+
+ def resolve(self, ref):
+ """
+ Resolve the given reference.
+ """
+ url = self._urljoin_cache(self.resolution_scope, ref)
+ return url, self._remote_cache(url)
+
+ def resolve_from_url(self, url):
+ """
+ Resolve the given remote URL.
+ """
+ url, fragment = urldefrag(url)
+ try:
+ document = self.store[url]
+ except KeyError:
+ try:
+ document = self.resolve_remote(url)
+ except Exception as exc:
+ raise exceptions.RefResolutionError(exc)
+
+ return self.resolve_fragment(document, fragment)
+
+ def resolve_fragment(self, document, fragment):
+ """
+ Resolve a ``fragment`` within the referenced ``document``.
+
+ Arguments:
+
+ document:
+
+ The referent document
+
+ fragment (str):
+
+ a URI fragment to resolve within it
+ """
+
+ fragment = fragment.lstrip(u"/")
+ parts = unquote(fragment).split(u"/") if fragment else []
+
+ for part in parts:
+ part = part.replace(u"~1", u"/").replace(u"~0", u"~")
+
+ if isinstance(document, Sequence):
+ # Array indexes should be turned into integers
+ try:
+ part = int(part)
+ except ValueError:
+ pass
+ try:
+ document = document[part]
+ except (TypeError, LookupError):
+ raise exceptions.RefResolutionError(
+ "Unresolvable JSON pointer: %r" % fragment
+ )
+
+ return document
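+
+ # Illustrative sketch, not part of the vendored upstream source: the
+ # "~1" -> "/" and "~0" -> "~" replacements above implement JSON Pointer
+ # escaping (RFC 6901), so:
+ #
+ #     resolver.resolve_fragment({u"a/b": {u"c~d": [10, 20]}}, u"/a~1b/c~0d/1")
+ #     # -> 20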
+
+ def resolve_remote(self, uri):
+ """
+ Resolve a remote ``uri``.
+
+ If called directly, does not check the store first, but after
+ retrieving the document at the specified URI it will be saved in
+ the store if :attr:`cache_remote` is True.
+
+ .. note::
+
+ If the requests_ library is present, ``jsonschema`` will use it to
+ request the remote ``uri``, so that the correct encoding is
+ detected and used.
+
+ If it isn't, or if the scheme of the ``uri`` is not ``http`` or
+ ``https``, UTF-8 is assumed.
+
+ Arguments:
+
+ uri (str):
+
+ The URI to resolve
+
+ Returns:
+
+ The retrieved document
+
+ .. _requests: https://pypi.org/project/requests/
+ """
+ try:
+ import requests
+ except ImportError:
+ requests = None
+
+ scheme = urlsplit(uri).scheme
+
+ if scheme in self.handlers:
+ result = self.handlers[scheme](uri)
+ elif scheme in [u"http", u"https"] and requests:
+ # Requests has support for detecting the correct encoding of
+ # json over http
+ result = requests.get(uri).json()
+ else:
+ # Otherwise, pass off to urllib and assume utf-8
+ with urlopen(uri) as url:
+ result = json.loads(url.read().decode("utf-8"))
+
+ if self.cache_remote:
+ self.store[uri] = result
+ return result
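+
+ # Illustrative sketch, not part of the vendored upstream source: a
+ # hypothetical custom scheme handler, exercised through `resolve` so
+ # the result is cached in the store while cache_remote is True.
+ #
+ #     def demo_handler(uri):
+ #         return {u"type": u"integer"}
+ #
+ #     resolver = RefResolver(u"", {}, handlers={u"demo": demo_handler})
+ #     resolver.resolve(u"demo://schema")
+ #     # -> (u"demo://schema", {u"type": u"integer"})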
+
+
+def validate(instance, schema, cls=None, *args, **kwargs):
+ """
+ Validate an instance under the given schema.
+
+ >>> validate([2, 3, 4], {"maxItems": 2})
+ Traceback (most recent call last):
+ ...
+ ValidationError: [2, 3, 4] is too long
+
+ :func:`validate` will first verify that the provided schema is
+ itself valid, since not doing so can lead to less obvious error
+ messages and fail in less obvious or consistent ways.
+
+ If you know you have a valid schema already, especially if you
+ intend to validate multiple instances with the same schema, you
+ likely would prefer using the `IValidator.validate` method directly
+ on a specific validator (e.g. ``Draft7Validator.validate``).
+
+
+ Arguments:
+
+ instance:
+
+ The instance to validate
+
+ schema:
+
+ The schema to validate with
+
+ cls (IValidator):
+
+ The class that will be used to validate the instance.
+
+ If the ``cls`` argument is not provided, two things will happen
+ in accordance with the specification. First, if the schema has a
+ :validator:`$schema` property containing a known meta-schema [#]_
+ then the proper validator will be used. The specification recommends
+ that all schemas contain :validator:`$schema` properties for this
+ reason. Second, if no :validator:`$schema` property is found, the
+ default validator class is the latest released draft.
+
+ Any other provided positional and keyword arguments will be passed
+ on when instantiating the ``cls``.
+
+ Raises:
+
+ `jsonschema.exceptions.ValidationError` if the instance
+ is invalid
+
+ `jsonschema.exceptions.SchemaError` if the schema itself
+ is invalid
+
+ .. rubric:: Footnotes
+ .. [#] known by a validator registered with
+ `jsonschema.validators.validates`
+ """
+ if cls is None:
+ cls = validator_for(schema)
+
+ cls.check_schema(schema)
+ validator = cls(schema, *args, **kwargs)
+ error = exceptions.best_match(validator.iter_errors(instance))
+ if error is not None:
+ raise error
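+
+# Illustrative sketch, not part of the vendored upstream source: when one
+# schema validates many instances, instantiate the validator class once
+# instead of calling `validate` repeatedly, as the docstring above suggests.
+#
+#     validator = Draft7Validator({u"type": u"array", u"maxItems": 2})
+#     validator.validate([1, 2])     # ok
+#     validator.is_valid([1, 2, 3])  # False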
+
+
+def validator_for(schema, default=_LATEST_VERSION):
+ """
+ Retrieve the validator class appropriate for validating the given schema.
+
+ Uses the :validator:`$schema` property that should be present in the
+ given schema to look up the appropriate validator class.
+
+ Arguments:
+
+ schema (collections.Mapping or bool):
+
+ the schema to look at
+
+ default:
+
+ the default to return if the appropriate validator class
+ cannot be determined.
+
+ If unprovided, the default is to return the latest supported
+ draft.
+ """
+ if schema is True or schema is False or u"$schema" not in schema:
+ return default
+ if schema[u"$schema"] not in meta_schemas:
+ warn(
+ (
+ "The metaschema specified by $schema was not found. "
+ "Using the latest draft to validate, but this will raise "
+ "an error in the future."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
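+
+# Illustrative sketch, not part of the vendored upstream source:
+#
+#     validator_for({u"$schema": u"http://json-schema.org/draft-04/schema#"})
+#     # -> Draft4Validator
+#     validator_for({})  # no $schema -> _LATEST_VERSION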
diff --git a/lib/spack/external/_vendoring/macholib.pyi b/lib/spack/external/_vendoring/macholib.pyi
new file mode 100644
index 0000000000..62ac8a00a3
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib.pyi
@@ -0,0 +1 @@
+from macholib import *
\ No newline at end of file
diff --git a/lib/spack/external/_vendoring/macholib/LICENSE b/lib/spack/external/_vendoring/macholib/LICENSE
new file mode 100644
index 0000000000..602dbb0948
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/LICENSE
@@ -0,0 +1,21 @@
+Copyright 2006-2010 - Bob Ippolito
+Copyright 2010-2020 - Ronald Oussoren
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject
+ to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/spack/external/_vendoring/macholib/MachO.py b/lib/spack/external/_vendoring/macholib/MachO.py
new file mode 100644
index 0000000000..daa36b0d3c
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/MachO.py
@@ -0,0 +1,501 @@
+"""
+Utilities for reading and writing Mach-O headers
+"""
+from __future__ import print_function
+
+import os
+import struct
+import sys
+
+from macholib.util import fileview
+
+from .mach_o import (
+ FAT_MAGIC,
+ FAT_MAGIC_64,
+ LC_DYSYMTAB,
+ LC_ID_DYLIB,
+ LC_LOAD_DYLIB,
+ LC_LOAD_UPWARD_DYLIB,
+ LC_LOAD_WEAK_DYLIB,
+ LC_PREBOUND_DYLIB,
+ LC_REEXPORT_DYLIB,
+ LC_REGISTRY,
+ LC_SEGMENT,
+ LC_SEGMENT_64,
+ LC_SYMTAB,
+ MH_CIGAM,
+ MH_CIGAM_64,
+ MH_FILETYPE_SHORTNAMES,
+ MH_MAGIC,
+ MH_MAGIC_64,
+ S_ZEROFILL,
+ fat_arch,
+ fat_arch64,
+ fat_header,
+ load_command,
+ mach_header,
+ mach_header_64,
+ section,
+ section_64,
+)
+from .ptypes import sizeof
+
+try:
+ from macholib.compat import bytes
+except ImportError:
+ pass
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+if sys.version_info[0] == 2:
+ range = xrange # noqa: F821
+
+__all__ = ["MachO"]
+
+_RELOCATABLE = {
+ # relocatable commands that should be used for dependency walking
+ LC_LOAD_DYLIB,
+ LC_LOAD_UPWARD_DYLIB,
+ LC_LOAD_WEAK_DYLIB,
+ LC_PREBOUND_DYLIB,
+ LC_REEXPORT_DYLIB,
+}
+
+_RELOCATABLE_NAMES = {
+ LC_LOAD_DYLIB: "load_dylib",
+ LC_LOAD_UPWARD_DYLIB: "load_upward_dylib",
+ LC_LOAD_WEAK_DYLIB: "load_weak_dylib",
+ LC_PREBOUND_DYLIB: "prebound_dylib",
+ LC_REEXPORT_DYLIB: "reexport_dylib",
+}
+
+
+def _shouldRelocateCommand(cmd):
+ """
+ Should this command id be investigated for relocation?
+ """
+ return cmd in _RELOCATABLE
+
+
+def lc_str_value(offset, cmd_info):
+ """
+ Fetch the actual value of a field of type "lc_str"
+ """
+ cmd_load, cmd_cmd, cmd_data = cmd_info
+
+ offset -= sizeof(cmd_load) + sizeof(cmd_cmd)
+ return cmd_data[offset:].strip(b"\x00")
+
+
+class MachO(object):
+ """
+ Provides reading/writing the Mach-O header of a specific existing file.
+
+ If allow_unknown_load_commands is True, allows unknown load commands.
+ Otherwise, raises ValueError if the file contains an unknown load command.
+ """
+
+ # filename - the original filename of this mach-o
+ # sizediff - the current deviation from the initial mach-o size
+ # header - the mach-o header
+ # commands - a list of (load_command, somecommand, data)
+ # data is either a str, or a list of segment structures
+ # total_size - the current mach-o header size (including header)
+ # low_offset - essentially, the maximum mach-o header size
+ # id_cmd - the index of my id command, or None
+
+ def __init__(self, filename, allow_unknown_load_commands=False):
+
+ # supports the ObjectGraph protocol
+ self.graphident = filename
+ self.filename = filename
+ self.loader_path = os.path.dirname(filename)
+
+ # initialized by load
+ self.fat = None
+ self.headers = []
+ self.allow_unknown_load_commands = allow_unknown_load_commands
+ with open(filename, "rb") as fp:
+ self.load(fp)
+
+ def __repr__(self):
+ return "<MachO filename=%r>" % (self.filename,)
+
+ def load(self, fh):
+ assert fh.tell() == 0
+ header = struct.unpack(">I", fh.read(4))[0]
+ fh.seek(0)
+ if header in (FAT_MAGIC, FAT_MAGIC_64):
+ self.load_fat(fh)
+ else:
+ fh.seek(0, 2)
+ size = fh.tell()
+ fh.seek(0)
+ self.load_header(fh, 0, size)
+
+ def load_fat(self, fh):
+ self.fat = fat_header.from_fileobj(fh)
+ if self.fat.magic == FAT_MAGIC:
+ archs = [fat_arch.from_fileobj(fh) for i in range(self.fat.nfat_arch)]
+ elif self.fat.magic == FAT_MAGIC_64:
+ archs = [fat_arch64.from_fileobj(fh) for i in range(self.fat.nfat_arch)]
+ else:
+ raise ValueError("Unknown fat header magic: %r" % (self.fat.magic))
+
+ for arch in archs:
+ self.load_header(fh, arch.offset, arch.size)
+
+ def rewriteLoadCommands(self, *args, **kw):
+ changed = False
+ for header in self.headers:
+ if header.rewriteLoadCommands(*args, **kw):
+ changed = True
+ return changed
+
+ def load_header(self, fh, offset, size):
+ fh.seek(offset)
+ header = struct.unpack(">I", fh.read(4))[0]
+ fh.seek(offset)
+ if header == MH_MAGIC:
+ magic, hdr, endian = MH_MAGIC, mach_header, ">"
+ elif header == MH_CIGAM:
+ magic, hdr, endian = MH_CIGAM, mach_header, "<"
+ elif header == MH_MAGIC_64:
+ magic, hdr, endian = MH_MAGIC_64, mach_header_64, ">"
+ elif header == MH_CIGAM_64:
+ magic, hdr, endian = MH_CIGAM_64, mach_header_64, "<"
+ else:
+ raise ValueError("Unknown Mach-O header: 0x%08x in %r" % (header, fh))
+ hdr = MachOHeader(
+ self, fh, offset, size, magic, hdr, endian, self.allow_unknown_load_commands
+ )
+ self.headers.append(hdr)
+
+ def write(self, f):
+ for header in self.headers:
+ header.write(f)
+
+
+class MachOHeader(object):
+ """
+ Provides reading/writing the Mach-O header of a specific existing file.
+
+ If allow_unknown_load_commands is True, allows unknown load commands.
+ Otherwise, raises ValueError if the file contains an unknown load command.
+ """
+
+ # filename - the original filename of this mach-o
+ # sizediff - the current deviation from the initial mach-o size
+ # header - the mach-o header
+ # commands - a list of (load_command, somecommand, data)
+ # data is either a str, or a list of segment structures
+ # total_size - the current mach-o header size (including header)
+ # low_offset - essentially, the maximum mach-o header size
+ # id_cmd - the index of my id command, or None
+
+ def __init__(
+ self,
+ parent,
+ fh,
+ offset,
+ size,
+ magic,
+ hdr,
+ endian,
+ allow_unknown_load_commands=False,
+ ):
+ self.MH_MAGIC = magic
+ self.mach_header = hdr
+
+ # These are all initialized by self.load()
+ self.parent = parent
+ self.offset = offset
+ self.size = size
+
+ self.endian = endian
+ self.header = None
+ self.commands = None
+ self.id_cmd = None
+ self.sizediff = None
+ self.total_size = None
+ self.low_offset = None
+ self.filetype = None
+ self.headers = []
+
+ self.allow_unknown_load_commands = allow_unknown_load_commands
+
+ self.load(fh)
+
+ def __repr__(self):
+ return "<%s filename=%r offset=%d size=%d endian=%r>" % (
+ type(self).__name__,
+ self.parent.filename,
+ self.offset,
+ self.size,
+ self.endian,
+ )
+
+ def load(self, fh):
+ fh = fileview(fh, self.offset, self.size)
+ fh.seek(0)
+
+ self.sizediff = 0
+ kw = {"_endian_": self.endian}
+ header = self.mach_header.from_fileobj(fh, **kw)
+ self.header = header
+ # if header.magic != self.MH_MAGIC:
+ # raise ValueError("header has magic %08x, expecting %08x" % (
+ # header.magic, self.MH_MAGIC))
+
+ cmd = self.commands = []
+
+ self.filetype = self.get_filetype_shortname(header.filetype)
+
+ read_bytes = 0
+ low_offset = sys.maxsize
+ for i in range(header.ncmds):
+ # read the load command
+ cmd_load = load_command.from_fileobj(fh, **kw)
+
+ # read the specific command
+ klass = LC_REGISTRY.get(cmd_load.cmd, None)
+ if klass is None:
+ if not self.allow_unknown_load_commands:
+ raise ValueError("Unknown load command: %d" % (cmd_load.cmd,))
+ # No load command in the registry, so append the load command itself
+ # instead of trying to deserialize the data after the header.
+ data_size = cmd_load.cmdsize - sizeof(load_command)
+ cmd_data = fh.read(data_size)
+ cmd.append((cmd_load, cmd_load, cmd_data))
+ read_bytes += cmd_load.cmdsize
+ continue
+
+ cmd_cmd = klass.from_fileobj(fh, **kw)
+
+ if cmd_load.cmd == LC_ID_DYLIB:
+ # remember where this command was
+ if self.id_cmd is not None:
+ raise ValueError("This dylib already has an id")
+ self.id_cmd = i
+
+ if cmd_load.cmd in (LC_SEGMENT, LC_SEGMENT_64):
+ # for segment commands, read the list of segments
+ segs = []
+ # assert that the size makes sense
+ if cmd_load.cmd == LC_SEGMENT:
+ section_cls = section
+ else: # LC_SEGMENT_64
+ section_cls = section_64
+
+ expected_size = (
+ sizeof(klass)
+ + sizeof(load_command)
+ + (sizeof(section_cls) * cmd_cmd.nsects)
+ )
+ if cmd_load.cmdsize != expected_size:
+ raise ValueError("Segment size mismatch")
+ # this is a zero block or something
+ # so the beginning is wherever the fileoff of this command is
+ if cmd_cmd.nsects == 0:
+ if cmd_cmd.filesize != 0:
+ low_offset = min(low_offset, cmd_cmd.fileoff)
+ else:
+ # this one has multiple segments
+ for _j in range(cmd_cmd.nsects):
+ # read the segment
+ seg = section_cls.from_fileobj(fh, **kw)
+ # if the segment has a size and is not zero filled
+ # then its beginning is the offset of this segment
+ not_zerofill = (seg.flags & S_ZEROFILL) != S_ZEROFILL
+ if seg.offset > 0 and seg.size > 0 and not_zerofill:
+ low_offset = min(low_offset, seg.offset)
+ if not_zerofill:
+ c = fh.tell()
+ fh.seek(seg.offset)
+ sd = fh.read(seg.size)
+ seg.add_section_data(sd)
+ fh.seek(c)
+ segs.append(seg)
+ # data is a list of segments
+ cmd_data = segs
+
+ # These are disabled for now because writing back doesn't work
+ # elif cmd_load.cmd == LC_CODE_SIGNATURE:
+ # c = fh.tell()
+ # fh.seek(cmd_cmd.dataoff)
+ # cmd_data = fh.read(cmd_cmd.datasize)
+ # fh.seek(c)
+ # elif cmd_load.cmd == LC_SYMTAB:
+ # c = fh.tell()
+ # fh.seek(cmd_cmd.stroff)
+ # cmd_data = fh.read(cmd_cmd.strsize)
+ # fh.seek(c)
+
+ else:
+ # data is a raw str
+ data_size = cmd_load.cmdsize - sizeof(klass) - sizeof(load_command)
+ cmd_data = fh.read(data_size)
+ cmd.append((cmd_load, cmd_cmd, cmd_data))
+ read_bytes += cmd_load.cmdsize
+
+ # make sure the header made sense
+ if read_bytes != header.sizeofcmds:
+ raise ValueError(
+ "Read %d bytes, header reports %d bytes"
+ % (read_bytes, header.sizeofcmds)
+ )
+ self.total_size = sizeof(self.mach_header) + read_bytes
+ self.low_offset = low_offset
+
+ def walkRelocatables(self, shouldRelocateCommand=_shouldRelocateCommand):
+ """
+ For all relocatable commands, yield
+ (command_index, command_name, filename).
+ """
+ for (idx, (lc, cmd, data)) in enumerate(self.commands):
+ if shouldRelocateCommand(lc.cmd):
+ name = _RELOCATABLE_NAMES[lc.cmd]
+ ofs = cmd.name - sizeof(lc.__class__) - sizeof(cmd.__class__)
+ yield idx, name, data[
+ ofs : data.find(b"\x00", ofs) # noqa: E203
+ ].decode(sys.getfilesystemencoding())
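+
+ # Illustrative sketch, not part of the vendored upstream source; the
+ # dylib path is hypothetical:
+ #
+ #     for idx, name, path in header.walkRelocatables():
+ #         print(idx, name, path)  # e.g. 1 load_dylib /usr/lib/libSystem.B.dylib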
+
+ def rewriteInstallNameCommand(self, loadcmd):
+ """Rewrite the load command of this dylib"""
+ if self.id_cmd is not None:
+ self.rewriteDataForCommand(self.id_cmd, loadcmd)
+ return True
+ return False
+
+ def changedHeaderSizeBy(self, bytes):
+ self.sizediff += bytes
+ if (self.total_size + self.sizediff) > self.low_offset:
+ print(
+ "WARNING: Mach-O header in %r may be too large to relocate"
+ % (self.parent.filename,)
+ )
+
+ def rewriteLoadCommands(self, changefunc):
+ """
+ Rewrite the load commands based upon a change dictionary
+ """
+ data = changefunc(self.parent.filename)
+ changed = False
+ if data is not None:
+ if self.rewriteInstallNameCommand(data.encode(sys.getfilesystemencoding())):
+ changed = True
+ for idx, _name, filename in self.walkRelocatables():
+ data = changefunc(filename)
+ if data is not None:
+ if self.rewriteDataForCommand(
+ idx, data.encode(sys.getfilesystemencoding())
+ ):
+ changed = True
+ return changed
+
+ def rewriteDataForCommand(self, idx, data):
+ lc, cmd, old_data = self.commands[idx]
+ hdrsize = sizeof(lc.__class__) + sizeof(cmd.__class__)
+ align = struct.calcsize("Q")
+ data = data + (b"\x00" * (align - (len(data) % align)))
+ newsize = hdrsize + len(data)
+ self.commands[idx] = (lc, cmd, data)
+ self.changedHeaderSizeBy(newsize - lc.cmdsize)
+ lc.cmdsize, cmd.name = newsize, hdrsize
+ return True
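+
+ # Note on the padding arithmetic above: `align - (len(data) % align)`
+ # rounds the name up to the next 8-byte boundary and always appends at
+ # least one NUL byte (a 13-byte name gains 3, a 16-byte name gains a
+ # full 8), so the stored lc_str stays NUL-terminated.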
+
+ def synchronize_size(self):
+ if (self.total_size + self.sizediff) > self.low_offset:
+ raise ValueError(
+ (
+ "New Mach-O header is too large to relocate in %r "
+ "(new size=%r, max size=%r, delta=%r)"
+ )
+ % (
+ self.parent.filename,
+ self.total_size + self.sizediff,
+ self.low_offset,
+ self.sizediff,
+ )
+ )
+ self.header.sizeofcmds += self.sizediff
+ self.total_size = sizeof(self.mach_header) + self.header.sizeofcmds
+ self.sizediff = 0
+
+ def write(self, fileobj):
+ fileobj = fileview(fileobj, self.offset, self.size)
+ fileobj.seek(0)
+
+ # serialize all the mach-o commands
+ self.synchronize_size()
+
+ self.header.to_fileobj(fileobj)
+ for lc, cmd, data in self.commands:
+ lc.to_fileobj(fileobj)
+ cmd.to_fileobj(fileobj)
+
+ if sys.version_info[0] == 2:
+ if isinstance(data, unicode):
+ fileobj.write(data.encode(sys.getfilesystemencoding()))
+
+ elif isinstance(data, (bytes, str)):
+ fileobj.write(data)
+ else:
+ # segments..
+ for obj in data:
+ obj.to_fileobj(fileobj)
+ else:
+ if isinstance(data, str):
+ fileobj.write(data.encode(sys.getfilesystemencoding()))
+
+ elif isinstance(data, bytes):
+ fileobj.write(data)
+
+ else:
+ # segments..
+ for obj in data:
+ obj.to_fileobj(fileobj)
+
+ # zero out the unused space, doubt this is strictly necessary
+ # and is generally probably already the case
+ fileobj.write(b"\x00" * (self.low_offset - fileobj.tell()))
+
+ def getSymbolTableCommand(self):
+ for lc, cmd, _data in self.commands:
+ if lc.cmd == LC_SYMTAB:
+ return cmd
+ return None
+
+ def getDynamicSymbolTableCommand(self):
+ for lc, cmd, _data in self.commands:
+ if lc.cmd == LC_DYSYMTAB:
+ return cmd
+ return None
+
+ def get_filetype_shortname(self, filetype):
+ if filetype in MH_FILETYPE_SHORTNAMES:
+ return MH_FILETYPE_SHORTNAMES[filetype]
+ else:
+ return "unknown"
+
+
+def main(fn):
+ m = MachO(fn)
+ seen = set()
+ for header in m.headers:
+ for _idx, name, other in header.walkRelocatables():
+ if other not in seen:
+ seen.add(other)
+ print("\t" + name + ": " + other)
+
+
+if __name__ == "__main__":
+ import sys
+
+ files = sys.argv[1:] or ["/bin/ls"]
+ for fn in files:
+ print(fn)
+ main(fn)
diff --git a/lib/spack/external/_vendoring/macholib/MachOGraph.py b/lib/spack/external/_vendoring/macholib/MachOGraph.py
new file mode 100644
index 0000000000..8943ed8bd3
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/MachOGraph.py
@@ -0,0 +1,141 @@
+"""
+Utilities for reading and writing Mach-O headers
+"""
+
+import os
+import sys
+
+from altgraph.ObjectGraph import ObjectGraph
+
+from macholib.dyld import dyld_find
+from macholib.itergraphreport import itergraphreport
+from macholib.MachO import MachO
+
+__all__ = ["MachOGraph"]
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+
+class MissingMachO(object):
+ def __init__(self, filename):
+ self.graphident = filename
+ self.headers = ()
+
+ def __repr__(self):
+ return "<%s graphident=%r>" % (type(self).__name__, self.graphident)
+
+
+class MachOGraph(ObjectGraph):
+ """
+ Graph data structure of Mach-O dependencies
+ """
+
+ def __init__(self, debug=0, graph=None, env=None, executable_path=None):
+ super(MachOGraph, self).__init__(debug=debug, graph=graph)
+ self.env = env
+ self.trans_table = {}
+ self.executable_path = executable_path
+
+ def locate(self, filename, loader=None):
+ if not isinstance(filename, (str, unicode)):
+ raise TypeError("%r is not a string" % (filename,))
+ if filename.startswith("@loader_path/") and loader is not None:
+ fn = self.trans_table.get((loader.filename, filename))
+ if fn is None:
+ loader_path = loader.loader_path
+
+ try:
+ fn = dyld_find(
+ filename,
+ env=self.env,
+ executable_path=self.executable_path,
+ loader_path=loader_path,
+ )
+ self.trans_table[(loader.filename, filename)] = fn
+ except ValueError:
+ return None
+
+ else:
+ fn = self.trans_table.get(filename)
+ if fn is None:
+ try:
+ fn = dyld_find(
+ filename, env=self.env, executable_path=self.executable_path
+ )
+ self.trans_table[filename] = fn
+ except ValueError:
+ return None
+ return fn
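+
+ # Illustrative sketch, not part of the vendored upstream source; the
+ # paths and node are hypothetical:
+ #
+ #     g = MachOGraph()
+ #     g.locate("@loader_path/libbar.dylib", loader=some_node)
+ #     # resolves via dyld_find(..., loader_path=some_node.loader_path)
+ #     # and caches under (some_node.filename, "@loader_path/libbar.dylib")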
+
+ def findNode(self, name, loader=None):
+ assert isinstance(name, (str, unicode))
+ data = super(MachOGraph, self).findNode(name)
+ if data is not None:
+ return data
+ newname = self.locate(name, loader=loader)
+ if newname is not None and newname != name:
+ return self.findNode(newname)
+ return None
+
+ def run_file(self, pathname, caller=None):
+ assert isinstance(pathname, (str, unicode))
+ self.msgin(2, "run_file", pathname)
+ m = self.findNode(pathname, loader=caller)
+ if m is None:
+ if not os.path.exists(pathname):
+ raise ValueError("%r does not exist" % (pathname,))
+ m = self.createNode(MachO, pathname)
+ self.createReference(caller, m, edge_data="run_file")
+ self.scan_node(m)
+ self.msgout(2, "")
+ return m
+
+ def load_file(self, name, caller=None):
+ assert isinstance(name, (str, unicode))
+ self.msgin(2, "load_file", name, caller)
+ m = self.findNode(name, loader=caller)
+ if m is None:
+ newname = self.locate(name, loader=caller)
+ if newname is not None and newname != name:
+ return self.load_file(newname, caller=caller)
+ if os.path.exists(name):
+ m = self.createNode(MachO, name)
+ self.scan_node(m)
+ else:
+ m = self.createNode(MissingMachO, name)
+ self.msgout(2, "")
+ return m
+
+ def scan_node(self, node):
+ self.msgin(2, "scan_node", node)
+ for header in node.headers:
+ for _idx, name, filename in header.walkRelocatables():
+ assert isinstance(name, (str, unicode))
+ assert isinstance(filename, (str, unicode))
+ m = self.load_file(filename, caller=node)
+ self.createReference(node, m, edge_data=name)
+ self.msgout(2, "", node)
+
+ def itergraphreport(self, name="G"):
+ nodes = map(self.graph.describe_node, self.graph.iterdfs(self))
+ describe_edge = self.graph.describe_edge
+ return itergraphreport(nodes, describe_edge, name=name)
+
+ def graphreport(self, fileobj=None):
+ if fileobj is None:
+ fileobj = sys.stdout
+ fileobj.writelines(self.itergraphreport())
+
+
+def main(args):
+ g = MachOGraph()
+ for arg in args:
+ g.run_file(arg)
+ g.graphreport()
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:] or ["/bin/ls"])
diff --git a/lib/spack/external/_vendoring/macholib/MachOStandalone.py b/lib/spack/external/_vendoring/macholib/MachOStandalone.py
new file mode 100644
index 0000000000..6ce154227c
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/MachOStandalone.py
@@ -0,0 +1,173 @@
+import os
+from collections import deque
+
+from macholib.dyld import framework_info
+from macholib.MachOGraph import MachOGraph, MissingMachO
+from macholib.util import (
+ flipwritable,
+ has_filename_filter,
+ in_system_path,
+ iter_platform_files,
+ mergecopy,
+ mergetree,
+)
+
+
+class ExcludedMachO(MissingMachO):
+ pass
+
+
+class FilteredMachOGraph(MachOGraph):
+ def __init__(self, delegate, *args, **kwargs):
+ super(FilteredMachOGraph, self).__init__(*args, **kwargs)
+ self.delegate = delegate
+
+ def createNode(self, cls, name):
+ cls = self.delegate.getClass(name, cls)
+ res = super(FilteredMachOGraph, self).createNode(cls, name)
+ return self.delegate.update_node(res)
+
+ def locate(self, filename, loader=None):
+ newname = super(FilteredMachOGraph, self).locate(filename, loader)
+ if newname is None:
+ return None
+ return self.delegate.locate(newname, loader=loader)
+
+
+class MachOStandalone(object):
+ def __init__(self, base, dest=None, graph=None, env=None, executable_path=None):
+ self.base = os.path.join(os.path.abspath(base), "")
+ if dest is None:
+ dest = os.path.join(self.base, "Contents", "Frameworks")
+ self.dest = dest
+ self.mm = FilteredMachOGraph(
+ self, graph=graph, env=env, executable_path=executable_path
+ )
+ self.changemap = {}
+ self.excludes = []
+ self.pending = deque()
+
+ def update_node(self, m):
+ return m
+
+ def getClass(self, name, cls):
+ if in_system_path(name):
+ return ExcludedMachO
+ for base in self.excludes:
+ if name.startswith(base):
+ return ExcludedMachO
+ return cls
+
+ def locate(self, filename, loader=None):
+ if in_system_path(filename):
+ return filename
+ if filename.startswith(self.base):
+ return filename
+ for base in self.excludes:
+ if filename.startswith(base):
+ return filename
+ if filename in self.changemap:
+ return self.changemap[filename]
+ info = framework_info(filename)
+ if info is None:
+ res = self.copy_dylib(filename)
+ self.changemap[filename] = res
+ return res
+ else:
+ res = self.copy_framework(info)
+ self.changemap[filename] = res
+ return res
+
+ def copy_dylib(self, filename):
+        # When the filename is a symlink, use the basename of the link's
+        # target as the name in the standalone bundle. This avoids problems
+        # when two libraries link to the same dylib but through different
+        # symlinks.
+ if os.path.islink(filename):
+ dest = os.path.join(self.dest, os.path.basename(os.path.realpath(filename)))
+ else:
+ dest = os.path.join(self.dest, os.path.basename(filename))
+
+ if not os.path.exists(dest):
+ self.mergecopy(filename, dest)
+ return dest
+
+ def mergecopy(self, src, dest):
+ return mergecopy(src, dest)
+
+ def mergetree(self, src, dest):
+ return mergetree(src, dest)
+
+ def copy_framework(self, info):
+ dest = os.path.join(self.dest, info["shortname"] + ".framework")
+ destfn = os.path.join(self.dest, info["name"])
+ src = os.path.join(info["location"], info["shortname"] + ".framework")
+ if not os.path.exists(dest):
+ self.mergetree(src, dest)
+ self.pending.append((destfn, iter_platform_files(dest)))
+ return destfn
+
+ def run(self, platfiles=None, contents=None):
+ mm = self.mm
+ if contents is None:
+ contents = "@executable_path/.."
+ if platfiles is None:
+ platfiles = iter_platform_files(self.base)
+
+ for fn in platfiles:
+ mm.run_file(fn)
+
+ while self.pending:
+ fmwk, files = self.pending.popleft()
+ ref = mm.findNode(fmwk)
+ for fn in files:
+ mm.run_file(fn, caller=ref)
+
+ changemap = {}
+ skipcontents = os.path.join(os.path.dirname(self.dest), "")
+ machfiles = []
+
+ for node in mm.flatten(has_filename_filter):
+ machfiles.append(node)
+ dest = os.path.join(
+ contents,
+ os.path.normpath(node.filename[len(skipcontents) :]), # noqa: E203
+ )
+ changemap[node.filename] = dest
+
+ def changefunc(path):
+ if path.startswith("@loader_path/"):
+                # Quick hack for py2app: in that use case, paths like
+                # this appear in the load commands of relocatable
+                # wheels and don't need rewriting.
+ return path
+
+ res = mm.locate(path)
+ rv = changemap.get(res)
+ if rv is None and path.startswith("@loader_path/"):
+ rv = changemap.get(mm.locate(mm.trans_table.get((node.filename, path))))
+ return rv
+
+ for node in machfiles:
+ fn = mm.locate(node.filename)
+ if fn is None:
+ continue
+ rewroteAny = False
+ for _header in node.headers:
+ if node.rewriteLoadCommands(changefunc):
+ rewroteAny = True
+ if rewroteAny:
+ old_mode = flipwritable(fn)
+ try:
+ with open(fn, "rb+") as f:
+ for _header in node.headers:
+ f.seek(0)
+ node.write(f)
+ f.seek(0, 2)
+ f.flush()
+ finally:
+ flipwritable(fn, old_mode)
+
+ allfiles = [mm.locate(node.filename) for node in machfiles]
+ return set(filter(None, allfiles))
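+
+
+# A minimal usage sketch (illustrative only: the bundle path below is
+# hypothetical, and run() assumes a standard .app bundle layout):
+#
+#     mso = MachOStandalone("/path/to/Some.app")
+#     copied = mso.run()      # copy dylibs/frameworks into the bundle
+#     print(sorted(copied))   # files now referenced by the bundle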
diff --git a/lib/spack/external/_vendoring/macholib/SymbolTable.py b/lib/spack/external/_vendoring/macholib/SymbolTable.py
new file mode 100644
index 0000000000..006abab59f
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/SymbolTable.py
@@ -0,0 +1,104 @@
+"""
+Class to read the symbol table from a Mach-O header
+"""
+from __future__ import with_statement
+
+import sys
+
+from macholib.mach_o import (
+ MH_CIGAM_64,
+ MH_MAGIC_64,
+ dylib_module,
+ dylib_reference,
+ dylib_table_of_contents,
+ nlist,
+ nlist_64,
+ relocation_info,
+)
+
+__all__ = ["SymbolTable"]
+
+if sys.version_info[0] == 2:
+ range = xrange # noqa: F821
+
+
+class SymbolTable(object):
+ def __init__(self, macho, header=None, openfile=None):
+ if openfile is None:
+ openfile = open
+ if header is None:
+ header = macho.headers[0]
+ self.macho_header = header
+ with openfile(macho.filename, "rb") as fh:
+ self.symtab = header.getSymbolTableCommand()
+ self.dysymtab = header.getDynamicSymbolTableCommand()
+
+ if self.symtab is not None:
+ self.nlists = self.readSymbolTable(fh)
+
+ if self.dysymtab is not None:
+ self.readDynamicSymbolTable(fh)
+
+ def readSymbolTable(self, fh):
+ cmd = self.symtab
+ fh.seek(self.macho_header.offset + cmd.stroff)
+ strtab = fh.read(cmd.strsize)
+ fh.seek(self.macho_header.offset + cmd.symoff)
+ nlists = []
+
+ if self.macho_header.MH_MAGIC in [MH_MAGIC_64, MH_CIGAM_64]:
+ cls = nlist_64
+ else:
+ cls = nlist
+
+ for _i in range(cmd.nsyms):
+ cmd = cls.from_fileobj(fh, _endian_=self.macho_header.endian)
+ if cmd.n_un == 0:
+ nlists.append((cmd, ""))
+ else:
+ nlists.append(
+ (
+ cmd,
+ strtab[cmd.n_un : strtab.find(b"\x00", cmd.n_un)], # noqa: E203
+ )
+ )
+ return nlists
+
+ def readDynamicSymbolTable(self, fh):
+ cmd = self.dysymtab
+ nlists = self.nlists
+
+ self.localsyms = nlists[
+ cmd.ilocalsym : cmd.ilocalsym + cmd.nlocalsym # noqa: E203
+ ]
+ self.extdefsyms = nlists[
+ cmd.iextdefsym : cmd.iextdefsym + cmd.nextdefsym # noqa: E203
+ ]
+ self.undefsyms = nlists[
+ cmd.iundefsym : cmd.iundefsym + cmd.nundefsym # noqa: E203
+ ]
+ if cmd.tocoff == 0:
+ self.toc = None
+ else:
+ self.toc = self.readtoc(fh, cmd.tocoff, cmd.ntoc)
+
+ def readtoc(self, fh, off, n):
+ fh.seek(self.macho_header.offset + off)
+ return [dylib_table_of_contents.from_fileobj(fh) for i in range(n)]
+
+ def readmodtab(self, fh, off, n):
+ fh.seek(self.macho_header.offset + off)
+ return [dylib_module.from_fileobj(fh) for i in range(n)]
+
+ def readsym(self, fh, off, n):
+ fh.seek(self.macho_header.offset + off)
+ refs = []
+ for _i in range(n):
+ ref = dylib_reference.from_fileobj(fh)
+ isym, flags = divmod(ref.isym_flags, 256)
+ refs.append((self.nlists[isym], flags))
+ return refs
+
+ def readrel(self, fh, off, n):
+ fh.seek(self.macho_header.offset + off)
+ return [relocation_info.from_fileobj(fh) for i in range(n)]
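+
+
+# A minimal usage sketch (assumes macholib.MachO and a binary that actually
+# carries an LC_SYMTAB command; "/bin/ls" is purely illustrative):
+#
+#     from macholib.MachO import MachO
+#     st = SymbolTable(MachO("/bin/ls"))
+#     print(len(st.nlists))   # (nlist, name) pairs read from the symbol table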
diff --git a/lib/spack/external/_vendoring/macholib/__init__.py b/lib/spack/external/_vendoring/macholib/__init__.py
new file mode 100644
index 0000000000..9b8d939fd5
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/__init__.py
@@ -0,0 +1,8 @@
+"""
+Enough Mach-O to make your head spin.
+
+See the relevant header files in /usr/include/mach-o
+
+And also Apple's documentation.
+"""
+__version__ = "1.16.2"
diff --git a/lib/spack/external/_vendoring/macholib/__main__.py b/lib/spack/external/_vendoring/macholib/__main__.py
new file mode 100644
index 0000000000..dd3cc11610
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/__main__.py
@@ -0,0 +1,80 @@
+from __future__ import absolute_import, print_function
+
+import os
+import sys
+
+from macholib import macho_dump, macho_standalone
+from macholib.util import is_platform_file
+
+gCommand = None
+
+
+def check_file(fp, path, callback):
+ if not os.path.exists(path):
+ print("%s: %s: No such file or directory" % (gCommand, path), file=sys.stderr)
+ return 1
+
+ try:
+ is_plat = is_platform_file(path)
+
+ except IOError as msg:
+ print("%s: %s: %s" % (gCommand, path, msg), file=sys.stderr)
+ return 1
+
+ else:
+ if is_plat:
+ callback(fp, path)
+ return 0
+
+
+def walk_tree(callback, paths):
+ err = 0
+
+ for base in paths:
+ if os.path.isdir(base):
+ for root, _dirs, files in os.walk(base):
+ for fn in files:
+ err |= check_file(sys.stdout, os.path.join(root, fn), callback)
+ else:
+ err |= check_file(sys.stdout, base, callback)
+
+ return err
+
+
+def print_usage(fp):
+ print("Usage:", file=fp)
+ print(" python -mmacholib [help|--help]", file=fp)
+ print(" python -mmacholib dump FILE ...", file=fp)
+ print(" python -mmacholib find DIR ...", file=fp)
+ print(" python -mmacholib standalone DIR ...", file=fp)
+
+
+def main():
+ global gCommand
+ if len(sys.argv) < 3:
+ print_usage(sys.stderr)
+ sys.exit(1)
+
+ gCommand = sys.argv[1]
+
+ if gCommand == "dump":
+ walk_tree(macho_dump.print_file, sys.argv[2:])
+
+ elif gCommand == "find":
+ walk_tree(lambda fp, path: print(path, file=fp), sys.argv[2:])
+
+ elif gCommand == "standalone":
+ for dn in sys.argv[2:]:
+ macho_standalone.standaloneApp(dn)
+
+ elif gCommand in ("help", "--help"):
+ print_usage(sys.stdout)
+ sys.exit(0)
+
+ else:
+ print_usage(sys.stderr)
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/spack/external/_vendoring/macholib/_cmdline.py b/lib/spack/external/_vendoring/macholib/_cmdline.py
new file mode 100644
index 0000000000..9304fb6257
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/_cmdline.py
@@ -0,0 +1,49 @@
+"""
+Internal helpers for basic commandline tools
+"""
+from __future__ import absolute_import, print_function
+
+import os
+import sys
+
+from macholib.util import is_platform_file
+
+
+def check_file(fp, path, callback):
+ if not os.path.exists(path):
+ print(
+ "%s: %s: No such file or directory" % (sys.argv[0], path), file=sys.stderr
+ )
+ return 1
+
+ try:
+ is_plat = is_platform_file(path)
+
+ except IOError as msg:
+ print("%s: %s: %s" % (sys.argv[0], path, msg), file=sys.stderr)
+ return 1
+
+ else:
+ if is_plat:
+ callback(fp, path)
+ return 0
+
+
+def main(callback):
+ args = sys.argv[1:]
+ name = os.path.basename(sys.argv[0])
+ err = 0
+
+ if not args:
+ print("Usage: %s filename..." % (name,), file=sys.stderr)
+ return 1
+
+ for base in args:
+ if os.path.isdir(base):
+ for root, _dirs, files in os.walk(base):
+ for fn in files:
+ err |= check_file(sys.stdout, os.path.join(root, fn), callback)
+ else:
+ err |= check_file(sys.stdout, base, callback)
+
+ return err
diff --git a/lib/spack/external/_vendoring/macholib/dyld.py b/lib/spack/external/_vendoring/macholib/dyld.py
new file mode 100644
index 0000000000..fcb23198cb
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/dyld.py
@@ -0,0 +1,228 @@
+"""
+dyld emulation
+"""
+
+import ctypes
+import os
+import platform
+import sys
+from itertools import chain
+
+from macholib.dylib import dylib_info
+from macholib.framework import framework_info
+
+__all__ = ["dyld_find", "framework_find", "framework_info", "dylib_info"]
+
+if sys.platform == "darwin" and [
+ int(x) for x in platform.mac_ver()[0].split(".")[:2]
+] >= [10, 16]:
+ try:
+ libc = ctypes.CDLL("libSystem.dylib")
+
+ except OSError:
+ _dyld_shared_cache_contains_path = None
+
+ else:
+ try:
+ _dyld_shared_cache_contains_path = libc._dyld_shared_cache_contains_path
+ except AttributeError:
+ _dyld_shared_cache_contains_path = None
+
+        else:
+            _dyld_shared_cache_contains_path.restype = ctypes.c_bool
+            _dyld_shared_cache_contains_path.argtypes = [ctypes.c_char_p]
+
+            if sys.version_info[0] != 2:
+                __dyld_shared_cache_contains_path = _dyld_shared_cache_contains_path
+
+                def _dyld_shared_cache_contains_path(path):
+                    return __dyld_shared_cache_contains_path(path.encode())
+
+else:
+ _dyld_shared_cache_contains_path = None
+
+# These are the defaults as per man dyld(1)
+#
+_DEFAULT_FRAMEWORK_FALLBACK = [
+ os.path.expanduser("~/Library/Frameworks"),
+ "/Library/Frameworks",
+ "/Network/Library/Frameworks",
+ "/System/Library/Frameworks",
+]
+
+_DEFAULT_LIBRARY_FALLBACK = [
+ os.path.expanduser("~/lib"),
+ "/usr/local/lib",
+ "/lib",
+ "/usr/lib",
+]
+
+if sys.version_info[0] == 2:
+
+ def _ensure_utf8(s):
+ if isinstance(s, unicode): # noqa: F821
+ return s.encode("utf8")
+ return s
+
+else:
+
+ def _ensure_utf8(s):
+ if s is not None and not isinstance(s, str):
+ raise ValueError(s)
+ return s
+
+
+def _dyld_env(env, var):
+ if env is None:
+ env = os.environ
+ rval = env.get(var)
+ if rval is None or rval == "":
+ return []
+ return rval.split(":")
+
+
+def dyld_image_suffix(env=None):
+ if env is None:
+ env = os.environ
+ return env.get("DYLD_IMAGE_SUFFIX")
+
+
+def dyld_framework_path(env=None):
+ return _dyld_env(env, "DYLD_FRAMEWORK_PATH")
+
+
+def dyld_library_path(env=None):
+ return _dyld_env(env, "DYLD_LIBRARY_PATH")
+
+
+def dyld_fallback_framework_path(env=None):
+ return _dyld_env(env, "DYLD_FALLBACK_FRAMEWORK_PATH")
+
+
+def dyld_fallback_library_path(env=None):
+ return _dyld_env(env, "DYLD_FALLBACK_LIBRARY_PATH")
+
+
+def dyld_image_suffix_search(iterator, env=None):
+ """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
+ suffix = dyld_image_suffix(env)
+ if suffix is None:
+ return iterator
+
+ def _inject(iterator=iterator, suffix=suffix):
+ for path in iterator:
+ if path.endswith(".dylib"):
+ yield path[: -len(".dylib")] + suffix + ".dylib"
+ else:
+ yield path + suffix
+ yield path
+
+ return _inject()
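+
+# Illustration: with DYLD_IMAGE_SUFFIX="_debug", a candidate path such as
+# the hypothetical "/usr/lib/libfoo.dylib" is expanded to
+# "/usr/lib/libfoo_debug.dylib" followed by the original path.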
+
+
+def dyld_override_search(name, env=None):
+    # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
+    # framework name, use the first file that exists in the framework
+    # path, if any. If there is none, go on to search DYLD_LIBRARY_PATH,
+    # if any.
+
+ framework = framework_info(name)
+
+ if framework is not None:
+ for path in dyld_framework_path(env):
+ yield os.path.join(path, framework["name"])
+
+    # If DYLD_LIBRARY_PATH is set, use the first file that exists
+    # in the path. If none exists, fall back to the original name.
+ for path in dyld_library_path(env):
+ yield os.path.join(path, os.path.basename(name))
+
+
+def dyld_executable_path_search(name, executable_path=None):
+    # If the library hasn't been found yet and the dylib_name starts
+    # with "@executable_path/", construct the candidate path relative
+    # to the executable.
+ if name.startswith("@executable_path/") and executable_path is not None:
+ yield os.path.join(
+ executable_path, name[len("@executable_path/") :] # noqa: E203
+ )
+
+
+def dyld_loader_search(name, loader_path=None):
+    # If the library hasn't been found yet and the dylib_name starts
+    # with "@loader_path/", construct the candidate path relative to
+    # the loading binary.
+ if name.startswith("@loader_path/") and loader_path is not None:
+ yield os.path.join(loader_path, name[len("@loader_path/") :]) # noqa: E203
+
+
+def dyld_default_search(name, env=None):
+ yield name
+
+ framework = framework_info(name)
+
+ if framework is not None:
+ fallback_framework_path = dyld_fallback_framework_path(env)
+
+ if fallback_framework_path:
+ for path in fallback_framework_path:
+ yield os.path.join(path, framework["name"])
+
+ else:
+ for path in _DEFAULT_FRAMEWORK_FALLBACK:
+ yield os.path.join(path, framework["name"])
+
+ fallback_library_path = dyld_fallback_library_path(env)
+ if fallback_library_path:
+ for path in fallback_library_path:
+ yield os.path.join(path, os.path.basename(name))
+
+ else:
+ for path in _DEFAULT_LIBRARY_FALLBACK:
+ yield os.path.join(path, os.path.basename(name))
+
+
+def dyld_find(name, executable_path=None, env=None, loader_path=None):
+ """
+ Find a library or framework using dyld semantics
+ """
+ name = _ensure_utf8(name)
+ executable_path = _ensure_utf8(executable_path)
+ for path in dyld_image_suffix_search(
+ chain(
+ dyld_override_search(name, env),
+ dyld_executable_path_search(name, executable_path),
+ dyld_loader_search(name, loader_path),
+ dyld_default_search(name, env),
+ ),
+ env,
+ ):
+ if (
+ _dyld_shared_cache_contains_path is not None
+ and _dyld_shared_cache_contains_path(path)
+ ):
+ return path
+ if os.path.isfile(path):
+ return path
+ raise ValueError("dylib %s could not be found" % (name,))
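+
+# Sketch of the search order (paths are illustrative): a call like
+#
+#     dyld_find("@executable_path/../Frameworks/Foo.dylib",
+#               executable_path="/Applications/App.app/Contents/MacOS")
+#
+# tries the override, executable-relative, loader-relative and default
+# candidates in that order; the first existing file wins.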
+
+
+def framework_find(fn, executable_path=None, env=None):
+ """
+ Find a framework using dyld semantics in a very loose manner.
+
+ Will take input such as:
+ Python
+ Python.framework
+ Python.framework/Versions/Current
+ """
+ try:
+ return dyld_find(fn, executable_path=executable_path, env=env)
+ except ValueError:
+ pass
+ fmwk_index = fn.rfind(".framework")
+ if fmwk_index == -1:
+ fmwk_index = len(fn)
+ fn += ".framework"
+ fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
+ return dyld_find(fn, executable_path=executable_path, env=env)
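+
+
+# Hedged example: on a typical macOS install one would expect
+#
+#     framework_find("Python")
+#
+# to resolve to something like "/Library/Frameworks/Python.framework/Python"
+# (the exact result depends on the system).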
diff --git a/lib/spack/external/_vendoring/macholib/dylib.py b/lib/spack/external/_vendoring/macholib/dylib.py
new file mode 100644
index 0000000000..72301a8f19
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/dylib.py
@@ -0,0 +1,45 @@
+"""
+Generic dylib path manipulation
+"""
+
+import re
+
+__all__ = ["dylib_info"]
+
+_DYLIB_RE = re.compile(
+ r"""(?x)
+(?P<location>^.*)(?:^|/)
+(?P<name>
+ (?P<shortname>\w+?)
+ (?:\.(?P<version>[^._]+))?
+ (?:_(?P<suffix>[^._]+))?
+ \.dylib$
+)
+"""
+)
+
+
+def dylib_info(filename):
+ """
+ A dylib name can take one of the following four forms:
+ Location/Name.SomeVersion_Suffix.dylib
+ Location/Name.SomeVersion.dylib
+ Location/Name_Suffix.dylib
+ Location/Name.dylib
+
+    returns None if not found, or a mapping equivalent to:
+ dict(
+ location='Location',
+ name='Name.SomeVersion_Suffix.dylib',
+ shortname='Name',
+ version='SomeVersion',
+ suffix='Suffix',
+ )
+
+ Note that SomeVersion and Suffix are optional and may be None
+ if not present.
+ """
+ is_dylib = _DYLIB_RE.match(filename)
+ if not is_dylib:
+ return None
+ return is_dylib.groupdict()
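+
+
+# Hedged example (the path is hypothetical):
+#
+#     >>> dylib_info("/usr/lib/libSSL.A_debug.dylib")
+#     {'location': '/usr/lib', 'name': 'libSSL.A_debug.dylib',
+#      'shortname': 'libSSL', 'version': 'A', 'suffix': 'debug'}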
diff --git a/lib/spack/external/_vendoring/macholib/framework.py b/lib/spack/external/_vendoring/macholib/framework.py
new file mode 100644
index 0000000000..027001e413
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/framework.py
@@ -0,0 +1,45 @@
+"""
+Generic framework path manipulation
+"""
+
+import re
+
+__all__ = ["framework_info"]
+
+_STRICT_FRAMEWORK_RE = re.compile(
+ r"""(?x)
+(?P<location>^.*)(?:^|/)
+(?P<name>
+    (?P<shortname>[-_A-Za-z0-9]+)\.framework/
+ (?:Versions/(?P<version>[^/]+)/)?
+ (?P=shortname)
+ (?:_(?P<suffix>[^_]+))?
+)$
+"""
+)
+
+
+def framework_info(filename):
+ """
+ A framework name can take one of the following four forms:
+ Location/Name.framework/Versions/SomeVersion/Name_Suffix
+ Location/Name.framework/Versions/SomeVersion/Name
+ Location/Name.framework/Name_Suffix
+ Location/Name.framework/Name
+
+ returns None if not found, or a mapping equivalent to:
+ dict(
+ location='Location',
+ name='Name.framework/Versions/SomeVersion/Name_Suffix',
+ shortname='Name',
+ version='SomeVersion',
+ suffix='Suffix',
+ )
+
+ Note that SomeVersion and Suffix are optional and may be None
+    if not present.
+ """
+ is_framework = _STRICT_FRAMEWORK_RE.match(filename)
+ if not is_framework:
+ return None
+ return is_framework.groupdict()
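+
+
+# Hedged example (the path is hypothetical):
+#
+#     >>> framework_info(
+#     ...     "/Library/Frameworks/Python.framework/Versions/2.7/Python")
+#     {'location': '/Library/Frameworks',
+#      'name': 'Python.framework/Versions/2.7/Python',
+#      'shortname': 'Python', 'version': '2.7', 'suffix': None}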
diff --git a/lib/spack/external/_vendoring/macholib/itergraphreport.py b/lib/spack/external/_vendoring/macholib/itergraphreport.py
new file mode 100644
index 0000000000..08692c057c
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/itergraphreport.py
@@ -0,0 +1,73 @@
+"""
+Utilities for creating dot output from a MachOGraph
+"""
+
+from collections import deque
+
+try:
+ from itertools import imap
+except ImportError:
+ imap = map
+
+__all__ = ["itergraphreport"]
+
+
+def itergraphreport(nodes, describe_edge, name="G"):
+    # "nodes" may be a one-shot iterator (MachOGraph passes a map object);
+    # materialize it, since it is iterated twice below.
+    nodes = list(nodes)
+    edges = deque()
+ nodetoident = {}
+
+ def nodevisitor(node, data, outgoing, incoming):
+ return {"label": str(node)}
+
+ def edgevisitor(edge, data, head, tail):
+ return {}
+
+ yield "digraph %s {\n" % (name,)
+ attr = {"rankdir": "LR", "concentrate": "true"}
+ cpatt = '%s="%s"'
+ for item in attr.items():
+ yield "\t%s;\n" % (cpatt % item,)
+
+ # find all packages (subgraphs)
+ for (node, data, _outgoing, _incoming) in nodes:
+ nodetoident[node] = getattr(data, "identifier", node)
+
+ # create sets for subgraph, write out descriptions
+ for (node, data, outgoing, incoming) in nodes:
+ # update edges
+ for edge in imap(describe_edge, outgoing):
+ edges.append(edge)
+
+ # describe node
+ yield '\t"%s" [%s];\n' % (
+ node,
+ ",".join(
+ [
+ (cpatt % item)
+ for item in nodevisitor(node, data, outgoing, incoming).items()
+ ]
+ ),
+ )
+
+ graph = []
+
+ while edges:
+ edge, data, head, tail = edges.popleft()
+ if data in ("run_file", "load_dylib"):
+ graph.append((edge, data, head, tail))
+
+ def do_graph(edges, tabs):
+ edgestr = tabs + '"%s" -> "%s" [%s];\n'
+ # describe edge
+ for (edge, data, head, tail) in edges:
+ attribs = edgevisitor(edge, data, head, tail)
+ yield edgestr % (
+ head,
+ tail,
+ ",".join([(cpatt % item) for item in attribs.items()]),
+ )
+
+ for s in do_graph(graph, "\t"):
+ yield s
+
+ yield "}\n"
diff --git a/lib/spack/external/_vendoring/macholib/mach_o.py b/lib/spack/external/_vendoring/macholib/mach_o.py
new file mode 100644
index 0000000000..bfe6157172
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/mach_o.py
@@ -0,0 +1,1641 @@
+"""
+Other than changing the load commands in such a way that they do not
+contain the load command itself, this is largely a by-hand conversion
+of the C headers. Everything in here should be at least as obvious as the
+C headers, but use the C headers as the real reference: the documentation
+didn't come along for the ride.
+
+Doing much of anything with the symbol tables or segments is really
+not covered at this point.
+
+See /usr/include/mach-o and friends.
+"""
+
+import time
+
+from macholib.ptypes import (
+ Structure,
+ p_int32,
+ p_int64,
+ p_long,
+ p_short,
+ p_uint8,
+ p_uint32,
+ p_uint64,
+ p_ulong,
+ pypackable,
+)
+
+_CPU_ARCH_ABI64 = 0x01000000
+
+CPU_TYPE_NAMES = {
+ -1: "ANY",
+ 1: "VAX",
+ 6: "MC680x0",
+ 7: "i386",
+ _CPU_ARCH_ABI64 | 7: "x86_64",
+ 8: "MIPS",
+ 10: "MC98000",
+ 11: "HPPA",
+ 12: "ARM",
+ _CPU_ARCH_ABI64 | 12: "ARM64",
+ 13: "MC88000",
+ 14: "SPARC",
+ 15: "i860",
+ 16: "Alpha",
+ 18: "PowerPC",
+ _CPU_ARCH_ABI64 | 18: "PowerPC64",
+}
+
+INTEL64_SUBTYPE = {
+ 3: "CPU_SUBTYPE_X86_64_ALL",
+ 4: "CPU_SUBTYPE_X86_ARCH1",
+ 8: "CPU_SUBTYPE_X86_64_H",
+}
+
+# define CPU_SUBTYPE_INTEL(f, m) ((cpu_subtype_t) (f) + ((m) << 4))
+INTEL_SUBTYPE = {
+ 0: "CPU_SUBTYPE_INTEL_MODEL_ALL",
+ 1: "CPU_THREADTYPE_INTEL_HTT",
+ 3: "CPU_SUBTYPE_I386_ALL",
+ 4: "CPU_SUBTYPE_486",
+ 5: "CPU_SUBTYPE_586",
+ 8: "CPU_SUBTYPE_PENTIUM_3",
+ 9: "CPU_SUBTYPE_PENTIUM_M",
+ 10: "CPU_SUBTYPE_PENTIUM_4",
+ 11: "CPU_SUBTYPE_ITANIUM",
+ 12: "CPU_SUBTYPE_XEON",
+ 34: "CPU_SUBTYPE_XEON_MP",
+ 42: "CPU_SUBTYPE_PENTIUM_4_M",
+ 43: "CPU_SUBTYPE_ITANIUM_2",
+ 38: "CPU_SUBTYPE_PENTPRO",
+ 40: "CPU_SUBTYPE_PENTIUM_3_M",
+ 52: "CPU_SUBTYPE_PENTIUM_3_XEON",
+ 102: "CPU_SUBTYPE_PENTII_M3",
+ 132: "CPU_SUBTYPE_486SX",
+ 166: "CPU_SUBTYPE_PENTII_M5",
+ 199: "CPU_SUBTYPE_CELERON",
+ 231: "CPU_SUBTYPE_CELERON_MOBILE",
+}
+
+MC680_SUBTYPE = {
+ 1: "CPU_SUBTYPE_MC680x0_ALL",
+ 2: "CPU_SUBTYPE_MC68040",
+ 3: "CPU_SUBTYPE_MC68030_ONLY",
+}
+
+MIPS_SUBTYPE = {
+ 0: "CPU_SUBTYPE_MIPS_ALL",
+ 1: "CPU_SUBTYPE_MIPS_R2300",
+ 2: "CPU_SUBTYPE_MIPS_R2600",
+ 3: "CPU_SUBTYPE_MIPS_R2800",
+ 4: "CPU_SUBTYPE_MIPS_R2000a",
+ 5: "CPU_SUBTYPE_MIPS_R2000",
+ 6: "CPU_SUBTYPE_MIPS_R3000a",
+ 7: "CPU_SUBTYPE_MIPS_R3000",
+}
+
+MC98000_SUBTYPE = {0: "CPU_SUBTYPE_MC98000_ALL", 1: "CPU_SUBTYPE_MC98601"}
+
+HPPA_SUBTYPE = {0: "CPU_SUBTYPE_HPPA_7100", 1: "CPU_SUBTYPE_HPPA_7100LC"}
+
+MC88_SUBTYPE = {
+ 0: "CPU_SUBTYPE_MC88000_ALL",
+ 1: "CPU_SUBTYPE_MC88100",
+ 2: "CPU_SUBTYPE_MC88110",
+}
+
+SPARC_SUBTYPE = {0: "CPU_SUBTYPE_SPARC_ALL"}
+
+I860_SUBTYPE = {0: "CPU_SUBTYPE_I860_ALL", 1: "CPU_SUBTYPE_I860_860"}
+
+POWERPC_SUBTYPE = {
+ 0: "CPU_SUBTYPE_POWERPC_ALL",
+ 1: "CPU_SUBTYPE_POWERPC_601",
+ 2: "CPU_SUBTYPE_POWERPC_602",
+ 3: "CPU_SUBTYPE_POWERPC_603",
+ 4: "CPU_SUBTYPE_POWERPC_603e",
+ 5: "CPU_SUBTYPE_POWERPC_603ev",
+ 6: "CPU_SUBTYPE_POWERPC_604",
+ 7: "CPU_SUBTYPE_POWERPC_604e",
+ 8: "CPU_SUBTYPE_POWERPC_620",
+ 9: "CPU_SUBTYPE_POWERPC_750",
+ 10: "CPU_SUBTYPE_POWERPC_7400",
+ 11: "CPU_SUBTYPE_POWERPC_7450",
+ 100: "CPU_SUBTYPE_POWERPC_970",
+}
+
+ARM_SUBTYPE = {
+ 0: "CPU_SUBTYPE_ARM_ALL12",
+ 5: "CPU_SUBTYPE_ARM_V4T",
+ 6: "CPU_SUBTYPE_ARM_V6",
+ 7: "CPU_SUBTYPE_ARM_V5TEJ",
+ 8: "CPU_SUBTYPE_ARM_XSCALE",
+ 9: "CPU_SUBTYPE_ARM_V7",
+ 10: "CPU_SUBTYPE_ARM_V7F",
+ 11: "CPU_SUBTYPE_ARM_V7S",
+ 12: "CPU_SUBTYPE_ARM_V7K",
+ 13: "CPU_SUBTYPE_ARM_V8",
+ 14: "CPU_SUBTYPE_ARM_V6M",
+ 15: "CPU_SUBTYPE_ARM_V7M",
+ 16: "CPU_SUBTYPE_ARM_V7EM",
+ 17: "CPU_SUBTYPE_ARM_V8M",
+}
+
+ARM64_SUBTYPE = {
+ 0: "CPU_SUBTYPE_ARM64_ALL",
+ 1: "CPU_SUBTYPE_ARM64_V8",
+ 2: "CPU_SUBTYPE_ARM64E",
+}
+
+VAX_SUBTYPE = {
+ 0: "CPU_SUBTYPE_VAX_ALL",
+ 1: "CPU_SUBTYPE_VAX780",
+ 2: "CPU_SUBTYPE_VAX785",
+ 3: "CPU_SUBTYPE_VAX750",
+ 4: "CPU_SUBTYPE_VAX730",
+ 5: "CPU_SUBTYPE_UVAXI",
+ 6: "CPU_SUBTYPE_UVAXII",
+ 7: "CPU_SUBTYPE_VAX8200",
+ 8: "CPU_SUBTYPE_VAX8500",
+ 9: "CPU_SUBTYPE_VAX8600",
+ 10: "CPU_SUBTYPE_VAX8650",
+ 11: "CPU_SUBTYPE_VAX8800",
+ 12: "CPU_SUBTYPE_UVAXIII",
+}
+
+
+def get_cpu_subtype(cpu_type, cpu_subtype):
+ st = cpu_subtype & 0x0FFFFFFF
+
+ if cpu_type == 1:
+ subtype = VAX_SUBTYPE.get(st, st)
+ elif cpu_type == 6:
+ subtype = MC680_SUBTYPE.get(st, st)
+ elif cpu_type == 7:
+ subtype = INTEL_SUBTYPE.get(st, st)
+ elif cpu_type == 7 | _CPU_ARCH_ABI64:
+ subtype = INTEL64_SUBTYPE.get(st, st)
+ elif cpu_type == 8:
+ subtype = MIPS_SUBTYPE.get(st, st)
+ elif cpu_type == 10:
+ subtype = MC98000_SUBTYPE.get(st, st)
+ elif cpu_type == 11:
+ subtype = HPPA_SUBTYPE.get(st, st)
+ elif cpu_type == 12:
+ subtype = ARM_SUBTYPE.get(st, st)
+ elif cpu_type == 12 | _CPU_ARCH_ABI64:
+ subtype = ARM64_SUBTYPE.get(st, st)
+ elif cpu_type == 13:
+ subtype = MC88_SUBTYPE.get(st, st)
+ elif cpu_type == 14:
+ subtype = SPARC_SUBTYPE.get(st, st)
+ elif cpu_type == 15:
+ subtype = I860_SUBTYPE.get(st, st)
+ elif cpu_type == 16:
+ subtype = MIPS_SUBTYPE.get(st, st)
+ elif cpu_type == 18:
+ subtype = POWERPC_SUBTYPE.get(st, st)
+ elif cpu_type == 18 | _CPU_ARCH_ABI64:
+ subtype = POWERPC_SUBTYPE.get(st, st)
+ else:
+ subtype = str(st)
+
+ return subtype
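+
+
+# Example: the capability bits in the high nibble are masked off first, so
+# get_cpu_subtype(_CPU_ARCH_ABI64 | 12, 0x80000002) == "CPU_SUBTYPE_ARM64E".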
+
+
+_MH_EXECUTE_SYM = "__mh_execute_header"
+MH_EXECUTE_SYM = "_mh_execute_header"
+_MH_BUNDLE_SYM = "__mh_bundle_header"
+MH_BUNDLE_SYM = "_mh_bundle_header"
+_MH_DYLIB_SYM = "__mh_dylib_header"
+MH_DYLIB_SYM = "_mh_dylib_header"
+_MH_DYLINKER_SYM = "__mh_dylinker_header"
+MH_DYLINKER_SYM = "_mh_dylinker_header"
+
+(
+ MH_OBJECT,
+ MH_EXECUTE,
+ MH_FVMLIB,
+ MH_CORE,
+ MH_PRELOAD,
+ MH_DYLIB,
+ MH_DYLINKER,
+ MH_BUNDLE,
+ MH_DYLIB_STUB,
+ MH_DSYM,
+) = range(0x1, 0xB)
+
+MH_FILESET = 0xC
+
+(
+ MH_NOUNDEFS,
+ MH_INCRLINK,
+ MH_DYLDLINK,
+ MH_BINDATLOAD,
+ MH_PREBOUND,
+ MH_SPLIT_SEGS,
+ MH_LAZY_INIT,
+ MH_TWOLEVEL,
+ MH_FORCE_FLAT,
+ MH_NOMULTIDEFS,
+ MH_NOFIXPREBINDING,
+ MH_PREBINDABLE,
+ MH_ALLMODSBOUND,
+ MH_SUBSECTIONS_VIA_SYMBOLS,
+ MH_CANONICAL,
+ MH_WEAK_DEFINES,
+ MH_BINDS_TO_WEAK,
+ MH_ALLOW_STACK_EXECUTION,
+ MH_ROOT_SAFE,
+ MH_SETUID_SAFE,
+ MH_NO_REEXPORTED_DYLIBS,
+ MH_PIE,
+ MH_DEAD_STRIPPABLE_DYLIB,
+ MH_HAS_TLV_DESCRIPTORS,
+ MH_NO_HEAP_EXECUTION,
+ MH_APP_EXTENSION_SAFE,
+) = map((1).__lshift__, range(26))
+
+MH_MAGIC = 0xFEEDFACE
+MH_CIGAM = 0xCEFAEDFE
+MH_MAGIC_64 = 0xFEEDFACF
+MH_CIGAM_64 = 0xCFFAEDFE
+
+integer_t = p_int32
+cpu_type_t = integer_t
+cpu_subtype_t = p_uint32
+
+MH_FILETYPE_NAMES = {
+ MH_OBJECT: "relocatable object",
+ MH_EXECUTE: "demand paged executable",
+ MH_FVMLIB: "fixed vm shared library",
+ MH_CORE: "core",
+ MH_PRELOAD: "preloaded executable",
+ MH_DYLIB: "dynamically bound shared library",
+ MH_DYLINKER: "dynamic link editor",
+ MH_BUNDLE: "dynamically bound bundle",
+ MH_DYLIB_STUB: "shared library stub for static linking",
+ MH_DSYM: "symbol information",
+ MH_FILESET: "fileset object",
+}
+
+MH_FILETYPE_SHORTNAMES = {
+ MH_OBJECT: "object",
+ MH_EXECUTE: "execute",
+ MH_FVMLIB: "fvmlib",
+ MH_CORE: "core",
+ MH_PRELOAD: "preload",
+ MH_DYLIB: "dylib",
+ MH_DYLINKER: "dylinker",
+ MH_BUNDLE: "bundle",
+ MH_DYLIB_STUB: "dylib_stub",
+ MH_DSYM: "dsym",
+}
+
+MH_FLAGS_NAMES = {
+ MH_NOUNDEFS: "MH_NOUNDEFS",
+ MH_INCRLINK: "MH_INCRLINK",
+ MH_DYLDLINK: "MH_DYLDLINK",
+ MH_BINDATLOAD: "MH_BINDATLOAD",
+ MH_PREBOUND: "MH_PREBOUND",
+ MH_SPLIT_SEGS: "MH_SPLIT_SEGS",
+ MH_LAZY_INIT: "MH_LAZY_INIT",
+ MH_TWOLEVEL: "MH_TWOLEVEL",
+ MH_FORCE_FLAT: "MH_FORCE_FLAT",
+ MH_NOMULTIDEFS: "MH_NOMULTIDEFS",
+ MH_NOFIXPREBINDING: "MH_NOFIXPREBINDING",
+ MH_PREBINDABLE: "MH_PREBINDABLE",
+ MH_ALLMODSBOUND: "MH_ALLMODSBOUND",
+ MH_SUBSECTIONS_VIA_SYMBOLS: "MH_SUBSECTIONS_VIA_SYMBOLS",
+ MH_CANONICAL: "MH_CANONICAL",
+ MH_WEAK_DEFINES: "MH_WEAK_DEFINES",
+ MH_BINDS_TO_WEAK: "MH_BINDS_TO_WEAK",
+ MH_ALLOW_STACK_EXECUTION: "MH_ALLOW_STACK_EXECUTION",
+ MH_ROOT_SAFE: "MH_ROOT_SAFE",
+ MH_SETUID_SAFE: "MH_SETUID_SAFE",
+ MH_NO_REEXPORTED_DYLIBS: "MH_NO_REEXPORTED_DYLIBS",
+ MH_PIE: "MH_PIE",
+ MH_DEAD_STRIPPABLE_DYLIB: "MH_DEAD_STRIPPABLE_DYLIB",
+ MH_HAS_TLV_DESCRIPTORS: "MH_HAS_TLV_DESCRIPTORS",
+ MH_NO_HEAP_EXECUTION: "MH_NO_HEAP_EXECUTION",
+ MH_APP_EXTENSION_SAFE: "MH_APP_EXTENSION_SAFE",
+}
+
+MH_FLAGS_DESCRIPTIONS = {
+ MH_NOUNDEFS: "no undefined references",
+ MH_INCRLINK: "output of an incremental link",
+ MH_DYLDLINK: "input for the dynamic linker",
+ MH_BINDATLOAD: "undefined references bound dynamically when loaded",
+ MH_PREBOUND: "dynamic undefined references prebound",
+ MH_SPLIT_SEGS: "split read-only and read-write segments",
+ MH_LAZY_INIT: "(obsolete)",
+ MH_TWOLEVEL: "using two-level name space bindings",
+    MH_FORCE_FLAT: "forcing all images to use flat name space bindings",
+ MH_NOMULTIDEFS: "umbrella guarantees no multiple definitions",
+ MH_NOFIXPREBINDING: "do not notify prebinding agent about this executable",
+ MH_PREBINDABLE: "the binary is not prebound but can have its prebinding redone",
+ MH_ALLMODSBOUND: "indicates that this binary binds to all "
+ "two-level namespace modules of its dependent libraries",
+ MH_SUBSECTIONS_VIA_SYMBOLS: "safe to divide up the sections into "
+ "sub-sections via symbols for dead code stripping",
+ MH_CANONICAL: "the binary has been canonicalized via the unprebind operation",
+ MH_WEAK_DEFINES: "the final linked image contains external weak symbols",
+ MH_BINDS_TO_WEAK: "the final linked image uses weak symbols",
+ MH_ALLOW_STACK_EXECUTION: "all stacks in the task will be given "
+ "stack execution privilege",
+ MH_ROOT_SAFE: "the binary declares it is safe for use in processes with uid zero",
+ MH_SETUID_SAFE: "the binary declares it is safe for use in processes "
+ "when issetugid() is true",
+ MH_NO_REEXPORTED_DYLIBS: "the static linker does not need to examine dependent "
+ "dylibs to see if any are re-exported",
+ MH_PIE: "the OS will load the main executable at a random address",
+ MH_DEAD_STRIPPABLE_DYLIB: "the static linker will automatically not create a "
+ "LC_LOAD_DYLIB load command to the dylib if no symbols are being "
+ "referenced from the dylib",
+ MH_HAS_TLV_DESCRIPTORS: "contains a section of type S_THREAD_LOCAL_VARIABLES",
+ MH_NO_HEAP_EXECUTION: "the OS will run the main executable with a "
+ "non-executable heap even on platforms that don't require it",
+ MH_APP_EXTENSION_SAFE: "the code was linked for use in an application extension.",
+}
+
+
+class mach_version_helper(Structure):
+ _fields_ = (("_version", p_uint32),)
+
+ @property
+ def major(self):
+ return self._version >> 16 & 0xFFFF
+
+ @major.setter
+ def major(self, v):
+ self._version = (self._version & 0xFFFF) | (v << 16)
+
+ @property
+ def minor(self):
+ return self._version >> 8 & 0xFF
+
+ @minor.setter
+ def minor(self, v):
+ self._version = (self._version & 0xFFFF00FF) | (v << 8)
+
+ @property
+ def rev(self):
+ return self._version & 0xFF
+
+    @rev.setter
+    def rev(self, v):
+        # Assign (a setter's return value is discarded), mirroring the
+        # major/minor setters above.
+        self._version = (self._version & 0xFFFFFF00) | v
+
+ def __str__(self):
+ return "%s.%s.%s" % (self.major, self.minor, self.rev)
+
+
+class mach_timestamp_helper(p_uint32):
+ def __str__(self):
+ return time.ctime(self)
+
+
+def read_struct(f, s, **kw):
+ return s.from_fileobj(f, **kw)
+
+
+class mach_header(Structure):
+ _fields_ = (
+ ("magic", p_uint32),
+ ("cputype", cpu_type_t),
+ ("cpusubtype", cpu_subtype_t),
+ ("filetype", p_uint32),
+ ("ncmds", p_uint32),
+ ("sizeofcmds", p_uint32),
+ ("flags", p_uint32),
+ )
+
+ def _describe(self):
+ bit = 1
+ flags = self.flags
+ dflags = []
+ while flags and bit < (1 << 32):
+ if flags & bit:
+ dflags.append(
+ {
+ "name": MH_FLAGS_NAMES.get(bit, str(bit)),
+ "description": MH_FLAGS_DESCRIPTIONS.get(bit, str(bit)),
+ }
+ )
+ flags = flags ^ bit
+ bit <<= 1
+ return (
+ ("magic", int(self.magic)),
+ ("cputype_string", CPU_TYPE_NAMES.get(self.cputype, self.cputype)),
+ ("cputype", int(self.cputype)),
+ ("cpusubtype_string", get_cpu_subtype(self.cputype, self.cpusubtype)),
+ ("cpusubtype", int(self.cpusubtype)),
+ ("filetype_string", MH_FILETYPE_NAMES.get(self.filetype, self.filetype)),
+ ("filetype", int(self.filetype)),
+ ("ncmds", self.ncmds),
+ ("sizeofcmds", self.sizeofcmds),
+ ("flags", dflags),
+ ("raw_flags", int(self.flags)),
+ )
+
+
+class mach_header_64(mach_header):
+ _fields_ = mach_header._fields_ + (("reserved", p_uint32),)
+
+
+class load_command(Structure):
+ _fields_ = (("cmd", p_uint32), ("cmdsize", p_uint32))
+
+ def get_cmd_name(self):
+ return LC_NAMES.get(self.cmd, self.cmd)
+
+
+LC_REQ_DYLD = 0x80000000
+
+(
+ LC_SEGMENT,
+ LC_SYMTAB,
+ LC_SYMSEG,
+ LC_THREAD,
+ LC_UNIXTHREAD,
+ LC_LOADFVMLIB,
+ LC_IDFVMLIB,
+ LC_IDENT,
+ LC_FVMFILE,
+ LC_PREPAGE,
+ LC_DYSYMTAB,
+ LC_LOAD_DYLIB,
+ LC_ID_DYLIB,
+ LC_LOAD_DYLINKER,
+ LC_ID_DYLINKER,
+ LC_PREBOUND_DYLIB,
+ LC_ROUTINES,
+ LC_SUB_FRAMEWORK,
+ LC_SUB_UMBRELLA,
+ LC_SUB_CLIENT,
+ LC_SUB_LIBRARY,
+ LC_TWOLEVEL_HINTS,
+ LC_PREBIND_CKSUM,
+) = range(0x1, 0x18)
+
+LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
+
+LC_SEGMENT_64 = 0x19
+LC_ROUTINES_64 = 0x1A
+LC_UUID = 0x1B
+LC_RPATH = 0x1C | LC_REQ_DYLD
+LC_CODE_SIGNATURE = 0x1D
+LC_CODE_SEGMENT_SPLIT_INFO = 0x1E
+LC_REEXPORT_DYLIB = 0x1F | LC_REQ_DYLD
+LC_LAZY_LOAD_DYLIB = 0x20
+LC_ENCRYPTION_INFO = 0x21
+LC_DYLD_INFO = 0x22
+LC_DYLD_INFO_ONLY = 0x22 | LC_REQ_DYLD
+LC_LOAD_UPWARD_DYLIB = 0x23 | LC_REQ_DYLD
+LC_VERSION_MIN_MACOSX = 0x24
+LC_VERSION_MIN_IPHONEOS = 0x25
+LC_FUNCTION_STARTS = 0x26
+LC_DYLD_ENVIRONMENT = 0x27
+LC_MAIN = 0x28 | LC_REQ_DYLD
+LC_DATA_IN_CODE = 0x29
+LC_SOURCE_VERSION = 0x2A
+LC_DYLIB_CODE_SIGN_DRS = 0x2B
+LC_ENCRYPTION_INFO_64 = 0x2C
+LC_LINKER_OPTION = 0x2D
+LC_LINKER_OPTIMIZATION_HINT = 0x2E
+LC_VERSION_MIN_TVOS = 0x2F
+LC_VERSION_MIN_WATCHOS = 0x30
+LC_NOTE = 0x31
+LC_BUILD_VERSION = 0x32
+LC_DYLD_EXPORTS_TRIE = 0x33 | LC_REQ_DYLD
+LC_DYLD_CHAINED_FIXUPS = 0x34 | LC_REQ_DYLD
+LC_FILESET_ENTRY = 0x35 | LC_REQ_DYLD
+
+
+# this is really a union... but whatever
+class lc_str(p_uint32):
+ pass
+
+
+p_str16 = pypackable("p_str16", bytes, "16s")
+
+vm_prot_t = p_int32
+
+
+class segment_command(Structure):
+ _fields_ = (
+ ("segname", p_str16),
+ ("vmaddr", p_uint32),
+ ("vmsize", p_uint32),
+ ("fileoff", p_uint32),
+ ("filesize", p_uint32),
+ ("maxprot", vm_prot_t),
+ ("initprot", vm_prot_t),
+ ("nsects", p_uint32), # read the section structures ?
+ ("flags", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["segname"] = self.segname.rstrip("\x00")
+ s["vmaddr"] = int(self.vmaddr)
+ s["vmsize"] = int(self.vmsize)
+ s["fileoff"] = int(self.fileoff)
+ s["filesize"] = int(self.filesize)
+ s["initprot"] = self.get_initial_virtual_memory_protections()
+ s["initprot_raw"] = int(self.initprot)
+ s["maxprot"] = self.get_max_virtual_memory_protections()
+ s["maxprot_raw"] = int(self.maxprot)
+ s["nsects"] = int(self.nsects)
+ s["flags"] = self.flags
+ return s
+
+ def get_initial_virtual_memory_protections(self):
+ vm = []
+ if self.initprot == 0:
+ vm.append("VM_PROT_NONE")
+ if self.initprot & 1:
+ vm.append("VM_PROT_READ")
+ if self.initprot & 2:
+ vm.append("VM_PROT_WRITE")
+ if self.initprot & 4:
+ vm.append("VM_PROT_EXECUTE")
+ return vm
+
+ def get_max_virtual_memory_protections(self):
+ vm = []
+ if self.maxprot == 0:
+ vm.append("VM_PROT_NONE")
+ if self.maxprot & 1:
+ vm.append("VM_PROT_READ")
+ if self.maxprot & 2:
+ vm.append("VM_PROT_WRITE")
+ if self.maxprot & 4:
+ vm.append("VM_PROT_EXECUTE")
+ return vm
+
+
+class segment_command_64(Structure):
+ _fields_ = (
+ ("segname", p_str16),
+ ("vmaddr", p_uint64),
+ ("vmsize", p_uint64),
+ ("fileoff", p_uint64),
+ ("filesize", p_uint64),
+ ("maxprot", vm_prot_t),
+ ("initprot", vm_prot_t),
+ ("nsects", p_uint32), # read the section structures ?
+ ("flags", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["segname"] = self.segname.rstrip("\x00")
+ s["vmaddr"] = int(self.vmaddr)
+ s["vmsize"] = int(self.vmsize)
+ s["fileoff"] = int(self.fileoff)
+ s["filesize"] = int(self.filesize)
+ s["initprot"] = self.get_initial_virtual_memory_protections()
+ s["initprot_raw"] = int(self.initprot)
+ s["maxprot"] = self.get_max_virtual_memory_protections()
+ s["maxprot_raw"] = int(self.maxprot)
+ s["nsects"] = int(self.nsects)
+ s["flags"] = self.flags
+ return s
+
+ def get_initial_virtual_memory_protections(self):
+ vm = []
+ if self.initprot == 0:
+ vm.append("VM_PROT_NONE")
+ if self.initprot & 1:
+ vm.append("VM_PROT_READ")
+ if self.initprot & 2:
+ vm.append("VM_PROT_WRITE")
+ if self.initprot & 4:
+ vm.append("VM_PROT_EXECUTE")
+ return vm
+
+ def get_max_virtual_memory_protections(self):
+ vm = []
+ if self.maxprot == 0:
+ vm.append("VM_PROT_NONE")
+ if self.maxprot & 1:
+ vm.append("VM_PROT_READ")
+ if self.maxprot & 2:
+ vm.append("VM_PROT_WRITE")
+ if self.maxprot & 4:
+ vm.append("VM_PROT_EXECUTE")
+ return vm
+
+
+SG_HIGHVM = 0x1
+SG_FVMLIB = 0x2
+SG_NORELOC = 0x4
+SG_PROTECTED_VERSION_1 = 0x8
+
+
+class section(Structure):
+ _fields_ = (
+ ("sectname", p_str16),
+ ("segname", p_str16),
+ ("addr", p_uint32),
+ ("size", p_uint32),
+ ("offset", p_uint32),
+ ("align", p_uint32),
+ ("reloff", p_uint32),
+ ("nreloc", p_uint32),
+ ("flags", p_uint32),
+ ("reserved1", p_uint32),
+ ("reserved2", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["sectname"] = self.sectname.rstrip("\x00")
+ s["segname"] = self.segname.rstrip("\x00")
+ s["addr"] = int(self.addr)
+ s["size"] = int(self.size)
+ s["offset"] = int(self.offset)
+ s["align"] = int(self.align)
+ s["reloff"] = int(self.reloff)
+ s["nreloc"] = int(self.nreloc)
+ f = {}
+ f["type"] = FLAG_SECTION_TYPES[int(self.flags) & 0xFF]
+ f["attributes"] = []
+ for k in FLAG_SECTION_ATTRIBUTES:
+ if k & self.flags:
+ f["attributes"].append(FLAG_SECTION_ATTRIBUTES[k])
+ if not f["attributes"]:
+ del f["attributes"]
+ s["flags"] = f
+ s["reserved1"] = int(self.reserved1)
+ s["reserved2"] = int(self.reserved2)
+ return s
+
+ def add_section_data(self, data):
+ self.section_data = data
+
+
+class section_64(Structure):
+ _fields_ = (
+ ("sectname", p_str16),
+ ("segname", p_str16),
+ ("addr", p_uint64),
+ ("size", p_uint64),
+ ("offset", p_uint32),
+ ("align", p_uint32),
+ ("reloff", p_uint32),
+ ("nreloc", p_uint32),
+ ("flags", p_uint32),
+ ("reserved1", p_uint32),
+ ("reserved2", p_uint32),
+ ("reserved3", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["sectname"] = self.sectname.rstrip("\x00")
+ s["segname"] = self.segname.rstrip("\x00")
+ s["addr"] = int(self.addr)
+ s["size"] = int(self.size)
+ s["offset"] = int(self.offset)
+ s["align"] = int(self.align)
+ s["reloff"] = int(self.reloff)
+ s["nreloc"] = int(self.nreloc)
+ f = {}
+ f["type"] = FLAG_SECTION_TYPES[int(self.flags) & 0xFF]
+ f["attributes"] = []
+ for k in FLAG_SECTION_ATTRIBUTES:
+ if k & self.flags:
+ f["attributes"].append(FLAG_SECTION_ATTRIBUTES[k])
+ if not f["attributes"]:
+ del f["attributes"]
+ s["flags"] = f
+ s["reserved1"] = int(self.reserved1)
+ s["reserved2"] = int(self.reserved2)
+ s["reserved3"] = int(self.reserved3)
+ return s
+
+ def add_section_data(self, data):
+ self.section_data = data
+
+
+SECTION_TYPE = 0xFF
+SECTION_ATTRIBUTES = 0xFFFFFF00
+S_REGULAR = 0x0
+S_ZEROFILL = 0x1
+S_CSTRING_LITERALS = 0x2
+S_4BYTE_LITERALS = 0x3
+S_8BYTE_LITERALS = 0x4
+S_LITERAL_POINTERS = 0x5
+S_NON_LAZY_SYMBOL_POINTERS = 0x6
+S_LAZY_SYMBOL_POINTERS = 0x7
+S_SYMBOL_STUBS = 0x8
+S_MOD_INIT_FUNC_POINTERS = 0x9
+S_MOD_TERM_FUNC_POINTERS = 0xA
+S_COALESCED = 0xB
+S_GB_ZEROFILL = 0xC
+S_INTERPOSING = 0xD
+S_16BYTE_LITERALS = 0xE
+S_DTRACE_DOF = 0xF
+S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10
+S_THREAD_LOCAL_REGULAR = 0x11
+S_THREAD_LOCAL_ZEROFILL = 0x12
+S_THREAD_LOCAL_VARIABLES = 0x13
+S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14
+S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15
+
+FLAG_SECTION_TYPES = {
+ S_REGULAR: "S_REGULAR",
+ S_ZEROFILL: "S_ZEROFILL",
+ S_CSTRING_LITERALS: "S_CSTRING_LITERALS",
+ S_4BYTE_LITERALS: "S_4BYTE_LITERALS",
+ S_8BYTE_LITERALS: "S_8BYTE_LITERALS",
+ S_LITERAL_POINTERS: "S_LITERAL_POINTERS",
+ S_NON_LAZY_SYMBOL_POINTERS: "S_NON_LAZY_SYMBOL_POINTERS",
+ S_LAZY_SYMBOL_POINTERS: "S_LAZY_SYMBOL_POINTERS",
+ S_SYMBOL_STUBS: "S_SYMBOL_STUBS",
+ S_MOD_INIT_FUNC_POINTERS: "S_MOD_INIT_FUNC_POINTERS",
+ S_MOD_TERM_FUNC_POINTERS: "S_MOD_TERM_FUNC_POINTERS",
+ S_COALESCED: "S_COALESCED",
+ S_GB_ZEROFILL: "S_GB_ZEROFILL",
+ S_INTERPOSING: "S_INTERPOSING",
+ S_16BYTE_LITERALS: "S_16BYTE_LITERALS",
+ S_DTRACE_DOF: "S_DTRACE_DOF",
+ S_LAZY_DYLIB_SYMBOL_POINTERS: "S_LAZY_DYLIB_SYMBOL_POINTERS",
+ S_THREAD_LOCAL_REGULAR: "S_THREAD_LOCAL_REGULAR",
+ S_THREAD_LOCAL_ZEROFILL: "S_THREAD_LOCAL_ZEROFILL",
+ S_THREAD_LOCAL_VARIABLES: "S_THREAD_LOCAL_VARIABLES",
+ S_THREAD_LOCAL_VARIABLE_POINTERS: "S_THREAD_LOCAL_VARIABLE_POINTERS",
+ S_THREAD_LOCAL_INIT_FUNCTION_POINTERS: "S_THREAD_LOCAL_INIT_FUNCTION_POINTERS",
+}
+
+SECTION_ATTRIBUTES_USR = 0xFF000000
+S_ATTR_PURE_INSTRUCTIONS = 0x80000000
+S_ATTR_NO_TOC = 0x40000000
+S_ATTR_STRIP_STATIC_SYMS = 0x20000000
+S_ATTR_NO_DEAD_STRIP = 0x10000000
+S_ATTR_LIVE_SUPPORT = 0x08000000
+S_ATTR_SELF_MODIFYING_CODE = 0x04000000
+S_ATTR_DEBUG = 0x02000000
+
+SECTION_ATTRIBUTES_SYS = 0x00FFFF00
+S_ATTR_SOME_INSTRUCTIONS = 0x00000400
+S_ATTR_EXT_RELOC = 0x00000200
+S_ATTR_LOC_RELOC = 0x00000100
+
+FLAG_SECTION_ATTRIBUTES = {
+ S_ATTR_PURE_INSTRUCTIONS: "S_ATTR_PURE_INSTRUCTIONS",
+ S_ATTR_NO_TOC: "S_ATTR_NO_TOC",
+ S_ATTR_STRIP_STATIC_SYMS: "S_ATTR_STRIP_STATIC_SYMS",
+ S_ATTR_NO_DEAD_STRIP: "S_ATTR_NO_DEAD_STRIP",
+ S_ATTR_LIVE_SUPPORT: "S_ATTR_LIVE_SUPPORT",
+ S_ATTR_SELF_MODIFYING_CODE: "S_ATTR_SELF_MODIFYING_CODE",
+ S_ATTR_DEBUG: "S_ATTR_DEBUG",
+ S_ATTR_SOME_INSTRUCTIONS: "S_ATTR_SOME_INSTRUCTIONS",
+ S_ATTR_EXT_RELOC: "S_ATTR_EXT_RELOC",
+ S_ATTR_LOC_RELOC: "S_ATTR_LOC_RELOC",
+}
+
+SEG_PAGEZERO = "__PAGEZERO"
+SEG_TEXT = "__TEXT"
+SECT_TEXT = "__text"
+SECT_FVMLIB_INIT0 = "__fvmlib_init0"
+SECT_FVMLIB_INIT1 = "__fvmlib_init1"
+SEG_DATA = "__DATA"
+SECT_DATA = "__data"
+SECT_BSS = "__bss"
+SECT_COMMON = "__common"
+SEG_OBJC = "__OBJC"
+SECT_OBJC_SYMBOLS = "__symbol_table"
+SECT_OBJC_MODULES = "__module_info"
+SECT_OBJC_STRINGS = "__selector_strs"
+SECT_OBJC_REFS = "__selector_refs"
+SEG_ICON = "__ICON"
+SECT_ICON_HEADER = "__header"
+SECT_ICON_TIFF = "__tiff"
+SEG_LINKEDIT = "__LINKEDIT"
+SEG_UNIXSTACK = "__UNIXSTACK"
+SEG_IMPORT = "__IMPORT"
+
+#
+# These _command classes could be removed because they are no different
+# from their base structures; the load commands are kept separate, so
+# classes like fvmlib and fvmlib_command are equivalent.
+#
+
+
+class fvmlib(Structure):
+ _fields_ = (
+ ("name", lc_str),
+ ("minor_version", mach_version_helper),
+ ("header_addr", p_uint32),
+ )
+
+
+class fvmlib_command(Structure):
+ _fields_ = fvmlib._fields_
+
+ def describe(self):
+ s = {}
+ s["header_addr"] = int(self.header_addr)
+ return s
+
+
+class dylib(Structure):
+ _fields_ = (
+ ("name", lc_str),
+ ("timestamp", mach_timestamp_helper),
+ ("current_version", mach_version_helper),
+ ("compatibility_version", mach_version_helper),
+ )
+
+
+# merged dylib structure
+class dylib_command(Structure):
+ _fields_ = dylib._fields_
+
+ def describe(self):
+ s = {}
+ s["timestamp"] = str(self.timestamp)
+ s["current_version"] = str(self.current_version)
+ s["compatibility_version"] = str(self.compatibility_version)
+ return s
+
+
+class sub_framework_command(Structure):
+ _fields_ = (("umbrella", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class sub_client_command(Structure):
+ _fields_ = (("client", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class sub_umbrella_command(Structure):
+ _fields_ = (("sub_umbrella", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class sub_library_command(Structure):
+ _fields_ = (("sub_library", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class prebound_dylib_command(Structure):
+ _fields_ = (("name", lc_str), ("nmodules", p_uint32), ("linked_modules", lc_str))
+
+ def describe(self):
+ return {"nmodules": int(self.nmodules)}
+
+
+class dylinker_command(Structure):
+ _fields_ = (("name", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class thread_command(Structure):
+ _fields_ = (("flavor", p_uint32), ("count", p_uint32))
+
+ def describe(self):
+ s = {}
+ s["flavor"] = int(self.flavor)
+ s["count"] = int(self.count)
+ return s
+
+
+class entry_point_command(Structure):
+ _fields_ = (("entryoff", p_uint64), ("stacksize", p_uint64))
+
+ def describe(self):
+ s = {}
+ s["entryoff"] = int(self.entryoff)
+ s["stacksize"] = int(self.stacksize)
+ return s
+
+
+class routines_command(Structure):
+ _fields_ = (
+ ("init_address", p_uint32),
+ ("init_module", p_uint32),
+ ("reserved1", p_uint32),
+ ("reserved2", p_uint32),
+ ("reserved3", p_uint32),
+ ("reserved4", p_uint32),
+ ("reserved5", p_uint32),
+ ("reserved6", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["init_address"] = int(self.init_address)
+ s["init_module"] = int(self.init_module)
+ s["reserved1"] = int(self.reserved1)
+ s["reserved2"] = int(self.reserved2)
+ s["reserved3"] = int(self.reserved3)
+ s["reserved4"] = int(self.reserved4)
+ s["reserved5"] = int(self.reserved5)
+ s["reserved6"] = int(self.reserved6)
+ return s
+
+
+class routines_command_64(Structure):
+ _fields_ = (
+ ("init_address", p_uint64),
+ ("init_module", p_uint64),
+ ("reserved1", p_uint64),
+ ("reserved2", p_uint64),
+ ("reserved3", p_uint64),
+ ("reserved4", p_uint64),
+ ("reserved5", p_uint64),
+ ("reserved6", p_uint64),
+ )
+
+ def describe(self):
+ s = {}
+ s["init_address"] = int(self.init_address)
+ s["init_module"] = int(self.init_module)
+ s["reserved1"] = int(self.reserved1)
+ s["reserved2"] = int(self.reserved2)
+ s["reserved3"] = int(self.reserved3)
+ s["reserved4"] = int(self.reserved4)
+ s["reserved5"] = int(self.reserved5)
+ s["reserved6"] = int(self.reserved6)
+ return s
+
+
+class symtab_command(Structure):
+ _fields_ = (
+ ("symoff", p_uint32),
+ ("nsyms", p_uint32),
+ ("stroff", p_uint32),
+ ("strsize", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["symoff"] = int(self.symoff)
+ s["nsyms"] = int(self.nsyms)
+ s["stroff"] = int(self.stroff)
+ s["strsize"] = int(self.strsize)
+ return s
+
+
+class dysymtab_command(Structure):
+ _fields_ = (
+ ("ilocalsym", p_uint32),
+ ("nlocalsym", p_uint32),
+ ("iextdefsym", p_uint32),
+ ("nextdefsym", p_uint32),
+ ("iundefsym", p_uint32),
+ ("nundefsym", p_uint32),
+ ("tocoff", p_uint32),
+ ("ntoc", p_uint32),
+ ("modtaboff", p_uint32),
+ ("nmodtab", p_uint32),
+ ("extrefsymoff", p_uint32),
+ ("nextrefsyms", p_uint32),
+ ("indirectsymoff", p_uint32),
+ ("nindirectsyms", p_uint32),
+ ("extreloff", p_uint32),
+ ("nextrel", p_uint32),
+ ("locreloff", p_uint32),
+ ("nlocrel", p_uint32),
+ )
+
+ def describe(self):
+ dys = {}
+ dys["ilocalsym"] = int(self.ilocalsym)
+ dys["nlocalsym"] = int(self.nlocalsym)
+ dys["iextdefsym"] = int(self.iextdefsym)
+ dys["nextdefsym"] = int(self.nextdefsym)
+ dys["iundefsym"] = int(self.iundefsym)
+ dys["nundefsym"] = int(self.nundefsym)
+ dys["tocoff"] = int(self.tocoff)
+ dys["ntoc"] = int(self.ntoc)
+ dys["modtaboff"] = int(self.modtaboff)
+ dys["nmodtab"] = int(self.nmodtab)
+ dys["extrefsymoff"] = int(self.extrefsymoff)
+ dys["nextrefsyms"] = int(self.nextrefsyms)
+ dys["indirectsymoff"] = int(self.indirectsymoff)
+ dys["nindirectsyms"] = int(self.nindirectsyms)
+ dys["extreloff"] = int(self.extreloff)
+ dys["nextrel"] = int(self.nextrel)
+ dys["locreloff"] = int(self.locreloff)
+ dys["nlocrel"] = int(self.nlocrel)
+ return dys
+
+
+INDIRECT_SYMBOL_LOCAL = 0x80000000
+INDIRECT_SYMBOL_ABS = 0x40000000
+
+
+class dylib_table_of_contents(Structure):
+ _fields_ = (("symbol_index", p_uint32), ("module_index", p_uint32))
+
+
+class dylib_module(Structure):
+ _fields_ = (
+ ("module_name", p_uint32),
+ ("iextdefsym", p_uint32),
+ ("nextdefsym", p_uint32),
+ ("irefsym", p_uint32),
+ ("nrefsym", p_uint32),
+ ("ilocalsym", p_uint32),
+ ("nlocalsym", p_uint32),
+ ("iextrel", p_uint32),
+ ("nextrel", p_uint32),
+ ("iinit_iterm", p_uint32),
+ ("ninit_nterm", p_uint32),
+ ("objc_module_info_addr", p_uint32),
+ ("objc_module_info_size", p_uint32),
+ )
+
+
+class dylib_module_64(Structure):
+ _fields_ = (
+ ("module_name", p_uint32),
+ ("iextdefsym", p_uint32),
+ ("nextdefsym", p_uint32),
+ ("irefsym", p_uint32),
+ ("nrefsym", p_uint32),
+ ("ilocalsym", p_uint32),
+ ("nlocalsym", p_uint32),
+ ("iextrel", p_uint32),
+ ("nextrel", p_uint32),
+ ("iinit_iterm", p_uint32),
+ ("ninit_nterm", p_uint32),
+ ("objc_module_info_size", p_uint32),
+ ("objc_module_info_addr", p_uint64),
+ )
+
+
+class dylib_reference(Structure):
+ _fields_ = (
+ ("isym_flags", p_uint32),
+ # ('isym', p_uint8 * 3),
+ # ('flags', p_uint8),
+ )
+
+
+class twolevel_hints_command(Structure):
+ _fields_ = (("offset", p_uint32), ("nhints", p_uint32))
+
+ def describe(self):
+ s = {}
+ s["offset"] = int(self.offset)
+ s["nhints"] = int(self.nhints)
+ return s
+
+
+class twolevel_hint(Structure):
+ _fields_ = (
+ ("isub_image_itoc", p_uint32),
+ # ('isub_image', p_uint8),
+ # ('itoc', p_uint8 * 3),
+ )
+
+
+class prebind_cksum_command(Structure):
+ _fields_ = (("cksum", p_uint32),)
+
+ def describe(self):
+ return {"cksum": int(self.cksum)}
+
+
+class symseg_command(Structure):
+ _fields_ = (("offset", p_uint32), ("size", p_uint32))
+
+ def describe(self):
+ s = {}
+ s["offset"] = int(self.offset)
+ s["size"] = int(self.size)
+
+
+class ident_command(Structure):
+ _fields_ = ()
+
+ def describe(self):
+ return {}
+
+
+class fvmfile_command(Structure):
+ _fields_ = (("name", lc_str), ("header_addr", p_uint32))
+
+ def describe(self):
+ return {"header_addr": int(self.header_addr)}
+
+
+class uuid_command(Structure):
+ _fields_ = (("uuid", p_str16),)
+
+ def describe(self):
+ return {"uuid": self.uuid.rstrip("\x00")}
+
+
+class rpath_command(Structure):
+ _fields_ = (("path", lc_str),)
+
+ def describe(self):
+ return {}
+
+
+class linkedit_data_command(Structure):
+ _fields_ = (("dataoff", p_uint32), ("datasize", p_uint32))
+
+ def describe(self):
+ s = {}
+ s["dataoff"] = int(self.dataoff)
+ s["datasize"] = int(self.datasize)
+ return s
+
+
+class version_min_command(Structure):
+ _fields_ = (
+ ("version", p_uint32), # X.Y.Z is encoded in nibbles xxxx.yy.zz
+ ("sdk", p_uint32),
+ )
+
+ def describe(self):
+ v = int(self.version)
+ v3 = v & 0xFF
+ v = v >> 8
+ v2 = v & 0xFF
+ v = v >> 8
+ v1 = v & 0xFFFF
+ s = int(self.sdk)
+ s3 = s & 0xFF
+ s = s >> 8
+ s2 = s & 0xFF
+ s = s >> 8
+ s1 = s & 0xFFFF
+ return {
+ "version": str(int(v1)) + "." + str(int(v2)) + "." + str(int(v3)),
+ "sdk": str(int(s1)) + "." + str(int(s2)) + "." + str(int(s3)),
+ }
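+
+    # Example of the nibble packing: version 0x000A0900 decodes to "10.9.0".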
+
+
+class source_version_command(Structure):
+ _fields_ = (("version", p_uint64),)
+
+ def describe(self):
+ v = int(self.version)
+ a = v >> 40
+ b = (v >> 30) & 0x3FF
+ c = (v >> 20) & 0x3FF
+ d = (v >> 10) & 0x3FF
+ e = v & 0x3FF
+ r = str(a) + "." + str(b) + "." + str(c) + "." + str(d) + "." + str(e)
+ return {"version": r}
+
+
+class note_command(Structure):
+ _fields_ = (("data_owner", p_str16), ("offset", p_uint64), ("size", p_uint64))
+
+
+class build_version_command(Structure):
+ _fields_ = (
+ ("platform", p_uint32),
+ ("minos", p_uint32),
+ ("sdk", p_uint32),
+ ("ntools", p_uint32),
+ )
+
+ def describe(self):
+ return {}
+
+
+class build_tool_version(Structure):
+ _fields_ = (("tool", p_uint32), ("version", p_uint32))
+
+
+class data_in_code_entry(Structure):
+ _fields_ = (("offset", p_uint32), ("length", p_uint32), ("kind", p_uint32))
+
+ def describe(self):
+ return {"offset": self.offset, "length": self.length, "kind": self.kind}
+
+
+DICE_KIND_DATA = 0x0001
+DICE_KIND_JUMP_TABLE8 = 0x0002
+DICE_KIND_JUMP_TABLE16 = 0x0003
+DICE_KIND_JUMP_TABLE32 = 0x0004
+DICE_KIND_ABS_JUMP_TABLE32 = 0x0005
+
+DATA_IN_CODE_KINDS = {
+ DICE_KIND_DATA: "DICE_KIND_DATA",
+ DICE_KIND_JUMP_TABLE8: "DICE_KIND_JUMP_TABLE8",
+ DICE_KIND_JUMP_TABLE16: "DICE_KIND_JUMP_TABLE16",
+ DICE_KIND_JUMP_TABLE32: "DICE_KIND_JUMP_TABLE32",
+ DICE_KIND_ABS_JUMP_TABLE32: "DICE_KIND_ABS_JUMP_TABLE32",
+}
+
+
+class tlv_descriptor(Structure):
+ _fields_ = (
+ ("thunk", p_long), # Actually a pointer to a function
+ ("key", p_ulong),
+ ("offset", p_ulong),
+ )
+
+ def describe(self):
+ return {"thunk": self.thunk, "key": self.key, "offset": self.offset}
+
+
+class encryption_info_command(Structure):
+ _fields_ = (("cryptoff", p_uint32), ("cryptsize", p_uint32), ("cryptid", p_uint32))
+
+ def describe(self):
+ s = {}
+ s["cryptoff"] = int(self.cryptoff)
+ s["cryptsize"] = int(self.cryptsize)
+ s["cryptid"] = int(self.cryptid)
+ return s
+
+
+class encryption_info_command_64(Structure):
+ _fields_ = (
+ ("cryptoff", p_uint32),
+ ("cryptsize", p_uint32),
+ ("cryptid", p_uint32),
+ ("pad", p_uint32),
+ )
+
+ def describe(self):
+ s = {}
+ s["cryptoff"] = int(self.cryptoff)
+ s["cryptsize"] = int(self.cryptsize)
+ s["cryptid"] = int(self.cryptid)
+ s["pad"] = int(self.pad)
+ return s
+
+
+class dyld_info_command(Structure):
+ _fields_ = (
+ ("rebase_off", p_uint32),
+ ("rebase_size", p_uint32),
+ ("bind_off", p_uint32),
+ ("bind_size", p_uint32),
+ ("weak_bind_off", p_uint32),
+ ("weak_bind_size", p_uint32),
+ ("lazy_bind_off", p_uint32),
+ ("lazy_bind_size", p_uint32),
+ ("export_off", p_uint32),
+ ("export_size", p_uint32),
+ )
+
+ def describe(self):
+ dyld = {}
+ dyld["rebase_off"] = int(self.rebase_off)
+ dyld["rebase_size"] = int(self.rebase_size)
+ dyld["bind_off"] = int(self.bind_off)
+ dyld["bind_size"] = int(self.bind_size)
+ dyld["weak_bind_off"] = int(self.weak_bind_off)
+ dyld["weak_bind_size"] = int(self.weak_bind_size)
+ dyld["lazy_bind_off"] = int(self.lazy_bind_off)
+ dyld["lazy_bind_size"] = int(self.lazy_bind_size)
+ dyld["export_off"] = int(self.export_off)
+ dyld["export_size"] = int(self.export_size)
+ return dyld
+
+
+class linker_option_command(Structure):
+ _fields_ = (("count", p_uint32),)
+
+ def describe(self):
+ return {"count": int(self.count)}
+
+
+class fileset_entry_command(Structure):
+ _fields_ = (
+ ("vmaddr", p_uint64),
+ ("fileoff", p_uint64),
+ ("entry_id", lc_str),
+ ("reserved", p_uint32),
+ )
+
+
+LC_REGISTRY = {
+ LC_SEGMENT: segment_command,
+ LC_IDFVMLIB: fvmlib_command,
+ LC_LOADFVMLIB: fvmlib_command,
+ LC_ID_DYLIB: dylib_command,
+ LC_LOAD_DYLIB: dylib_command,
+ LC_LOAD_WEAK_DYLIB: dylib_command,
+ LC_SUB_FRAMEWORK: sub_framework_command,
+ LC_SUB_CLIENT: sub_client_command,
+ LC_SUB_UMBRELLA: sub_umbrella_command,
+ LC_SUB_LIBRARY: sub_library_command,
+ LC_PREBOUND_DYLIB: prebound_dylib_command,
+ LC_ID_DYLINKER: dylinker_command,
+ LC_LOAD_DYLINKER: dylinker_command,
+ LC_THREAD: thread_command,
+ LC_UNIXTHREAD: thread_command,
+ LC_ROUTINES: routines_command,
+ LC_SYMTAB: symtab_command,
+ LC_DYSYMTAB: dysymtab_command,
+ LC_TWOLEVEL_HINTS: twolevel_hints_command,
+ LC_PREBIND_CKSUM: prebind_cksum_command,
+ LC_SYMSEG: symseg_command,
+ LC_IDENT: ident_command,
+ LC_FVMFILE: fvmfile_command,
+ LC_SEGMENT_64: segment_command_64,
+ LC_ROUTINES_64: routines_command_64,
+ LC_UUID: uuid_command,
+ LC_RPATH: rpath_command,
+ LC_CODE_SIGNATURE: linkedit_data_command,
+ LC_CODE_SEGMENT_SPLIT_INFO: linkedit_data_command,
+ LC_REEXPORT_DYLIB: dylib_command,
+ LC_LAZY_LOAD_DYLIB: dylib_command,
+ LC_ENCRYPTION_INFO: encryption_info_command,
+ LC_DYLD_INFO: dyld_info_command,
+ LC_DYLD_INFO_ONLY: dyld_info_command,
+ LC_LOAD_UPWARD_DYLIB: dylib_command,
+ LC_VERSION_MIN_MACOSX: version_min_command,
+ LC_VERSION_MIN_IPHONEOS: version_min_command,
+ LC_FUNCTION_STARTS: linkedit_data_command,
+ LC_DYLD_ENVIRONMENT: dylinker_command,
+ LC_MAIN: entry_point_command,
+ LC_DATA_IN_CODE: linkedit_data_command,
+ LC_SOURCE_VERSION: source_version_command,
+ LC_DYLIB_CODE_SIGN_DRS: linkedit_data_command,
+ LC_ENCRYPTION_INFO_64: encryption_info_command_64,
+ LC_LINKER_OPTION: linker_option_command,
+ LC_LINKER_OPTIMIZATION_HINT: linkedit_data_command,
+ LC_VERSION_MIN_TVOS: version_min_command,
+ LC_VERSION_MIN_WATCHOS: version_min_command,
+ LC_NOTE: note_command,
+ LC_BUILD_VERSION: build_version_command,
+ LC_DYLD_EXPORTS_TRIE: linkedit_data_command,
+ LC_DYLD_CHAINED_FIXUPS: linkedit_data_command,
+ LC_FILESET_ENTRY: fileset_entry_command,
+}
+
+LC_NAMES = {
+ LC_SEGMENT: "LC_SEGMENT",
+ LC_IDFVMLIB: "LC_IDFVMLIB",
+ LC_LOADFVMLIB: "LC_LOADFVMLIB",
+ LC_ID_DYLIB: "LC_ID_DYLIB",
+ LC_LOAD_DYLIB: "LC_LOAD_DYLIB",
+ LC_LOAD_WEAK_DYLIB: "LC_LOAD_WEAK_DYLIB",
+ LC_SUB_FRAMEWORK: "LC_SUB_FRAMEWORK",
+ LC_SUB_CLIENT: "LC_SUB_CLIENT",
+ LC_SUB_UMBRELLA: "LC_SUB_UMBRELLA",
+ LC_SUB_LIBRARY: "LC_SUB_LIBRARY",
+ LC_PREBOUND_DYLIB: "LC_PREBOUND_DYLIB",
+ LC_ID_DYLINKER: "LC_ID_DYLINKER",
+ LC_LOAD_DYLINKER: "LC_LOAD_DYLINKER",
+ LC_THREAD: "LC_THREAD",
+ LC_UNIXTHREAD: "LC_UNIXTHREAD",
+ LC_ROUTINES: "LC_ROUTINES",
+ LC_SYMTAB: "LC_SYMTAB",
+ LC_DYSYMTAB: "LC_DYSYMTAB",
+ LC_TWOLEVEL_HINTS: "LC_TWOLEVEL_HINTS",
+ LC_PREBIND_CKSUM: "LC_PREBIND_CKSUM",
+ LC_SYMSEG: "LC_SYMSEG",
+ LC_IDENT: "LC_IDENT",
+ LC_FVMFILE: "LC_FVMFILE",
+ LC_SEGMENT_64: "LC_SEGMENT_64",
+ LC_ROUTINES_64: "LC_ROUTINES_64",
+ LC_UUID: "LC_UUID",
+ LC_RPATH: "LC_RPATH",
+ LC_CODE_SIGNATURE: "LC_CODE_SIGNATURE",
+ LC_CODE_SEGMENT_SPLIT_INFO: "LC_CODE_SEGMENT_SPLIT_INFO",
+ LC_REEXPORT_DYLIB: "LC_REEXPORT_DYLIB",
+ LC_LAZY_LOAD_DYLIB: "LC_LAZY_LOAD_DYLIB",
+ LC_ENCRYPTION_INFO: "LC_ENCRYPTION_INFO",
+ LC_DYLD_INFO: "LC_DYLD_INFO",
+ LC_DYLD_INFO_ONLY: "LC_DYLD_INFO_ONLY",
+ LC_LOAD_UPWARD_DYLIB: "LC_LOAD_UPWARD_DYLIB",
+ LC_VERSION_MIN_MACOSX: "LC_VERSION_MIN_MACOSX",
+ LC_VERSION_MIN_IPHONEOS: "LC_VERSION_MIN_IPHONEOS",
+ LC_FUNCTION_STARTS: "LC_FUNCTION_STARTS",
+ LC_DYLD_ENVIRONMENT: "LC_DYLD_ENVIRONMENT",
+ LC_MAIN: "LC_MAIN",
+ LC_DATA_IN_CODE: "LC_DATA_IN_CODE",
+ LC_SOURCE_VERSION: "LC_SOURCE_VERSION",
+ LC_DYLIB_CODE_SIGN_DRS: "LC_DYLIB_CODE_SIGN_DRS",
+ LC_LINKER_OPTIMIZATION_HINT: "LC_LINKER_OPTIMIZATION_HINT",
+ LC_VERSION_MIN_TVOS: "LC_VERSION_MIN_TVOS",
+ LC_VERSION_MIN_WATCHOS: "LC_VERSION_MIN_WATCHOS",
+ LC_NOTE: "LC_NOTE",
+ LC_BUILD_VERSION: "LC_BUILD_VERSION",
+ LC_DYLD_EXPORTS_TRIE: "LC_DYLD_EXPORTS_TRIE",
+ LC_DYLD_CHAINED_FIXUPS: "LC_DYLD_CHAINED_FIXUPS",
+ LC_ENCRYPTION_INFO_64: "LC_ENCRYPTION_INFO_64",
+ LC_LINKER_OPTION: "LC_LINKER_OPTION",
+ LC_PREPAGE: "LC_PREPAGE",
+ LC_FILESET_ENTRY: "LC_FILESET_ENTRY",
+}
+
+
+# This is another union; in nlist entries it holds n_strx, the index of
+# the symbol's name in the string table.
+class n_un(p_int32):
+ pass
+
+
+class nlist(Structure):
+ _fields_ = (
+ ("n_un", n_un),
+ ("n_type", p_uint8),
+ ("n_sect", p_uint8),
+ ("n_desc", p_short),
+ ("n_value", p_uint32),
+ )
+
+
+class nlist_64(Structure):
+ _fields_ = [
+ ("n_un", n_un),
+ ("n_type", p_uint8),
+ ("n_sect", p_uint8),
+ ("n_desc", p_short),
+ ("n_value", p_int64),
+ ]
+
+
+N_STAB = 0xE0
+N_PEXT = 0x10
+N_TYPE = 0x0E
+N_EXT = 0x01
+
+N_UNDF = 0x0
+N_ABS = 0x2
+N_SECT = 0xE
+N_PBUD = 0xC
+N_INDR = 0xA
+
+NO_SECT = 0
+MAX_SECT = 255
+
+
+class relocation_info(Structure):
+ _fields_ = (("r_address", p_uint32), ("_r_bitfield", p_uint32))
+
+ def _describe(self):
+ return (("r_address", self.r_address), ("_r_bitfield", self._r_bitfield))
+
+
+def GET_COMM_ALIGN(n_desc):
+ return (n_desc >> 8) & 0x0F
+
+
+def SET_COMM_ALIGN(n_desc, align):
+ return (n_desc & 0xF0FF) | ((align & 0x0F) << 8)
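+
+# For example (an editorial note, not upstream code): the alignment lives in
+# bits 8-11 of n_desc, so
+#     SET_COMM_ALIGN(0, 3) == 0x0300
+#     GET_COMM_ALIGN(0x0300) == 3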
+
+
+REFERENCE_TYPE = 0xF
+REFERENCE_FLAG_UNDEFINED_NON_LAZY = 0
+REFERENCE_FLAG_UNDEFINED_LAZY = 1
+REFERENCE_FLAG_DEFINED = 2
+REFERENCE_FLAG_PRIVATE_DEFINED = 3
+REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY = 4
+REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY = 5
+
+REFERENCED_DYNAMICALLY = 0x0010
+
+
+def GET_LIBRARY_ORDINAL(n_desc):
+ return ((n_desc) >> 8) & 0xFF
+
+
+def SET_LIBRARY_ORDINAL(n_desc, ordinal):
+ return ((n_desc) & 0x00FF) | (((ordinal & 0xFF) << 8))
+
+
+SELF_LIBRARY_ORDINAL = 0x0
+MAX_LIBRARY_ORDINAL = 0xFD
+DYNAMIC_LOOKUP_ORDINAL = 0xFE
+EXECUTABLE_ORDINAL = 0xFF
+
+N_NO_DEAD_STRIP = 0x0020
+N_DESC_DISCARDED = 0x0020
+N_WEAK_REF = 0x0040
+N_WEAK_DEF = 0x0080
+N_REF_TO_WEAK = 0x0080
+N_ARM_THUMB_DEF = 0x0008
+N_SYMBOL_RESOLVER = 0x0100
+N_ALT_ENTRY = 0x0200
+
+# /usr/include/mach-o/fat.h
+FAT_MAGIC = 0xCAFEBABE
+FAT_CIGAM = 0xBEBAFECA
+FAT_MAGIC_64 = 0xCAFEBABF
+FAT_CIGAM_64 = 0xBFBAFECA
+
+
+class fat_header(Structure):
+ _fields_ = (("magic", p_uint32), ("nfat_arch", p_uint32))
+
+
+class fat_arch(Structure):
+ _fields_ = (
+ ("cputype", cpu_type_t),
+ ("cpusubtype", cpu_subtype_t),
+ ("offset", p_uint32),
+ ("size", p_uint32),
+ ("align", p_uint32),
+ )
+
+
+class fat_arch64(Structure):
+ _fields_ = (
+ ("cputype", cpu_type_t),
+ ("cpusubtype", cpu_subtype_t),
+ ("offset", p_uint64),
+ ("size", p_uint64),
+ ("align", p_uint32),
+ ("reserved", p_uint32),
+ )
+
+
+REBASE_TYPE_POINTER = 1 # noqa: E221
+REBASE_TYPE_TEXT_ABSOLUTE32 = 2 # noqa: E221
+REBASE_TYPE_TEXT_PCREL32 = 3 # noqa: E221
+
+REBASE_OPCODE_MASK = 0xF0 # noqa: E221
+REBASE_IMMEDIATE_MASK = 0x0F # noqa: E221
+REBASE_OPCODE_DONE = 0x00 # noqa: E221
+REBASE_OPCODE_SET_TYPE_IMM = 0x10 # noqa: E221
+REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20 # noqa: E221
+REBASE_OPCODE_ADD_ADDR_ULEB = 0x30 # noqa: E221
+REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40 # noqa: E221
+REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50 # noqa: E221
+REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60 # noqa: E221
+REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70 # noqa: E221
+REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80 # noqa: E221
+
+BIND_TYPE_POINTER = 1 # noqa: E221
+BIND_TYPE_TEXT_ABSOLUTE32 = 2 # noqa: E221
+BIND_TYPE_TEXT_PCREL32 = 3 # noqa: E221
+
+BIND_SPECIAL_DYLIB_SELF = 0 # noqa: E221
+BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1 # noqa: E221
+BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2 # noqa: E221
+
+BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1 # noqa: E221
+BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION = 0x8 # noqa: E221
+
+BIND_OPCODE_MASK = 0xF0 # noqa: E221
+BIND_IMMEDIATE_MASK = 0x0F # noqa: E221
+BIND_OPCODE_DONE = 0x00 # noqa: E221
+BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10 # noqa: E221
+BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20 # noqa: E221
+BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30 # noqa: E221
+BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40 # noqa: E221
+BIND_OPCODE_SET_TYPE_IMM = 0x50 # noqa: E221
+BIND_OPCODE_SET_ADDEND_SLEB = 0x60 # noqa: E221
+BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70 # noqa: E221
+BIND_OPCODE_ADD_ADDR_ULEB = 0x80 # noqa: E221
+BIND_OPCODE_DO_BIND = 0x90 # noqa: E221
+BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0 # noqa: E221
+BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0 # noqa: E221
+BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0 # noqa: E221
+
+EXPORT_SYMBOL_FLAGS_KIND_MASK = 0x03 # noqa: E221
+EXPORT_SYMBOL_FLAGS_KIND_REGULAR = 0x00 # noqa: E221
+EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL = 0x01 # noqa: E221
+EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION = 0x04 # noqa: E221
+EXPORT_SYMBOL_FLAGS_REEXPORT = 0x08 # noqa: E221
+EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER = 0x10 # noqa: E221
+
+PLATFORM_MACOS = 1
+PLATFORM_IOS = 2
+PLATFORM_TVOS = 3
+PLATFORM_WATCHOS = 4
+PLATFORM_BRIDGEOS = 5
+PLATFORM_IOSMAC = 6
+PLATFORM_MACCATALYST = 6
+PLATFORM_IOSSIMULATOR = 7
+PLATFORM_TVOSSIMULATOR = 8
+PLATFORM_WATCHOSSIMULATOR = 9
+
+PLATFORM_NAMES = {
+ PLATFORM_MACOS: "macOS",
+ PLATFORM_IOS: "iOS",
+ PLATFORM_TVOS: "tvOS",
+ PLATFORM_WATCHOS: "watchOS",
+ PLATFORM_BRIDGEOS: "bridgeOS",
+ PLATFORM_MACCATALYST: "catalyst",
+ PLATFORM_IOSSIMULATOR: "iOS simulator",
+ PLATFORM_TVOSSIMULATOR: "tvOS simulator",
+ PLATFORM_WATCHOSSIMULATOR: "watchOS simulator",
+}
+
+TOOL_CLANG = 1
+TOOL_SWIFT = 2
+TOOL_LD = 3
+
+TOOL_NAMES = {TOOL_CLANG: "clang", TOOL_SWIFT: "swift", TOOL_LD: "ld"}
diff --git a/lib/spack/external/_vendoring/macholib/macho_dump.py b/lib/spack/external/_vendoring/macholib/macho_dump.py
new file mode 100644
index 0000000000..bca5d777ae
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/macho_dump.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import sys
+
+from macholib._cmdline import main as _main
+from macholib.mach_o import CPU_TYPE_NAMES, MH_CIGAM_64, MH_MAGIC_64, get_cpu_subtype
+from macholib.MachO import MachO
+
+ARCH_MAP = {
+ ("<", "64-bit"): "x86_64",
+ ("<", "32-bit"): "i386",
+ (">", "64-bit"): "ppc64",
+ (">", "32-bit"): "ppc",
+}
+
+
+def print_file(fp, path):
+ print(path, file=fp)
+ m = MachO(path)
+ for header in m.headers:
+ seen = set()
+
+ if header.MH_MAGIC == MH_MAGIC_64 or header.MH_MAGIC == MH_CIGAM_64:
+ sz = "64-bit"
+ else:
+ sz = "32-bit"
+
+ arch = CPU_TYPE_NAMES.get(header.header.cputype, header.header.cputype)
+
+ subarch = get_cpu_subtype(header.header.cputype, header.header.cpusubtype)
+
+ print(
+ " [%s endian=%r size=%r arch=%r subarch=%r]"
+ % (header.__class__.__name__, header.endian, sz, arch, subarch),
+ file=fp,
+ )
+ for _idx, _name, other in header.walkRelocatables():
+ if other not in seen:
+ seen.add(other)
+ print("\t" + other, file=fp)
+ print("", file=fp)
+
+
+def main():
+ print(
+ "WARNING: 'macho_dump' is deprecated, use 'python -mmacholib dump' " "instead"
+ )
+ _main(print_file)
+
+
+if __name__ == "__main__":
+ try:
+ sys.exit(main())
+ except KeyboardInterrupt:
+ pass
diff --git a/lib/spack/external/_vendoring/macholib/macho_find.py b/lib/spack/external/_vendoring/macholib/macho_find.py
new file mode 100644
index 0000000000..a963c36dcc
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/macho_find.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+from macholib._cmdline import main as _main
+
+
+def print_file(fp, path):
+ print(path, file=fp)
+
+
+def main():
+ print(
+ "WARNING: 'macho_find' is deprecated, " "use 'python -mmacholib dump' instead"
+ )
+ _main(print_file)
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except KeyboardInterrupt:
+ pass
diff --git a/lib/spack/external/_vendoring/macholib/macho_standalone.py b/lib/spack/external/_vendoring/macholib/macho_standalone.py
new file mode 100644
index 0000000000..0bb29e802f
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/macho_standalone.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+from macholib.MachOStandalone import MachOStandalone
+from macholib.util import strip_files
+
+
+def standaloneApp(path):
+ if not (os.path.isdir(path) and os.path.exists(os.path.join(path, "Contents"))):
+ print("%s: %s does not look like an app bundle" % (sys.argv[0], path))
+ sys.exit(1)
+ files = MachOStandalone(path).run()
+ strip_files(files)
+
+
+def main():
+ print(
+ "WARNING: 'macho_standalone' is deprecated, use "
+ "'python -mmacholib standalone' instead"
+ )
+ if not sys.argv[1:]:
+ raise SystemExit("usage: %s [appbundle ...]" % (sys.argv[0],))
+ for fn in sys.argv[1:]:
+ standaloneApp(fn)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/spack/external/_vendoring/macholib/ptypes.py b/lib/spack/external/_vendoring/macholib/ptypes.py
new file mode 100644
index 0000000000..248b5cb2a3
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/ptypes.py
@@ -0,0 +1,334 @@
+"""
+This module defines packable types, that is types than can be easily
+converted to a binary format as used in MachO headers.
+"""
+import struct
+import sys
+from itertools import chain, starmap
+
+try:
+ from itertools import imap, izip
+except ImportError:
+ izip, imap = zip, map
+
+__all__ = """
+sizeof
+BasePackable
+Structure
+pypackable
+p_char
+p_byte
+p_ubyte
+p_short
+p_ushort
+p_int
+p_uint
+p_long
+p_ulong
+p_longlong
+p_ulonglong
+p_int8
+p_uint8
+p_int16
+p_uint16
+p_int32
+p_uint32
+p_int64
+p_uint64
+p_float
+p_double
+""".split()
+
+
+def sizeof(s):
+ """
+ Return the size of an object when packed
+ """
+ if hasattr(s, "_size_"):
+ return s._size_
+
+ elif isinstance(s, bytes):
+ return len(s)
+
+ raise ValueError(s)
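+
+# For example (an editorial note, not upstream macholib code):
+# sizeof(p_uint32) == 4, resolved via the class-level _size_ attribute,
+# while sizeof(b"abc") == 3 for raw bytes.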
+
+
+class MetaPackable(type):
+ """
+    Fixed-size struct.unpack-able types use from_tuple as their designated
+    initializer
+ """
+
+ def from_mmap(cls, mm, ptr, **kw):
+ return cls.from_str(mm[ptr : ptr + cls._size_], **kw) # noqa: E203
+
+ def from_fileobj(cls, f, **kw):
+ return cls.from_str(f.read(cls._size_), **kw)
+
+ def from_str(cls, s, **kw):
+ endian = kw.get("_endian_", cls._endian_)
+ return cls.from_tuple(struct.unpack(endian + cls._format_, s), **kw)
+
+ def from_tuple(cls, tpl, **kw):
+ return cls(tpl[0], **kw)
+
+
+class BasePackable(object):
+ _endian_ = ">"
+
+ def to_str(self):
+ raise NotImplementedError
+
+ def to_fileobj(self, f):
+ f.write(self.to_str())
+
+ def to_mmap(self, mm, ptr):
+ mm[ptr : ptr + self._size_] = self.to_str() # noqa: E203
+
+
+# This defines a class with a custom metaclass; we'd normally
+# use "class Packable(BasePackable, metaclass=MetaPackable)",
+# but that syntax is not valid in Python 2 (and likewise the
+# Python 2 syntax is not valid in Python 3).
+def _make():
+ def to_str(self):
+ cls = type(self)
+ endian = getattr(self, "_endian_", cls._endian_)
+ return struct.pack(endian + cls._format_, self)
+
+ return MetaPackable("Packable", (BasePackable,), {"to_str": to_str})
+
+
+Packable = _make()
+del _make
+
+
+def pypackable(name, pytype, format):
+ """
+ Create a "mix-in" class with a python type and a
+ Packable with the given struct format
+ """
+ size, items = _formatinfo(format)
+
+ def __new__(cls, *args, **kwds):
+ if "_endian_" in kwds:
+ _endian_ = kwds.pop("_endian_")
+ else:
+ _endian_ = cls._endian_
+
+ result = pytype.__new__(cls, *args, **kwds)
+ result._endian_ = _endian_
+ return result
+
+ return type(Packable)(
+ name,
+ (pytype, Packable),
+ {"_format_": format, "_size_": size, "_items_": items, "__new__": __new__},
+ )
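+
+# Illustrative usage sketch (an editorial addition, not upstream macholib
+# code; the name "p_example" is hypothetical):
+#
+#     p_example = pypackable("p_example", int, "I")
+#     value = p_example(7, _endian_="<")
+#     assert value + 1 == 8                           # behaves like an int
+#     assert value.to_str() == b"\x07\x00\x00\x00"    # packs little-endian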
+
+
+def _formatinfo(format):
+ """
+ Calculate the size and number of items in a struct format.
+ """
+ size = struct.calcsize(format)
+ return size, len(struct.unpack(format, b"\x00" * size))
+
+
+class MetaStructure(MetaPackable):
+ """
+ The metaclass of Structure objects that does all the magic.
+
+ Since we can assume that all Structures have a fixed size,
+ we can do a bunch of calculations up front and pack or
+ unpack the whole thing in one struct call.
+ """
+
+ def __new__(cls, clsname, bases, dct):
+ fields = dct["_fields_"]
+ names = []
+ types = []
+ structmarks = []
+ format = ""
+ items = 0
+ size = 0
+
+ def struct_property(name, typ):
+ def _get(self):
+ return self._objects_[name]
+
+ def _set(self, obj):
+ if type(obj) is not typ:
+ obj = typ(obj)
+ self._objects_[name] = obj
+
+        return property(_get, _set, doc=typ.__name__)
+
+ for name, typ in fields:
+ dct[name] = struct_property(name, typ)
+ names.append(name)
+ types.append(typ)
+ format += typ._format_
+ size += typ._size_
+ if typ._items_ > 1:
+ structmarks.append((items, typ._items_, typ))
+ items += typ._items_
+
+ dct["_structmarks_"] = structmarks
+ dct["_names_"] = names
+ dct["_types_"] = types
+ dct["_size_"] = size
+ dct["_items_"] = items
+ dct["_format_"] = format
+ return super(MetaStructure, cls).__new__(cls, clsname, bases, dct)
+
+ def from_tuple(cls, tpl, **kw):
+ values = []
+ current = 0
+ for begin, length, typ in cls._structmarks_:
+ if begin > current:
+ values.extend(tpl[current:begin])
+ current = begin + length
+ values.append(typ.from_tuple(tpl[begin:current], **kw))
+ values.extend(tpl[current:])
+ return cls(*values, **kw)
+
+
+# See metaclass discussion earlier in this file
+def _make():
+ class_dict = {}
+ class_dict["_fields_"] = ()
+
+ def as_method(function):
+ class_dict[function.__name__] = function
+
+ @as_method
+ def __init__(self, *args, **kwargs):
+ if len(args) == 1 and not kwargs and type(args[0]) is type(self):
+ kwargs = args[0]._objects_
+ args = ()
+ self._objects_ = {}
+ iargs = chain(izip(self._names_, args), kwargs.items())
+ for key, value in iargs:
+ if key not in self._names_ and key != "_endian_":
+ raise TypeError
+ setattr(self, key, value)
+ for key, typ in izip(self._names_, self._types_):
+ if key not in self._objects_:
+ self._objects_[key] = typ()
+
+ @as_method
+ def _get_packables(self):
+ for obj in imap(self._objects_.__getitem__, self._names_):
+ if hasattr(obj, "_get_packables"):
+ for obj in obj._get_packables():
+ yield obj
+
+ else:
+ yield obj
+
+ @as_method
+ def to_str(self):
+ return struct.pack(self._endian_ + self._format_, *self._get_packables())
+
+ @as_method
+ def __cmp__(self, other):
+ if type(other) is not type(self):
+ raise TypeError(
+ "Cannot compare objects of type %r to objects of type %r"
+ % (type(other), type(self))
+ )
+ if sys.version_info[0] == 2:
+ _cmp = cmp # noqa: F821
+ else:
+
+ def _cmp(a, b):
+ if a < b:
+ return -1
+ elif a > b:
+ return 1
+ elif a == b:
+ return 0
+ else:
+ raise TypeError()
+
+ for cmpval in starmap(
+ _cmp, izip(self._get_packables(), other._get_packables())
+ ):
+ if cmpval != 0:
+ return cmpval
+ return 0
+
+ @as_method
+ def __eq__(self, other):
+ r = self.__cmp__(other)
+ return r == 0
+
+ @as_method
+ def __ne__(self, other):
+ r = self.__cmp__(other)
+ return r != 0
+
+ @as_method
+ def __lt__(self, other):
+ r = self.__cmp__(other)
+ return r < 0
+
+ @as_method
+ def __le__(self, other):
+ r = self.__cmp__(other)
+ return r <= 0
+
+ @as_method
+ def __gt__(self, other):
+ r = self.__cmp__(other)
+ return r > 0
+
+ @as_method
+ def __ge__(self, other):
+ r = self.__cmp__(other)
+ return r >= 0
+
+ @as_method
+ def __repr__(self):
+ result = []
+ result.append("<")
+ result.append(type(self).__name__)
+ for nm in self._names_:
+ result.append(" %s=%r" % (nm, getattr(self, nm)))
+ result.append(">")
+ return "".join(result)
+
+ return MetaStructure("Structure", (BasePackable,), class_dict)
+
+
+Structure = _make()
+del _make
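+
+# Illustrative sketch (an editorial addition, not upstream macholib code):
+# a subclass declares _fields_ and MetaStructure derives the combined
+# struct format, so the whole record round-trips in one struct call.
+# "example_pair" is a hypothetical name; p_uint32 is defined below.
+#
+#     class example_pair(Structure):
+#         _fields_ = (("lo", p_uint32), ("hi", p_uint32))
+#
+#     pair = example_pair(lo=1, hi=2)
+#     assert pair.to_str() == b"\x00\x00\x00\x01\x00\x00\x00\x02"  # ">" default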
+
+try:
+ long
+except NameError:
+ long = int
+
+# export common packables with predictable names
+p_char = pypackable("p_char", bytes, "c")
+p_int8 = pypackable("p_int8", int, "b")
+p_uint8 = pypackable("p_uint8", int, "B")
+p_int16 = pypackable("p_int16", int, "h")
+p_uint16 = pypackable("p_uint16", int, "H")
+p_int32 = pypackable("p_int32", int, "i")
+p_uint32 = pypackable("p_uint32", long, "I")
+p_int64 = pypackable("p_int64", long, "q")
+p_uint64 = pypackable("p_uint64", long, "Q")
+p_float = pypackable("p_float", float, "f")
+p_double = pypackable("p_double", float, "d")
+
+# Deprecated names; emitting a real deprecation warning for these would
+# require a module-level trick.
+p_byte = p_int8
+p_ubyte = p_uint8
+p_short = p_int16
+p_ushort = p_uint16
+p_int = p_long = p_int32
+p_uint = p_ulong = p_uint32
+p_longlong = p_int64
+p_ulonglong = p_uint64
diff --git a/lib/spack/external/_vendoring/macholib/util.py b/lib/spack/external/_vendoring/macholib/util.py
new file mode 100644
index 0000000000..d5ab33544a
--- /dev/null
+++ b/lib/spack/external/_vendoring/macholib/util.py
@@ -0,0 +1,262 @@
+import os
+import shutil
+import stat
+import struct
+import sys
+
+from macholib import mach_o
+
+MAGIC = [
+ struct.pack("!L", getattr(mach_o, "MH_" + _))
+ for _ in ["MAGIC", "CIGAM", "MAGIC_64", "CIGAM_64"]
+]
+FAT_MAGIC_BYTES = struct.pack("!L", mach_o.FAT_MAGIC)
+MAGIC_LEN = 4
+STRIPCMD = ["/usr/bin/strip", "-x", "-S", "-"]
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+
+def fsencoding(s, encoding=sys.getfilesystemencoding()): # noqa: M511,B008
+ """
+ Ensure the given argument is in filesystem encoding (not unicode)
+ """
+ if isinstance(s, unicode):
+ s = s.encode(encoding)
+ return s
+
+
+def move(src, dst):
+ """
+ move that ensures filesystem encoding of paths
+ """
+ shutil.move(fsencoding(src), fsencoding(dst))
+
+
+def copy2(src, dst):
+ """
+ copy2 that ensures filesystem encoding of paths
+ """
+ shutil.copy2(fsencoding(src), fsencoding(dst))
+
+
+def flipwritable(fn, mode=None):
+ """
+ Flip the writability of a file and return the old mode. Returns None
+ if the file is already writable.
+ """
+ if os.access(fn, os.W_OK):
+ return None
+ old_mode = os.stat(fn).st_mode
+ os.chmod(fn, stat.S_IWRITE | old_mode)
+ return old_mode
+
+
+class fileview(object):
+ """
+ A proxy for file-like objects that exposes a given view of a file
+ """
+
+ def __init__(self, fileobj, start, size):
+ self._fileobj = fileobj
+ self._start = start
+ self._end = start + size
+
+ def __repr__(self):
+ return "<fileview [%d, %d] %r>" % (self._start, self._end, self._fileobj)
+
+ def tell(self):
+ return self._fileobj.tell() - self._start
+
+ def _checkwindow(self, seekto, op):
+ if not (self._start <= seekto <= self._end):
+ raise IOError(
+ "%s to offset %d is outside window [%d, %d]"
+ % (op, seekto, self._start, self._end)
+ )
+
+ def seek(self, offset, whence=0):
+ seekto = offset
+ if whence == 0:
+ seekto += self._start
+ elif whence == 1:
+ seekto += self._fileobj.tell()
+ elif whence == 2:
+ seekto += self._end
+ else:
+ raise IOError("Invalid whence argument to seek: %r" % (whence,))
+ self._checkwindow(seekto, "seek")
+ self._fileobj.seek(seekto)
+
+ def write(self, bytes):
+ here = self._fileobj.tell()
+ self._checkwindow(here, "write")
+ self._checkwindow(here + len(bytes), "write")
+ self._fileobj.write(bytes)
+
+ def read(self, size=sys.maxsize):
+        if size < 0:
+            raise ValueError(
+                "Invalid size %s while reading from %s" % (size, self._fileobj)
+            )
+ here = self._fileobj.tell()
+ self._checkwindow(here, "read")
+ bytes = min(size, self._end - here)
+ return self._fileobj.read(bytes)
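+
+# Editorial usage sketch (not upstream code; the path is hypothetical):
+# a fileview confines seeks and reads to [start, start + size), which is
+# how a single architecture inside a fat binary can be exposed:
+#
+#     with open("some.bin", "rb") as f:
+#         view = fileview(f, 512, 128)
+#         view.seek(0)        # actually seeks to absolute offset 512
+#         chunk = view.read(16)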
+
+
+def mergecopy(src, dest):
+ """
+ copy2, but only if the destination isn't up to date
+ """
+ if os.path.exists(dest) and os.stat(dest).st_mtime >= os.stat(src).st_mtime:
+ return
+
+ copy2(src, dest)
+
+
+def mergetree(src, dst, condition=None, copyfn=mergecopy, srcbase=None):
+ """
+ Recursively merge a directory tree using mergecopy().
+ """
+ src = fsencoding(src)
+ dst = fsencoding(dst)
+ if srcbase is None:
+ srcbase = src
+ names = map(fsencoding, os.listdir(src))
+ try:
+ os.makedirs(dst)
+ except OSError:
+ pass
+ errors = []
+ for name in names:
+ srcname = os.path.join(src, name)
+ dstname = os.path.join(dst, name)
+ if condition is not None and not condition(srcname):
+ continue
+ try:
+ if os.path.islink(srcname):
+ realsrc = os.readlink(srcname)
+ os.symlink(realsrc, dstname)
+ elif os.path.isdir(srcname):
+ mergetree(
+ srcname,
+ dstname,
+ condition=condition,
+ copyfn=copyfn,
+ srcbase=srcbase,
+ )
+ else:
+ copyfn(srcname, dstname)
+ except (IOError, os.error) as why:
+ errors.append((srcname, dstname, why))
+ if errors:
+ raise IOError(errors)
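+
+# Editorial usage sketch (not upstream code; paths are hypothetical).
+# Note that fsencoding() converts paths to bytes, so the condition
+# callback receives byte strings:
+#
+#     mergetree(
+#         "/src/App.app", "/dst/App.app",
+#         condition=lambda p: not p.endswith(b".pyc"),
+#     )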
+
+
+def sdk_normalize(filename):
+ """
+    Normalize a path to strip out the SDK portion, normally so that the
+    path can then be checked against the system locations.
+ """
+ if filename.startswith("/Developer/SDKs/"):
+ pathcomp = filename.split("/")
+ del pathcomp[1:4]
+ filename = "/".join(pathcomp)
+ return filename
+
+
+NOT_SYSTEM_FILES = []
+
+
+def in_system_path(filename):
+ """
+ Return True if the file is in a system path
+ """
+ fn = sdk_normalize(os.path.realpath(filename))
+ if fn.startswith("/usr/local/"):
+ return False
+ elif fn.startswith("/System/") or fn.startswith("/usr/"):
+ if fn in NOT_SYSTEM_FILES:
+ return False
+ return True
+ else:
+ return False
+
+
+def has_filename_filter(module):
+ """
+    Return True only if the module has a non-None filename attribute
+ """
+ return getattr(module, "filename", None) is not None
+
+
+def get_magic():
+ """
+ Get a list of valid Mach-O header signatures, not including the fat header
+ """
+ return MAGIC
+
+
+def is_platform_file(path):
+ """
+ Return True if the file is Mach-O
+ """
+ if not os.path.exists(path) or os.path.islink(path):
+ return False
+ # If the header is fat, we need to read into the first arch
+ with open(path, "rb") as fileobj:
+ bytes = fileobj.read(MAGIC_LEN)
+ if bytes == FAT_MAGIC_BYTES:
+ # Read in the fat header
+ fileobj.seek(0)
+ header = mach_o.fat_header.from_fileobj(fileobj, _endian_=">")
+ if header.nfat_arch < 1:
+ return False
+ # Read in the first fat arch header
+ arch = mach_o.fat_arch.from_fileobj(fileobj, _endian_=">")
+ fileobj.seek(arch.offset)
+ # Read magic off the first header
+ bytes = fileobj.read(MAGIC_LEN)
+ for magic in MAGIC:
+ if bytes == magic:
+ return True
+ return False
+
+
+def iter_platform_files(dst):
+ """
+ Walk a directory and yield each full path that is a Mach-O file
+ """
+ for root, _dirs, files in os.walk(dst):
+ for fn in files:
+ fn = os.path.join(root, fn)
+ if is_platform_file(fn):
+ yield fn
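+
+# Editorial usage sketch (not upstream code; the path is hypothetical):
+# iter_platform_files pairs naturally with strip_files (defined below):
+#
+#     machos = list(iter_platform_files("/path/to/App.app"))
+#     strip_files(machos)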
+
+
+def strip_files(files, argv_max=(256 * 1024)):
+ """
+ Strip a list of files
+ """
+ tostrip = [(fn, flipwritable(fn)) for fn in files]
+ while tostrip:
+ cmd = list(STRIPCMD)
+ flips = []
+ pathlen = sum(len(s) + 1 for s in cmd)
+ while pathlen < argv_max:
+ if not tostrip:
+ break
+ added, flip = tostrip.pop()
+ pathlen += len(added) + 1
+ cmd.append(added)
+ flips.append((added, flip))
+        else:
+            # argv limit reached: drop the last path from this batch and
+            # push it back so a later batch picks it up
+            cmd.pop()
+            tostrip.append(flips.pop())
+ os.spawnv(os.P_WAIT, cmd[0], cmd)
+ for args in flips:
+ flipwritable(*args)
diff --git a/lib/spack/external/_vendoring/markupsafe/LICENSE.rst b/lib/spack/external/_vendoring/markupsafe/LICENSE.rst
new file mode 100644
index 0000000000..9d227a0cc4
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/spack/external/_vendoring/markupsafe/__init__.py b/lib/spack/external/_vendoring/markupsafe/__init__.py
new file mode 100644
index 0000000000..d331ac3622
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/__init__.py
@@ -0,0 +1,288 @@
+import functools
+import re
+import string
+import typing as t
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ class HasHTML(te.Protocol):
+ def __html__(self) -> str:
+ pass
+
+
+__version__ = "2.0.1"
+
+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+
+
+def _simple_escaping_wrapper(name: str) -> t.Callable[..., "Markup"]:
+ orig = getattr(str, name)
+
+ @functools.wraps(orig)
+ def wrapped(self: "Markup", *args: t.Any, **kwargs: t.Any) -> "Markup":
+ args = _escape_argspec(list(args), enumerate(args), self.escape) # type: ignore
+ _escape_argspec(kwargs, kwargs.items(), self.escape)
+ return self.__class__(orig(self, *args, **kwargs))
+
+ return wrapped
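+
+# Editorial sketch (not upstream markupsafe code): the wrapped str methods
+# escape their arguments before delegating, e.g.
+#
+#     Markup("<em>x</em>").replace("x", "<y>")
+#     # -> Markup('<em>&lt;y&gt;</em>')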
+
+
+class Markup(str):
+ """A string that is ready to be safely inserted into an HTML or XML
+ document, either because it was escaped or because it was marked
+ safe.
+
+ Passing an object to the constructor converts it to text and wraps
+ it to mark it safe without escaping. To escape the text, use the
+ :meth:`escape` class method instead.
+
+ >>> Markup("Hello, <em>World</em>!")
+ Markup('Hello, <em>World</em>!')
+ >>> Markup(42)
+ Markup('42')
+ >>> Markup.escape("Hello, <em>World</em>!")
+    Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
+
+ This implements the ``__html__()`` interface that some frameworks
+ use. Passing an object that implements ``__html__()`` will wrap the
+ output of that method, marking it safe.
+
+ >>> class Foo:
+ ... def __html__(self):
+ ... return '<a href="/foo">foo</a>'
+ ...
+ >>> Markup(Foo())
+ Markup('<a href="/foo">foo</a>')
+
+ This is a subclass of :class:`str`. It has the same methods, but
+ escapes their arguments and returns a ``Markup`` instance.
+
+ >>> Markup("<em>%s</em>") % ("foo & bar",)
+ Markup('<em>foo &amp; bar</em>')
+ >>> Markup("<em>Hello</em> ") + "<foo>"
+ Markup('<em>Hello</em> &lt;foo&gt;')
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
+ ) -> "Markup":
+ if hasattr(base, "__html__"):
+ base = base.__html__()
+
+ if encoding is None:
+ return super().__new__(cls, base)
+
+ return super().__new__(cls, base, encoding, errors)
+
+ def __html__(self) -> "Markup":
+ return self
+
+ def __add__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.__class__(super().__add__(self.escape(other)))
+
+ return NotImplemented
+
+ def __radd__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.escape(other).__add__(self)
+
+ return NotImplemented
+
+ def __mul__(self, num: int) -> "Markup":
+ if isinstance(num, int):
+ return self.__class__(super().__mul__(num))
+
+ return NotImplemented # type: ignore
+
+ __rmul__ = __mul__
+
+ def __mod__(self, arg: t.Any) -> "Markup":
+ if isinstance(arg, tuple):
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+ else:
+ arg = _MarkupEscapeHelper(arg, self.escape)
+
+ return self.__class__(super().__mod__(arg))
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({super().__repr__()})"
+
+ def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "Markup":
+ return self.__class__(super().join(map(self.escape, seq)))
+
+ join.__doc__ = str.join.__doc__
+
+ def split( # type: ignore
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["Markup"]:
+ return [self.__class__(v) for v in super().split(sep, maxsplit)]
+
+ split.__doc__ = str.split.__doc__
+
+ def rsplit( # type: ignore
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["Markup"]:
+ return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
+
+ rsplit.__doc__ = str.rsplit.__doc__
+
+ def splitlines(self, keepends: bool = False) -> t.List["Markup"]: # type: ignore
+ return [self.__class__(v) for v in super().splitlines(keepends)]
+
+ splitlines.__doc__ = str.splitlines.__doc__
+
+ def unescape(self) -> str:
+ """Convert escaped markup back into a text string. This replaces
+ HTML entities with the characters they represent.
+
+ >>> Markup("Main &raquo; <em>About</em>").unescape()
+ 'Main » <em>About</em>'
+ """
+ from html import unescape
+
+ return unescape(str(self))
+
+ def striptags(self) -> str:
+ """:meth:`unescape` the markup, remove tags, and normalize
+ whitespace to single spaces.
+
+ >>> Markup("Main &raquo;\t<em>About</em>").striptags()
+ 'Main » About'
+ """
+ stripped = " ".join(_striptags_re.sub("", self).split())
+ return Markup(stripped).unescape()
+
+ @classmethod
+ def escape(cls, s: t.Any) -> "Markup":
+ """Escape a string. Calls :func:`escape` and ensures that for
+ subclasses the correct type is returned.
+ """
+ rv = escape(s)
+
+ if rv.__class__ is not cls:
+ return cls(rv)
+
+ return rv
+
+ for method in (
+ "__getitem__",
+ "capitalize",
+ "title",
+ "lower",
+ "upper",
+ "replace",
+ "ljust",
+ "rjust",
+ "lstrip",
+ "rstrip",
+ "center",
+ "strip",
+ "translate",
+ "expandtabs",
+ "swapcase",
+ "zfill",
+ ):
+ locals()[method] = _simple_escaping_wrapper(method)
+
+ del method
+
+ def partition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
+ l, s, r = super().partition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def rpartition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
+ l, s, r = super().rpartition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def format(self, *args: t.Any, **kwargs: t.Any) -> "Markup":
+ formatter = EscapeFormatter(self.escape)
+ return self.__class__(formatter.vformat(self, args, kwargs))
+
+ def __html_format__(self, format_spec: str) -> "Markup":
+ if format_spec:
+ raise ValueError("Unsupported format specification for Markup.")
+
+ return self
+
+
+class EscapeFormatter(string.Formatter):
+ __slots__ = ("escape",)
+
+ def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.escape = escape
+ super().__init__()
+
+ def format_field(self, value: t.Any, format_spec: str) -> str:
+ if hasattr(value, "__html_format__"):
+ rv = value.__html_format__(format_spec)
+ elif hasattr(value, "__html__"):
+ if format_spec:
+ raise ValueError(
+ f"Format specifier {format_spec} given, but {type(value)} does not"
+ " define __html_format__. A class that defines __html__ must define"
+ " __html_format__ to work with format specifiers."
+ )
+ rv = value.__html__()
+ else:
+ # We need to make sure the format spec is str here as
+ # otherwise the wrong callback methods are invoked.
+ rv = string.Formatter.format_field(self, value, str(format_spec))
+ return str(self.escape(rv))
+
+
+_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
+
+
+def _escape_argspec(
+ obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
+) -> _ListOrDict:
+ """Helper for various string-wrapped functions."""
+ for key, value in iterable:
+ if isinstance(value, str) or hasattr(value, "__html__"):
+ obj[key] = escape(value)
+
+ return obj
+
+
+class _MarkupEscapeHelper:
+ """Helper for :meth:`Markup.__mod__`."""
+
+ __slots__ = ("obj", "escape")
+
+ def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.obj = obj
+ self.escape = escape
+
+ def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
+ return _MarkupEscapeHelper(self.obj[item], self.escape)
+
+ def __str__(self) -> str:
+ return str(self.escape(self.obj))
+
+ def __repr__(self) -> str:
+ return str(self.escape(repr(self.obj)))
+
+ def __int__(self) -> int:
+ return int(self.obj)
+
+ def __float__(self) -> float:
+ return float(self.obj)
+
+
+# imported at the bottom of the module to avoid a circular import
+try:
+ from ._speedups import escape as escape
+ from ._speedups import escape_silent as escape_silent
+ from ._speedups import soft_str as soft_str
+ from ._speedups import soft_unicode
+except ImportError:
+ from ._native import escape as escape
+ from ._native import escape_silent as escape_silent # noqa: F401
+ from ._native import soft_str as soft_str # noqa: F401
+ from ._native import soft_unicode # noqa: F401
diff --git a/lib/spack/external/_vendoring/markupsafe/_native.py b/lib/spack/external/_vendoring/markupsafe/_native.py
new file mode 100644
index 0000000000..6f7eb7a8cb
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/_native.py
@@ -0,0 +1,75 @@
+import typing as t
+
+from . import Markup
+
+
+def escape(s: t.Any) -> Markup:
+ """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
+ the string with HTML-safe sequences. Use this if you need to display
+ text that might contain such characters in HTML.
+
+ If the object has an ``__html__`` method, it is called and the
+ return value is assumed to already be safe for HTML.
+
+ :param s: An object to be converted to a string and escaped.
+ :return: A :class:`Markup` string with the escaped text.
+ """
+ if hasattr(s, "__html__"):
+ return Markup(s.__html__())
+
+ return Markup(
+ str(s)
+ .replace("&", "&amp;")
+ .replace(">", "&gt;")
+ .replace("<", "&lt;")
+ .replace("'", "&#39;")
+ .replace('"', "&#34;")
+ )
+
+
+def escape_silent(s: t.Optional[t.Any]) -> Markup:
+ """Like :func:`escape` but treats ``None`` as the empty string.
+ Useful with optional values, as otherwise you get the string
+ ``'None'`` when the value is ``None``.
+
+ >>> escape(None)
+ Markup('None')
+ >>> escape_silent(None)
+ Markup('')
+ """
+ if s is None:
+ return Markup()
+
+ return escape(s)
+
+
+def soft_str(s: t.Any) -> str:
+ """Convert an object to a string if it isn't already. This preserves
+ a :class:`Markup` string rather than converting it back to a basic
+ string, so it will still be marked as safe and won't be escaped
+ again.
+
+ >>> value = escape("<User 1>")
+ >>> value
+ Markup('&lt;User 1&gt;')
+ >>> escape(str(value))
+ Markup('&amp;lt;User 1&amp;gt;')
+ >>> escape(soft_str(value))
+ Markup('&lt;User 1&gt;')
+ """
+ if not isinstance(s, str):
+ return str(s)
+
+ return s
+
+
+def soft_unicode(s: t.Any) -> str:
+ import warnings
+
+ warnings.warn(
+ "'soft_unicode' has been renamed to 'soft_str'. The old name"
+ " will be removed in MarkupSafe 2.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return soft_str(s)
diff --git a/lib/spack/external/_vendoring/markupsafe/_speedups.c b/lib/spack/external/_vendoring/markupsafe/_speedups.c
new file mode 100644
index 0000000000..44967b1fdc
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/_speedups.c
@@ -0,0 +1,339 @@
+#include <Python.h>
+
+static PyObject* markup;
+
+static int
+init_constants(void)
+{
+ PyObject *module;
+
+ /* import markup type so that we can mark the return value */
+ module = PyImport_ImportModule("markupsafe");
+ if (!module)
+ return 0;
+ markup = PyObject_GetAttrString(module, "Markup");
+ Py_DECREF(module);
+
+ return 1;
+}
+
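+/* Editorial note (not part of upstream MarkupSafe): escaping runs in two
+ * passes.  GET_DELTA scans the input once to count the extra characters
+ * the replacement entities need; the caller then allocates the output
+ * string exactly once, and DO_ESCAPE copies runs of unescaped characters
+ * with memcpy, emitting entity bytes in between. */
+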
+#define GET_DELTA(inp, inp_end, delta) \
+ while (inp < inp_end) { \
+ switch (*inp++) { \
+ case '"': \
+ case '\'': \
+ case '&': \
+ delta += 4; \
+ break; \
+ case '<': \
+ case '>': \
+ delta += 3; \
+ break; \
+ } \
+ }
+
+#define DO_ESCAPE(inp, inp_end, outp) \
+ { \
+ Py_ssize_t ncopy = 0; \
+ while (inp < inp_end) { \
+ switch (*inp) { \
+ case '"': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '4'; \
+ *outp++ = ';'; \
+ break; \
+ case '\'': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '9'; \
+ *outp++ = ';'; \
+ break; \
+ case '&': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'a'; \
+ *outp++ = 'm'; \
+ *outp++ = 'p'; \
+ *outp++ = ';'; \
+ break; \
+ case '<': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'l'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ case '>': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'g'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ default: \
+ ncopy++; \
+ } \
+ inp++; \
+ } \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ }
+
+static PyObject*
+escape_unicode_kind1(PyUnicodeObject *in)
+{
+ Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
+ Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS1 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
+ PyUnicode_IS_ASCII(in) ? 127 : 255);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_1BYTE_DATA(in);
+ outp = PyUnicode_1BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode_kind2(PyUnicodeObject *in)
+{
+ Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
+ Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS2 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_2BYTE_DATA(in);
+ outp = PyUnicode_2BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+
+static PyObject*
+escape_unicode_kind4(PyUnicodeObject *in)
+{
+ Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
+ Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS4 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_4BYTE_DATA(in);
+ outp = PyUnicode_4BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+ if (PyUnicode_READY(in))
+ return NULL;
+
+ switch (PyUnicode_KIND(in)) {
+ case PyUnicode_1BYTE_KIND:
+ return escape_unicode_kind1(in);
+ case PyUnicode_2BYTE_KIND:
+ return escape_unicode_kind2(in);
+ case PyUnicode_4BYTE_KIND:
+ return escape_unicode_kind4(in);
+ }
+ assert(0); /* shouldn't happen */
+ return NULL;
+}
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+ static PyObject *id_html;
+ PyObject *s = NULL, *rv = NULL, *html;
+
+ if (id_html == NULL) {
+ id_html = PyUnicode_InternFromString("__html__");
+ if (id_html == NULL) {
+ return NULL;
+ }
+ }
+
+ /* we don't have to escape integers, bools or floats */
+ if (PyLong_CheckExact(text) ||
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
+ text == Py_None)
+ return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+ /* if the object has an __html__ method that performs the escaping */
+    html = PyObject_GetAttr(text, id_html);
+ if (html) {
+ s = PyObject_CallObject(html, NULL);
+ Py_DECREF(html);
+ if (s == NULL) {
+ return NULL;
+ }
+ /* Convert to Markup object */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+ }
+
+ /* otherwise make the object unicode if it isn't, then escape */
+ PyErr_Clear();
+ if (!PyUnicode_Check(text)) {
+ PyObject *unicode = PyObject_Str(text);
+ if (!unicode)
+ return NULL;
+ s = escape_unicode((PyUnicodeObject*)unicode);
+ Py_DECREF(unicode);
+ }
+ else
+ s = escape_unicode((PyUnicodeObject*)text);
+
+ /* convert the unicode string into a markup object. */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+}
+
+
+static PyObject*
+escape_silent(PyObject *self, PyObject *text)
+{
+ if (text != Py_None)
+ return escape(self, text);
+ return PyObject_CallFunctionObjArgs(markup, NULL);
+}
+
+
+static PyObject*
+soft_str(PyObject *self, PyObject *s)
+{
+ if (!PyUnicode_Check(s))
+ return PyObject_Str(s);
+ Py_INCREF(s);
+ return s;
+}
+
+
+static PyObject*
+soft_unicode(PyObject *self, PyObject *s)
+{
+ PyErr_WarnEx(
+ PyExc_DeprecationWarning,
+ "'soft_unicode' has been renamed to 'soft_str'. The old name"
+ " will be removed in MarkupSafe 2.1.",
+ 2
+ );
+ return soft_str(self, s);
+}
+
+
+static PyMethodDef module_methods[] = {
+ {
+ "escape",
+ (PyCFunction)escape,
+ METH_O,
+ "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
+ " the string with HTML-safe sequences. Use this if you need to display"
+ " text that might contain such characters in HTML.\n\n"
+ "If the object has an ``__html__`` method, it is called and the"
+ " return value is assumed to already be safe for HTML.\n\n"
+ ":param s: An object to be converted to a string and escaped.\n"
+ ":return: A :class:`Markup` string with the escaped text.\n"
+ },
+ {
+ "escape_silent",
+ (PyCFunction)escape_silent,
+ METH_O,
+ "Like :func:`escape` but treats ``None`` as the empty string."
+ " Useful with optional values, as otherwise you get the string"
+ " ``'None'`` when the value is ``None``.\n\n"
+ ">>> escape(None)\n"
+ "Markup('None')\n"
+ ">>> escape_silent(None)\n"
+ "Markup('')\n"
+ },
+ {
+ "soft_str",
+ (PyCFunction)soft_str,
+ METH_O,
+ "Convert an object to a string if it isn't already. This preserves"
+ " a :class:`Markup` string rather than converting it back to a basic"
+ " string, so it will still be marked as safe and won't be escaped"
+ " again.\n\n"
+ ">>> value = escape(\"<User 1>\")\n"
+ ">>> value\n"
+ "Markup('&lt;User 1&gt;')\n"
+ ">>> escape(str(value))\n"
+ "Markup('&amp;lt;User 1&amp;gt;')\n"
+ ">>> escape(soft_str(value))\n"
+ "Markup('&lt;User 1&gt;')\n"
+ },
+ {
+ "soft_unicode",
+ (PyCFunction)soft_unicode,
+ METH_O,
+ ""
+ },
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+static struct PyModuleDef module_definition = {
+ PyModuleDef_HEAD_INIT,
+ "markupsafe._speedups",
+ NULL,
+ -1,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC
+PyInit__speedups(void)
+{
+ if (!init_constants())
+ return NULL;
+
+ return PyModule_Create(&module_definition);
+}
diff --git a/lib/spack/external/_vendoring/markupsafe/_speedups.pyi b/lib/spack/external/_vendoring/markupsafe/_speedups.pyi
new file mode 100644
index 0000000000..f673240f6d
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/_speedups.pyi
@@ -0,0 +1,9 @@
+from typing import Any
+from typing import Optional
+
+from . import Markup
+
+def escape(s: Any) -> Markup: ...
+def escape_silent(s: Optional[Any]) -> Markup: ...
+def soft_str(s: Any) -> str: ...
+def soft_unicode(s: Any) -> str: ...
diff --git a/lib/spack/external/_vendoring/markupsafe/py.typed b/lib/spack/external/_vendoring/markupsafe/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/markupsafe/py.typed
diff --git a/lib/spack/external/_vendoring/pyrsistent/LICENSE.mit b/lib/spack/external/_vendoring/pyrsistent/LICENSE.mit
new file mode 100644
index 0000000000..4ceb5123c1
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/LICENSE.mit
@@ -0,0 +1,22 @@
+Copyright (c) 2021 Tobias Gustafsson
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/lib/spack/external/_vendoring/pyrsistent/__init__.py b/lib/spack/external/_vendoring/pyrsistent/__init__.py
new file mode 100644
index 0000000000..be299658f3
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/__init__.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+from pyrsistent._pmap import pmap, m, PMap
+
+from pyrsistent._pvector import pvector, v, PVector
+
+from pyrsistent._pset import pset, s, PSet
+
+from pyrsistent._pbag import pbag, b, PBag
+
+from pyrsistent._plist import plist, l, PList
+
+from pyrsistent._pdeque import pdeque, dq, PDeque
+
+from pyrsistent._checked_types import (
+ CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError,
+ CheckedValueTypeError, CheckedType, optional)
+
+from pyrsistent._field_common import (
+ field, PTypeError, pset_field, pmap_field, pvector_field)
+
+from pyrsistent._precord import PRecord
+
+from pyrsistent._pclass import PClass, PClassMeta
+
+from pyrsistent._immutable import immutable
+
+from pyrsistent._helpers import freeze, thaw, mutant
+
+from pyrsistent._transformations import inc, discard, rex, ny
+
+from pyrsistent._toolz import get_in
+
+
+__all__ = ('pmap', 'm', 'PMap',
+ 'pvector', 'v', 'PVector',
+ 'pset', 's', 'PSet',
+ 'pbag', 'b', 'PBag',
+ 'plist', 'l', 'PList',
+ 'pdeque', 'dq', 'PDeque',
+ 'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional',
+ 'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field',
+ 'PClass', 'PClassMeta',
+ 'immutable',
+ 'freeze', 'thaw', 'mutant',
+ 'get_in',
+ 'inc', 'discard', 'rex', 'ny')
diff --git a/lib/spack/external/_vendoring/pyrsistent/__init__.pyi b/lib/spack/external/_vendoring/pyrsistent/__init__.pyi
new file mode 100644
index 0000000000..5909f7991a
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/__init__.pyi
@@ -0,0 +1,213 @@
+# flake8: noqa: E704
+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
+# Stubs for pyrsistent (Python 3.6)
+
+from typing import Any
+from typing import AnyStr
+from typing import Callable
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Mapping
+from typing import MutableMapping
+from typing import Sequence
+from typing import Set
+from typing import Union
+from typing import Tuple
+from typing import Type
+from typing import TypeVar
+from typing import overload
+
+# see commit 08519aa for explanation of the re-export
+from pyrsistent.typing import CheckedKeyTypeError as CheckedKeyTypeError
+from pyrsistent.typing import CheckedPMap as CheckedPMap
+from pyrsistent.typing import CheckedPSet as CheckedPSet
+from pyrsistent.typing import CheckedPVector as CheckedPVector
+from pyrsistent.typing import CheckedType as CheckedType
+from pyrsistent.typing import CheckedValueTypeError as CheckedValueTypeError
+from pyrsistent.typing import InvariantException as InvariantException
+from pyrsistent.typing import PClass as PClass
+from pyrsistent.typing import PBag as PBag
+from pyrsistent.typing import PDeque as PDeque
+from pyrsistent.typing import PList as PList
+from pyrsistent.typing import PMap as PMap
+from pyrsistent.typing import PMapEvolver as PMapEvolver
+from pyrsistent.typing import PSet as PSet
+from pyrsistent.typing import PSetEvolver as PSetEvolver
+from pyrsistent.typing import PTypeError as PTypeError
+from pyrsistent.typing import PVector as PVector
+from pyrsistent.typing import PVectorEvolver as PVectorEvolver
+
+T = TypeVar('T')
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+
+def pmap(initial: Union[Mapping[KT, VT], Iterable[Tuple[KT, VT]]] = {}, pre_size: int = 0) -> PMap[KT, VT]: ...
+def m(**kwargs: VT) -> PMap[str, VT]: ...
+
+def pvector(iterable: Iterable[T] = ...) -> PVector[T]: ...
+def v(*iterable: T) -> PVector[T]: ...
+
+def pset(iterable: Iterable[T] = (), pre_size: int = 8) -> PSet[T]: ...
+def s(*iterable: T) -> PSet[T]: ...
+
+# see class_test.py for use cases
+Invariant = Tuple[bool, Optional[Union[str, Callable[[], str]]]]
+
+@overload
+def field(
+ type: Union[Type[T], Sequence[Type[T]]] = ...,
+ invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
+ initial: Any = object(),
+ mandatory: bool = False,
+ factory: Callable[[Any], T] = lambda x: x,
+ serializer: Callable[[Any, T], Any] = lambda _, value: value,
+) -> T: ...
+# The actual return value (_PField) is irrelevant after a PRecord has been instantiated,
+# see https://github.com/tobgu/pyrsistent/blob/master/pyrsistent/_precord.py#L10
+@overload
+def field(
+ type: Any = ...,
+ invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
+ initial: Any = object(),
+ mandatory: bool = False,
+ factory: Callable[[Any], Any] = lambda x: x,
+ serializer: Callable[[Any, Any], Any] = lambda _, value: value,
+) -> Any: ...
+
+# Use precise types for the simplest use cases, but fall back to Any for
+# everything else. See record_test.py for the wide range of possible types for
+# item_type
+@overload
+def pset_field(
+ item_type: Type[T],
+ optional: bool = False,
+ initial: Iterable[T] = ...,
+) -> PSet[T]: ...
+@overload
+def pset_field(
+ item_type: Any,
+ optional: bool = False,
+ initial: Any = (),
+) -> PSet[Any]: ...
+
+@overload
+def pmap_field(
+ key_type: Type[KT],
+ value_type: Type[VT],
+ optional: bool = False,
+ invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
+) -> PMap[KT, VT]: ...
+@overload
+def pmap_field(
+ key_type: Any,
+ value_type: Any,
+ optional: bool = False,
+ invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
+) -> PMap[Any, Any]: ...
+
+@overload
+def pvector_field(
+ item_type: Type[T],
+ optional: bool = False,
+ initial: Iterable[T] = ...,
+) -> PVector[T]: ...
+@overload
+def pvector_field(
+ item_type: Any,
+ optional: bool = False,
+ initial: Any = (),
+) -> PVector[Any]: ...
+
+def pbag(elements: Iterable[T]) -> PBag[T]: ...
+def b(*elements: T) -> PBag[T]: ...
+
+def plist(iterable: Iterable[T] = (), reverse: bool = False) -> PList[T]: ...
+def l(*elements: T) -> PList[T]: ...
+
+def pdeque(iterable: Optional[Iterable[T]] = None, maxlen: Optional[int] = None) -> PDeque[T]: ...
+def dq(*iterable: T) -> PDeque[T]: ...
+
+@overload
+def optional(type: T) -> Tuple[T, Type[None]]: ...
+@overload
+def optional(*typs: Any) -> Tuple[Any, ...]: ...
+
+T_PRecord = TypeVar('T_PRecord', bound='PRecord')
+class PRecord(PMap[AnyStr, Any]):
+ _precord_fields: Mapping
+ _precord_initial_values: Mapping
+
+ def __hash__(self) -> int: ...
+ def __init__(self, **kwargs: Any) -> None: ...
+ def __iter__(self) -> Iterator[Any]: ...
+ def __len__(self) -> int: ...
+ @classmethod
+ def create(
+ cls: Type[T_PRecord],
+ kwargs: Mapping,
+ _factory_fields: Optional[Iterable] = None,
+ ignore_extra: bool = False,
+ ) -> T_PRecord: ...
+ # This is OK because T_PRecord is a concrete type
+ def discard(self: T_PRecord, key: KT) -> T_PRecord: ...
+ def remove(self: T_PRecord, key: KT) -> T_PRecord: ...
+
+ def serialize(self, format: Optional[Any] = ...) -> MutableMapping: ...
+
+ # From pyrsistent documentation:
+ # This set function differs slightly from that in the PMap
+ # class. First of all it accepts key-value pairs. Second it accepts multiple key-value
+ # pairs to perform one, atomic, update of multiple fields.
+ @overload
+ def set(self, key: KT, val: VT) -> Any: ...
+ @overload
+ def set(self, **kwargs: VT) -> Any: ...
+
+def immutable(
+ members: Union[str, Iterable[str]] = '',
+ name: str = 'Immutable',
+ verbose: bool = False,
+) -> Tuple: ... # actually a namedtuple
+
+# ignore mypy warning "Overloaded function signatures 1 and 5 overlap with
+# incompatible return types"
+@overload
+def freeze(o: Mapping[KT, VT]) -> PMap[KT, VT]: ... # type: ignore
+@overload
+def freeze(o: List[T]) -> PVector[T]: ... # type: ignore
+@overload
+def freeze(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
+@overload
+def freeze(o: Set[T]) -> PSet[T]: ... # type: ignore
+@overload
+def freeze(o: T) -> T: ...
+
+
+@overload
+def thaw(o: PMap[KT, VT]) -> MutableMapping[KT, VT]: ... # type: ignore
+@overload
+def thaw(o: PVector[T]) -> List[T]: ... # type: ignore
+@overload
+def thaw(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
+# collections.abc.MutableSet is kind of garbage:
+# https://stackoverflow.com/questions/24977898/why-does-collections-mutableset-not-bestow-an-update-method
+@overload
+def thaw(o: PSet[T]) -> Set[T]: ... # type: ignore
+@overload
+def thaw(o: T) -> T: ...
+
+def mutant(fn: Callable) -> Callable: ...
+
+def inc(x: int) -> int: ...
+@overload
+def discard(evolver: PMapEvolver[KT, VT], key: KT) -> None: ...
+@overload
+def discard(evolver: PVectorEvolver[T], key: int) -> None: ...
+@overload
+def discard(evolver: PSetEvolver[T], key: T) -> None: ...
+def rex(expr: str) -> Callable[[Any], bool]: ...
+def ny(_: Any) -> bool: ...
+
+def get_in(keys: Iterable, coll: Mapping, default: Optional[Any] = None, no_default: bool = False) -> Any: ...
diff --git a/lib/spack/external/_vendoring/pyrsistent/_checked_types.py b/lib/spack/external/_vendoring/pyrsistent/_checked_types.py
new file mode 100644
index 0000000000..8ab8c2a8cc
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_checked_types.py
@@ -0,0 +1,542 @@
+from enum import Enum
+
+from abc import abstractmethod, ABCMeta
+from collections.abc import Iterable
+
+from pyrsistent._pmap import PMap, pmap
+from pyrsistent._pset import PSet, pset
+from pyrsistent._pvector import PythonPVector, python_pvector
+
+
+class CheckedType(object):
+ """
+ Marker class to enable creation and serialization of checked object graphs.
+ """
+ __slots__ = ()
+
+ @classmethod
+ @abstractmethod
+ def create(cls, source_data, _factory_fields=None):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def serialize(self, format=None):
+ raise NotImplementedError()
+
+
+def _restore_pickle(cls, data):
+ return cls.create(data, _factory_fields=set())
+
+
+class InvariantException(Exception):
+ """
+ Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
+ field is missing.
+
+ Contains two fields of interest:
+ invariant_errors, a tuple of error data for the failing invariants
+ missing_fields, a tuple of strings specifying the missing names
+ """
+
+ def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
+ self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes)
+ self.missing_fields = missing_fields
+ super(InvariantException, self).__init__(*args, **kwargs)
+
+ def __str__(self):
+ return super(InvariantException, self).__str__() + \
+ ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format(
+ invariant_errors=', '.join(str(e) for e in self.invariant_errors),
+ missing_fields=', '.join(self.missing_fields))
+
+
+_preserved_iterable_types = (
+ Enum,
+)
+"""Some types are themselves iterable, but we want to use the type itself and
+not its members for the type specification. This defines a set of such types
+that we explicitly preserve.
+
+Note that strings are not such types because the string inputs we pass in are
+values, not types.
+"""
+
+
+def maybe_parse_user_type(t):
+ """Try to coerce a user-supplied type directive into a list of types.
+
+ This function should be used in all places where a user specifies a type,
+ for consistency.
+
+ The policy for what defines valid user input should be clear from the implementation.
+ """
+ is_type = isinstance(t, type)
+ is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
+ is_string = isinstance(t, str)
+ is_iterable = isinstance(t, Iterable)
+
+ if is_preserved:
+ return [t]
+ elif is_string:
+ return [t]
+ elif is_type and not is_iterable:
+ return [t]
+ elif is_iterable:
+ # Recur to validate contained types as well.
+ ts = t
+ return tuple(e for t in ts for e in maybe_parse_user_type(t))
+ else:
+        # If formatting `t` for the error message raises, so be it.
+ raise TypeError(
+ 'Type specifications must be types or strings. Input: {}'.format(t)
+ )
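+
+# Illustrative (hedged) behaviors of maybe_parse_user_type:
+#   maybe_parse_user_type(int)          -> [int]
+#   maybe_parse_user_type('foo.Bar')    -> ['foo.Bar']
+#   maybe_parse_user_type((int, float)) -> (int, float)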
+
+
+def maybe_parse_many_user_types(ts):
+ # Just a different name to communicate that you're parsing multiple user
+ # inputs. `maybe_parse_user_type` handles the iterable case anyway.
+ return maybe_parse_user_type(ts)
+
+
+def _store_types(dct, bases, destination_name, source_name):
+ maybe_types = maybe_parse_many_user_types([
+ d[source_name]
+ for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d
+ ])
+
+ dct[destination_name] = maybe_types
+
+
+def _merge_invariant_results(result):
+ verdict = True
+ data = []
+ for verd, dat in result:
+ if not verd:
+ verdict = False
+ data.append(dat)
+
+ return verdict, tuple(data)
+
+
+def wrap_invariant(invariant):
+ # Invariant functions may return the outcome of several tests
+ # In those cases the results have to be merged before being passed
+ # back to the client.
+ def f(*args, **kwargs):
+ result = invariant(*args, **kwargs)
+ if isinstance(result[0], bool):
+ return result
+
+ return _merge_invariant_results(result)
+
+ return f
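+
+# Illustrative (hedged): when an invariant returns several results, only the
+# failing outcomes survive the merge, e.g.
+#   ((True, None), (False, 'too small')) -> (False, ('too small',))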
+
+
+def _all_dicts(bases, seen=None):
+ """
+ Yield each class in ``bases`` and each of their base classes.
+ """
+ if seen is None:
+ seen = set()
+ for cls in bases:
+ if cls in seen:
+ continue
+ seen.add(cls)
+ yield cls.__dict__
+ for b in _all_dicts(cls.__bases__, seen):
+ yield b
+
+
+def store_invariants(dct, bases, destination_name, source_name):
+ # Invariants are inherited
+ invariants = []
+ for ns in [dct] + list(_all_dicts(bases)):
+ try:
+ invariant = ns[source_name]
+ except KeyError:
+ continue
+ invariants.append(invariant)
+
+ if not all(callable(invariant) for invariant in invariants):
+ raise TypeError('Invariants must be callable')
+ dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
+
+
+class _CheckedTypeMeta(ABCMeta):
+ def __new__(mcs, name, bases, dct):
+ _store_types(dct, bases, '_checked_types', '__type__')
+ store_invariants(dct, bases, '_checked_invariants', '__invariant__')
+
+ def default_serializer(self, _, value):
+ if isinstance(value, CheckedType):
+ return value.serialize()
+ return value
+
+ dct.setdefault('__serializer__', default_serializer)
+
+ dct['__slots__'] = ()
+
+ return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
+
+
+class CheckedTypeError(TypeError):
+ def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs):
+ super(CheckedTypeError, self).__init__(*args, **kwargs)
+ self.source_class = source_class
+ self.expected_types = expected_types
+ self.actual_type = actual_type
+ self.actual_value = actual_value
+
+
+class CheckedKeyTypeError(CheckedTypeError):
+ """
+ Raised when trying to set a value using a key with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the collection
+ expected_types -- Allowed types
+ actual_type -- The non matching type
+ actual_value -- Value of the variable with the non matching type
+ """
+ pass
+
+
+class CheckedValueTypeError(CheckedTypeError):
+ """
+    Raised when trying to set a value with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the collection
+ expected_types -- Allowed types
+ actual_type -- The non matching type
+ actual_value -- Value of the variable with the non matching type
+ """
+ pass
+
+
+def _get_class(type_name):
+ module_name, class_name = type_name.rsplit('.', 1)
+ module = __import__(module_name, fromlist=[class_name])
+ return getattr(module, class_name)
+
+
+def get_type(typ):
+ if isinstance(typ, type):
+ return typ
+
+ return _get_class(typ)
+
+
+def get_types(typs):
+ return [get_type(typ) for typ in typs]
+
+
+def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError):
+ if expected_types:
+ for e in it:
+ if not any(isinstance(e, get_type(t)) for t in expected_types):
+ actual_type = type(e)
+ msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format(
+ source_class=source_class.__name__,
+ expected_types=tuple(get_type(et).__name__ for et in expected_types),
+ actual_type=actual_type.__name__)
+ raise exception_type(source_class, expected_types, actual_type, e, msg)
+
+
+def _invariant_errors(elem, invariants):
+ return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid]
+
+
+def _invariant_errors_iterable(it, invariants):
+ return sum([_invariant_errors(elem, invariants) for elem in it], [])
+
+
+def optional(*typs):
+ """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """
+ return tuple(typs) + (type(None),)
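+
+# e.g. (illustrative) optional(int, str) == (int, str, type(None)), so a field
+# typed this way also accepts None.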
+
+
+def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False):
+ if isinstance(source_data, cls):
+ return source_data
+
+    # Recursively apply create methods of checked types if the types of the supplied data
+    # do not match any of the valid types.
+ types = get_types(cls._checked_types)
+ checked_type = next((t for t in types if issubclass(t, CheckedType)), None)
+ if checked_type:
+ return cls([checked_type.create(data, ignore_extra=ignore_extra)
+ if not any(isinstance(data, t) for t in types) else data
+ for data in source_data])
+
+ return cls(source_data)
+
+class CheckedPVector(PythonPVector, CheckedType, metaclass=_CheckedTypeMeta):
+ """
+ A CheckedPVector is a PVector which allows specifying type and invariant checks.
+
+ >>> class Positives(CheckedPVector):
+ ... __type__ = (int, float)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> Positives([1, 2, 3])
+ Positives([1, 2, 3])
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial=()):
+ if type(initial) == PythonPVector:
+ return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail)
+
+ return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent()
+
+ def set(self, key, value):
+ return self.evolver().set(key, value).persistent()
+
+ def append(self, val):
+ return self.evolver().append(val).persistent()
+
+ def extend(self, it):
+ return self.evolver().extend(it).persistent()
+
+ create = classmethod(_checked_type_create)
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return list(serializer(format, v) for v in self)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, list(self),)
+
+ class Evolver(PythonPVector.Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, vector):
+ super(CheckedPVector.Evolver, self).__init__(vector)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def _check(self, it):
+ _check_types(it, self._destination_class._checked_types, self._destination_class)
+ error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
+ self._invariant_errors.extend(error_data)
+
+ def __setitem__(self, key, value):
+ self._check([value])
+ return super(CheckedPVector.Evolver, self).__setitem__(key, value)
+
+ def append(self, elem):
+ self._check([elem])
+ return super(CheckedPVector.Evolver, self).append(elem)
+
+ def extend(self, it):
+ it = list(it)
+ self._check(it)
+ return super(CheckedPVector.Evolver, self).extend(it)
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ result = self._orig_pvector
+ if self.is_dirty() or (self._destination_class != type(self._orig_pvector)):
+ pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail)
+ result = self._destination_class(pv)
+ self._reset(result)
+
+ return result
+
+ def __repr__(self):
+ return self.__class__.__name__ + "({0})".format(self.tolist())
+
+ __str__ = __repr__
+
+ def evolver(self):
+ return CheckedPVector.Evolver(self.__class__, self)
+
+
+class CheckedPSet(PSet, CheckedType, metaclass=_CheckedTypeMeta):
+ """
+ A CheckedPSet is a PSet which allows specifying type and invariant checks.
+
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (int, float)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> Positives([1, 2, 3])
+ Positives([1, 2, 3])
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial=()):
+ if type(initial) is PMap:
+ return super(CheckedPSet, cls).__new__(cls, initial)
+
+ evolver = CheckedPSet.Evolver(cls, pset())
+ for e in initial:
+ evolver.add(e)
+
+ return evolver.persistent()
+
+ def __repr__(self):
+ return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:]
+
+ def __str__(self):
+ return self.__repr__()
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return set(serializer(format, v) for v in self)
+
+ create = classmethod(_checked_type_create)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, list(self),)
+
+ def evolver(self):
+ return CheckedPSet.Evolver(self.__class__, self)
+
+ class Evolver(PSet._Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, original_set):
+ super(CheckedPSet.Evolver, self).__init__(original_set)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def _check(self, it):
+ _check_types(it, self._destination_class._checked_types, self._destination_class)
+ error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
+ self._invariant_errors.extend(error_data)
+
+ def add(self, element):
+ self._check([element])
+ self._pmap_evolver[element] = True
+ return self
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ if self.is_dirty() or self._destination_class != type(self._original_pset):
+ return self._destination_class(self._pmap_evolver.persistent())
+
+ return self._original_pset
+
+
+class _CheckedMapTypeMeta(type):
+ def __new__(mcs, name, bases, dct):
+ _store_types(dct, bases, '_checked_key_types', '__key_type__')
+ _store_types(dct, bases, '_checked_value_types', '__value_type__')
+ store_invariants(dct, bases, '_checked_invariants', '__invariant__')
+
+ def default_serializer(self, _, key, value):
+ sk = key
+ if isinstance(key, CheckedType):
+ sk = key.serialize()
+
+ sv = value
+ if isinstance(value, CheckedType):
+ sv = value.serialize()
+
+ return sk, sv
+
+ dct.setdefault('__serializer__', default_serializer)
+
+ dct['__slots__'] = ()
+
+ return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct)
+
+# Marker object
+_UNDEFINED_CHECKED_PMAP_SIZE = object()
+
+
+class CheckedPMap(PMap, CheckedType, metaclass=_CheckedMapTypeMeta):
+ """
+ A CheckedPMap is a PMap which allows specifying type and invariant checks.
+
+ >>> class IntToFloatMap(CheckedPMap):
+ ... __key_type__ = int
+ ... __value_type__ = float
+ ... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
+ ...
+ >>> IntToFloatMap({1: 1.5, 2: 2.25})
+ IntToFloatMap({1: 1.5, 2: 2.25})
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
+ if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
+ return super(CheckedPMap, cls).__new__(cls, size, initial)
+
+ evolver = CheckedPMap.Evolver(cls, pmap())
+ for k, v in initial.items():
+ evolver.set(k, v)
+
+ return evolver.persistent()
+
+ def evolver(self):
+ return CheckedPMap.Evolver(self.__class__, self)
+
+ def __repr__(self):
+ return self.__class__.__name__ + "({0})".format(str(dict(self)))
+
+ __str__ = __repr__
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return dict(serializer(format, k, v) for k, v in self.items())
+
+ @classmethod
+ def create(cls, source_data, _factory_fields=None):
+ if isinstance(source_data, cls):
+ return source_data
+
+        # Recursively apply create methods of checked types if the types of the supplied data
+        # do not match any of the valid types.
+ key_types = get_types(cls._checked_key_types)
+ checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
+ value_types = get_types(cls._checked_value_types)
+ checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)
+
+ if checked_key_type or checked_value_type:
+ return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
+ checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
+ for key, value in source_data.items()))
+
+ return cls(source_data)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, dict(self),)
+
+ class Evolver(PMap._Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, original_map):
+ super(CheckedPMap.Evolver, self).__init__(original_map)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def set(self, key, value):
+ _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
+ _check_types([value], self._destination_class._checked_value_types, self._destination_class)
+ self._invariant_errors.extend(data for valid, data in (invariant(key, value)
+ for invariant in self._destination_class._checked_invariants)
+ if not valid)
+
+ return super(CheckedPMap.Evolver, self).set(key, value)
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ if self.is_dirty() or type(self._original_pmap) != self._destination_class:
+ return self._destination_class(self._buckets_evolver.persistent(), self._size)
+
+ return self._original_pmap
diff --git a/lib/spack/external/_vendoring/pyrsistent/_field_common.py b/lib/spack/external/_vendoring/pyrsistent/_field_common.py
new file mode 100644
index 0000000000..9ddfe29d2d
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_field_common.py
@@ -0,0 +1,329 @@
+import sys
+
+from pyrsistent._checked_types import (
+ CheckedPMap,
+ CheckedPSet,
+ CheckedPVector,
+ CheckedType,
+ InvariantException,
+ _restore_pickle,
+ get_type,
+ maybe_parse_user_type,
+ maybe_parse_many_user_types,
+)
+from pyrsistent._checked_types import optional as optional_type
+from pyrsistent._checked_types import wrap_invariant
+import inspect
+
+PY2 = sys.version_info[0] < 3
+
+
+def set_fields(dct, bases, name):
+ dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], []))
+
+ for k, v in list(dct.items()):
+ if isinstance(v, _PField):
+ dct[name][k] = v
+ del dct[k]
+
+
+def check_global_invariants(subject, invariants):
+ error_codes = tuple(error_code for is_ok, error_code in
+ (invariant(subject) for invariant in invariants) if not is_ok)
+ if error_codes:
+ raise InvariantException(error_codes, (), 'Global invariant failed')
+
+
+def serialize(serializer, format, value):
+ if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER:
+ return value.serialize(format)
+
+ return serializer(format, value)
+
+
+def check_type(destination_cls, field, name, value):
+ if field.type and not any(isinstance(value, get_type(t)) for t in field.type):
+ actual_type = type(value)
+ message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
+ raise PTypeError(destination_cls, name, field.type, actual_type, message)
+
+
+def is_type_cls(type_cls, field_type):
+ if type(field_type) is set:
+ return True
+ types = tuple(field_type)
+ if len(types) == 0:
+ return False
+ return issubclass(get_type(types[0]), type_cls)
+
+
+def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
+    # ignore_extra defaults to False; for speed there is no need to propagate a False value
+ if not ignore_extra:
+ return False
+
+ if not is_type_cls(type_cls, field.type):
+ return False
+
+ if PY2:
+ return 'ignore_extra' in inspect.getargspec(field.factory).args
+ else:
+ return 'ignore_extra' in inspect.signature(field.factory).parameters
+
+
+class _PField(object):
+ __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
+
+ def __init__(self, type, invariant, initial, mandatory, factory, serializer):
+ self.type = type
+ self.invariant = invariant
+ self.initial = initial
+ self.mandatory = mandatory
+ self._factory = factory
+ self.serializer = serializer
+
+ @property
+ def factory(self):
+ # If no factory is specified and the type is another CheckedType use the factory method of that CheckedType
+ if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
+ typ = get_type(tuple(self.type)[0])
+ if issubclass(typ, CheckedType):
+ return typ.create
+
+ return self._factory
+
+PFIELD_NO_TYPE = ()
+PFIELD_NO_INVARIANT = lambda _: (True, None)
+PFIELD_NO_FACTORY = lambda x: x
+PFIELD_NO_INITIAL = object()
+PFIELD_NO_SERIALIZER = lambda _, value: value
+
+
+def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
+ mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
+ """
+ Field specification factory for :py:class:`PRecord`.
+
+ :param type: a type or iterable with types that are allowed for this field
+ :param invariant: a function specifying an invariant that must hold for the field
+ :param initial: value of field if not specified when instantiating the record
+ :param mandatory: boolean specifying if the field is mandatory or not
+ :param factory: function called when field is set.
+ :param serializer: function that returns a serialized version of the field
+ """
+
+ # NB: We have to check this predicate separately from the predicates in
+ # `maybe_parse_user_type` et al. because this one is related to supporting
+ # the argspec for `field`, while those are related to supporting the valid
+ # ways to specify types.
+
+ # Multiple types must be passed in one of the following containers. Note
+ # that a type that is a subclass of one of these containers, like a
+ # `collections.namedtuple`, will work as expected, since we check
+ # `isinstance` and not `issubclass`.
+ if isinstance(type, (list, set, tuple)):
+ types = set(maybe_parse_many_user_types(type))
+ else:
+ types = set(maybe_parse_user_type(type))
+
+ invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
+ field = _PField(type=types, invariant=invariant_function, initial=initial,
+ mandatory=mandatory, factory=factory, serializer=serializer)
+
+ _check_field_parameters(field)
+
+ return field
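+
+# Illustrative sketch (hedged): a mandatory, non-negative integer field with a
+# default of 0.
+#   age = field(type=int, initial=0, mandatory=True,
+#               invariant=lambda a: (a >= 0, 'age must be non-negative'))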
+
+
+def _check_field_parameters(field):
+ for t in field.type:
+ if not isinstance(t, type) and not isinstance(t, str):
+ raise TypeError('Type parameter expected, not {0}'.format(type(t)))
+
+ if field.initial is not PFIELD_NO_INITIAL and \
+ not callable(field.initial) and \
+ field.type and not any(isinstance(field.initial, t) for t in field.type):
+ raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
+
+ if not callable(field.invariant):
+ raise TypeError('Invariant must be callable')
+
+ if not callable(field.factory):
+ raise TypeError('Factory must be callable')
+
+ if not callable(field.serializer):
+ raise TypeError('Serializer must be callable')
+
+
+class PTypeError(TypeError):
+ """
+ Raised when trying to assign a value with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the record
+ field -- Field name
+ expected_types -- Types allowed for the field
+ actual_type -- The non matching type
+ """
+ def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
+ super(PTypeError, self).__init__(*args, **kwargs)
+ self.source_class = source_class
+ self.field = field
+ self.expected_types = expected_types
+ self.actual_type = actual_type
+
+
+SEQ_FIELD_TYPE_SUFFIXES = {
+ CheckedPVector: "PVector",
+ CheckedPSet: "PSet",
+}
+
+# Global dictionary to hold auto-generated field types: used for unpickling
+_seq_field_types = {}
+
+def _restore_seq_field_pickle(checked_class, item_type, data):
+ """Unpickling function for auto-generated PVec/PSet field types."""
+ type_ = _seq_field_types[checked_class, item_type]
+ return _restore_pickle(type_, data)
+
+def _types_to_names(types):
+ """Convert a tuple of types to a human-readable string."""
+ return "".join(get_type(typ).__name__.capitalize() for typ in types)
+
+def _make_seq_field_type(checked_class, item_type):
+ """Create a subclass of the given checked class with the given item type."""
+ type_ = _seq_field_types.get((checked_class, item_type))
+ if type_ is not None:
+ return type_
+
+ class TheType(checked_class):
+ __type__ = item_type
+
+ def __reduce__(self):
+ return (_restore_seq_field_pickle,
+ (checked_class, item_type, list(self)))
+
+ suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
+ TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
+ _seq_field_types[checked_class, item_type] = TheType
+ return TheType
+
+def _sequence_field(checked_class, item_type, optional, initial):
+ """
+ Create checked field for either ``PSet`` or ``PVector``.
+
+ :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
+    :param item_type: The required type for the items in the collection.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory.
+
+ :return: A ``field`` containing a checked class.
+ """
+ TheType = _make_seq_field_type(checked_class, item_type)
+
+ if optional:
+ def factory(argument, _factory_fields=None, ignore_extra=False):
+ if argument is None:
+ return None
+ else:
+ return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
+ else:
+ factory = TheType.create
+
+ return field(type=optional_type(TheType) if optional else TheType,
+ factory=factory, mandatory=True,
+ initial=factory(initial))
+
+
+def pset_field(item_type, optional=False, initial=()):
+ """
+ Create checked ``PSet`` field.
+
+ :param item_type: The required type for the items in the set.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory if no value is given
+ for the field.
+
+ :return: A ``field`` containing a ``CheckedPSet`` of the given type.
+ """
+ return _sequence_field(CheckedPSet, item_type, optional,
+ initial)
+
+
+def pvector_field(item_type, optional=False, initial=()):
+ """
+ Create checked ``PVector`` field.
+
+ :param item_type: The required type for the items in the vector.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory if no value is given
+ for the field.
+
+ :return: A ``field`` containing a ``CheckedPVector`` of the given type.
+ """
+ return _sequence_field(CheckedPVector, item_type, optional,
+ initial)
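+
+# Illustrative (hedged): pset_field(str) declares a field holding a CheckedPSet
+# of str (auto-generated type StrPSet); pvector_field(int) likewise yields a
+# CheckedPVector field whose generated type is named IntPVector.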
+
+
+_valid = lambda item: (True, "")
+
+
+# Global dictionary to hold auto-generated field types: used for unpickling
+_pmap_field_types = {}
+
+def _restore_pmap_field_pickle(key_type, value_type, data):
+ """Unpickling function for auto-generated PMap field types."""
+ type_ = _pmap_field_types[key_type, value_type]
+ return _restore_pickle(type_, data)
+
+def _make_pmap_field_type(key_type, value_type):
+ """Create a subclass of CheckedPMap with the given key and value types."""
+ type_ = _pmap_field_types.get((key_type, value_type))
+ if type_ is not None:
+ return type_
+
+ class TheMap(CheckedPMap):
+ __key_type__ = key_type
+ __value_type__ = value_type
+
+ def __reduce__(self):
+ return (_restore_pmap_field_pickle,
+ (self.__key_type__, self.__value_type__, dict(self)))
+
+ TheMap.__name__ = "{0}To{1}PMap".format(
+ _types_to_names(TheMap._checked_key_types),
+ _types_to_names(TheMap._checked_value_types))
+ _pmap_field_types[key_type, value_type] = TheMap
+ return TheMap
+
+
+def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
+ """
+ Create a checked ``PMap`` field.
+
+    :param key_type: The required type for the keys of the map.
+    :param value_type: The required type for the values of the map.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param invariant: Pass-through to ``field``.
+
+ :return: A ``field`` containing a ``CheckedPMap``.
+ """
+ TheMap = _make_pmap_field_type(key_type, value_type)
+
+ if optional:
+ def factory(argument):
+ if argument is None:
+ return None
+ else:
+ return TheMap.create(argument)
+ else:
+ factory = TheMap.create
+
+ return field(mandatory=True, initial=TheMap(),
+ type=optional_type(TheMap) if optional else TheMap,
+ factory=factory, invariant=invariant)
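+
+# Illustrative (hedged): scores = pmap_field(str, int) declares a field whose
+# value is the auto-generated CheckedPMap subclass StrToIntPMap.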
diff --git a/lib/spack/external/_vendoring/pyrsistent/_helpers.py b/lib/spack/external/_vendoring/pyrsistent/_helpers.py
new file mode 100644
index 0000000000..1320e65768
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_helpers.py
@@ -0,0 +1,97 @@
+from functools import wraps
+from pyrsistent._pmap import PMap, pmap
+from pyrsistent._pset import PSet, pset
+from pyrsistent._pvector import PVector, pvector
+
+def freeze(o, strict=True):
+ """
+ Recursively convert simple Python containers into pyrsistent versions
+ of those containers.
+
+ - list is converted to pvector, recursively
+ - dict is converted to pmap, recursively on values (but not keys)
+ - set is converted to pset, but not recursively
+ - tuple is converted to tuple, recursively.
+
+ If strict == True (default):
+
+ - freeze is called on elements of pvectors
+ - freeze is called on values of pmaps
+
+ Sets and dict keys are not recursively frozen because they do not contain
+ mutable data by convention. The main exception to this rule is that
+ dict keys and set elements are often instances of mutable objects that
+ support hash-by-id, which this function can't convert anyway.
+
+ >>> freeze(set([1, 2]))
+ pset([1, 2])
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> freeze((1, []))
+ (1, pvector([]))
+ """
+ typ = type(o)
+ if typ is dict or (strict and isinstance(o, PMap)):
+ return pmap({k: freeze(v, strict) for k, v in o.items()})
+ if typ is list or (strict and isinstance(o, PVector)):
+ curried_freeze = lambda x: freeze(x, strict)
+ return pvector(map(curried_freeze, o))
+ if typ is tuple:
+ curried_freeze = lambda x: freeze(x, strict)
+ return tuple(map(curried_freeze, o))
+ if typ is set:
+ # impossible to have anything that needs freezing inside a set or pset
+ return pset(o)
+ return o
+
+
+def thaw(o, strict=True):
+ """
+ Recursively convert pyrsistent containers into simple Python containers.
+
+ - pvector is converted to list, recursively
+ - pmap is converted to dict, recursively on values (but not keys)
+ - pset is converted to set, but not recursively
+ - tuple is converted to tuple, recursively.
+
+ If strict == True (the default):
+
+ - thaw is called on elements of lists
+ - thaw is called on values in dicts
+
+ >>> from pyrsistent import s, m, v
+ >>> thaw(s(1, 2))
+ {1, 2}
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+ >>> thaw((1, v()))
+ (1, [])
+ """
+ typ = type(o)
+ if isinstance(o, PVector) or (strict and typ is list):
+ curried_thaw = lambda x: thaw(x, strict)
+ return list(map(curried_thaw, o))
+ if isinstance(o, PMap) or (strict and typ is dict):
+ return {k: thaw(v, strict) for k, v in o.items()}
+ if typ is tuple:
+ curried_thaw = lambda x: thaw(x, strict)
+ return tuple(map(curried_thaw, o))
+ if isinstance(o, PSet):
+ # impossible to thaw inside psets or sets
+ return set(o)
+ return o
+
+
+def mutant(fn):
+ """
+ Convenience decorator to isolate mutation to within the decorated function (with respect
+ to the input arguments).
+
+ All arguments to the decorated function will be frozen so that they are guaranteed not to change.
+ The return value is also frozen.
+ """
+ @wraps(fn)
+ def inner_f(*args, **kwargs):
+ return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items())))
+
+ return inner_f
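+
+# Illustrative sketch (hedged): arguments arrive frozen and the result is
+# frozen again, so callers only ever see persistent data.
+#   @mutant
+#   def with_retries(config):            # config is frozen, e.g. a PMap
+#       return dict(config, retries=3)   # plain dict, frozen to a PMap on return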
diff --git a/lib/spack/external/_vendoring/pyrsistent/_immutable.py b/lib/spack/external/_vendoring/pyrsistent/_immutable.py
new file mode 100644
index 0000000000..7c75945336
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_immutable.py
@@ -0,0 +1,103 @@
+import sys
+
+
+def immutable(members='', name='Immutable', verbose=False):
+ """
+    Produces a class that can be used either standalone or as a base class for persistent classes.
+
+ This is a thin wrapper around a named tuple.
+
+ Constructing a type and using it to instantiate objects:
+
+ >>> Point = immutable('x, y', name='Point')
+ >>> p = Point(1, 2)
+ >>> p2 = p.set(x=3)
+ >>> p
+ Point(x=1, y=2)
+ >>> p2
+ Point(x=3, y=2)
+
+ Inheriting from a constructed type. In this case no type name needs to be supplied:
+
+ >>> class PositivePoint(immutable('x, y')):
+ ... __slots__ = tuple()
+ ... def __new__(cls, x, y):
+ ... if x > 0 and y > 0:
+ ... return super(PositivePoint, cls).__new__(cls, x, y)
+ ... raise Exception('Coordinates must be positive!')
+ ...
+ >>> p = PositivePoint(1, 2)
+ >>> p.set(x=3)
+ PositivePoint(x=3, y=2)
+ >>> p.set(y=-3)
+ Traceback (most recent call last):
+ Exception: Coordinates must be positive!
+
+ The persistent class also supports the notion of frozen members. The value of a frozen member
+ cannot be updated. For example it could be used to implement an ID that should remain the same
+ over time. A frozen member is denoted by a trailing underscore.
+
+ >>> Point = immutable('x, y, id_', name='Point')
+ >>> p = Point(1, 2, id_=17)
+ >>> p.set(x=3)
+ Point(x=3, y=2, id_=17)
+ >>> p.set(id_=18)
+ Traceback (most recent call last):
+ AttributeError: Cannot set frozen members id_
+ """
+
+ if isinstance(members, str):
+ members = members.replace(',', ' ').split()
+
+ def frozen_member_test():
+ frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
+ if frozen_members:
+ return """
+ frozen_fields = fields_to_modify & set([{frozen_members}])
+ if frozen_fields:
+ raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
+ """.format(frozen_members=', '.join(frozen_members))
+
+ return ''
+
+ verbose_string = ""
+ if sys.version_info < (3, 7):
+        # The verbose parameter of namedtuple was removed in Python 3.7
+ verbose_string = ", verbose={verbose}".format(verbose=verbose)
+
+ quoted_members = ', '.join("'%s'" % m for m in members)
+ template = """
+class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
+ __slots__ = tuple()
+
+ def __repr__(self):
+ return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
+
+ def set(self, **kwargs):
+ if not kwargs:
+ return self
+
+ fields_to_modify = set(kwargs.keys())
+ if not fields_to_modify <= {member_set}:
+ raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
+
+ {frozen_member_test}
+
+ return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
+""".format(quoted_members=quoted_members,
+ member_set="set([%s])" % quoted_members if quoted_members else 'set()',
+ frozen_member_test=frozen_member_test(),
+ verbose_string=verbose_string,
+ class_name=name)
+
+ if verbose:
+ print(template)
+
+ from collections import namedtuple
+ namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
+ try:
+ exec(template, namespace)
+ except SyntaxError as e:
+ raise SyntaxError(str(e) + ':\n' + template) from e
+
+ return namespace[name]
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pbag.py b/lib/spack/external/_vendoring/pyrsistent/_pbag.py
new file mode 100644
index 0000000000..9cf5840b72
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pbag.py
@@ -0,0 +1,267 @@
+from collections.abc import Container, Iterable, Sized, Hashable
+from functools import reduce
+from pyrsistent._pmap import pmap
+
+
+def _add_to_counters(counters, element):
+ return counters.set(element, counters.get(element, 0) + 1)
+
+
+class PBag(object):
+ """
+ A persistent bag/multiset type.
+
+ Requires elements to be hashable, and allows duplicates, but has no
+ ordering. Bags are hashable.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`b`
+ or :py:func:`pbag` to create an instance.
+
+ Some examples:
+
+ >>> s = pbag([1, 2, 3, 1])
+ >>> s2 = s.add(4)
+ >>> s3 = s2.remove(1)
+ >>> s
+ pbag([1, 1, 2, 3])
+ >>> s2
+ pbag([1, 1, 2, 3, 4])
+ >>> s3
+ pbag([1, 2, 3, 4])
+ """
+
+ __slots__ = ('_counts', '__weakref__')
+
+ def __init__(self, counts):
+ self._counts = counts
+
+ def add(self, element):
+ """
+ Add an element to the bag.
+
+ >>> s = pbag([1])
+ >>> s2 = s.add(1)
+ >>> s3 = s.add(2)
+ >>> s2
+ pbag([1, 1])
+ >>> s3
+ pbag([1, 2])
+ """
+ return PBag(_add_to_counters(self._counts, element))
+
+ def update(self, iterable):
+ """
+ Update bag with all elements in iterable.
+
+ >>> s = pbag([1])
+ >>> s.update([1, 2])
+ pbag([1, 1, 2])
+ """
+ if iterable:
+ return PBag(reduce(_add_to_counters, iterable, self._counts))
+
+ return self
+
+ def remove(self, element):
+ """
+ Remove an element from the bag.
+
+ >>> s = pbag([1, 1, 2])
+ >>> s2 = s.remove(1)
+ >>> s3 = s.remove(2)
+ >>> s2
+ pbag([1, 2])
+ >>> s3
+ pbag([1, 1])
+ """
+ if element not in self._counts:
+ raise KeyError(element)
+ elif self._counts[element] == 1:
+ newc = self._counts.remove(element)
+ else:
+ newc = self._counts.set(element, self._counts[element] - 1)
+ return PBag(newc)
+
+ def count(self, element):
+ """
+ Return the number of times an element appears.
+
+ >>> pbag([]).count('non-existent')
+ 0
+ >>> pbag([1, 1, 2]).count(1)
+ 2
+ """
+ return self._counts.get(element, 0)
+
+ def __len__(self):
+ """
+ Return the length including duplicates.
+
+ >>> len(pbag([1, 1, 2]))
+ 3
+ """
+ return sum(self._counts.itervalues())
+
+ def __iter__(self):
+ """
+ Return an iterator of all elements, including duplicates.
+
+ >>> list(pbag([1, 1, 2]))
+ [1, 1, 2]
+ >>> list(pbag([1, 2]))
+ [1, 2]
+ """
+ for elt, count in self._counts.iteritems():
+ for i in range(count):
+ yield elt
+
+ def __contains__(self, elt):
+ """
+ Check if an element is in the bag.
+
+ >>> 1 in pbag([1, 1, 2])
+ True
+ >>> 0 in pbag([1, 2])
+ False
+ """
+ return elt in self._counts
+
+ def __repr__(self):
+ return "pbag({0})".format(list(self))
+
+ def __eq__(self, other):
+ """
+ Check if two bags are equivalent, honoring the number of duplicates,
+ and ignoring insertion order.
+
+ >>> pbag([1, 1, 2]) == pbag([1, 2])
+ False
+ >>> pbag([2, 1, 0]) == pbag([0, 1, 2])
+ True
+ """
+ if type(other) is not PBag:
+ raise TypeError("Can only compare PBag with PBags")
+ return self._counts == other._counts
+
+ def __lt__(self, other):
+ raise TypeError('PBags are not orderable')
+
+ __le__ = __lt__
+ __gt__ = __lt__
+ __ge__ = __lt__
+
+ # Multiset-style operations similar to collections.Counter
+
+ def __add__(self, other):
+ """
+ Combine elements from two PBags.
+
+ >>> pbag([1, 2, 2]) + pbag([2, 3, 3])
+ pbag([1, 2, 2, 2, 3, 3])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ result[elem] = self.count(elem) + other_count
+ return PBag(result.persistent())
+
+ def __sub__(self, other):
+ """
+ Remove elements from one PBag that are present in another.
+
+ >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
+ pbag([1, 2, 2])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ newcount = self.count(elem) - other_count
+ if newcount > 0:
+ result[elem] = newcount
+ elif elem in self:
+ result.remove(elem)
+ return PBag(result.persistent())
+
+ def __or__(self, other):
+ """
+ Union: Keep elements that are present in either of two PBags.
+
+ >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
+ pbag([1, 2, 2, 2, 3, 3])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ count = self.count(elem)
+ newcount = max(count, other_count)
+ result[elem] = newcount
+ return PBag(result.persistent())
+
+ def __and__(self, other):
+ """
+ Intersection: Only keep elements that are present in both PBags.
+
+ >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
+ pbag([2])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = pmap().evolver()
+ for elem, count in self._counts.iteritems():
+ newcount = min(count, other.count(elem))
+ if newcount > 0:
+ result[elem] = newcount
+ return PBag(result.persistent())
+
+ def __hash__(self):
+ """
+ Hash based on value of elements.
+
+ >>> m = pmap({pbag([1, 2]): "it's here!"})
+ >>> m[pbag([2, 1])]
+ "it's here!"
+ >>> pbag([1, 1, 2]) in m
+ False
+ """
+ return hash(self._counts)
+
+
+Container.register(PBag)
+Iterable.register(PBag)
+Sized.register(PBag)
+Hashable.register(PBag)
+
+
+def b(*elements):
+ """
+ Construct a persistent bag.
+
+ Takes an arbitrary number of arguments to insert into the new persistent
+ bag.
+
+ >>> b(1, 2, 3, 2)
+ pbag([1, 2, 2, 3])
+ """
+ return pbag(elements)
+
+
+def pbag(elements):
+ """
+ Convert an iterable to a persistent bag.
+
+ Takes an iterable with elements to insert.
+
+ >>> pbag([1, 2, 3, 2])
+ pbag([1, 2, 2, 3])
+ """
+ if not elements:
+ return _EMPTY_PBAG
+ return PBag(reduce(_add_to_counters, elements, pmap()))
+
+
+_EMPTY_PBAG = PBag(pmap())
+
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pclass.py b/lib/spack/external/_vendoring/pyrsistent/_pclass.py
new file mode 100644
index 0000000000..fd31a95d63
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pclass.py
@@ -0,0 +1,262 @@
+from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
+from pyrsistent._field_common import (
+ set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
+)
+from pyrsistent._transformations import transform
+
+
+def _is_pclass(bases):
+ return len(bases) == 1 and bases[0] == CheckedType
+
+
+class PClassMeta(type):
+ def __new__(mcs, name, bases, dct):
+ set_fields(dct, bases, name='_pclass_fields')
+ store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
+ dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields'])
+
+ # There must only be one __weakref__ entry in the inheritance hierarchy,
+        # let's put it on the top level class.
+ if _is_pclass(bases):
+ dct['__slots__'] += ('__weakref__',)
+
+ return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)
+
+_MISSING_VALUE = object()
+
+
+def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
+ check_type(cls, field, name, value)
+ is_ok, error_code = field.invariant(value)
+ if not is_ok:
+ invariant_errors.append(error_code)
+ else:
+ setattr(result, name, value)
+
+
+class PClass(CheckedType, metaclass=PClassMeta):
+ """
+ A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes inheriting
+    from PClass. It is defined the same way as a PRecord and behaves like one in all aspects except that it
+    is not a PMap, and hence not a collection, but rather a plain Python object.
+
+
+    More documentation and examples of PClass usage are available at https://github.com/tobgu/pyrsistent
+ """
+ def __new__(cls, **kwargs): # Support *args?
+ result = super(PClass, cls).__new__(cls)
+ factory_fields = kwargs.pop('_factory_fields', None)
+ ignore_extra = kwargs.pop('ignore_extra', None)
+ missing_fields = []
+ invariant_errors = []
+ for name, field in cls._pclass_fields.items():
+ if name in kwargs:
+ if factory_fields is None or name in factory_fields:
+ if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
+ value = field.factory(kwargs[name], ignore_extra=ignore_extra)
+ else:
+ value = field.factory(kwargs[name])
+ else:
+ value = kwargs[name]
+ _check_and_set_attr(cls, field, name, value, result, invariant_errors)
+ del kwargs[name]
+ elif field.initial is not PFIELD_NO_INITIAL:
+ initial = field.initial() if callable(field.initial) else field.initial
+ _check_and_set_attr(
+ cls, field, name, initial, result, invariant_errors)
+ elif field.mandatory:
+ missing_fields.append('{0}.{1}'.format(cls.__name__, name))
+
+ if invariant_errors or missing_fields:
+ raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')
+
+ if kwargs:
+ raise AttributeError("'{0}' are not among the specified fields for {1}".format(
+ ', '.join(kwargs), cls.__name__))
+
+ check_global_invariants(result, cls._pclass_invariants)
+
+ result._pclass_frozen = True
+ return result
+
+ def set(self, *args, **kwargs):
+ """
+ Set a field in the instance. Returns a new instance with the updated value. The original instance remains
+        unmodified. Accepts keyword arguments, or a single string naming the field followed by its value.
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=1)
+ >>> a2 = a.set(x=2)
+ >>> a3 = a.set('x', 3)
+ >>> a
+ AClass(x=1)
+ >>> a2
+ AClass(x=2)
+ >>> a3
+ AClass(x=3)
+ """
+ if args:
+ kwargs[args[0]] = args[1]
+
+ factory_fields = set(kwargs)
+
+ for key in self._pclass_fields:
+ if key not in kwargs:
+ value = getattr(self, key, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ kwargs[key] = value
+
+ return self.__class__(_factory_fields=factory_fields, **kwargs)
+
+ @classmethod
+ def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
+ """
+ Factory method. Will create a new PClass of the current type and assign the values
+ specified in kwargs.
+
+ :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
+ in the set of fields on the PClass.
+ """
+ if isinstance(kwargs, cls):
+ return kwargs
+
+ if ignore_extra:
+ kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}
+
+ return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)
+
+ def serialize(self, format=None):
+ """
+ Serialize the current PClass using custom serializer functions for fields where
+ such have been supplied.
+ """
+ result = {}
+ for name in self._pclass_fields:
+ value = getattr(self, name, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ result[name] = serialize(self._pclass_fields[name].serializer, format, value)
+
+ return result
+
+ def transform(self, *transformations):
+ """
+        Apply transformations to the current PClass. For more details on transformations see
+ the documentation for PMap. Transformations on PClasses do not support key matching
+ since the PClass is not a collection. Apart from that the transformations available
+ for other persistent types work as expected.
+ """
+ return transform(self, transformations)
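+
+    # Illustrative (hedged): given `class A(PClass): x = field()`,
+    # A(x=1).transform(['x'], 2) evaluates to A(x=2).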
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ for name in self._pclass_fields:
+ if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
+ return False
+
+ return True
+
+ return NotImplemented
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ # May want to optimize this by caching the hash somehow
+ return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))
+
+ def __setattr__(self, key, value):
+ if getattr(self, '_pclass_frozen', False):
+ raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))
+
+ super(PClass, self).__setattr__(key, value)
+
+ def __delattr__(self, key):
+ raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))
+
+ def _to_dict(self):
+ result = {}
+ for key in self._pclass_fields:
+ value = getattr(self, key, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ result[key] = value
+
+ return result
+
+ def __repr__(self):
+ return "{0}({1})".format(self.__class__.__name__,
+ ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))
+
+ def __reduce__(self):
+ # Pickling support
+ data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
+ return _restore_pickle, (self.__class__, data,)
+
+ def evolver(self):
+ """
+ Returns an evolver for this object.
+ """
+ return _PClassEvolver(self, self._to_dict())
+
+ def remove(self, name):
+ """
+ Remove attribute given by name from the current instance. Raises AttributeError if the
+ attribute doesn't exist.
+ """
+ evolver = self.evolver()
+ del evolver[name]
+ return evolver.persistent()
+
+
+class _PClassEvolver(object):
+ __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')
+
+ def __init__(self, original, initial_dict):
+ self._pclass_evolver_original = original
+ self._pclass_evolver_data = initial_dict
+ self._pclass_evolver_data_is_dirty = False
+ self._factory_fields = set()
+
+ def __getitem__(self, item):
+ return self._pclass_evolver_data[item]
+
+ def set(self, key, value):
+ if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
+ self._pclass_evolver_data[key] = value
+ self._factory_fields.add(key)
+ self._pclass_evolver_data_is_dirty = True
+
+ return self
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def remove(self, item):
+ if item in self._pclass_evolver_data:
+ del self._pclass_evolver_data[item]
+ self._factory_fields.discard(item)
+ self._pclass_evolver_data_is_dirty = True
+ return self
+
+ raise AttributeError(item)
+
+ def __delitem__(self, item):
+ self.remove(item)
+
+ def persistent(self):
+ if self._pclass_evolver_data_is_dirty:
+ return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
+ **self._pclass_evolver_data)
+
+ return self._pclass_evolver_original
+
+ def __setattr__(self, key, value):
+ if key not in self.__slots__:
+ self.set(key, value)
+ else:
+ super(_PClassEvolver, self).__setattr__(key, value)
+
+ def __getattr__(self, item):
+ return self[item]
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pdeque.py b/lib/spack/external/_vendoring/pyrsistent/_pdeque.py
new file mode 100644
index 0000000000..bd11bfa032
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pdeque.py
@@ -0,0 +1,376 @@
+from collections.abc import Sequence, Hashable
+from itertools import islice, chain
+from numbers import Integral
+from pyrsistent._plist import plist
+
+
+class PDeque(object):
+ """
+ Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
+ using two persistent lists.
+
+ A maximum length can be specified to create a bounded queue.
+
+ Fully supports the Sequence and Hashable protocols including indexing and slicing but
+    if you need fast random access, go for the PVector instead.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
+ create an instance.
+
+ Some examples:
+
+ >>> x = pdeque([1, 2, 3])
+ >>> x.left
+ 1
+ >>> x.right
+ 3
+ >>> x[0] == x.left
+ True
+ >>> x[-1] == x.right
+ True
+ >>> x.pop()
+ pdeque([1, 2])
+ >>> x.pop() == x[:-1]
+ True
+ >>> x.popleft()
+ pdeque([2, 3])
+ >>> x.append(4)
+ pdeque([1, 2, 3, 4])
+ >>> x.appendleft(4)
+ pdeque([4, 1, 2, 3])
+
+ >>> y = pdeque([1, 2, 3], maxlen=3)
+ >>> y.append(4)
+ pdeque([2, 3, 4], maxlen=3)
+ >>> y.appendleft(4)
+ pdeque([4, 1, 2], maxlen=3)
+ """
+ __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
+
+ def __new__(cls, left_list, right_list, length, maxlen=None):
+ instance = super(PDeque, cls).__new__(cls)
+ instance._left_list = left_list
+ instance._right_list = right_list
+ instance._length = length
+
+ if maxlen is not None:
+ if not isinstance(maxlen, Integral):
+ raise TypeError('An integer is required as maxlen')
+
+ if maxlen < 0:
+ raise ValueError("maxlen must be non-negative")
+
+ instance._maxlen = maxlen
+ return instance
+
+ @property
+ def right(self):
+ """
+        Rightmost element in the deque.
+ """
+ return PDeque._tip_from_lists(self._right_list, self._left_list)
+
+ @property
+ def left(self):
+ """
+        Leftmost element in the deque.
+ """
+ return PDeque._tip_from_lists(self._left_list, self._right_list)
+
+ @staticmethod
+ def _tip_from_lists(primary_list, secondary_list):
+ if primary_list:
+ return primary_list.first
+
+ if secondary_list:
+ return secondary_list[-1]
+
+ raise IndexError('No elements in empty deque')
+
+ def __iter__(self):
+ return chain(self._left_list, self._right_list.reverse())
+
+ def __repr__(self):
+ return "pdeque({0}{1})".format(list(self),
+ ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
+ __str__ = __repr__
+
+ @property
+ def maxlen(self):
+ """
+ Maximum length of the queue.
+ """
+ return self._maxlen
+
+ def pop(self, count=1):
+ """
+ Return new deque with rightmost element removed. Popping the empty queue
+        will return the empty queue. An optional count can be given to indicate the
+        number of elements to pop. Popping with a negative count is the same as
+ popleft. Executes in amortized O(k) where k is the number of elements to pop.
+
+ >>> pdeque([1, 2]).pop()
+ pdeque([1])
+ >>> pdeque([1, 2]).pop(2)
+ pdeque([])
+ >>> pdeque([1, 2]).pop(-1)
+ pdeque([2])
+ """
+ if count < 0:
+ return self.popleft(-count)
+
+ new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
+ return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
+
+ def popleft(self, count=1):
+ """
+ Return new deque with leftmost element removed. Otherwise functionally
+ equivalent to pop().
+
+ >>> pdeque([1, 2]).popleft()
+ pdeque([2])
+ """
+ if count < 0:
+ return self.pop(-count)
+
+ new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
+ return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
+
+ @staticmethod
+ def _pop_lists(primary_list, secondary_list, count):
+ new_primary_list = primary_list
+ new_secondary_list = secondary_list
+
+ while count > 0 and (new_primary_list or new_secondary_list):
+ count -= 1
+ if new_primary_list.rest:
+ new_primary_list = new_primary_list.rest
+ elif new_primary_list:
+ new_primary_list = new_secondary_list.reverse()
+ new_secondary_list = plist()
+ else:
+ new_primary_list = new_secondary_list.reverse().rest
+ new_secondary_list = plist()
+
+ return new_primary_list, new_secondary_list
+
+ def _is_empty(self):
+ return not self._left_list and not self._right_list
+
+ def __lt__(self, other):
+ if not isinstance(other, PDeque):
+ return NotImplemented
+
+ return tuple(self) < tuple(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, PDeque):
+ return NotImplemented
+
+ if tuple(self) == tuple(other):
+ # Sanity check of the length value since it is redundant (there for performance)
+ assert len(self) == len(other)
+ return True
+
+ return False
+
+ def __hash__(self):
+ return hash(tuple(self))
+
+ def __len__(self):
+ return self._length
+
+ def append(self, elem):
+ """
+ Return new deque with elem as the rightmost element.
+
+ >>> pdeque([1, 2]).append(3)
+ pdeque([1, 2, 3])
+ """
+ new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
+ return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
+
+ def appendleft(self, elem):
+ """
+ Return new deque with elem as the leftmost element.
+
+ >>> pdeque([1, 2]).appendleft(3)
+ pdeque([3, 1, 2])
+ """
+ new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
+ return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
+
+ def _append(self, primary_list, secondary_list, elem):
+ if self._maxlen is not None and self._length == self._maxlen:
+ if self._maxlen == 0:
+ return primary_list, secondary_list, 0
+ new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
+ return new_primary_list, new_secondary_list.cons(elem), self._length
+
+ return primary_list, secondary_list.cons(elem), self._length + 1
+
+ @staticmethod
+ def _extend_list(the_list, iterable):
+ count = 0
+ for elem in iterable:
+ the_list = the_list.cons(elem)
+ count += 1
+
+ return the_list, count
+
+ def _extend(self, primary_list, secondary_list, iterable):
+ new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
+ new_secondary_list = secondary_list
+ current_len = self._length + extend_count
+ if self._maxlen is not None and current_len > self._maxlen:
+ pop_len = current_len - self._maxlen
+ new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
+ extend_count -= pop_len
+
+ return new_primary_list, new_secondary_list, extend_count
+
+ def extend(self, iterable):
+ """
+ Return new deque with all elements of iterable appended to the right.
+
+ >>> pdeque([1, 2]).extend([3, 4])
+ pdeque([1, 2, 3, 4])
+ """
+ new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
+ return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
+
+ def extendleft(self, iterable):
+ """
+ Return new deque with all elements of iterable appended to the left.
+
+ NB! The elements will be inserted in reverse order compared to the order in the iterable.
+
+ >>> pdeque([1, 2]).extendleft([3, 4])
+ pdeque([4, 3, 1, 2])
+ """
+ new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
+ return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
+
+ def count(self, elem):
+ """
+ Return the number of elements equal to elem present in the queue
+
+ >>> pdeque([1, 2, 1]).count(1)
+ 2
+ """
+ return self._left_list.count(elem) + self._right_list.count(elem)
+
+ def remove(self, elem):
+ """
+ Return new deque with first element from left equal to elem removed. If no such element is found
+ a ValueError is raised.
+
+ >>> pdeque([2, 1, 2]).remove(2)
+ pdeque([1, 2])
+ """
+ try:
+ return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
+ except ValueError:
+ # Value not found in left list, try the right list
+ try:
+ # This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
+ return PDeque(self._left_list,
+ self._right_list.reverse().remove(elem).reverse(), self._length - 1)
+ except ValueError as e:
+ raise ValueError('{0} not found in PDeque'.format(elem)) from e
+
+ def reverse(self):
+ """
+ Return reversed deque.
+
+ >>> pdeque([1, 2, 3]).reverse()
+ pdeque([3, 2, 1])
+
+ Also supports the standard python reverse function.
+
+ >>> reversed(pdeque([1, 2, 3]))
+ pdeque([3, 2, 1])
+ """
+ return PDeque(self._right_list, self._left_list, self._length)
+ __reversed__ = reverse
+
+ def rotate(self, steps):
+ """
+        Return a deque with the elements rotated by the given number of steps.
+
+ >>> x = pdeque([1, 2, 3])
+ >>> x.rotate(1)
+ pdeque([3, 1, 2])
+ >>> x.rotate(-2)
+ pdeque([3, 1, 2])
+ """
+ popped_deque = self.pop(steps)
+ if steps >= 0:
+ return popped_deque.extendleft(islice(self.reverse(), steps))
+
+ return popped_deque.extend(islice(self, -steps))
+
+ def __reduce__(self):
+ # Pickling support
+ return pdeque, (list(self), self._maxlen)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ if index.step is not None and index.step != 1:
+ # Too difficult, no structural sharing possible
+ return pdeque(tuple(self)[index], maxlen=self._maxlen)
+
+ result = self
+ if index.start is not None:
+ result = result.popleft(index.start % self._length)
+ if index.stop is not None:
+ result = result.pop(self._length - (index.stop % self._length))
+
+ return result
+
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index >= 0:
+ return self.popleft(index).left
+
+ shifted = len(self) + index
+ if shifted < 0:
+ raise IndexError(
+ "pdeque index {0} out of range {1}".format(index, len(self)),
+ )
+ return self.popleft(shifted).left
+
+ index = Sequence.index
+
+Sequence.register(PDeque)
+Hashable.register(PDeque)
+
+
+def pdeque(iterable=(), maxlen=None):
+ """
+    Return deque containing the elements of iterable. If maxlen is specified then
+    len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
+
+ >>> pdeque([1, 2, 3])
+ pdeque([1, 2, 3])
+ >>> pdeque([1, 2, 3, 4], maxlen=2)
+ pdeque([3, 4], maxlen=2)
+ """
+ t = tuple(iterable)
+ if maxlen is not None:
+ t = t[-maxlen:]
+ length = len(t)
+ pivot = int(length / 2)
+ left = plist(t[:pivot])
+ right = plist(t[pivot:], reverse=True)
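+    # The right half is stored reversed so that both ends of the deque sit at
+    # the heads of their plists: for t == (1, 2, 3, 4) this gives
+    # left == plist([1, 2]) and right == plist([4, 3]) internally.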
+ return PDeque(left, right, length, maxlen)
+
+def dq(*elements):
+ """
+ Return deque containing all arguments.
+
+ >>> dq(1, 2, 3)
+ pdeque([1, 2, 3])
+ """
+ return pdeque(elements)
diff --git a/lib/spack/external/_vendoring/pyrsistent/_plist.py b/lib/spack/external/_vendoring/pyrsistent/_plist.py
new file mode 100644
index 0000000000..bea7f5ecfe
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_plist.py
@@ -0,0 +1,313 @@
+from collections.abc import Sequence, Hashable
+from numbers import Integral
+from functools import reduce
+
+
+class _PListBuilder(object):
+ """
+ Helper class to allow construction of a list without
+ having to reverse it in the end.
+ """
+ __slots__ = ('_head', '_tail')
+
+ def __init__(self):
+ self._head = _EMPTY_PLIST
+ self._tail = _EMPTY_PLIST
+
+ def _append(self, elem, constructor):
+ if not self._tail:
+ self._head = constructor(elem)
+ self._tail = self._head
+ else:
+ self._tail.rest = constructor(elem)
+ self._tail = self._tail.rest
+
+ return self._head
+
+ def append_elem(self, elem):
+ return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))
+
+ def append_plist(self, pl):
+ return self._append(pl, lambda l: l)
+
+ def build(self):
+ return self._head
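+
+    # Illustrative usage (not called like this anywhere in this module):
+    #
+    #   lb = _PListBuilder()
+    #   for x in (1, 2, 3):
+    #       lb.append_elem(x)
+    #   lb.build()   # plist([1, 2, 3])
+    #
+    # The builder mutates the tail nodes while appending, so the head must not
+    # be shared before build() has been called.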
+
+
+class _PListBase(object):
+ __slots__ = ('__weakref__',)
+
+    # Selected implementations can be taken straight from the Sequence
+    # class; others are less suitable, especially those that rely on
+    # index lookups.
+ count = Sequence.count
+ index = Sequence.index
+
+ def __reduce__(self):
+ # Pickling support
+ return plist, (list(self),)
+
+ def __len__(self):
+ """
+ Return the length of the list, computed by traversing it.
+
+ This is obviously O(n) but with the current implementation
+ where a list is also a node the overhead of storing the length
+ in every node would be quite significant.
+ """
+ return sum(1 for _ in self)
+
+ def __repr__(self):
+ return "plist({0})".format(list(self))
+ __str__ = __repr__
+
+ def cons(self, elem):
+ """
+ Return a new list with elem inserted as new head.
+
+ >>> plist([1, 2]).cons(3)
+ plist([3, 1, 2])
+ """
+ return PList(elem, self)
+
+ def mcons(self, iterable):
+ """
+        Return a new list with all elements of iterable repeatedly cons'ed to the current list.
+ NB! The elements will be inserted in the reverse order of the iterable.
+ Runs in O(len(iterable)).
+
+ >>> plist([1, 2]).mcons([3, 4])
+ plist([4, 3, 1, 2])
+ """
+ head = self
+ for elem in iterable:
+ head = head.cons(elem)
+
+ return head
+
+ def reverse(self):
+ """
+ Return a reversed version of list. Runs in O(n) where n is the length of the list.
+
+ >>> plist([1, 2, 3]).reverse()
+ plist([3, 2, 1])
+
+ Also supports the standard reversed function.
+
+ >>> reversed(plist([1, 2, 3]))
+ plist([3, 2, 1])
+ """
+ result = plist()
+ head = self
+ while head:
+ result = result.cons(head.first)
+ head = head.rest
+
+ return result
+ __reversed__ = reverse
+
+ def split(self, index):
+ """
+        Split the list at the position specified by index. Returns a tuple containing the
+ list up until index and the list after the index. Runs in O(index).
+
+ >>> plist([1, 2, 3, 4]).split(2)
+ (plist([1, 2]), plist([3, 4]))
+ """
+ lb = _PListBuilder()
+ right_list = self
+ i = 0
+ while right_list and i < index:
+ lb.append_elem(right_list.first)
+ right_list = right_list.rest
+ i += 1
+
+ if not right_list:
+ # Just a small optimization in the cases where no split occurred
+ return self, _EMPTY_PLIST
+
+ return lb.build(), right_list
+
+ def __iter__(self):
+ li = self
+ while li:
+ yield li.first
+ li = li.rest
+
+ def __lt__(self, other):
+ if not isinstance(other, _PListBase):
+ return NotImplemented
+
+ return tuple(self) < tuple(other)
+
+ def __eq__(self, other):
+ """
+ Traverses the lists, checking equality of elements.
+
+ This is an O(n) operation, but preserves the standard semantics of list equality.
+ """
+ if not isinstance(other, _PListBase):
+ return NotImplemented
+
+ self_head = self
+ other_head = other
+ while self_head and other_head:
+ if not self_head.first == other_head.first:
+ return False
+ self_head = self_head.rest
+ other_head = other_head.rest
+
+ return not self_head and not other_head
+
+ def __getitem__(self, index):
+        # Don't use this data structure if you plan to do a lot of indexing, it is
+        # very inefficient! Use a PVector instead!
+
+ if isinstance(index, slice):
+ if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
+ return self._drop(index.start)
+
+ # Take the easy way out for all other slicing cases, not much structural reuse possible anyway
+ return plist(tuple(self)[index])
+
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ # NB: O(n)!
+ index += len(self)
+
+ try:
+ return self._drop(index).first
+ except AttributeError as e:
+ raise IndexError("PList index out of range") from e
+
+ def _drop(self, count):
+ if count < 0:
+ raise IndexError("PList index out of range")
+
+ head = self
+ while count > 0:
+ head = head.rest
+ count -= 1
+
+ return head
+
+ def __hash__(self):
+ return hash(tuple(self))
+
+ def remove(self, elem):
+ """
+ Return new list with first element equal to elem removed. O(k) where k is the position
+ of the element that is removed.
+
+ Raises ValueError if no matching element is found.
+
+ >>> plist([1, 2, 1]).remove(1)
+ plist([2, 1])
+ """
+
+ builder = _PListBuilder()
+ head = self
+ while head:
+ if head.first == elem:
+ return builder.append_plist(head.rest)
+
+ builder.append_elem(head.first)
+ head = head.rest
+
+ raise ValueError('{0} not found in PList'.format(elem))
+
+
+class PList(_PListBase):
+ """
+ Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
+ Element access is O(k) where k is the position of the element in the list. Taking the
+ length of the list is O(n).
+
+ Fully supports the Sequence and Hashable protocols including indexing and slicing but
+ if you need fast random access go for the PVector instead.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
+ create an instance.
+
+ Some examples:
+
+ >>> x = plist([1, 2])
+ >>> y = x.cons(3)
+ >>> x
+ plist([1, 2])
+ >>> y
+ plist([3, 1, 2])
+ >>> y.first
+ 3
+ >>> y.rest == x
+ True
+ >>> y[:2]
+ plist([3, 1])
+ """
+ __slots__ = ('first', 'rest')
+
+ def __new__(cls, first, rest):
+ instance = super(PList, cls).__new__(cls)
+ instance.first = first
+ instance.rest = rest
+ return instance
+
+ def __bool__(self):
+ return True
+ __nonzero__ = __bool__
+
+
+Sequence.register(PList)
+Hashable.register(PList)
+
+
+class _EmptyPList(_PListBase):
+ __slots__ = ()
+
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+
+ @property
+ def first(self):
+ raise AttributeError("Empty PList has no first")
+
+ @property
+ def rest(self):
+ return self
+
+
+Sequence.register(_EmptyPList)
+Hashable.register(_EmptyPList)
+
+_EMPTY_PLIST = _EmptyPList()
+
+
+def plist(iterable=(), reverse=False):
+ """
+ Creates a new persistent list containing all elements of iterable.
+ Optional parameter reverse specifies if the elements should be inserted in
+ reverse order or not.
+
+ >>> plist([1, 2, 3])
+ plist([1, 2, 3])
+ >>> plist([1, 2, 3], reverse=True)
+ plist([3, 2, 1])
+ """
+ if not reverse:
+ iterable = list(iterable)
+ iterable.reverse()
+
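+    # Folding with cons builds the result back to front, so the input is
+    # reversed first whenever the original order should be preserved.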
+ return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST)
+
+
+def l(*elements):
+ """
+ Creates a new persistent list containing all arguments.
+
+ >>> l(1, 2, 3)
+ plist([1, 2, 3])
+ """
+ return plist(elements)
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pmap.py b/lib/spack/external/_vendoring/pyrsistent/_pmap.py
new file mode 100644
index 0000000000..81d99c076f
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pmap.py
@@ -0,0 +1,461 @@
+from collections.abc import Mapping, Hashable
+from itertools import chain
+from pyrsistent._pvector import pvector
+from pyrsistent._transformations import transform
+
+
+class PMap(object):
+ """
+ Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
+ create an instance.
+
+    Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more
+    closely resemble the python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
+    hashed and the elements inserted at position hash % len(bucket_vector). Whenever the number of entries grows
+    beyond roughly 1.5 times the number of buckets the map is reallocated to a bucket vector of double the size.
+    This is done to avoid excessive hash collisions.
+
+ This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the
+ semantics are the same (more or less) the same function names have been used but for some cases it is not possible,
+ for example assignments and deletion of values.
+
+ PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
+ element access.
+
+    Random access and insert are O(log32(n)) where n is the size of the map.
+
+ The following are examples of some common operations on persistent maps
+
+ >>> m1 = m(a=1, b=3)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.remove('a')
+ >>> m1
+ pmap({'b': 3, 'a': 1})
+ >>> m2
+ pmap({'c': 3, 'b': 3, 'a': 1})
+ >>> m3
+ pmap({'c': 3, 'b': 3})
+ >>> m3['c']
+ 3
+ >>> m3.c
+ 3
+ """
+ __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')
+
+ def __new__(cls, size, buckets):
+ self = super(PMap, cls).__new__(cls)
+ self._size = size
+ self._buckets = buckets
+ return self
+
+ @staticmethod
+ def _get_bucket(buckets, key):
+ index = hash(key) % len(buckets)
+ bucket = buckets[index]
+ return index, bucket
+
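+    # For illustration (hash value assumed): with 8 buckets, a key hashing to
+    # 42 goes into bucket 42 % 8 == 2. Keys that collide within a bucket are
+    # kept in a plain [(key, value), ...] list and located by linear scan.
+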
+ @staticmethod
+ def _getitem(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ return v
+
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets, key)
+
+ @staticmethod
+ def _contains(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+        if bucket:
+            for k, _ in bucket:
+                if k == key:
+                    return True
+
+        return False
+
+ def __contains__(self, key):
+ return self._contains(self._buckets, key)
+
+ get = Mapping.get
+
+ def __iter__(self):
+ return self.iterkeys()
+
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError as e:
+ raise AttributeError(
+ "{0} has no attribute '{1}'".format(type(self).__name__, key)
+ ) from e
+
+ def iterkeys(self):
+ for k, _ in self.iteritems():
+ yield k
+
+ # These are more efficient implementations compared to the original
+ # methods that are based on the keys iterator and then calls the
+ # accessor functions to access the value for the corresponding key
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+
+ def iteritems(self):
+ for bucket in self._buckets:
+ if bucket:
+ for k, v in bucket:
+ yield k, v
+
+ def values(self):
+ return pvector(self.itervalues())
+
+ def keys(self):
+ return pvector(self.iterkeys())
+
+ def items(self):
+ return pvector(self.iteritems())
+
+ def __len__(self):
+ return self._size
+
+ def __repr__(self):
+ return 'pmap({0})'.format(str(dict(self)))
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ if len(self) != len(other):
+ return False
+ if isinstance(other, PMap):
+ if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
+ and self._cached_hash != other._cached_hash):
+ return False
+ if self._buckets == other._buckets:
+ return True
+ return dict(self.iteritems()) == dict(other.iteritems())
+ elif isinstance(other, dict):
+ return dict(self.iteritems()) == other
+ return dict(self.iteritems()) == dict(other.items())
+
+ __ne__ = Mapping.__ne__
+
+ def __lt__(self, other):
+ raise TypeError('PMaps are not orderable')
+
+ __le__ = __lt__
+ __gt__ = __lt__
+ __ge__ = __lt__
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __hash__(self):
+ if not hasattr(self, '_cached_hash'):
+ self._cached_hash = hash(frozenset(self.iteritems()))
+ return self._cached_hash
+
+ def set(self, key, val):
+ """
+ Return a new PMap with key and val inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('a', 3)
+        >>> m3 = m1.set('c', 4)
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+ >>> m2
+ pmap({'b': 2, 'a': 3})
+ >>> m3
+ pmap({'c': 4, 'b': 2, 'a': 1})
+ """
+ return self.evolver().set(key, val).persistent()
+
+ def remove(self, key):
+ """
+ Return a new PMap without the element specified by key. Raises KeyError if the element
+ is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.remove('a')
+ pmap({'b': 2})
+ """
+ return self.evolver().remove(key).persistent()
+
+ def discard(self, key):
+ """
+ Return a new PMap without the element specified by key. Returns reference to itself
+ if element is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.discard('a')
+ pmap({'b': 2})
+ >>> m1 is m1.discard('c')
+ True
+ """
+ try:
+ return self.remove(key)
+ except KeyError:
+ return self
+
+ def update(self, *maps):
+ """
+        Return a new PMap with the items of the given Mappings inserted. If the same key is present in multiple
+        maps the rightmost (last) value is inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'c': 3, 'b': 2, 'a': 17, 'd': 35})
+ """
+ return self.update_with(lambda l, r: r, *maps)
+
+ def update_with(self, update_fn, *maps):
+ """
+        Return a new PMap with the items of the given Mappings inserted. If the same key is present in multiple
+        maps the values will be merged using update_fn, going from left to right.
+
+ >>> from operator import add
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update_with(add, m(a=2))
+ pmap({'b': 2, 'a': 3})
+
+ The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.
+
+ >>> m1 = m(a=1)
+ >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
+ pmap({'a': 1})
+ """
+ evolver = self.evolver()
+ for map in maps:
+ for key, value in map.items():
+ evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)
+
+ return evolver.persistent()
+
+ def __add__(self, other):
+ return self.update(other)
+
+ __or__ = __add__
+
+ def __reduce__(self):
+ # Pickling support
+ return pmap, (dict(self),)
+
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts. One match expression that specifies which elements to transform
+ and one transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed the original data structure is kept
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+ return transform(self, transformations)
+
+ def copy(self):
+ return self
+
+ class _Evolver(object):
+ __slots__ = ('_buckets_evolver', '_size', '_original_pmap')
+
+ def __init__(self, original_pmap):
+ self._original_pmap = original_pmap
+ self._buckets_evolver = original_pmap._buckets.evolver()
+ self._size = original_pmap._size
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets_evolver, key)
+
+ def __setitem__(self, key, val):
+ self.set(key, val)
+
+ def set(self, key, val):
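+            # Grow the bucket vector before inserting if there are fewer than
+            # 0.67 buckets per entry (i.e. the load factor is above ~1.5);
+            # _reallocate doubles the number of buckets.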
+ if len(self._buckets_evolver) < 0.67 * self._size:
+ self._reallocate(2 * len(self._buckets_evolver))
+
+ kv = (key, val)
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ if v is not val:
+ new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
+ self._buckets_evolver[index] = new_bucket
+
+ return self
+
+ new_bucket = [kv]
+ new_bucket.extend(bucket)
+ self._buckets_evolver[index] = new_bucket
+ self._size += 1
+ else:
+ self._buckets_evolver[index] = [kv]
+ self._size += 1
+
+ return self
+
+ def _reallocate(self, new_size):
+ new_list = new_size * [None]
+ buckets = self._buckets_evolver.persistent()
+ for k, v in chain.from_iterable(x for x in buckets if x):
+ index = hash(k) % new_size
+ if new_list[index]:
+ new_list[index].append((k, v))
+ else:
+ new_list[index] = [(k, v)]
+
+ # A reallocation should always result in a dirty buckets evolver to avoid
+ # possible loss of elements when doing the reallocation.
+ self._buckets_evolver = pvector().evolver()
+ self._buckets_evolver.extend(new_list)
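+
+            # Rehashing re-homes every entry under the new modulus: e.g. a key
+            # with hash 10 moves from bucket 10 % 8 == 2 to bucket 10 % 16 == 10.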
+
+ def is_dirty(self):
+ return self._buckets_evolver.is_dirty()
+
+ def persistent(self):
+ if self.is_dirty():
+ self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())
+
+ return self._original_pmap
+
+ def __len__(self):
+ return self._size
+
+ def __contains__(self, key):
+ return PMap._contains(self._buckets_evolver, key)
+
+ def __delitem__(self, key):
+ self.remove(key)
+
+ def remove(self, key):
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+
+ if bucket:
+ new_bucket = [(k, v) for (k, v) in bucket if k != key]
+ if len(bucket) > len(new_bucket):
+ self._buckets_evolver[index] = new_bucket if new_bucket else None
+ self._size -= 1
+ return self
+
+ raise KeyError('{0}'.format(key))
+
+ def evolver(self):
+ """
+ Create a new evolver for this pmap. For a discussion on evolvers in general see the
+ documentation for the pvector evolver.
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> m1 = m(a=1, b=2)
+ >>> e = m1.evolver()
+ >>> e['c'] = 3
+ >>> len(e)
+ 3
+ >>> del e['a']
+
+ The underlying pmap remains the same:
+
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+
+ The changes are kept in the evolver. An updated pmap can be created using the
+ persistent() function on the evolver.
+
+ >>> m2 = e.persistent()
+ >>> m2
+ pmap({'c': 3, 'b': 2})
+
+ The new pmap will share data with the original pmap in the same way that would have
+ been done if only using operations on the pmap.
+ """
+ return self._Evolver(self)
+
+Mapping.register(PMap)
+Hashable.register(PMap)
+
+
+def _turbo_mapping(initial, pre_size):
+ if pre_size:
+ size = pre_size
+ else:
+ try:
+ size = 2 * len(initial) or 8
+ except Exception:
+ # Guess we can't figure out the length. Give up on length hinting,
+ # we can always reallocate later.
+ size = 8
+
+ buckets = size * [None]
+
+ if not isinstance(initial, Mapping):
+ # Make a dictionary of the initial data if it isn't already,
+ # that will save us some job further down since we can assume no
+ # key collisions
+ initial = dict(initial)
+
+ for k, v in initial.items():
+ h = hash(k)
+ index = h % size
+ bucket = buckets[index]
+
+ if bucket:
+ bucket.append((k, v))
+ else:
+ buckets[index] = [(k, v)]
+
+ return PMap(len(initial), pvector().extend(buckets))
+
+
+_EMPTY_PMAP = _turbo_mapping({}, 0)
+
+
+def pmap(initial={}, pre_size=0):
+ """
+ Create new persistent map, inserts all elements in initial into the newly created map.
+ The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
+ may have a positive performance impact in the cases where you know beforehand that a large number of elements
+ will be inserted into the map eventually since it will reduce the number of reallocations required.
+
+ >>> pmap({'a': 13, 'b': 14})
+ pmap({'b': 14, 'a': 13})
+ """
+ if not initial and pre_size == 0:
+ return _EMPTY_PMAP
+
+ return _turbo_mapping(initial, pre_size)
+
+
+def m(**kwargs):
+ """
+ Creates a new persistent map. Inserts all key value arguments into the newly created map.
+
+ >>> m(a=13, b=14)
+ pmap({'b': 14, 'a': 13})
+ """
+ return pmap(kwargs)
diff --git a/lib/spack/external/_vendoring/pyrsistent/_precord.py b/lib/spack/external/_vendoring/pyrsistent/_precord.py
new file mode 100644
index 0000000000..1ee8198a1a
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_precord.py
@@ -0,0 +1,167 @@
+from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
+from pyrsistent._field_common import (
+ set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
+)
+from pyrsistent._pmap import PMap, pmap
+
+
+class _PRecordMeta(type):
+ def __new__(mcs, name, bases, dct):
+ set_fields(dct, bases, name='_precord_fields')
+ store_invariants(dct, bases, '_precord_invariants', '__invariant__')
+
+ dct['_precord_mandatory_fields'] = \
+ set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
+
+ dct['_precord_initial_values'] = \
+ dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
+
+
+ dct['__slots__'] = ()
+
+ return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
+
+
+class PRecord(PMap, CheckedType, metaclass=_PRecordMeta):
+ """
+ A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
+ from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
+ access using subscript notation.
+
+    More documentation and examples of PRecord usage are available at https://github.com/tobgu/pyrsistent
+ """
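+    # Illustrative declaration (assumes the field() factory from pyrsistent,
+    # which is not imported in this module):
+    #
+    #   class Point(PRecord):
+    #       x = field(type=int, mandatory=True)
+    #       y = field(type=int, initial=0)
+    #
+    #   Point(x=1)           # Point(x=1, y=0)
+    #   Point(x=1).set(y=2)  # Point(x=1, y=2)
+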
+ def __new__(cls, **kwargs):
+ # Hack total! If these two special attributes exist that means we can create
+ # ourselves. Otherwise we need to go through the Evolver to create the structures
+ # for us.
+ if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
+ return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])
+
+ factory_fields = kwargs.pop('_factory_fields', None)
+ ignore_extra = kwargs.pop('_ignore_extra', False)
+
+ initial_values = kwargs
+ if cls._precord_initial_values:
+ initial_values = dict((k, v() if callable(v) else v)
+ for k, v in cls._precord_initial_values.items())
+ initial_values.update(kwargs)
+
+ e = _PRecordEvolver(cls, pmap(pre_size=len(cls._precord_fields)), _factory_fields=factory_fields, _ignore_extra=ignore_extra)
+ for k, v in initial_values.items():
+ e[k] = v
+
+ return e.persistent()
+
+ def set(self, *args, **kwargs):
+ """
+        Set a field in the record. This set function differs slightly from that in the PMap
+        class. First of all it accepts a key and a value as positional arguments. Second it accepts
+        multiple key-value pairs as keyword arguments, performing one atomic update of multiple fields.
+ """
+
+ # The PRecord set() can accept kwargs since all fields that have been declared are
+ # valid python identifiers. Also allow multiple fields to be set in one operation.
+ if args:
+ return super(PRecord, self).set(args[0], args[1])
+
+ return self.update(kwargs)
+
+ def evolver(self):
+ """
+ Returns an evolver of this object.
+ """
+ return _PRecordEvolver(self.__class__, self)
+
+ def __repr__(self):
+ return "{0}({1})".format(self.__class__.__name__,
+ ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))
+
+ @classmethod
+ def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
+ """
+ Factory method. Will create a new PRecord of the current type and assign the values
+ specified in kwargs.
+
+ :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
+ in the set of fields on the PRecord.
+ """
+ if isinstance(kwargs, cls):
+ return kwargs
+
+ if ignore_extra:
+ kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs}
+
+ return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, dict(self),)
+
+ def serialize(self, format=None):
+ """
+ Serialize the current PRecord using custom serializer functions for fields where
+ such have been supplied.
+ """
+ return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
+
+
+class _PRecordEvolver(PMap._Evolver):
+ __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')
+
+ def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
+ super(_PRecordEvolver, self).__init__(original_pmap)
+ self._destination_cls = cls
+ self._invariant_error_codes = []
+ self._missing_fields = []
+ self._factory_fields = _factory_fields
+ self._ignore_extra = _ignore_extra
+
+ def __setitem__(self, key, original_value):
+ self.set(key, original_value)
+
+ def set(self, key, original_value):
+ field = self._destination_cls._precord_fields.get(key)
+ if field:
+ if self._factory_fields is None or field in self._factory_fields:
+ try:
+ if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
+ value = field.factory(original_value, ignore_extra=self._ignore_extra)
+ else:
+ value = field.factory(original_value)
+ except InvariantException as e:
+ self._invariant_error_codes += e.invariant_errors
+ self._missing_fields += e.missing_fields
+ return self
+ else:
+ value = original_value
+
+ check_type(self._destination_cls, field, key, value)
+
+ is_ok, error_code = field.invariant(value)
+ if not is_ok:
+ self._invariant_error_codes.append(error_code)
+
+ return super(_PRecordEvolver, self).set(key, value)
+ else:
+ raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
+
+ def persistent(self):
+ cls = self._destination_cls
+ is_dirty = self.is_dirty()
+ pm = super(_PRecordEvolver, self).persistent()
+ if is_dirty or not isinstance(pm, cls):
+ result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
+ else:
+ result = pm
+
+ if cls._precord_mandatory_fields:
+ self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
+ in (cls._precord_mandatory_fields - set(result.keys())))
+
+ if self._invariant_error_codes or self._missing_fields:
+ raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
+ 'Field invariant failed')
+
+ check_global_invariants(result, cls._precord_invariants)
+
+ return result
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pset.py b/lib/spack/external/_vendoring/pyrsistent/_pset.py
new file mode 100644
index 0000000000..4fae8278f0
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pset.py
@@ -0,0 +1,227 @@
+from collections.abc import Set, Hashable
+import sys
+from pyrsistent._pmap import pmap
+
+
+class PSet(object):
+ """
+ Persistent set implementation. Built on top of the persistent map. The set supports all operations
+ in the Set protocol and is Hashable.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset`
+ to create an instance.
+
+    Random access and insert are O(log32(n)) where n is the size of the set.
+
+ Some examples:
+
+ >>> s = pset([1, 2, 3, 1])
+ >>> s2 = s.add(4)
+ >>> s3 = s2.remove(2)
+ >>> s
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([1, 3, 4])
+ """
+ __slots__ = ('_map', '__weakref__')
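+
+    # The set is represented as a PMap from element -> True; membership,
+    # iteration and size below all delegate to that map.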
+
+ def __new__(cls, m):
+ self = super(PSet, cls).__new__(cls)
+ self._map = m
+ return self
+
+ def __contains__(self, element):
+ return element in self._map
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def __len__(self):
+ return len(self._map)
+
+ def __repr__(self):
+ if not self:
+ return 'p' + str(set(self))
+
+ return 'pset([{0}])'.format(str(set(self))[1:-1])
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __hash__(self):
+ return hash(self._map)
+
+ def __reduce__(self):
+ # Pickling support
+ return pset, (list(self),)
+
+ @classmethod
+ def _from_iterable(cls, it, pre_size=8):
+ return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size))
+
+ def add(self, element):
+ """
+ Return a new PSet with element added
+
+ >>> s1 = s(1, 2)
+ >>> s1.add(3)
+ pset([1, 2, 3])
+ """
+ return self.evolver().add(element).persistent()
+
+ def update(self, iterable):
+ """
+ Return a new PSet with elements in iterable added
+
+ >>> s1 = s(1, 2)
+ >>> s1.update([3, 4, 4])
+ pset([1, 2, 3, 4])
+ """
+ e = self.evolver()
+ for element in iterable:
+ e.add(element)
+
+ return e.persistent()
+
+ def remove(self, element):
+ """
+ Return a new PSet with element removed. Raises KeyError if element is not present.
+
+ >>> s1 = s(1, 2)
+ >>> s1.remove(2)
+ pset([1])
+ """
+ if element in self._map:
+ return self.evolver().remove(element).persistent()
+
+ raise KeyError("Element '%s' not present in PSet" % repr(element))
+
+ def discard(self, element):
+ """
+ Return a new PSet with element removed. Returns itself if element is not present.
+ """
+ if element in self._map:
+ return self.evolver().remove(element).persistent()
+
+ return self
+
+ class _Evolver(object):
+ __slots__ = ('_original_pset', '_pmap_evolver')
+
+ def __init__(self, original_pset):
+ self._original_pset = original_pset
+ self._pmap_evolver = original_pset._map.evolver()
+
+ def add(self, element):
+ self._pmap_evolver[element] = True
+ return self
+
+ def remove(self, element):
+ del self._pmap_evolver[element]
+ return self
+
+ def is_dirty(self):
+ return self._pmap_evolver.is_dirty()
+
+ def persistent(self):
+ if not self.is_dirty():
+ return self._original_pset
+
+ return PSet(self._pmap_evolver.persistent())
+
+ def __len__(self):
+ return len(self._pmap_evolver)
+
+ def copy(self):
+ return self
+
+ def evolver(self):
+ """
+ Create a new evolver for this pset. For a discussion on evolvers in general see the
+ documentation for the pvector evolver.
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> s1 = s(1, 2, 3)
+ >>> e = s1.evolver()
+ >>> _ = e.add(4)
+ >>> len(e)
+ 4
+ >>> _ = e.remove(1)
+
+ The underlying pset remains the same:
+
+ >>> s1
+ pset([1, 2, 3])
+
+        The changes are kept in the evolver. An updated pset can be created using the
+        persistent() function on the evolver.
+
+ >>> s2 = e.persistent()
+ >>> s2
+ pset([2, 3, 4])
+
+ The new pset will share data with the original pset in the same way that would have
+ been done if only using operations on the pset.
+ """
+ return PSet._Evolver(self)
+
+ # All the operations and comparisons you would expect on a set.
+ #
+ # This is not very beautiful. If we avoid inheriting from PSet we can use the
+ # __slots__ concepts (which requires a new style class) and hopefully save some memory.
+ __le__ = Set.__le__
+ __lt__ = Set.__lt__
+ __gt__ = Set.__gt__
+ __ge__ = Set.__ge__
+ __eq__ = Set.__eq__
+ __ne__ = Set.__ne__
+
+ __and__ = Set.__and__
+ __or__ = Set.__or__
+ __sub__ = Set.__sub__
+ __xor__ = Set.__xor__
+
+ issubset = __le__
+ issuperset = __ge__
+ union = __or__
+ intersection = __and__
+ difference = __sub__
+ symmetric_difference = __xor__
+
+ isdisjoint = Set.isdisjoint
+
+Set.register(PSet)
+Hashable.register(PSet)
+
+_EMPTY_PSET = PSet(pmap())
+
+
+def pset(iterable=(), pre_size=8):
+ """
+ Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that
+ used for :py:func:`pmap`.
+
+ >>> s1 = pset([1, 2, 3, 2])
+ >>> s1
+ pset([1, 2, 3])
+ """
+ if not iterable:
+ return _EMPTY_PSET
+
+ return PSet._from_iterable(iterable, pre_size=pre_size)
+
+
+def s(*elements):
+ """
+ Create a persistent set.
+
+ Takes an arbitrary number of arguments to insert into the new set.
+
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s1
+ pset([1, 2, 3])
+ """
+ return pset(elements)
diff --git a/lib/spack/external/_vendoring/pyrsistent/_pvector.py b/lib/spack/external/_vendoring/pyrsistent/_pvector.py
new file mode 100644
index 0000000000..2aff0e8b42
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_pvector.py
@@ -0,0 +1,711 @@
+from abc import abstractmethod, ABCMeta
+from collections.abc import Sequence, Hashable
+from numbers import Integral
+import operator
+from pyrsistent._transformations import transform
+
+
+def _bitcount(val):
+ return bin(val).count("1")
+
+BRANCH_FACTOR = 32
+BIT_MASK = BRANCH_FACTOR - 1
+SHIFT = _bitcount(BIT_MASK)
+
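+# Each trie level consumes SHIFT == 5 bits of an index, so a node holds up to
+# BRANCH_FACTOR == 32 children and a two-level trie spans 32 * 32 == 1024
+# slots. Ignoring the tail optimization, index 70 under a shift of 5 resolves
+# as root[(70 >> 5) & BIT_MASK][70 & BIT_MASK] == root[2][6].
+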
+
+def compare_pvector(v, other, operator):
+ return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
+
+
+def _index_or_slice(index, stop):
+ if stop is None:
+ return index
+
+ return slice(index, stop)
+
+
+class PythonPVector(object):
+ """
+ Support structure for PVector that implements structural sharing for vectors using a trie.
+ """
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
+
+ def __new__(cls, count, shift, root, tail):
+ self = super(PythonPVector, cls).__new__(cls)
+ self._count = count
+ self._shift = shift
+ self._root = root
+ self._tail = tail
+
+ # Derived attribute stored for performance
+ self._tail_offset = self._count - len(self._tail)
+ return self
+
+ def __len__(self):
+ return self._count
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ # There are more conditions than the below where it would be OK to
+ # return ourselves, implement those...
+ if index.start is None and index.stop is None and index.step is None:
+ return self
+
+ # This is a bit nasty realizing the whole structure as a list before
+ # slicing it but it is the fastest way I've found to date, and it's easy :-)
+ return _EMPTY_PVECTOR.extend(self.tolist()[index])
+
+ if index < 0:
+ index += self._count
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def __add__(self, other):
+ return self.extend(other)
+
+ def __repr__(self):
+ return 'pvector({0})'.format(str(self.tolist()))
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __iter__(self):
+        # This is kind of lazy and will produce some memory overhead but it is the fastest method
+        # by far of those tried since it uses the speed of the built-in python list directly.
+ return iter(self.tolist())
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
+
+ def __gt__(self, other):
+ return compare_pvector(self, other, operator.gt)
+
+ def __lt__(self, other):
+ return compare_pvector(self, other, operator.lt)
+
+ def __ge__(self, other):
+ return compare_pvector(self, other, operator.ge)
+
+ def __le__(self, other):
+ return compare_pvector(self, other, operator.le)
+
+ def __mul__(self, times):
+ if times <= 0 or self is _EMPTY_PVECTOR:
+ return _EMPTY_PVECTOR
+
+ if times == 1:
+ return self
+
+ return _EMPTY_PVECTOR.extend(times * self.tolist())
+
+ __rmul__ = __mul__
+
+ def _fill_list(self, node, shift, the_list):
+ if shift:
+ shift -= SHIFT
+ for n in node:
+ self._fill_list(n, shift, the_list)
+ else:
+ the_list.extend(node)
+
+ def tolist(self):
+ """
+ The fastest way to convert the vector into a python list.
+ """
+ the_list = []
+ self._fill_list(self._root, self._shift, the_list)
+ the_list.extend(self._tail)
+ return the_list
+
+ def _totuple(self):
+ """
+ Returns the content as a python tuple.
+ """
+ return tuple(self.tolist())
+
+ def __hash__(self):
+ # Taking the easy way out again...
+ return hash(self._totuple())
+
+ def transform(self, *transformations):
+ return transform(self, transformations)
+
+ def __reduce__(self):
+ # Pickling support
+ return pvector, (self.tolist(),)
+
+ def mset(self, *args):
+ if len(args) % 2:
+ raise TypeError("mset expected an even number of arguments")
+
+ evolver = self.evolver()
+ for i in range(0, len(args), 2):
+ evolver[args[i]] = args[i+1]
+
+ return evolver.persistent()
+
+ class Evolver(object):
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
+ '_extra_tail', '_cached_leafs', '_orig_pvector')
+
+ def __init__(self, v):
+ self._reset(v)
+
+ def __getitem__(self, index):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if self._count <= index < self._count + len(self._extra_tail):
+ return self._extra_tail[index - self._count]
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def _reset(self, v):
+ self._count = v._count
+ self._shift = v._shift
+ self._root = v._root
+ self._tail = v._tail
+ self._tail_offset = v._tail_offset
+ self._dirty_nodes = {}
+ self._cached_leafs = {}
+ self._extra_tail = []
+ self._orig_pvector = v
+
+ def append(self, element):
+ self._extra_tail.append(element)
+ return self
+
+ def extend(self, iterable):
+ self._extra_tail.extend(iterable)
+ return self
+
+ def set(self, index, val):
+ self[index] = val
+ return self
+
+ def __setitem__(self, index, val):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if 0 <= index < self._count:
+ node = self._cached_leafs.get(index >> SHIFT)
+ if node:
+ node[index & BIT_MASK] = val
+ elif index >= self._tail_offset:
+ if id(self._tail) not in self._dirty_nodes:
+ self._tail = list(self._tail)
+ self._dirty_nodes[id(self._tail)] = True
+ self._cached_leafs[index >> SHIFT] = self._tail
+ self._tail[index & BIT_MASK] = val
+ else:
+ self._root = self._do_set(self._shift, self._root, index, val)
+ elif self._count <= index < self._count + len(self._extra_tail):
+ self._extra_tail[index - self._count] = val
+ elif index == self._count + len(self._extra_tail):
+ self._extra_tail.append(val)
+ else:
+ raise IndexError("Index out of range: %s" % (index,))
+
+ def _do_set(self, level, node, i, val):
+ if id(node) in self._dirty_nodes:
+ ret = node
+ else:
+ ret = list(node)
+ self._dirty_nodes[id(ret)] = True
+
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ self._cached_leafs[i >> SHIFT] = ret
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
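+
+        # Nodes already copied within this evolver are tracked in _dirty_nodes
+        # (keyed by id()), so repeated writes into the same subtree mutate the
+        # private copy in place instead of copying it again.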
+
+ def delete(self, index):
+ del self[index]
+ return self
+
+ def __delitem__(self, key):
+ if self._orig_pvector:
+ # All structural sharing bets are off, base evolver on _extra_tail only
+ l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
+ l.extend(self._extra_tail)
+ self._reset(_EMPTY_PVECTOR)
+ self._extra_tail = l
+
+ del self._extra_tail[key]
+
+ def persistent(self):
+ result = self._orig_pvector
+ if self.is_dirty():
+ result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
+ self._reset(result)
+
+ return result
+
+ def __len__(self):
+ return self._count + len(self._extra_tail)
+
+ def is_dirty(self):
+ return bool(self._dirty_nodes or self._extra_tail)
+
+ def evolver(self):
+ return PythonPVector.Evolver(self)
+
+ def set(self, i, val):
+ # This method could be implemented by a call to mset() but doing so would cause
+ # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
+ # of PVector) so we're keeping this implementation for now.
+
+ if not isinstance(i, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
+
+ if i < 0:
+ i += self._count
+
+ if 0 <= i < self._count:
+ if i >= self._tail_offset:
+ new_tail = list(self._tail)
+ new_tail[i & BIT_MASK] = val
+ return PythonPVector(self._count, self._shift, self._root, new_tail)
+
+ return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
+
+ if i == self._count:
+ return self.append(val)
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _do_set(self, level, node, i, val):
+ ret = list(node)
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
+
+ @staticmethod
+ def _node_for(pvector_like, i):
+ if 0 <= i < pvector_like._count:
+ if i >= pvector_like._tail_offset:
+ return pvector_like._tail
+
+ node = pvector_like._root
+ for level in range(pvector_like._shift, 0, -SHIFT):
+ node = node[(i >> level) & BIT_MASK] # >>>
+
+ return node
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _create_new_root(self):
+ new_shift = self._shift
+
+ # Overflow root?
+ if (self._count >> SHIFT) > (1 << self._shift): # >>>
+ new_root = [self._root, self._new_path(self._shift, self._tail)]
+ new_shift += SHIFT
+ else:
+ new_root = self._push_tail(self._shift, self._root, self._tail)
+
+ return new_root, new_shift
+
+ def append(self, val):
+ if len(self._tail) < BRANCH_FACTOR:
+ new_tail = list(self._tail)
+ new_tail.append(val)
+ return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
+
+ # Full tail, push into tree
+ new_root, new_shift = self._create_new_root()
+ return PythonPVector(self._count + 1, new_shift, new_root, [val])
+
+ def _new_path(self, level, node):
+ if level == 0:
+ return node
+
+ return [self._new_path(level - SHIFT, node)]
+
+ def _mutating_insert_tail(self):
+ self._root, self._shift = self._create_new_root()
+ self._tail = []
+
+ def _mutating_fill_tail(self, offset, sequence):
+ max_delta_len = BRANCH_FACTOR - len(self._tail)
+ delta = sequence[offset:offset + max_delta_len]
+ self._tail.extend(delta)
+ delta_len = len(delta)
+ self._count += delta_len
+ return offset + delta_len
+
+ def _mutating_extend(self, sequence):
+ offset = 0
+ sequence_len = len(sequence)
+ while offset < sequence_len:
+ offset = self._mutating_fill_tail(offset, sequence)
+ if len(self._tail) == BRANCH_FACTOR:
+ self._mutating_insert_tail()
+
+ self._tail_offset = self._count - len(self._tail)
+
+ def extend(self, obj):
+        # Mutates the new vector directly for efficiency, but that's only an
+        # implementation detail; once it is returned it should be considered immutable.
+ l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
+ if l:
+ new_vector = self.append(l[0])
+ new_vector._mutating_extend(l[1:])
+ return new_vector
+
+ return self
+
+ def _push_tail(self, level, parent, tail_node):
+ """
+        If parent is a leaf, insert the tail node directly.
+        Otherwise, if the index maps to an existing child,
+        push the node down one more level;
+        else allocate a new path.
+
+        Return node_to_insert placed in a copy of parent.
+ """
+ ret = list(parent)
+
+ if level == SHIFT:
+ ret.append(tail_node)
+ return ret
+
+ sub_index = ((self._count - 1) >> level) & BIT_MASK # >>>
+ if len(parent) > sub_index:
+ ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
+ return ret
+
+ ret.append(self._new_path(level - SHIFT, tail_node))
+ return ret
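+
+    # e.g. once the tail holds BRANCH_FACTOR elements, the next append() pushes
+    # it down as the next leaf of the trie (the first full tail becomes
+    # root[0], the second root[1], ...) and continues with a fresh one-element
+    # tail.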
+
+ def index(self, value, *args, **kwargs):
+ return self.tolist().index(value, *args, **kwargs)
+
+ def count(self, value):
+ return self.tolist().count(value)
+
+ def delete(self, index, stop=None):
+ l = self.tolist()
+ del l[_index_or_slice(index, stop)]
+ return _EMPTY_PVECTOR.extend(l)
+
+ def remove(self, value):
+ l = self.tolist()
+ l.remove(value)
+ return _EMPTY_PVECTOR.extend(l)
+
+class PVector(metaclass=ABCMeta):
+ """
+ Persistent vector implementation. Meant as a replacement for the cases where you would normally
+ use a Python list.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
+ create an instance.
+
+ Heavily influenced by the persistent vector available in Clojure. Initially this was more or
+ less just a port of the Java code for the Clojure vector. It has since been modified and to
+ some extent optimized for usage in Python.
+
+    The vector is organized as a trie; any mutating method will return a new vector that contains the changes. No
+    updates are done to the original vector. Structural sharing between vectors is applied where possible to save
+    space and to avoid making complete copies.
+
+ This structure corresponds most closely to the built in list type and is intended as a replacement. Where the
+ semantics are the same (more or less) the same function names have been used but for some cases it is not possible,
+ for example assignments.
+
+ The PVector implements the Sequence protocol and is Hashable.
+
+    Inserts are amortized O(1). Random access is O(log32(n)) where n is the size of the vector.
+
+ The following are examples of some common operations on persistent vectors:
+
+ >>> p = v(1, 2, 3)
+ >>> p2 = p.append(4)
+ >>> p3 = p2.extend([5, 6, 7])
+ >>> p
+ pvector([1, 2, 3])
+ >>> p2
+ pvector([1, 2, 3, 4])
+ >>> p3
+ pvector([1, 2, 3, 4, 5, 6, 7])
+ >>> p3[5]
+ 6
+ >>> p.set(1, 99)
+ pvector([1, 99, 3])
+ >>>
+ """
+
+ @abstractmethod
+ def __len__(self):
+ """
+ >>> len(v(1, 2, 3))
+ 3
+ """
+
+ @abstractmethod
+ def __getitem__(self, index):
+ """
+ Get value at index. Full slicing support.
+
+ >>> v1 = v(5, 6, 7, 8)
+ >>> v1[2]
+ 7
+ >>> v1[1:3]
+ pvector([6, 7])
+ """
+
+ @abstractmethod
+ def __add__(self, other):
+ """
+ >>> v1 = v(1, 2)
+ >>> v2 = v(3, 4)
+ >>> v1 + v2
+ pvector([1, 2, 3, 4])
+ """
+
+ @abstractmethod
+ def __mul__(self, times):
+ """
+ >>> v1 = v(1, 2)
+ >>> 3 * v1
+ pvector([1, 2, 1, 2, 1, 2])
+ """
+
+ @abstractmethod
+ def __hash__(self):
+ """
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v(1, 2, 3)
+ >>> hash(v1) == hash(v2)
+ True
+ """
+
+ @abstractmethod
+ def evolver(self):
+ """
+ Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
+        with "transaction like" semantics. No part of the underlying vector is updated; it is still
+        fully immutable. Furthermore, multiple evolvers created from the same pvector do not
+        interfere with each other.
+
+ You may want to use an evolver instead of working directly with the pvector in the
+ following cases:
+
+ * Multiple updates are done to the same vector and the intermediate results are of no
+          interest. In this case using an evolver may be more efficient and easier to work with.
+ * You need to pass a vector into a legacy function or a function that you have no control
+ over which performs in place mutations of lists. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+ The following example illustrates a typical workflow when working with evolvers. It also
+        displays most of the API (which I kept small by design; you should not be tempted to
+        use evolvers in excess ;-)).
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> _ = e.append(6)
+ >>> _ = e.extend([7, 8, 9])
+ >>> e[8] += 1
+ >>> len(e)
+ 9
+
+ The underlying pvector remains the same:
+
+ >>> v1
+ pvector([1, 2, 3, 4, 5])
+
+ The changes are kept in the evolver. An updated pvector can be created using the
+ persistent() function on the evolver.
+
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
+
+ The new pvector will share data with the original pvector in the same way that would have
+ been done if only using operations on the pvector.
+ """
+
+ @abstractmethod
+ def mset(self, *args):
+ """
+ Return a new vector with elements in specified positions replaced by values (multi set).
+
+ Elements on even positions in the argument list are interpreted as indexes while
+ elements on odd positions are considered values.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.mset(0, 11, 2, 33)
+ pvector([11, 2, 33])
+ """
+
+ @abstractmethod
+ def set(self, i, val):
+ """
+ Return a new vector with element at position i replaced with val. The original vector remains unchanged.
+
+ Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
+ result in an IndexError.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.set(1, 4)
+ pvector([1, 4, 3])
+ >>> v1.set(3, 4)
+ pvector([1, 2, 3, 4])
+ >>> v1.set(-1, 4)
+ pvector([1, 2, 4])
+ """
+
+ @abstractmethod
+ def append(self, val):
+ """
+ Return a new vector with val appended.
+
+ >>> v1 = v(1, 2)
+ >>> v1.append(3)
+ pvector([1, 2, 3])
+ """
+
+ @abstractmethod
+ def extend(self, obj):
+ """
+ Return a new vector with all values in obj appended to it. Obj may be another
+ PVector or any other Iterable.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.extend([4, 5])
+ pvector([1, 2, 3, 4, 5])
+ """
+
+ @abstractmethod
+ def index(self, value, *args, **kwargs):
+ """
+ Return first index of value. Additional indexes may be supplied to limit the search to a
+ sub range of the vector.
+
+ >>> v1 = v(1, 2, 3, 4, 3)
+ >>> v1.index(3)
+ 2
+ >>> v1.index(3, 3, 5)
+ 4
+ """
+
+ @abstractmethod
+ def count(self, value):
+ """
+ Return the number of times that value appears in the vector.
+
+ >>> v1 = v(1, 4, 3, 4)
+ >>> v1.count(4)
+ 2
+ """
+
+ @abstractmethod
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts. One match expression that specifies which elements to transform
+ and one transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed the original data structure is kept
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+
+ @abstractmethod
+ def delete(self, index, stop=None):
+ """
+ Delete a portion of the vector by index or range.
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> v1.delete(1)
+ pvector([1, 3, 4, 5])
+ >>> v1.delete(1, 3)
+ pvector([1, 4, 5])
+ """
+
+ @abstractmethod
+ def remove(self, value):
+ """
+ Remove the first occurrence of a value from the vector.
+
+ >>> v1 = v(1, 2, 3, 2, 1)
+ >>> v2 = v1.remove(1)
+ >>> v2
+ pvector([2, 3, 2, 1])
+ >>> v2.remove(1)
+ pvector([2, 3, 2])
+ """
+
+
+_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
+PVector.register(PythonPVector)
+Sequence.register(PVector)
+Hashable.register(PVector)
+
+def python_pvector(iterable=()):
+ """
+ Create a new persistent vector containing the elements in iterable.
+
+ >>> v1 = pvector([1, 2, 3])
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return _EMPTY_PVECTOR.extend(iterable)
+
+try:
+ # Use the C extension as underlying trie implementation if it is available
+ import os
+ if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
+ pvector = python_pvector
+ else:
+ from pvectorc import pvector
+ PVector.register(type(pvector()))
+except ImportError:
+ pvector = python_pvector
+
+
+def v(*elements):
+ """
+ Create a new persistent vector containing all parameters to this function.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return pvector(elements)
diff --git a/lib/spack/external/_vendoring/pyrsistent/_toolz.py b/lib/spack/external/_vendoring/pyrsistent/_toolz.py
new file mode 100644
index 0000000000..a7faed1653
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_toolz.py
@@ -0,0 +1,83 @@
+"""
+Functionality copied from the toolz package to avoid having
+to add toolz as a dependency.
+
+See https://github.com/pytoolz/toolz/.
+
+toolz is released under BSD licence. Below is the licence text
+from toolz as it appeared when copying the code.
+
+--------------------------------------------------------------
+
+Copyright (c) 2013 Matthew Rocklin
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of toolz nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+"""
+import operator
+from functools import reduce
+
+
+def get_in(keys, coll, default=None, no_default=False):
+ """
+ NB: This is a straight copy of the get_in implementation found in
+ the toolz library (https://github.com/pytoolz/toolz/). It works
+ with persistent data structures as well as the corresponding
+ datastructures from the stdlib.
+    data structures from the stdlib.
+ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
+
+ If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
+ ``no_default`` is specified, then it raises KeyError or IndexError.
+
+ ``get_in`` is a generalization of ``operator.getitem`` for nested data
+ structures such as dictionaries and lists.
+ >>> from pyrsistent import freeze
+ >>> transaction = freeze({'name': 'Alice',
+ ... 'purchase': {'items': ['Apple', 'Orange'],
+ ... 'costs': [0.50, 1.25]},
+ ... 'credit card': '5555-1234-1234-1234'})
+ >>> get_in(['purchase', 'items', 0], transaction)
+ 'Apple'
+ >>> get_in(['name'], transaction)
+ 'Alice'
+ >>> get_in(['purchase', 'total'], transaction)
+ >>> get_in(['purchase', 'items', 'apple'], transaction)
+ >>> get_in(['purchase', 'items', 10], transaction)
+ >>> get_in(['purchase', 'total'], transaction, 0)
+ 0
+ >>> get_in(['y'], {}, no_default=True)
+ Traceback (most recent call last):
+ ...
+ KeyError: 'y'
+ """
+ try:
+ return reduce(operator.getitem, keys, coll)
+ except (KeyError, IndexError, TypeError):
+ if no_default:
+ raise
+ return default
diff --git a/lib/spack/external/_vendoring/pyrsistent/_transformations.py b/lib/spack/external/_vendoring/pyrsistent/_transformations.py
new file mode 100644
index 0000000000..7544843aca
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/_transformations.py
@@ -0,0 +1,139 @@
+import re
+try:
+ from inspect import Parameter, signature
+except ImportError:
+ signature = None
+ from inspect import getfullargspec
+
+
+_EMPTY_SENTINEL = object()
+
+
+def inc(x):
+ """ Add one to the current value """
+ return x + 1
+
+
+def dec(x):
+ """ Subtract one from the current value """
+ return x - 1
+
+
+def discard(evolver, key):
+    """ Discard the element under key from the evolver; missing keys are ignored """
+ try:
+ del evolver[key]
+ except KeyError:
+ pass
+
+
+# Matchers
+def rex(expr):
+ """ Regular expression matcher to use together with transform functions """
+ r = re.compile(expr)
+ return lambda key: isinstance(key, str) and r.match(key)
+
+
+def ny(_):
+ """ Matcher that matches any value """
+ return True
+
+
+# Support functions
+def _chunks(l, n):
+ for i in range(0, len(l), n):
+ yield l[i:i + n]
+
+
+def transform(structure, transformations):
+ r = structure
+ for path, command in _chunks(transformations, 2):
+ r = _do_to_path(r, path, command)
+ return r
+
+
+def _do_to_path(structure, path, command):
+ if not path:
+ return command(structure) if callable(command) else command
+
+ kvs = _get_keys_and_values(structure, path[0])
+ return _update_structure(structure, kvs, path[1:], command)
+
+
+def _items(structure):
+ try:
+ return structure.items()
+ except AttributeError:
+ # Support a wider range of structures by adding a transform_items() or similar?
+ return list(enumerate(structure))
+
+
+def _get(structure, key, default):
+ try:
+ if hasattr(structure, '__getitem__'):
+ return structure[key]
+
+ return getattr(structure, key)
+
+ except (IndexError, KeyError):
+ return default
+
+
+def _get_keys_and_values(structure, key_spec):
+ if callable(key_spec):
+ # Support predicates as callable objects in the path
+ arity = _get_arity(key_spec)
+ if arity == 1:
+ # Unary predicates are called with the "key" of the path
+ # - eg a key in a mapping, an index in a sequence.
+ return [(k, v) for k, v in _items(structure) if key_spec(k)]
+ elif arity == 2:
+ # Binary predicates are called with the key and the corresponding
+ # value.
+ return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
+ else:
+ # Other arities are an error.
+ raise ValueError(
+ "callable in transform path must take 1 or 2 arguments"
+ )
+
+ # Non-callables are used as-is as a key.
+ return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
+
+
+if signature is None:
+ def _get_arity(f):
+ argspec = getfullargspec(f)
+ return len(argspec.args) - len(argspec.defaults or ())
+else:
+ def _get_arity(f):
+ return sum(
+ 1
+ for p
+ in signature(f).parameters.values()
+ if p.default is Parameter.empty
+ and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
+ )
+
+
+def _update_structure(structure, kvs, path, command):
+ from pyrsistent._pmap import pmap
+ e = structure.evolver()
+ if not path and command is discard:
+ # Do this in reverse to avoid index problems with vectors. See #92.
+ for k, v in reversed(kvs):
+ discard(e, k)
+ else:
+ for k, v in kvs:
+ is_empty = False
+ if v is _EMPTY_SENTINEL:
+ # Allow expansion of structure but make sure to cover the case
+ # when an empty pmap is added as leaf node. See #154.
+ is_empty = True
+ v = pmap()
+
+ result = _do_to_path(v, path, command)
+ if result is not v or is_empty:
+ e[k] = result
+
+ return e.persistent()
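A minimal sketch of how the transform machinery above is reached through pyrsistent's public API, assuming the upstream package name ``pyrsistent`` (which re-exports ``inc``, ``discard``, ``rex`` and ``ny``):

    from pyrsistent import freeze, thaw, inc, discard, ny, rex

    m = freeze({'scores': {'alice': 1, 'bob': 2}, 'tmp': 'x'})

    # ny matches every key at that level; inc is applied to each matched leaf.
    bumped = m.transform(['scores', ny], inc)
    assert thaw(bumped['scores']) == {'alice': 2, 'bob': 3}

    # discard removes the element addressed by the path.
    cleaned = m.transform(['tmp'], discard)
    assert 'tmp' not in cleaned

    # rex() matches string keys by regular expression.
    no_b = m.transform(['scores', rex('^b')], discard)
    assert thaw(no_b['scores']) == {'alice': 1}

Transformations are passed as (path, command) pairs, which is what the flat pair list consumed by ``_chunks`` above corresponds to.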
diff --git a/lib/spack/external/_vendoring/pyrsistent/py.typed b/lib/spack/external/_vendoring/pyrsistent/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/py.typed
diff --git a/lib/spack/external/_vendoring/pyrsistent/typing.py b/lib/spack/external/_vendoring/pyrsistent/typing.py
new file mode 100644
index 0000000000..6a86c831ba
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/typing.py
@@ -0,0 +1,80 @@
+"""Helpers for use with type annotation.
+
+Use the empty classes in this module when annotating the types of Pyrsistent
+objects, instead of using the actual collection class.
+
+For example,
+
+ from pyrsistent import pvector
+ from pyrsistent.typing import PVector
+
+ myvector: PVector[str] = pvector(['a', 'b', 'c'])
+
+"""
+from __future__ import absolute_import
+
+try:
+ from typing import Container
+ from typing import Hashable
+ from typing import Generic
+ from typing import Iterable
+ from typing import Mapping
+ from typing import Sequence
+ from typing import Sized
+ from typing import TypeVar
+
+ __all__ = [
+ 'CheckedPMap',
+ 'CheckedPSet',
+ 'CheckedPVector',
+ 'PBag',
+ 'PDeque',
+ 'PList',
+ 'PMap',
+ 'PSet',
+ 'PVector',
+ ]
+
+ T = TypeVar('T')
+ KT = TypeVar('KT')
+ VT = TypeVar('VT')
+
+ class CheckedPMap(Mapping[KT, VT], Hashable):
+ pass
+
+ # PSet.add and PSet.discard have different type signatures than that of Set.
+ class CheckedPSet(Generic[T], Hashable):
+ pass
+
+ class CheckedPVector(Sequence[T], Hashable):
+ pass
+
+ class PBag(Container[T], Iterable[T], Sized, Hashable):
+ pass
+
+ class PDeque(Sequence[T], Hashable):
+ pass
+
+ class PList(Sequence[T], Hashable):
+ pass
+
+ class PMap(Mapping[KT, VT], Hashable):
+ pass
+
+ # PSet.add and PSet.discard have different type signatures than that of Set.
+ class PSet(Generic[T], Hashable):
+ pass
+
+ class PVector(Sequence[T], Hashable):
+ pass
+
+ class PVectorEvolver(Generic[T]):
+ pass
+
+ class PMapEvolver(Generic[KT, VT]):
+ pass
+
+ class PSetEvolver(Generic[T]):
+ pass
+except ImportError:
+ pass
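A small variant of the module docstring's example, for mappings; the classes in this module only matter to a static checker such as mypy, while the runtime objects come from the ordinary constructors (assuming the upstream ``pyrsistent`` package):

    from pyrsistent import pmap
    from pyrsistent.typing import PMap

    retries: PMap[str, int] = pmap({'http': 3, 'dns': 1})

    # The annotation classes are empty shells at runtime; the concrete
    # persistent map behaves as usual.
    assert retries['http'] == 3
    assert retries.set('http', 4)['http'] == 4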
diff --git a/lib/spack/external/_vendoring/pyrsistent/typing.pyi b/lib/spack/external/_vendoring/pyrsistent/typing.pyi
new file mode 100644
index 0000000000..0221c48cc9
--- /dev/null
+++ b/lib/spack/external/_vendoring/pyrsistent/typing.pyi
@@ -0,0 +1,292 @@
+# flake8: noqa: E704
+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
+# Stubs for pyrsistent (Python 3.6)
+#
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Generic
+from typing import Hashable
+from typing import Iterator
+from typing import Iterable
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from typing import AbstractSet
+from typing import Sized
+from typing import Set
+from typing import Tuple
+from typing import TypeVar
+from typing import Type
+from typing import Union
+from typing import overload
+
+T = TypeVar('T')
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+
+
+class PMap(Mapping[KT, VT], Hashable):
+ def __add__(self, other: PMap[KT, VT]) -> PMap[KT, VT]: ...
+ def __getitem__(self, key: KT) -> VT: ...
+ def __getattr__(self, key: str) -> VT: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[KT]: ...
+ def __len__(self) -> int: ...
+ def copy(self) -> PMap[KT, VT]: ...
+ def discard(self, key: KT) -> PMap[KT, VT]: ...
+ def evolver(self) -> PMapEvolver[KT, VT]: ...
+ def iteritems(self) -> Iterable[Tuple[KT, VT]]: ...
+ def iterkeys(self) -> Iterable[KT]: ...
+ def itervalues(self) -> Iterable[VT]: ...
+ def remove(self, key: KT) -> PMap[KT, VT]: ...
+ def set(self, key: KT, val: VT) -> PMap[KT, VT]: ...
+ def transform(self, *transformations: Any) -> PMap[KT, VT]: ...
+ def update(self, *args: Mapping): ...
+ def update_with(self, update_fn: Callable[[VT, VT], VT], *args: Mapping) -> Any: ...
+
+
+class PMapEvolver(Generic[KT, VT]):
+ def __delitem__(self, key: KT) -> None: ...
+ def __getitem__(self, key: KT) -> VT: ...
+ def __len__(self) -> int: ...
+ def __setitem__(self, key: KT, val: VT) -> None: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PMap[KT, VT]: ...
+ def remove(self, key: KT) -> PMapEvolver[KT, VT]: ...
+ def set(self, key: KT, val: VT) -> PMapEvolver[KT, VT]: ...
+
+
+class PVector(Sequence[T], Hashable):
+ def __add__(self, other: PVector[T]) -> PVector[T]: ...
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PVector[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __mul__(self, other: PVector[T]) -> PVector[T]: ...
+ def append(self, val: T) -> PVector[T]: ...
+ def delete(self, index: int, stop: Optional[int]) -> PVector[T]: ...
+ def evolver(self) -> PVectorEvolver[T]: ...
+ def extend(self, obj: Iterable[T]) -> PVector[T]: ...
+ def tolist(self) -> List[T]: ...
+ def mset(self, *args: Iterable[Union[T, int]]) -> PVector[T]: ...
+ def remove(self, value: T) -> PVector[T]: ...
+ # Not compatible with MutableSequence
+ def set(self, i: int, val: T) -> PVector[T]: ...
+ def transform(self, *transformations: Any) -> PVector[T]: ...
+
+
+class PVectorEvolver(Sequence[T], Sized):
+ def __delitem__(self, i: Union[int, slice]) -> None: ...
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ # Not actually supported
+ @overload
+ def __getitem__(self, index: slice) -> PVectorEvolver[T]: ...
+ def __len__(self) -> int: ...
+ def __setitem__(self, index: int, val: T) -> None: ...
+ def append(self, val: T) -> PVectorEvolver[T]: ...
+ def delete(self, value: T) -> PVectorEvolver[T]: ...
+ def extend(self, obj: Iterable[T]) -> PVectorEvolver[T]: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PVector[T]: ...
+ def set(self, i: int, val: T) -> PVectorEvolver[T]: ...
+
+
+class PSet(AbstractSet[T], Hashable):
+ def __contains__(self, element: object) -> bool: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[T]: ...
+ def __len__(self) -> int: ...
+ def add(self, element: T) -> PSet[T]: ...
+ def copy(self) -> PSet[T]: ...
+ def difference(self, iterable: Iterable) -> PSet[T]: ...
+ def discard(self, element: T) -> PSet[T]: ...
+ def evolver(self) -> PSetEvolver[T]: ...
+ def intersection(self, iterable: Iterable) -> PSet[T]: ...
+ def issubset(self, iterable: Iterable) -> bool: ...
+ def issuperset(self, iterable: Iterable) -> bool: ...
+ def remove(self, element: T) -> PSet[T]: ...
+ def symmetric_difference(self, iterable: Iterable[T]) -> PSet[T]: ...
+ def union(self, iterable: Iterable[T]) -> PSet[T]: ...
+ def update(self, iterable: Iterable[T]) -> PSet[T]: ...
+
+
+class PSetEvolver(Generic[T], Sized):
+ def __len__(self) -> int: ...
+ def add(self, element: T) -> PSetEvolver[T]: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PSet[T]: ...
+ def remove(self, element: T) -> PSetEvolver[T]: ...
+
+
+class PBag(Generic[T], Sized, Hashable):
+ def __add__(self, other: PBag[T]) -> PBag[T]: ...
+ def __and__(self, other: PBag[T]) -> PBag[T]: ...
+ def __contains__(self, elem: object) -> bool: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[T]: ...
+ def __len__(self) -> int: ...
+ def __or__(self, other: PBag[T]) -> PBag[T]: ...
+ def __sub__(self, other: PBag[T]) -> PBag[T]: ...
+ def add(self, elem: T) -> PBag[T]: ...
+ def count(self, elem: T) -> int: ...
+ def remove(self, elem: T) -> PBag[T]: ...
+ def update(self, iterable: Iterable[T]) -> PBag[T]: ...
+
+
+class PDeque(Sequence[T], Hashable):
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PDeque[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __lt__(self, other: PDeque[T]) -> bool: ...
+ def append(self, elem: T) -> PDeque[T]: ...
+ def appendleft(self, elem: T) -> PDeque[T]: ...
+ def extend(self, iterable: Iterable[T]) -> PDeque[T]: ...
+ def extendleft(self, iterable: Iterable[T]) -> PDeque[T]: ...
+ @property
+ def left(self) -> T: ...
+ # The real return type is Integral according to what pyrsistent
+ # checks at runtime but mypy doesn't deal in numeric.*:
+ # https://github.com/python/mypy/issues/2636
+ @property
+ def maxlen(self) -> int: ...
+ def pop(self, count: int = 1) -> PDeque[T]: ...
+ def popleft(self, count: int = 1) -> PDeque[T]: ...
+ def remove(self, elem: T) -> PDeque[T]: ...
+ def reverse(self) -> PDeque[T]: ...
+ @property
+ def right(self) -> T: ...
+ def rotate(self, steps: int) -> PDeque[T]: ...
+
+
+class PList(Sequence[T], Hashable):
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PList[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __lt__(self, other: PList[T]) -> bool: ...
+ def __gt__(self, other: PList[T]) -> bool: ...
+ def cons(self, elem: T) -> PList[T]: ...
+ @property
+ def first(self) -> T: ...
+ def mcons(self, iterable: Iterable[T]) -> PList[T]: ...
+ def remove(self, elem: T) -> PList[T]: ...
+ @property
+ def rest(self) -> PList[T]: ...
+ def reverse(self) -> PList[T]: ...
+ def split(self, index: int) -> Tuple[PList[T], PList[T]]: ...
+
+T_PClass = TypeVar('T_PClass', bound='PClass')
+
+class PClass(Hashable):
+ def __new__(cls, **kwargs: Any): ...
+ def set(self: T_PClass, *args: Any, **kwargs: Any) -> T_PClass: ...
+ @classmethod
+ def create(
+ cls: Type[T_PClass],
+ kwargs: Any,
+ _factory_fields: Optional[Any] = ...,
+ ignore_extra: bool = ...,
+ ) -> T_PClass: ...
+ def serialize(self, format: Optional[Any] = ...): ...
+ def transform(self, *transformations: Any): ...
+ def __eq__(self, other: object): ...
+ def __ne__(self, other: object): ...
+ def __hash__(self): ...
+ def __reduce__(self): ...
+ def evolver(self) -> PClassEvolver: ...
+ def remove(self: T_PClass, name: Any) -> T_PClass: ...
+
+class PClassEvolver:
+ def __init__(self, original: Any, initial_dict: Any) -> None: ...
+ def __getitem__(self, item: Any): ...
+ def set(self, key: Any, value: Any): ...
+ def __setitem__(self, key: Any, value: Any) -> None: ...
+ def remove(self, item: Any): ...
+ def __delitem__(self, item: Any) -> None: ...
+ def persistent(self) -> PClass: ...
+ def __getattr__(self, item: Any): ...
+
+
+
+class CheckedPMap(PMap[KT, VT]):
+ __key_type__: Type[KT]
+ __value_type__: Type[VT]
+ def __new__(cls, source: Mapping[KT, VT] = ..., size: int = ...) -> CheckedPMap: ...
+ @classmethod
+ def create(cls, source_data: Mapping[KT, VT], _factory_fields: Any = ...) -> CheckedPMap[KT, VT]: ...
+ def serialize(self, format: Optional[Any] = ...) -> Dict[KT, VT]: ...
+
+
+class CheckedPVector(PVector[T]):
+ __type__: Type[T]
+ def __new__(cls, initial: Iterable[T] = ...) -> CheckedPVector: ...
+ @classmethod
+ def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPVector[T]: ...
+ def serialize(self, format: Optional[Any] = ...) -> List[T]: ...
+
+
+class CheckedPSet(PSet[T]):
+ __type__: Type[T]
+ def __new__(cls, initial: Iterable[T] = ...) -> CheckedPSet: ...
+ @classmethod
+ def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPSet[T]: ...
+ def serialize(self, format: Optional[Any] = ...) -> Set[T]: ...
+
+
+class InvariantException(Exception):
+ invariant_errors: Tuple[Any, ...] = ... # possibly nested tuple
+ missing_fields: Tuple[str, ...] = ...
+ def __init__(
+ self,
+ error_codes: Any = ...,
+ missing_fields: Any = ...,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
+
+
+class CheckedTypeError(TypeError):
+ source_class: Type[Any]
+ expected_types: Tuple[Any, ...]
+ actual_type: Type[Any]
+ actual_value: Any
+ def __init__(
+ self,
+ source_class: Any,
+ expected_types: Any,
+ actual_type: Any,
+ actual_value: Any,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
+
+
+class CheckedKeyTypeError(CheckedTypeError): ...
+class CheckedValueTypeError(CheckedTypeError): ...
+class CheckedType: ...
+
+
+class PTypeError(TypeError):
+ source_class: Type[Any] = ...
+ field: str = ...
+ expected_types: Tuple[Any, ...] = ...
+ actual_type: Type[Any] = ...
+ def __init__(
+ self,
+ source_class: Any,
+ field: Any,
+ expected_types: Any,
+ actual_type: Any,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
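The evolver stubs above (``is_dirty``/``persistent``/``set``) describe pyrsistent's batched-update protocol. A minimal sketch, assuming the upstream ``pyrsistent`` package:

    from pyrsistent import pvector

    v = pvector([1, 2, 3])
    e = v.evolver()        # mutable view over the persistent vector
    e[1] = 20
    e.append(4)
    assert e.is_dirty()    # pending changes not yet frozen

    v2 = e.persistent()    # freeze back into a new PVector
    assert list(v2) == [1, 20, 3, 4]
    assert list(v) == [1, 2, 3]  # the original is untouched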
diff --git a/lib/spack/external/_vendoring/six.LICENSE b/lib/spack/external/_vendoring/six.LICENSE
new file mode 100644
index 0000000000..de6633112c
--- /dev/null
+++ b/lib/spack/external/_vendoring/six.LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2010-2020 Benjamin Peterson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/spack/external/_vendoring/six.py b/lib/spack/external/_vendoring/six.py
new file mode 100644
index 0000000000..4e15675d8b
--- /dev/null
+++ b/lib/spack/external/_vendoring/six.py
@@ -0,0 +1,998 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP 302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP 451).
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same as what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on the ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on the
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
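A minimal sketch tying together the main facilities defined above: the version flags, the lazy ``six.moves`` namespace, the text/bytes helpers, and ``with_metaclass`` (assuming ``six`` is importable under its upstream name rather than the vendored path):

    import six
    from six.moves.urllib.parse import urlparse  # resolves to urllib.parse on Python 3

    assert six.PY3 == (not six.PY2)
    assert urlparse('https://example.com/x?q=1').netloc == 'example.com'

    # ensure_* coerce between text and bytes in either direction.
    assert six.ensure_text(b'abc') == 'abc'
    assert six.ensure_binary('abc') == b'abc'

    # with_metaclass declares a metaclass with one syntax for Python 2 and 3.
    class Meta(type):
        pass

    class Widget(six.with_metaclass(Meta, object)):
        pass

    assert type(Widget) is Meta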
diff --git a/lib/spack/external/_vendoring/six/__init__.pyi b/lib/spack/external/_vendoring/six/__init__.pyi
new file mode 100644
index 0000000000..e5c0e24227
--- /dev/null
+++ b/lib/spack/external/_vendoring/six/__init__.pyi
@@ -0,0 +1 @@
+from six import *
\ No newline at end of file
diff --git a/lib/spack/external/_vendoring/six/moves/__init__.pyi b/lib/spack/external/_vendoring/six/moves/__init__.pyi
new file mode 100644
index 0000000000..7a82f79db6
--- /dev/null
+++ b/lib/spack/external/_vendoring/six/moves/__init__.pyi
@@ -0,0 +1 @@
+from six.moves import *
\ No newline at end of file
diff --git a/lib/spack/external/_vendoring/six/moves/configparser.pyi b/lib/spack/external/_vendoring/six/moves/configparser.pyi
new file mode 100644
index 0000000000..f77b3f4105
--- /dev/null
+++ b/lib/spack/external/_vendoring/six/moves/configparser.pyi
@@ -0,0 +1 @@
+from six.moves.configparser import *
\ No newline at end of file