Diffstat (limited to 'lib/spack/external/jinja2/ext.py')
-rw-r--r--  lib/spack/external/jinja2/ext.py  387
1 file changed, 232 insertions(+), 155 deletions(-)
diff --git a/lib/spack/external/jinja2/ext.py b/lib/spack/external/jinja2/ext.py
index 0734a84f73..9141be4dac 100644
--- a/lib/spack/external/jinja2/ext.py
+++ b/lib/spack/external/jinja2/ext.py
@@ -1,42 +1,49 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.ext
- ~~~~~~~~~~
-
- Jinja extensions allow to add custom tags similar to the way django custom
- tags work. By default two example extensions exist: an i18n and a cache
- extension.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
+"""Extension API for adding custom tags and behavior."""
+import pprint
import re
-
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.environment import Environment
-from jinja2.runtime import concat
-from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.utils import contextfunction, import_string, Markup
-from jinja2._compat import with_metaclass, string_types, iteritems
-
+from sys import version_info
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import with_metaclass
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .nodes import ContextReference
+from .runtime import concat
+from .utils import contextfunction
+from .utils import import_string
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
+
+_ws_re = re.compile(r"\s*\n\s*")
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- rv.identifier = rv.__module__ + '.' + rv.__name__
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
+ rv.identifier = rv.__module__ + "." + rv.__name__
return rv
@@ -91,10 +98,6 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
-
- In the `ext` folder of the Jinja2 source distribution there is a file
- called `inlinegettext.py` which implements a filter that utilizes this
- method.
"""
return stream
@@ -116,8 +119,9 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
- def call_method(self, name, args=None, kwargs=None, dyn_args=None,
- dyn_kwargs=None, lineno=None):
+ def call_method(
+ self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
+ ):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
@@ -125,13 +129,19 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
args = []
if kwargs is None:
kwargs = []
- return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
- dyn_args, dyn_kwargs, lineno=lineno)
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve('gettext'), *args, **kwargs)
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func):
@@ -140,24 +150,31 @@ def _make_new_gettext(func):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping
return rv % variables
+
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault('num', __num)
+ variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
return rv % variables
+
return ngettext
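Both wrappers above unconditionally apply %-style formatting to the translated string, which is why literal percent signs in translatable text must be escaped as %%. A minimal sketch of the same behavior in plain Python (illustrative strings, not taken from this diff):

    # The translated string is always treated as a format string,
    # so placeholders are filled from the keyword variables.
    translated = "%(num)d items (100%% organic)"
    print(translated % {"num": 3})  # -> "3 items (100% organic)"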
class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja2."""
- tags = set(['trans'])
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
@@ -168,30 +185,28 @@ class InternationalizationExtension(Extension):
def __init__(self, environment):
Extension.__init__(self, environment)
- environment.globals['_'] = _gettext_alias
+ environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
- newstyle_gettext=False
+ newstyle_gettext=False,
)
def _install(self, translations, newstyle=None):
- gettext = getattr(translations, 'ugettext', None)
+ gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
- ngettext = getattr(translations, 'ungettext', None)
+ ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
- lambda x: x,
- lambda s, p, n: (n != 1 and (p,) or (s,))[0],
- newstyle
+ lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
@@ -200,13 +215,10 @@ class InternationalizationExtension(Extension):
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(
- gettext=gettext,
- ngettext=ngettext
- )
+ self.environment.globals.update(gettext=gettext, ngettext=ngettext)
def _uninstall(self, translations):
- for key in 'gettext', 'ngettext':
+ for key in "gettext", "ngettext":
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
@@ -226,41 +238,44 @@ class InternationalizationExtension(Extension):
plural_expr_assignment = None
variables = {}
trimmed = None
- while parser.stream.current.type != 'block_end':
+ while parser.stream.current.type != "block_end":
if variables:
- parser.stream.expect('comma')
+ parser.stream.expect("comma")
# skip colon for python compatibility
- if parser.stream.skip_if('colon'):
+ if parser.stream.skip_if("colon"):
break
- name = parser.stream.expect('name')
+ name = parser.stream.expect("name")
if name.value in variables:
- parser.fail('translatable variable %r defined twice.' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "translatable variable %r defined twice." % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
# expressions
- if parser.stream.current.type == 'assign':
+ if parser.stream.current.type == "assign":
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
- trimmed = name.value == 'trimmed'
+ elif trimmed is None and name.value in ("trimmed", "notrimmed"):
+ trimmed = name.value == "trimmed"
continue
else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
+ variables[name.value] = var = nodes.Name(name.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
- plural_expr = nodes.Name('_trans', 'load')
+ plural_expr = nodes.Name("_trans", "load")
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
- nodes.Name('_trans', 'store'), var)
+ nodes.Name("_trans", "store"), var
+ )
else:
plural_expr = var
- num_called_num = name.value == 'num'
+ num_called_num = name.value == "num"
- parser.stream.expect('block_end')
+ parser.stream.expect("block_end")
plural = None
have_plural = False
@@ -271,22 +286,24 @@ class InternationalizationExtension(Extension):
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
- num_called_num = singular_names[0] == 'num'
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
+ if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
- if parser.stream.current.type != 'block_end':
- name = parser.stream.expect('name')
+ if parser.stream.current.type != "block_end":
+ name = parser.stream.expect("name")
if name.value not in variables:
- parser.fail('unknown variable %r for pluralization' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "unknown variable %r for pluralization" % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
plural_expr = variables[name.value]
- num_called_num = name.value == 'num'
- parser.stream.expect('block_end')
+ num_called_num = name.value == "num"
+ parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
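The parsing above accepts an optional {% pluralize %} branch inside a {% trans %} block. A minimal usage sketch, assuming the i18n extension is enabled and null translations are installed (the template string and variable names are illustrative):

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.i18n"])
    env.install_null_translations(newstyle=True)

    template = env.from_string(
        "{% trans count=users|length %}"
        "One user."
        "{% pluralize %}"
        "{{ count }} users."
        "{% endtrans %}"
    )
    print(template.render(users=["a", "b"]))  # -> "2 users."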
@@ -296,88 +313,97 @@ class InternationalizationExtension(Extension):
# register free names as simple name expressions
for var in referenced:
if var not in variables:
- variables[var] = nodes.Name(var, 'load')
+ variables[var] = nodes.Name(var, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
- parser.fail('pluralize without variables', lineno)
+ parser.fail("pluralize without variables", lineno)
if trimmed is None:
- trimmed = self.environment.policies['ext.i18n.trimmed']
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
- node = self._make_node(singular, plural, variables, plural_expr,
- bool(referenced),
- num_called_num and have_plural)
+ node = self._make_node(
+ singular,
+ plural,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
- def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
- return _ws_re.sub(' ', string.strip())
+ def _trim_whitespace(self, string, _ws_re=_ws_re):
+ return _ws_re.sub(" ", string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
- if parser.stream.current.type == 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
- elif parser.stream.current.type == 'variable_begin':
+ elif parser.stream.current.type == "variable_begin":
next(parser.stream)
- name = parser.stream.expect('name').value
+ name = parser.stream.expect("name").value
referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type == 'block_begin':
+ buf.append("%%(%s)s" % name)
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
next(parser.stream)
- if parser.stream.current.test('name:endtrans'):
+ if parser.stream.current.test("name:endtrans"):
break
- elif parser.stream.current.test('name:pluralize'):
+ elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
- parser.fail('a translatable section can have only one '
- 'pluralize section')
- parser.fail('control structures in translatable sections are '
- 'not allowed')
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ parser.fail(
+ "control structures in translatable sections are not allowed"
+ )
elif parser.stream.eos:
- parser.fail('unclosed translation block')
+ parser.fail("unclosed translation block")
else:
- assert False, 'internal parser error'
+ raise RuntimeError("internal parser error")
return referenced, concat(buf)
- def _make_node(self, singular, plural, variables, plural_expr,
- vars_referenced, num_called_num):
+ def _make_node(
+ self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
+ ):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace('%%', '%')
+ singular = singular.replace("%%", "%")
if plural:
- plural = plural.replace('%%', '%')
+ plural = plural.replace("%%", "%")
# singular only:
if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
+ gettext = nodes.Name("gettext", "load")
+ node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
# singular and plural
else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
+ ngettext = nodes.Name("ngettext", "load")
+ node = nodes.Call(
+ ngettext,
+ [nodes.Const(singular), nodes.Const(plural), plural_expr],
+ [],
+ None,
+ None,
+ )
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
@@ -386,7 +412,7 @@ class InternationalizationExtension(Extension):
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
- if num_called_num and key == 'num':
+ if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
@@ -396,18 +422,24 @@ class InternationalizationExtension(Extension):
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
- node = nodes.Mod(node, nodes.Dict([
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]))
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
return nodes.Output([node])
class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja2 that works like the print statement just
+ """Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
- tags = set(['do'])
+
+ tags = set(["do"])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
@@ -417,11 +449,12 @@ class ExprStmtExtension(Extension):
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
- tags = set(['break', 'continue'])
+
+ tags = set(["break", "continue"])
def parse(self, parser):
token = next(parser.stream)
- if token.value == 'break':
+ if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
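For reference, a short sketch of the break/continue tags this extension provides (illustrative template string):

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.loopcontrols"])
    template = env.from_string(
        "{% for n in range(10) %}"
        "{% if n >= 3 %}{% break %}{% endif %}{{ n }}"
        "{% endfor %}"
    )
    print(template.render())  # -> "012"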
@@ -434,8 +467,50 @@ class AutoEscapeExtension(Extension):
pass
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
- babel_style=True):
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+ <pre>{% debug %}</pre>
+
+ .. code-block:: text
+
+ {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
+ ...,
+ 'namespace': <class 'jinja2.utils.Namespace'>},
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser):
+ lineno = parser.stream.expect("name:debug").lineno
+ context = ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context):
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ if version_info[:2] >= (3, 4):
+ return pprint.pformat(result, depth=3, compact=True)
+ else:
+ return pprint.pformat(result, depth=3)
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
@@ -471,19 +546,20 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
continue
strings = []
for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, string_types):
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
- for arg in node.kwargs:
+ for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
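A minimal sketch of calling extract_from_ast directly on a parsed template (hypothetical template string; babel-style output shown):

    from jinja2 import Environment
    from jinja2.ext import extract_from_ast

    env = Environment()
    node = env.parse('{{ _("Hello") }} {{ ngettext("%(n)d item", "%(n)d items", n) }}')
    for lineno, funcname, message in extract_from_ast(node):
        print(lineno, funcname, message)
    # -> 1 _ Hello
    # -> 1 ngettext ('%(n)d item', '%(n)d items', None)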
@@ -517,9 +593,10 @@ class _CommentFinder(object):
def find_backwards(self, offset):
try:
- for _, token_type, token_value in \
- reversed(self.tokens[self.offset:offset]):
- if token_type in ('comment', 'linecomment'):
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
@@ -533,7 +610,7 @@ class _CommentFinder(object):
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
@@ -545,7 +622,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
- try to find the best preceeding comment that begins with one of the
+ try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
@@ -568,7 +645,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
(comments will be empty currently)
"""
extensions = set()
- for extension in options.get('extensions', '').split(','):
+ for extension in options.get("extensions", "").split(","):
extension = extension.strip()
if not extension:
continue
@@ -577,38 +654,37 @@ def babel_extract(fileobj, keywords, comment_tags, options):
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in \
- ('1', 'on', 'yes', 'true')
+ return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
- silent = getbool(options, 'silent', True)
+ silent = getbool(options, "silent", True)
environment = Environment(
- options.get('block_start_string', BLOCK_START_STRING),
- options.get('block_end_string', BLOCK_END_STRING),
- options.get('variable_start_string', VARIABLE_START_STRING),
- options.get('variable_end_string', VARIABLE_END_STRING),
- options.get('comment_start_string', COMMENT_START_STRING),
- options.get('comment_end_string', COMMENT_END_STRING),
- options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
- options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
- getbool(options, 'trim_blocks', TRIM_BLOCKS),
- getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+ options.get("block_start_string", BLOCK_START_STRING),
+ options.get("block_end_string", BLOCK_END_STRING),
+ options.get("variable_start_string", VARIABLE_START_STRING),
+ options.get("variable_end_string", VARIABLE_END_STRING),
+ options.get("comment_start_string", COMMENT_START_STRING),
+ options.get("comment_end_string", COMMENT_END_STRING),
+ options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
- getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+ getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
- auto_reload=False
+ auto_reload=False,
)
- if getbool(options, 'trimmed'):
- environment.policies['ext.i18n.trimmed'] = True
- if getbool(options, 'newstyle_gettext'):
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True
- source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError as e:
+ except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
@@ -625,3 +701,4 @@ do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
+debug = DebugExtension
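With this alias in place, a usage sketch of the new debug extension would look like the following (illustrative; not part of the diff):

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.debug"])
    template = env.from_string("<pre>{% debug %}</pre>")
    print(template.render())  # dumps context names, filters, and tests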