Diffstat (limited to 'lib')
-rw-r--r-- lib/spack/docs/analyze.rst | 2
-rw-r--r-- lib/spack/docs/build_systems/autotoolspackage.rst | 18
-rw-r--r-- lib/spack/docs/build_systems/cmakepackage.rst | 14
-rw-r--r-- lib/spack/docs/build_systems/pythonpackage.rst | 13
-rw-r--r-- lib/spack/docs/conf.py | 1
-rw-r--r-- lib/spack/docs/contribution_guide.rst | 2
-rw-r--r-- lib/spack/docs/developer_guide.rst | 11
-rw-r--r-- lib/spack/docs/module_file_support.rst | 144
-rw-r--r-- lib/spack/docs/tables/system_prerequisites.csv | 2
-rwxr-xr-x lib/spack/env/cc | 2
l--------- lib/spack/env/oneapi/dpcpp | 1
-rw-r--r-- lib/spack/external/__init__.py | 54
-rw-r--r-- lib/spack/external/attr/LICENSE | 21
-rw-r--r-- lib/spack/external/attr/__init__.py | 78
-rw-r--r-- lib/spack/external/attr/_cmp.py | 152
-rw-r--r-- lib/spack/external/attr/_compat.py | 242
-rw-r--r-- lib/spack/external/attr/_config.py | 23
-rw-r--r-- lib/spack/external/attr/_funcs.py | 395
-rw-r--r-- lib/spack/external/attr/_make.py | 3052
-rw-r--r-- lib/spack/external/attr/_next_gen.py | 158
-rw-r--r-- lib/spack/external/attr/_version_info.py | 85
-rw-r--r-- lib/spack/external/attr/converters.py | 111
-rw-r--r-- lib/spack/external/attr/exceptions.py | 92
-rw-r--r-- lib/spack/external/attr/filters.py | 52
-rw-r--r-- lib/spack/external/attr/setters.py | 77
-rw-r--r-- lib/spack/external/attr/validators.py | 379
-rw-r--r-- lib/spack/external/ctest_log_parser.py | 4
-rw-r--r-- lib/spack/external/distro.py | 716
-rw-r--r-- lib/spack/external/functools_backport.py | 47
-rw-r--r-- lib/spack/external/jinja2/LICENSE.rst | 28
-rw-r--r-- lib/spack/external/jinja2/__init__.py | 123
-rw-r--r-- lib/spack/external/jinja2/_compat.py | 67
-rw-r--r-- lib/spack/external/jinja2/_identifier.py | 6
-rw-r--r-- lib/spack/external/jinja2/asyncfilters.py | 70
-rw-r--r-- lib/spack/external/jinja2/asyncsupport.py | 186
-rw-r--r-- lib/spack/external/jinja2/bccache.py | 142
-rw-r--r-- lib/spack/external/jinja2/compiler.py | 1242
-rw-r--r-- lib/spack/external/jinja2/constants.py | 15
-rw-r--r-- lib/spack/external/jinja2/debug.py | 522
-rw-r--r-- lib/spack/external/jinja2/defaults.py | 68
-rw-r--r-- lib/spack/external/jinja2/environment.py | 592
-rw-r--r-- lib/spack/external/jinja2/exceptions.py | 73
-rw-r--r-- lib/spack/external/jinja2/ext.py | 387
-rw-r--r-- lib/spack/external/jinja2/filters.py | 716
-rw-r--r-- lib/spack/external/jinja2/idtracking.py | 58
-rw-r--r-- lib/spack/external/jinja2/lexer.py | 775
-rw-r--r-- lib/spack/external/jinja2/loaders.py | 135
-rw-r--r-- lib/spack/external/jinja2/meta.py | 33
-rw-r--r-- lib/spack/external/jinja2/nativetypes.py | 248
-rw-r--r-- lib/spack/external/jinja2/nodes.py | 427
-rw-r--r-- lib/spack/external/jinja2/optimizer.py | 60
-rw-r--r-- lib/spack/external/jinja2/parser.py | 568
-rw-r--r-- lib/spack/external/jinja2/runtime.py | 704
-rw-r--r-- lib/spack/external/jinja2/sandbox.py | 239
-rw-r--r-- lib/spack/external/jinja2/tests.py | 145
-rw-r--r-- lib/spack/external/jinja2/utils.py | 410
-rw-r--r-- lib/spack/external/jinja2/visitor.py | 14
-rw-r--r-- lib/spack/external/jsonschema/README.rst | 104
-rw-r--r-- lib/spack/external/jsonschema/__init__.py | 31
-rw-r--r-- lib/spack/external/jsonschema/_format.py | 287
-rw-r--r-- lib/spack/external/jsonschema/_legacy_validators.py | 141
-rw-r--r-- lib/spack/external/jsonschema/_types.py | 188
-rw-r--r-- lib/spack/external/jsonschema/_utils.py | 29
-rw-r--r-- lib/spack/external/jsonschema/_validators.py | 279
-rw-r--r-- lib/spack/external/jsonschema/cli.py | 36
-rw-r--r-- lib/spack/external/jsonschema/compat.py | 46
-rw-r--r-- lib/spack/external/jsonschema/exceptions.py | 164
-rw-r--r-- lib/spack/external/jsonschema/schemas/draft3.json | 4
-rw-r--r-- lib/spack/external/jsonschema/schemas/draft4.json | 7
-rw-r--r-- lib/spack/external/jsonschema/schemas/draft6.json | 153
-rw-r--r-- lib/spack/external/jsonschema/schemas/draft7.json | 166
-rw-r--r-- lib/spack/external/jsonschema/tests/__init__.py | 0
-rw-r--r-- lib/spack/external/jsonschema/tests/compat.py | 15
-rw-r--r-- lib/spack/external/jsonschema/tests/test_cli.py | 110
-rw-r--r-- lib/spack/external/jsonschema/tests/test_exceptions.py | 382
-rw-r--r-- lib/spack/external/jsonschema/tests/test_format.py | 63
-rw-r--r-- lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py | 290
-rw-r--r-- lib/spack/external/jsonschema/tests/test_validators.py | 786
-rw-r--r-- lib/spack/external/jsonschema/validators.py | 891
-rw-r--r-- lib/spack/external/markupsafe/AUTHORS | 13
-rw-r--r-- lib/spack/external/markupsafe/LICENSE | 33
-rw-r--r-- lib/spack/external/markupsafe/LICENSE.rst | 28
-rw-r--r-- lib/spack/external/markupsafe/README.rst | 138
-rw-r--r-- lib/spack/external/markupsafe/__init__.py | 273
-rw-r--r-- lib/spack/external/markupsafe/_compat.py | 23
-rw-r--r-- lib/spack/external/markupsafe/_constants.py | 517
-rw-r--r-- lib/spack/external/markupsafe/_native.py | 67
-rw-r--r-- lib/spack/external/ordereddict_backport.py | 22
-rw-r--r-- lib/spack/external/py2/functools32/LICENSE | 289
-rw-r--r-- lib/spack/external/py2/functools32/__init__.py | 1
-rw-r--r-- lib/spack/external/py2/functools32/_dummy_thread32.py | 158
-rw-r--r-- lib/spack/external/py2/functools32/functools32.py | 423
-rw-r--r-- lib/spack/external/py2/functools32/reprlib32.py | 157
-rw-r--r-- lib/spack/external/py26/ordereddict.py | 127
-rw-r--r-- lib/spack/external/pyrsistent/LICENSE | 22
-rw-r--r-- lib/spack/external/pyrsistent/__init__.py | 6
-rw-r--r-- lib/spack/external/pyrsistent/_compat.py | 31
-rw-r--r-- lib/spack/external/pyrsistent/_pmap.py | 460
-rw-r--r-- lib/spack/external/pyrsistent/_pvector.py | 713
-rw-r--r-- lib/spack/external/pyrsistent/_transformations.py | 143
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/LICENSE (renamed from lib/spack/external/_pytest/LICENSE) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/__init__.py (renamed from lib/spack/external/_pytest/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_argcomplete.py (renamed from lib/spack/external/_pytest/_argcomplete.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_code/__init__.py (renamed from lib/spack/external/_pytest/_code/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py (renamed from lib/spack/external/_pytest/_code/_py2traceback.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_code/code.py (renamed from lib/spack/external/_pytest/_code/code.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_code/source.py (renamed from lib/spack/external/_pytest/_code/source.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_pluggy.py (renamed from lib/spack/external/_pytest/_pluggy.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/_version.py (renamed from lib/spack/external/_pytest/_version.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py (renamed from lib/spack/external/_pytest/assertion/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py (renamed from lib/spack/external/_pytest/assertion/rewrite.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py (renamed from lib/spack/external/_pytest/assertion/truncate.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/assertion/util.py (renamed from lib/spack/external/_pytest/assertion/util.py) | 0
-rwxr-xr-x lib/spack/external/pytest-fallback/_pytest/cacheprovider.py (renamed from lib/spack/external/_pytest/cacheprovider.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/capture.py (renamed from lib/spack/external/_pytest/capture.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/compat.py (renamed from lib/spack/external/_pytest/compat.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/config.py (renamed from lib/spack/external/_pytest/config.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/debugging.py (renamed from lib/spack/external/_pytest/debugging.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/deprecated.py (renamed from lib/spack/external/_pytest/deprecated.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/doctest.py (renamed from lib/spack/external/_pytest/doctest.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/fixtures.py (renamed from lib/spack/external/_pytest/fixtures.py) | 8
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/freeze_support.py (renamed from lib/spack/external/_pytest/freeze_support.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/helpconfig.py (renamed from lib/spack/external/_pytest/helpconfig.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/hookspec.py (renamed from lib/spack/external/_pytest/hookspec.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/junitxml.py (renamed from lib/spack/external/_pytest/junitxml.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/main.py (renamed from lib/spack/external/_pytest/main.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/mark.py (renamed from lib/spack/external/_pytest/mark.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/monkeypatch.py (renamed from lib/spack/external/_pytest/monkeypatch.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/nodes.py (renamed from lib/spack/external/_pytest/nodes.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/nose.py (renamed from lib/spack/external/_pytest/nose.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/outcomes.py (renamed from lib/spack/external/_pytest/outcomes.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/pastebin.py (renamed from lib/spack/external/_pytest/pastebin.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/pytester.py (renamed from lib/spack/external/_pytest/pytester.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/python.py (renamed from lib/spack/external/_pytest/python.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/python_api.py (renamed from lib/spack/external/_pytest/python_api.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/recwarn.py (renamed from lib/spack/external/_pytest/recwarn.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/resultlog.py (renamed from lib/spack/external/_pytest/resultlog.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/runner.py (renamed from lib/spack/external/_pytest/runner.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/setuponly.py (renamed from lib/spack/external/_pytest/setuponly.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/setupplan.py (renamed from lib/spack/external/_pytest/setupplan.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/skipping.py (renamed from lib/spack/external/_pytest/skipping.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/terminal.py (renamed from lib/spack/external/_pytest/terminal.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/tmpdir.py (renamed from lib/spack/external/_pytest/tmpdir.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/unittest.py (renamed from lib/spack/external/_pytest/unittest.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md (renamed from lib/spack/external/_pytest/vendored_packages/README.md) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py (renamed from lib/spack/external/_pytest/vendored_packages/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt (renamed from lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py (renamed from lib/spack/external/_pytest/vendored_packages/pluggy.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/_pytest/warnings.py (renamed from lib/spack/external/_pytest/warnings.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/__init__.py (renamed from lib/spack/external/py/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/__metainfo.py (renamed from lib/spack/external/py/__metainfo.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_apipkg.py (renamed from lib/spack/external/py/_apipkg.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_builtin.py (renamed from lib/spack/external/py/_builtin.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/__init__.py (renamed from lib/spack/external/py/_code/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/_assertionnew.py (renamed from lib/spack/external/py/_code/_assertionnew.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/_assertionold.py (renamed from lib/spack/external/py/_code/_assertionold.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/_py2traceback.py (renamed from lib/spack/external/py/_code/_py2traceback.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/assertion.py (renamed from lib/spack/external/py/_code/assertion.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/code.py (renamed from lib/spack/external/py/_code/code.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_code/source.py (renamed from lib/spack/external/py/_code/source.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_error.py (renamed from lib/spack/external/py/_error.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_iniconfig.py (renamed from lib/spack/external/py/_iniconfig.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_io/__init__.py (renamed from lib/spack/external/py/_io/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_io/capture.py (renamed from lib/spack/external/py/_io/capture.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_io/saferepr.py (renamed from lib/spack/external/py/_io/saferepr.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_io/terminalwriter.py (renamed from lib/spack/external/py/_io/terminalwriter.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_log/__init__.py (renamed from lib/spack/external/py/_log/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_log/log.py (renamed from lib/spack/external/py/_log/log.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_log/warning.py (renamed from lib/spack/external/py/_log/warning.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/__init__.py (renamed from lib/spack/external/py/_path/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/cacheutil.py (renamed from lib/spack/external/py/_path/cacheutil.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/common.py (renamed from lib/spack/external/py/_path/common.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/local.py (renamed from lib/spack/external/py/_path/local.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/svnurl.py (renamed from lib/spack/external/py/_path/svnurl.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_path/svnwc.py (renamed from lib/spack/external/py/_path/svnwc.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_process/__init__.py (renamed from lib/spack/external/py/_process/__init__.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_process/cmdexec.py (renamed from lib/spack/external/py/_process/cmdexec.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_process/forkedfunc.py (renamed from lib/spack/external/py/_process/forkedfunc.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_process/killproc.py (renamed from lib/spack/external/py/_process/killproc.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_std.py (renamed from lib/spack/external/py/_std.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/_xmlgen.py (renamed from lib/spack/external/py/_xmlgen.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/py/test.py (renamed from lib/spack/external/py/test.py) | 0
-rw-r--r-- lib/spack/external/pytest-fallback/pytest.py (renamed from lib/spack/external/pytest.py) | 0
-rw-r--r-- lib/spack/external/six.py | 147
-rw-r--r-- lib/spack/llnl/util/multiproc.py | 2
-rw-r--r-- lib/spack/llnl/util/tty/log.py | 2
-rw-r--r-- lib/spack/spack/analyzers/libabigail.py | 12
-rw-r--r-- lib/spack/spack/binary_distribution.py | 211
-rw-r--r-- lib/spack/spack/bootstrap.py | 453
-rw-r--r-- lib/spack/spack/build_environment.py | 7
-rw-r--r-- lib/spack/spack/build_systems/autotools.py | 6
-rw-r--r-- lib/spack/spack/build_systems/cmake.py | 7
-rw-r--r-- lib/spack/spack/build_systems/cuda.py | 3
-rw-r--r-- lib/spack/spack/build_systems/intel.py | 27
-rw-r--r-- lib/spack/spack/build_systems/oneapi.py | 41
-rw-r--r-- lib/spack/spack/build_systems/python.py | 3
-rw-r--r-- lib/spack/spack/build_systems/rocm.py | 5
-rw-r--r-- lib/spack/spack/build_systems/sip.py | 3
-rw-r--r-- lib/spack/spack/ci.py | 49
-rw-r--r-- lib/spack/spack/cmd/analyze.py | 1
-rw-r--r-- lib/spack/spack/cmd/bootstrap.py | 43
-rw-r--r-- lib/spack/spack/cmd/buildcache.py | 513
-rw-r--r-- lib/spack/spack/cmd/checksum.py | 8
-rw-r--r-- lib/spack/spack/cmd/ci.py | 34
-rw-r--r-- lib/spack/spack/cmd/common/arguments.py | 19
-rw-r--r-- lib/spack/spack/cmd/containerize.py | 1
-rw-r--r-- lib/spack/spack/cmd/create.py | 6
-rw-r--r-- lib/spack/spack/cmd/dev_build.py | 4
-rw-r--r-- lib/spack/spack/cmd/flake8.py | 25
-rw-r--r-- lib/spack/spack/cmd/install.py | 6
-rw-r--r-- lib/spack/spack/cmd/license.py | 3
-rw-r--r-- lib/spack/spack/cmd/mirror.py | 45
-rw-r--r-- lib/spack/spack/cmd/monitor.py | 1
-rw-r--r-- lib/spack/spack/cmd/style.py | 51
-rw-r--r-- lib/spack/spack/cmd/tutorial.py | 4
-rw-r--r-- lib/spack/spack/cmd/unit_test.py | 86
-rw-r--r-- lib/spack/spack/compilers/dpcpp.py | 29
-rw-r--r-- lib/spack/spack/compilers/oneapi.py | 10
-rw-r--r-- lib/spack/spack/concretize.py | 30
-rw-r--r-- lib/spack/spack/config.py | 3
-rw-r--r-- lib/spack/spack/container/writers/__init__.py | 5
-rw-r--r-- lib/spack/spack/detection/path.py | 12
-rw-r--r-- lib/spack/spack/directives.py | 3
-rw-r--r-- lib/spack/spack/environment/environment.py | 113
-rw-r--r-- lib/spack/spack/extensions.py | 5
-rw-r--r-- lib/spack/spack/fetch_strategy.py | 8
-rw-r--r-- lib/spack/spack/filesystem_view.py | 5
-rw-r--r-- lib/spack/spack/hooks/__init__.py | 1
-rw-r--r-- lib/spack/spack/hooks/monitor.py | 11
-rw-r--r-- lib/spack/spack/install_test.py | 5
-rw-r--r-- lib/spack/spack/installer.py | 6
-rw-r--r-- lib/spack/spack/mirror.py | 136
-rw-r--r-- lib/spack/spack/monitor.py | 59
-rw-r--r-- lib/spack/spack/operating_systems/linux_distro.py | 6
-rw-r--r-- lib/spack/spack/package.py | 3
-rw-r--r-- lib/spack/spack/pkgkit.py | 6
-rw-r--r-- lib/spack/spack/relocate.py | 40
-rw-r--r-- lib/spack/spack/reporters/cdash.py | 6
-rw-r--r-- lib/spack/spack/s3_handler.py | 3
-rw-r--r-- lib/spack/spack/schema/bootstrap.py | 2
-rw-r--r-- lib/spack/spack/schema/buildcache_spec.py | 2
-rw-r--r-- lib/spack/spack/schema/cdash.py | 2
-rw-r--r-- lib/spack/spack/schema/compilers.py | 2
-rw-r--r-- lib/spack/spack/schema/config.py | 2
-rw-r--r-- lib/spack/spack/schema/database_index.py | 2
-rw-r--r-- lib/spack/spack/schema/env.py | 2
-rw-r--r-- lib/spack/spack/schema/gitlab_ci.py | 2
-rw-r--r-- lib/spack/spack/schema/merged.py | 2
-rw-r--r-- lib/spack/spack/schema/mirrors.py | 6
-rw-r--r-- lib/spack/spack/schema/modules.py | 2
-rw-r--r-- lib/spack/spack/schema/packages.py | 2
-rw-r--r-- lib/spack/spack/schema/projections.py | 2
-rw-r--r-- lib/spack/spack/schema/repos.py | 2
-rw-r--r-- lib/spack/spack/schema/spec.py | 2
-rw-r--r-- lib/spack/spack/schema/upstreams.py | 2
-rw-r--r-- lib/spack/spack/spec.py | 11
-rw-r--r-- lib/spack/spack/stage.py | 20
-rw-r--r-- lib/spack/spack/store.py | 74
-rw-r--r-- lib/spack/spack/test/bootstrap.py | 27
-rw-r--r-- lib/spack/spack/test/build_distribution.py | 9
-rw-r--r-- lib/spack/spack/test/build_systems.py | 16
-rw-r--r-- lib/spack/spack/test/cc.py | 2
-rw-r--r-- lib/spack/spack/test/cmd/audit.py | 2
-rw-r--r-- lib/spack/spack/test/cmd/build_env.py | 10
-rw-r--r-- lib/spack/spack/test/cmd/checksum.py | 4
-rw-r--r-- lib/spack/spack/test/cmd/ci.py | 22
-rw-r--r-- lib/spack/spack/test/cmd/dev_build.py | 2
-rw-r--r-- lib/spack/spack/test/cmd/mirror.py | 29
-rw-r--r-- lib/spack/spack/test/cmd/style.py | 15
-rw-r--r-- lib/spack/spack/test/cmd/test.py | 29
-rw-r--r-- lib/spack/spack/test/cmd/unit_test.py | 5
-rw-r--r-- lib/spack/spack/test/cmd/url.py | 11
-rw-r--r-- lib/spack/spack/test/compilers/detection.py | 5
-rw-r--r-- lib/spack/spack/test/concretize_preferences.py | 10
-rw-r--r-- lib/spack/spack/test/conftest.py | 12
-rw-r--r-- lib/spack/spack/test/database.py | 15
-rw-r--r-- lib/spack/spack/test/directives.py | 9
-rw-r--r-- lib/spack/spack/test/llnl/util/tty/log.py | 22
-rw-r--r-- lib/spack/spack/test/monitor.py | 28
-rw-r--r-- lib/spack/spack/test/relocate.py | 52
-rw-r--r-- lib/spack/spack/test/stage.py | 24
-rw-r--r-- lib/spack/spack/test/web.py | 33
-rw-r--r-- lib/spack/spack/util/mock_package.py | 6
-rw-r--r-- lib/spack/spack/util/s3.py | 44
-rw-r--r-- lib/spack/spack/util/spack_yaml.py | 4
-rw-r--r-- lib/spack/spack/util/web.py | 10
293 files changed, 16848 insertions, 7908 deletions
diff --git a/lib/spack/docs/analyze.rst b/lib/spack/docs/analyze.rst
index 38af77cd7f..2df48d1e76 100644
--- a/lib/spack/docs/analyze.rst
+++ b/lib/spack/docs/analyze.rst
@@ -59,7 +59,7 @@ are available:
install_files : install file listing read from install_manifest.json
environment_variables : environment variables parsed from spack-build-env.txt
config_args : config args loaded from spack-configure-args.txt
- abigail : Application Binary Interface (ABI) features for objects
+ libabigail : Application Binary Interface (ABI) features for objects
In the above, the first three are fairly simple - parsing metadata files from
diff --git a/lib/spack/docs/build_systems/autotoolspackage.rst b/lib/spack/docs/build_systems/autotoolspackage.rst
index 71d8d7d866..d62fb08ce8 100644
--- a/lib/spack/docs/build_systems/autotoolspackage.rst
+++ b/lib/spack/docs/build_systems/autotoolspackage.rst
@@ -420,6 +420,24 @@ Or when one variant controls multiple flags:
config_args += self.with_or_without('memchecker', variant='debug_tools')
config_args += self.with_or_without('profiler', variant='debug_tools')
+
+""""""""""""""""""""
+Conditional variants
+""""""""""""""""""""
+
+When a variant is conditional and its condition is not met on the concrete spec, the
+``with_or_without`` and ``enable_or_disable`` methods will simply return an empty list.
+
+For example:
+
+.. code-block:: python
+
+ variant('profiler', when='@2.0:')
+ config_args += self.with_or_without('profiler')
+
+will neither add ``--with-profiler`` nor ``--without-profiler`` when the version is
+below ``2.0``.
+
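+As a fuller sketch, a hypothetical recipe might combine a conditional variant with
+``enable_or_disable`` like this (the package, versions, and variant below are made
+up for illustration):
+
+.. code-block:: python
+
+   class Example(AutotoolsPackage):
+       """Hypothetical package whose profiler option appeared in 2.0."""
+
+       version('2.1')
+       version('1.9')
+
+       # The variant only exists on specs that satisfy @2.0:
+       variant('profiler', default=False, when='@2.0:')
+
+       def configure_args(self):
+           # ['--enable-profiler'] or ['--disable-profiler'] at 2.0 and
+           # later; an empty list on older versions, where the variant is
+           # absent from the concrete spec.
+           return self.enable_or_disable('profiler')
+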
""""""""""""""""""""
Activation overrides
""""""""""""""""""""
diff --git a/lib/spack/docs/build_systems/cmakepackage.rst b/lib/spack/docs/build_systems/cmakepackage.rst
index 3c3c96f92c..7ebac48734 100644
--- a/lib/spack/docs/build_systems/cmakepackage.rst
+++ b/lib/spack/docs/build_systems/cmakepackage.rst
@@ -145,6 +145,20 @@ and without the :meth:`~spack.build_systems.cmake.CMakePackage.define` and
return args
+Spack supports CMake defines from conditional variants too. Whenever the condition on
+the variant is not met, ``define_from_variant()`` simply returns an empty string, and
+CMake ignores the empty command-line argument. For example, the following
+
+.. code-block:: python
+
+ variant('example', default=True, when='@2.0:')
+
+ def cmake_args(self):
+ return [self.define_from_variant('EXAMPLE', 'example')]
+
+will generate ``'cmake' '-DEXAMPLE=ON' ...`` when ``@2.0: +example`` is met, but will
+result in ``'cmake' '' ...`` when the spec version is below ``2.0``.
+
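+As a minimal sketch of how this composes with unconditional defines (the CMake
+variable names here are hypothetical):
+
+.. code-block:: python
+
+   def cmake_args(self):
+       return [
+           # Always passed, regardless of the spec
+           self.define('BUILD_TESTING', self.run_tests),
+           # Empty string, ignored by CMake, when the spec is below 2.0
+           self.define_from_variant('EXAMPLE', 'example'),
+       ]
+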
^^^^^^^^^^
Generators
diff --git a/lib/spack/docs/build_systems/pythonpackage.rst b/lib/spack/docs/build_systems/pythonpackage.rst
index 30875d15f3..365c5d7bce 100644
--- a/lib/spack/docs/build_systems/pythonpackage.rst
+++ b/lib/spack/docs/build_systems/pythonpackage.rst
@@ -125,12 +125,15 @@ The zip file will not contain a ``setup.py``, but it will contain a
``METADATA`` file which contains all the information you need to
write a ``package.py`` build recipe.
+.. _pypi:
+
^^^^
PyPI
^^^^
-The vast majority of Python packages are hosted on PyPI - The Python
-Package Index. ``pip`` only supports packages hosted on PyPI, making
+The vast majority of Python packages are hosted on PyPI (The Python
+Package Index), which is :ref:`preferred over GitHub <pypi-vs-github>`
+for downloading packages. ``pip`` only supports packages hosted on PyPI, making
it the only option for developers who want a simple installation.
Search for "PyPI <package-name>" to find the download page. Note that
some pages are versioned, and the first result may not be the newest
@@ -217,6 +220,7 @@ try to extract the wheel:
version('1.11.0', sha256='d8c9d24ea90457214d798b0d922489863dad518adde3638e08ef62de28fb183a', expand=False)
+.. _pypi-vs-github:
"""""""""""""""
PyPI vs. GitHub
@@ -263,6 +267,9 @@ location, but PyPI is preferred for the following reasons:
PyPI is nice because it makes it physically impossible to
re-release the same version of a package with a different checksum.
+Use the :ref:`pypi attribute <pypi>` to facilitate construction of PyPI package
+references.
+
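+For example, a recipe can declare the attribute once and let Spack derive the
+download URL and homepage from it (a sketch; the package name is hypothetical):
+
+.. code-block:: python
+
+   class PyExample(PythonPackage):
+       # One PyPI reference of the form '<project>/<distribution file>'
+       pypi = 'example/example-1.2.3.tar.gz'
+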
^^^^^^^^^^^^^^^^^^^^^^^^^
Build system dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -709,7 +716,7 @@ The package may have its own unit or regression tests. Spack can
run these tests during the installation by adding phase-appropriate
test methods.
-For example, ``py-numpy`` adds the following as a check to run
+For example, ``py-numpy`` adds the following as a check to run
after the ``install`` phase:
.. code-block:: python
diff --git a/lib/spack/docs/conf.py b/lib/spack/docs/conf.py
index 2b58223e1d..d0a2bb9e33 100644
--- a/lib/spack/docs/conf.py
+++ b/lib/spack/docs/conf.py
@@ -30,6 +30,7 @@ from sphinx.ext.apidoc import main as sphinx_apidoc
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external'))
+sys.path.insert(0, os.path.abspath('_spack_root/lib/spack/external/pytest-fallback'))
if sys.version_info[0] < 3:
sys.path.insert(
diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst
index 17e24b816e..4d639a1c9b 100644
--- a/lib/spack/docs/contribution_guide.rst
+++ b/lib/spack/docs/contribution_guide.rst
@@ -71,7 +71,7 @@ locally to speed up the review process.
new release that is causing problems. If this is the case, please file an issue.
-We currently test against Python 2.6, 2.7, and 3.5-3.7 on both macOS and Linux and
+We currently test against Python 2.7 and 3.5-3.9 on both macOS and Linux and
perform 3 types of tests:
.. _cmd-spack-unit-test:
diff --git a/lib/spack/docs/developer_guide.rst b/lib/spack/docs/developer_guide.rst
index 2530b4ef03..e62b9fae5a 100644
--- a/lib/spack/docs/developer_guide.rst
+++ b/lib/spack/docs/developer_guide.rst
@@ -671,6 +671,13 @@ If you need to write a hook that is relevant to a failure within a build
process, you would want to instead use ``on_phase_failure``.
+"""""""""""""""""""""""""""
+``on_install_cancel(spec)``
+"""""""""""""""""""""""""""
+
+The same, but triggered if a spec install is cancelled for any reason.
+
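+As a sketch (not an actual Spack hook module), a custom hook could log the event:
+
+.. code-block:: python
+
+   # hypothetical module under lib/spack/spack/hooks/
+   import llnl.util.tty as tty
+
+   def on_install_cancel(spec):
+       """Called when the install of a spec is cancelled."""
+       tty.warn('Install of {0} was cancelled'.format(spec.name))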
+
"""""""""""""""""""""""""""""""""""""""""""""""
``on_phase_success(pkg, phase_name, log_file)``
"""""""""""""""""""""""""""""""""""""""""""""""
@@ -1177,6 +1184,10 @@ completed, the steps to make the major release are:
If CI is not passing, submit pull requests to ``develop`` as normal
and keep rebasing the release branch on ``develop`` until CI passes.
+#. Make sure the entire documentation is up to date. If the documentation
+ is outdated, submit pull requests to ``develop`` as normal
+ and keep rebasing the release branch on ``develop``.
+
#. Follow the steps in :ref:`publishing-releases`.
#. Follow the steps in :ref:`merging-releases`.
diff --git a/lib/spack/docs/module_file_support.rst b/lib/spack/docs/module_file_support.rst
index c6cbc4b22b..689b0415c6 100644
--- a/lib/spack/docs/module_file_support.rst
+++ b/lib/spack/docs/module_file_support.rst
@@ -273,29 +273,30 @@ of the installed software. For instance, in the snippet below:
.. code-block:: yaml
modules:
- tcl:
- # The keyword `all` selects every package
- all:
- environment:
- set:
- BAR: 'bar'
- # This anonymous spec selects any package that
- # depends on openmpi. The double colon at the
- # end clears the set of rules that matched so far.
- ^openmpi::
- environment:
- set:
- BAR: 'baz'
- # Selects any zlib package
- zlib:
- environment:
- prepend_path:
- LD_LIBRARY_PATH: 'foo'
- # Selects zlib compiled with gcc@4.8
- zlib%gcc@4.8:
- environment:
- unset:
- - FOOBAR
+ default:
+ tcl:
+ # The keyword `all` selects every package
+ all:
+ environment:
+ set:
+ BAR: 'bar'
+ # This anonymous spec selects any package that
+ # depends on openmpi. The double colon at the
+ # end clears the set of rules that matched so far.
+ ^openmpi::
+ environment:
+ set:
+ BAR: 'baz'
+ # Selects any zlib package
+ zlib:
+ environment:
+ prepend_path:
+ LD_LIBRARY_PATH: 'foo'
+ # Selects zlib compiled with gcc@4.8
+ zlib%gcc@4.8:
+ environment:
+ unset:
+ - FOOBAR
you are instructing Spack to set the environment variable ``BAR=bar`` for every module,
unless the associated spec satisfies ``^openmpi`` in which case ``BAR=baz``.
@@ -322,9 +323,10 @@ your system. If you write a configuration file like:
.. code-block:: yaml
modules:
- tcl:
- whitelist: ['gcc', 'llvm'] # Whitelist will have precedence over blacklist
- blacklist: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
+ default:
+ tcl:
+ whitelist: ['gcc', 'llvm'] # Whitelist will have precedence over blacklist
+ blacklist: ['%gcc@4.4.7'] # Assuming gcc@4.4.7 is the system compiler
you will prevent the generation of module files for any package that
is compiled with ``gcc@4.4.7``, with the only exception of any ``gcc``
@@ -349,8 +351,9 @@ shows how to set hash length in the module file names:
.. code-block:: yaml
modules:
- tcl:
- hash_length: 7
+ default:
+ tcl:
+ hash_length: 7
To help make module names more readable, and to help alleviate name conflicts
with a short hash, one can use the ``suffixes`` option in the modules
@@ -360,11 +363,12 @@ For instance, the following config options,
.. code-block:: yaml
modules:
- tcl:
- all:
- suffixes:
- ^python@2.7.12: 'python-2.7.12'
- ^openblas: 'openblas'
+ default:
+ tcl:
+ all:
+ suffixes:
+ ^python@2.7.12: 'python-2.7.12'
+ ^openblas: 'openblas'
will add a ``python-2.7.12`` version string to any packages compiled with
python matching the spec, ``python@2.7.12``. This is useful to know which
@@ -379,10 +383,11 @@ covered in :ref:`adding_projections_to_views`.
.. code-block:: yaml
modules:
- tcl:
- projections:
- all: '{name}/{version}-{compiler.name}-{compiler.version}-module'
- ^mpi: '{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module'
+ default:
+ tcl:
+ projections:
+ all: '{name}/{version}-{compiler.name}-{compiler.version}-module'
+ ^mpi: '{name}/{version}-{^mpi.name}-{^mpi.version}-{compiler.name}-{compiler.version}-module'
will create module files that are nested in directories by package
name, contain the version and compiler name and version, and have the
@@ -403,15 +408,16 @@ that are already in the LMod hierarchy.
.. code-block:: yaml
modules:
- enable:
- - tcl
- tcl:
- projections:
- all: '{name}/{version}-{compiler.name}-{compiler.version}'
- all:
- conflict:
- - '{name}'
- - 'intel/14.0.1'
+ default:
+ enable:
+ - tcl
+ tcl:
+ projections:
+ all: '{name}/{version}-{compiler.name}-{compiler.version}'
+ all:
+ conflict:
+ - '{name}'
+ - 'intel/14.0.1'
will create module files that will conflict with ``intel/14.0.1`` and with the
base directory of the same module, effectively preventing the possibility to
@@ -431,16 +437,17 @@ that are already in the LMod hierarchy.
.. code-block:: yaml
modules:
- enable:
- - lmod
- lmod:
- core_compilers:
- - 'gcc@4.8'
- core_specs:
- - 'python'
- hierarchy:
- - 'mpi'
- - 'lapack'
+ default:
+ enable:
+ - lmod
+ lmod:
+ core_compilers:
+ - 'gcc@4.8'
+ core_specs:
+ - 'python'
+ hierarchy:
+ - 'mpi'
+ - 'lapack'
that will generate a hierarchy in which the ``lapack`` and ``mpi`` layer can be switched
independently. This allows a site to build the same libraries or applications against different
@@ -591,11 +598,12 @@ do so by using the environment blacklist:
.. code-block:: yaml
modules:
- tcl:
- all:
- filter:
- # Exclude changes to any of these variables
- environment_blacklist: ['CPATH', 'LIBRARY_PATH']
+ default:
+ tcl:
+ all:
+ filter:
+ # Exclude changes to any of these variables
+ environment_blacklist: ['CPATH', 'LIBRARY_PATH']
The configuration above will generate module files that will not contain
modifications to either ``CPATH`` or ``LIBRARY_PATH``.
@@ -614,9 +622,10 @@ activated using ``spack activate``:
.. code-block:: yaml
modules:
- tcl:
- ^python:
- autoload: 'direct'
+ default:
+ tcl:
+ ^python:
+ autoload: 'direct'
The configuration file above will produce module files that will
load their direct dependencies if the package installed depends on ``python``.
@@ -633,9 +642,10 @@ The allowed values for the ``autoload`` statement are either ``none``,
.. code-block:: yaml
modules:
- lmod:
- all:
- autoload: 'direct'
+ default:
+ lmod:
+ all:
+ autoload: 'direct'
.. note::
TCL prerequisites
diff --git a/lib/spack/docs/tables/system_prerequisites.csv b/lib/spack/docs/tables/system_prerequisites.csv
index 074be4bffb..980aea77f6 100644
--- a/lib/spack/docs/tables/system_prerequisites.csv
+++ b/lib/spack/docs/tables/system_prerequisites.csv
@@ -1,5 +1,5 @@
Name, Supported Versions, Notes, Requirement Reason
-Python, 2.6/2.7/3.5-3.9, , Interpreter for Spack
+Python, 2.7/3.5-3.9, , Interpreter for Spack
C/C++ Compilers, , , Building software
make, , , Build software
patch, , , Build software
diff --git a/lib/spack/env/cc b/lib/spack/env/cc
index c498db0583..59ff0001e1 100755
--- a/lib/spack/env/cc
+++ b/lib/spack/env/cc
@@ -248,7 +248,7 @@ case "$command" in
lang_flags=C
debug_flags="-g"
;;
- c++|CC|g++|clang++|armclang++|icpc|icpx|pgc++|nvc++|xlc++|xlc++_r|FCC)
+ c++|CC|g++|clang++|armclang++|icpc|icpx|dpcpp|pgc++|nvc++|xlc++|xlc++_r|FCC)
command="$SPACK_CXX"
language="C++"
comp="CXX"
diff --git a/lib/spack/env/oneapi/dpcpp b/lib/spack/env/oneapi/dpcpp
new file mode 120000
index 0000000000..82c2b8e90a
--- /dev/null
+++ b/lib/spack/env/oneapi/dpcpp
@@ -0,0 +1 @@
+../cc
\ No newline at end of file
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index be4b9fbf14..c4c4855fe0 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -17,13 +17,20 @@ argparse
--------
* Homepage: https://pypi.python.org/pypi/argparse
-* Usage: We include our own version to be Python 2.6 compatible.
+* Usage: We include our own version to be Python 3.X compatible.
* Version: 1.4.0
* Note: This package has been slightly modified to improve
error message formatting. See the following commit if the
vendored copy ever needs to be updated again:
https://github.com/spack/spack/pull/6786/commits/dfcef577b77249106ea4e4c69a6cd9e64fa6c418
+attrs
+----------------
+
+* Homepage: https://github.com/python-attrs/attrs
+* Usage: Needed by jsonschema.
+* Version: 21.2.0 (83d3cd70f90a3f4d19ee8b508e58d1c58821c0ad)
+
ctest_log_parser
----------------
@@ -37,49 +44,36 @@ distro
* Homepage: https://pypi.python.org/pypi/distro
* Usage: Provides a more stable linux distribution detection.
-* Version: 1.0.4 (last version supporting Python 2.6)
+* Version: 1.6.0 (64946a1e2a9ff529047070657728600e006c99ff)
+* Note: Last version supporting Python 2.7
-functools
----------
-
-* Homepage: https://github.com/python/cpython/blob/2.7/Lib/functools.py
-* Usage: Used for implementation of total_ordering.
-* Version: Unversioned
-* Note: This is the functools.total_ordering implementation
- from Python 2.7 backported so we can run on Python 2.6.
+functools32
+-----------
+* Homepage: https://github.com/MiCHiLU/python-functools32
+* Usage: Needed by jsonschema when using Python 2.7.
+* Version: 3.2.3-2
jinja2
------
* Homepage: https://pypi.python.org/pypi/Jinja2
* Usage: A modern and designer-friendly templating language for Python.
-* Version: 2.10
+* Version: 2.11.3 (last version supporting Python 2.7)
jsonschema
----------
* Homepage: https://pypi.python.org/pypi/jsonschema
* Usage: An implementation of JSON Schema for Python.
-* Version: 2.4.0 (last version before functools32 dependency was added)
-* Note: functools32 doesn't support Python 2.6 or 3.0, so jsonschema
- cannot be upgraded any further until we drop 2.6.
- Also, jsonschema/validators.py has been modified NOT to try to import
- requests (see 7a1dd517b8).
+* Version: 3.2.0 (last version before 2.7 and 3.6 support was dropped)
+* Note: We don't include tests or benchmarks; just what Spack needs.
markupsafe
----------
* Homepage: https://pypi.python.org/pypi/MarkupSafe
* Usage: Implements a XML/HTML/XHTML Markup safe string for Python.
-* Version: 1.0
-
-orderddict
-----------
-
-* Homepage: https://pypi.org/project/ordereddict/
-* Usage: A drop-in substitute for Py2.7's new collections.OrderedDict
- that works in Python 2.4-2.6.
-* Version: 1.1
+* Version: 1.1.1 (last version supporting Python 2.7)
py
--
@@ -91,6 +85,14 @@ py
* Note: This packages has been modified:
* https://github.com/pytest-dev/py/pull/186 was backported
+pyrsistent
+----------
+
+* Homepage: http://github.com/tobgu/pyrsistent/
+* Usage: Needed by `jsonschema`
+* Version: 0.16.1 (last version supporting Python 2.7)
+* Note: We only include the parts needed for `jsonschema`.
+
pytest
------
@@ -120,7 +122,7 @@ six
* Homepage: https://pypi.python.org/pypi/six
* Usage: Python 2 and 3 compatibility utilities.
-* Version: 1.11.0
+* Version: 1.16.0
macholib
--------
diff --git a/lib/spack/external/attr/LICENSE b/lib/spack/external/attr/LICENSE
new file mode 100644
index 0000000000..7ae3df9309
--- /dev/null
+++ b/lib/spack/external/attr/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Hynek Schlawack
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/spack/external/attr/__init__.py b/lib/spack/external/attr/__init__.py
new file mode 100644
index 0000000000..b1ce7fe248
--- /dev/null
+++ b/lib/spack/external/attr/__init__.py
@@ -0,0 +1,78 @@
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from functools import partial
+
+from . import converters, exceptions, filters, setters, validators
+from ._cmp import cmp_using
+from ._config import get_run_validators, set_run_validators
+from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
+from ._make import (
+ NOTHING,
+ Attribute,
+ Factory,
+ attrib,
+ attrs,
+ fields,
+ fields_dict,
+ make_class,
+ validate,
+)
+from ._version_info import VersionInfo
+
+
+__version__ = "21.2.0"
+__version_info__ = VersionInfo._from_version_string(__version__)
+
+__title__ = "attrs"
+__description__ = "Classes Without Boilerplate"
+__url__ = "https://www.attrs.org/"
+__uri__ = __url__
+__doc__ = __description__ + " <" + __uri__ + ">"
+
+__author__ = "Hynek Schlawack"
+__email__ = "hs@ox.cx"
+
+__license__ = "MIT"
+__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
+
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
+
+__all__ = [
+ "Attribute",
+ "Factory",
+ "NOTHING",
+ "asdict",
+ "assoc",
+ "astuple",
+ "attr",
+ "attrib",
+ "attributes",
+ "attrs",
+ "cmp_using",
+ "converters",
+ "evolve",
+ "exceptions",
+ "fields",
+ "fields_dict",
+ "filters",
+ "get_run_validators",
+ "has",
+ "ib",
+ "make_class",
+ "resolve_types",
+ "s",
+ "set_run_validators",
+ "setters",
+ "validate",
+ "validators",
+]
+
+if sys.version_info[:2] >= (3, 6):
+ from ._next_gen import define, field, frozen, mutable
+
+ __all__.extend((define, field, frozen, mutable))
diff --git a/lib/spack/external/attr/_cmp.py b/lib/spack/external/attr/_cmp.py
new file mode 100644
index 0000000000..b747b603f1
--- /dev/null
+++ b/lib/spack/external/attr/_cmp.py
@@ -0,0 +1,152 @@
+from __future__ import absolute_import, division, print_function
+
+import functools
+
+from ._compat import new_class
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+ eq=None,
+ lt=None,
+ le=None,
+ gt=None,
+ ge=None,
+ require_same_type=True,
+ class_name="Comparable",
+):
+ """
+ Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
+ ``cmp`` arguments to customize field comparison.
+
+ The resulting class will have a full set of ordering methods if
+ at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
+
+ :param Optional[callable] eq: `callable` used to evaluate equality
+ of two objects.
+ :param Optional[callable] lt: `callable` used to evaluate whether
+ one object is less than another object.
+ :param Optional[callable] le: `callable` used to evaluate whether
+ one object is less than or equal to another object.
+ :param Optional[callable] gt: `callable` used to evaluate whether
+ one object is greater than another object.
+ :param Optional[callable] ge: `callable` used to evaluate whether
+ one object is greater than or equal to another object.
+
+ :param bool require_same_type: When `True`, equality and ordering methods
+ will return `NotImplemented` if objects are not of the same type.
+
+ :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+ See `comparison` for more details.
+
+ .. versionadded:: 21.1.0
+ """
+
+ body = {
+ "__slots__": ["value"],
+ "__init__": _make_init(),
+ "_requirements": [],
+ "_is_comparable_to": _is_comparable_to,
+ }
+
+ # Add operations.
+ num_order_functions = 0
+ has_eq_function = False
+
+ if eq is not None:
+ has_eq_function = True
+ body["__eq__"] = _make_operator("eq", eq)
+ body["__ne__"] = _make_ne()
+
+ if lt is not None:
+ num_order_functions += 1
+ body["__lt__"] = _make_operator("lt", lt)
+
+ if le is not None:
+ num_order_functions += 1
+ body["__le__"] = _make_operator("le", le)
+
+ if gt is not None:
+ num_order_functions += 1
+ body["__gt__"] = _make_operator("gt", gt)
+
+ if ge is not None:
+ num_order_functions += 1
+ body["__ge__"] = _make_operator("ge", ge)
+
+ type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body))
+
+ # Add same type requirement.
+ if require_same_type:
+ type_._requirements.append(_check_same_type)
+
+ # Add total ordering if at least one operation was defined.
+ if 0 < num_order_functions < 4:
+ if not has_eq_function:
+ # functools.total_ordering requires __eq__ to be defined,
+ # so raise early error here to keep a nice stack.
+ raise ValueError(
+ "eq must be define is order to complete ordering from "
+ "lt, le, gt, ge."
+ )
+ type_ = functools.total_ordering(type_)
+
+ return type_
+
+
+def _make_init():
+ """
+ Create __init__ method.
+ """
+
+ def __init__(self, value):
+ """
+ Initialize object with *value*.
+ """
+ self.value = value
+
+ return __init__
+
+
+def _make_operator(name, func):
+ """
+ Create operator method.
+ """
+
+ def method(self, other):
+ if not self._is_comparable_to(other):
+ return NotImplemented
+
+ result = func(self.value, other.value)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return result
+
+ method.__name__ = "__%s__" % (name,)
+ method.__doc__ = "Return a %s b. Computed by attrs." % (
+ _operation_names[name],
+ )
+
+ return method
+
+
+def _is_comparable_to(self, other):
+ """
+ Check whether `other` is comparable to `self`.
+ """
+ for func in self._requirements:
+ if not func(self, other):
+ return False
+ return True
+
+
+def _check_same_type(self, other):
+ """
+ Return True if *self* and *other* are of the same type, False otherwise.
+ """
+ return other.value.__class__ is self.value.__class__
diff --git a/lib/spack/external/attr/_compat.py b/lib/spack/external/attr/_compat.py
new file mode 100644
index 0000000000..6939f338da
--- /dev/null
+++ b/lib/spack/external/attr/_compat.py
@@ -0,0 +1,242 @@
+from __future__ import absolute_import, division, print_function
+
+import platform
+import sys
+import types
+import warnings
+
+
+PY2 = sys.version_info[0] == 2
+PYPY = platform.python_implementation() == "PyPy"
+
+
+if PYPY or sys.version_info[:2] >= (3, 6):
+ ordered_dict = dict
+else:
+ from collections import OrderedDict
+
+ ordered_dict = OrderedDict
+
+
+if PY2:
+ from collections import Mapping, Sequence
+
+ from UserDict import IterableUserDict
+
+ # We 'bundle' isclass instead of using inspect as importing inspect is
+ # fairly expensive (order of 10-15 ms for a modern machine in 2016)
+ def isclass(klass):
+ return isinstance(klass, (type, types.ClassType))
+
+ def new_class(name, bases, kwds, exec_body):
+ """
+ A minimal stub of types.new_class that we need for make_class.
+ """
+ ns = {}
+ exec_body(ns)
+
+ return type(name, bases, ns)
+
+ # TYPE is used in exceptions, repr(int) is different on Python 2 and 3.
+ TYPE = "type"
+
+ def iteritems(d):
+ return d.iteritems()
+
+ # Python 2 is bereft of a read-only dict proxy, so we make one!
+ class ReadOnlyDict(IterableUserDict):
+ """
+ Best-effort read-only dict wrapper.
+ """
+
+ def __setitem__(self, key, val):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise TypeError(
+ "'mappingproxy' object does not support item assignment"
+ )
+
+ def update(self, _):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise AttributeError(
+ "'mappingproxy' object has no attribute 'update'"
+ )
+
+ def __delitem__(self, _):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise TypeError(
+ "'mappingproxy' object does not support item deletion"
+ )
+
+ def clear(self):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise AttributeError(
+ "'mappingproxy' object has no attribute 'clear'"
+ )
+
+ def pop(self, key, default=None):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise AttributeError(
+ "'mappingproxy' object has no attribute 'pop'"
+ )
+
+ def popitem(self):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise AttributeError(
+ "'mappingproxy' object has no attribute 'popitem'"
+ )
+
+ def setdefault(self, key, default=None):
+ # We gently pretend we're a Python 3 mappingproxy.
+ raise AttributeError(
+ "'mappingproxy' object has no attribute 'setdefault'"
+ )
+
+ def __repr__(self):
+ # Override to be identical to the Python 3 version.
+ return "mappingproxy(" + repr(self.data) + ")"
+
+ def metadata_proxy(d):
+ res = ReadOnlyDict()
+ res.data.update(d) # We blocked update, so we have to do it like this.
+ return res
+
+ def just_warn(*args, **kw): # pragma: no cover
+ """
+ We only warn on Python 3 because we are not aware of any concrete
+ consequences of not setting the cell on Python 2.
+ """
+
+
+else: # Python 3 and later.
+ from collections.abc import Mapping, Sequence # noqa
+
+ def just_warn(*args, **kw):
+ """
+ We only warn on Python 3 because we are not aware of any concrete
+ consequences of not setting the cell on Python 2.
+ """
+ warnings.warn(
+ "Running interpreter doesn't sufficiently support code object "
+ "introspection. Some features like bare super() or accessing "
+ "__class__ will not work with slotted classes.",
+ RuntimeWarning,
+ stacklevel=2,
+ )
+
+ def isclass(klass):
+ return isinstance(klass, type)
+
+ TYPE = "class"
+
+ def iteritems(d):
+ return d.items()
+
+ new_class = types.new_class
+
+ def metadata_proxy(d):
+ return types.MappingProxyType(dict(d))
+
+
+def make_set_closure_cell():
+ """Return a function of two arguments (cell, value) which sets
+ the value stored in the closure cell `cell` to `value`.
+ """
+ # pypy makes this easy. (It also supports the logic below, but
+ # why not do the easy/fast thing?)
+ if PYPY:
+
+ def set_closure_cell(cell, value):
+ cell.__setstate__((value,))
+
+ return set_closure_cell
+
+ # Otherwise gotta do it the hard way.
+
+ # Create a function that will set its first cellvar to `value`.
+ def set_first_cellvar_to(value):
+ x = value
+ return
+
+ # This function will be eliminated as dead code, but
+ # not before its reference to `x` forces `x` to be
+ # represented as a closure cell rather than a local.
+ def force_x_to_be_a_cell(): # pragma: no cover
+ return x
+
+ try:
+ # Extract the code object and make sure our assumptions about
+ # the closure behavior are correct.
+ if PY2:
+ co = set_first_cellvar_to.func_code
+ else:
+ co = set_first_cellvar_to.__code__
+ if co.co_cellvars != ("x",) or co.co_freevars != ():
+ raise AssertionError # pragma: no cover
+
+ # Convert this code object to a code object that sets the
+ # function's first _freevar_ (not cellvar) to the argument.
+ if sys.version_info >= (3, 8):
+ # CPython 3.8+ has an incompatible CodeType signature
+ # (added a posonlyargcount argument) but also added
+ # CodeType.replace() to do this without counting parameters.
+ set_first_freevar_code = co.replace(
+ co_cellvars=co.co_freevars, co_freevars=co.co_cellvars
+ )
+ else:
+ args = [co.co_argcount]
+ if not PY2:
+ args.append(co.co_kwonlyargcount)
+ args.extend(
+ [
+ co.co_nlocals,
+ co.co_stacksize,
+ co.co_flags,
+ co.co_code,
+ co.co_consts,
+ co.co_names,
+ co.co_varnames,
+ co.co_filename,
+ co.co_name,
+ co.co_firstlineno,
+ co.co_lnotab,
+ # These two arguments are reversed:
+ co.co_cellvars,
+ co.co_freevars,
+ ]
+ )
+ set_first_freevar_code = types.CodeType(*args)
+
+ def set_closure_cell(cell, value):
+ # Create a function using the set_first_freevar_code,
+ # whose first closure cell is `cell`. Calling it will
+ # change the value of that cell.
+ setter = types.FunctionType(
+ set_first_freevar_code, {}, "setter", (), (cell,)
+ )
+ # And call it to set the cell.
+ setter(value)
+
+ # Make sure it works on this interpreter:
+ def make_func_with_cell():
+ x = None
+
+ def func():
+ return x # pragma: no cover
+
+ return func
+
+ if PY2:
+ cell = make_func_with_cell().func_closure[0]
+ else:
+ cell = make_func_with_cell().__closure__[0]
+ set_closure_cell(cell, 100)
+ if cell.cell_contents != 100:
+ raise AssertionError # pragma: no cover
+
+ except Exception:
+ return just_warn
+ else:
+ return set_closure_cell
+
+
+set_closure_cell = make_set_closure_cell()
diff --git a/lib/spack/external/attr/_config.py b/lib/spack/external/attr/_config.py
new file mode 100644
index 0000000000..8ec920962d
--- /dev/null
+++ b/lib/spack/external/attr/_config.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import, division, print_function
+
+
+__all__ = ["set_run_validators", "get_run_validators"]
+
+_run_validators = True
+
+
+def set_run_validators(run):
+ """
+ Set whether or not validators are run. By default, they are run.
+ """
+ if not isinstance(run, bool):
+ raise TypeError("'run' must be bool.")
+ global _run_validators
+ _run_validators = run
+
+
+def get_run_validators():
+ """
+ Return whether or not validators are run.
+ """
+ return _run_validators
diff --git a/lib/spack/external/attr/_funcs.py b/lib/spack/external/attr/_funcs.py
new file mode 100644
index 0000000000..fda508c5c4
--- /dev/null
+++ b/lib/spack/external/attr/_funcs.py
@@ -0,0 +1,395 @@
+from __future__ import absolute_import, division, print_function
+
+import copy
+
+from ._compat import iteritems
+from ._make import NOTHING, _obj_setattr, fields
+from .exceptions import AttrsAttributeNotFoundError
+
+
+def asdict(
+ inst,
+ recurse=True,
+ filter=None,
+ dict_factory=dict,
+ retain_collection_types=False,
+ value_serializer=None,
+):
+ """
+ Return the ``attrs`` attribute values of *inst* as a dict.
+
+ Optionally recurse into other ``attrs``-decorated classes.
+
+ :param inst: Instance of an ``attrs``-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ ``attrs``-decorated.
+ :param callable filter: A callable whose return code determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attr.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable dict_factory: A callable to produce dictionaries from. For
+ example, to produce ordered dictionaries instead of normal Python
+ dictionaries, pass in ``collections.OrderedDict``.
+ :param bool retain_collection_types: Do not convert to ``list`` when
+ encountering an attribute whose type is ``tuple`` or ``set``. Only
+ meaningful if ``recurse`` is ``True``.
+ :param Optional[callable] value_serializer: A hook that is called for every
+ attribute or dict key/value. It receives the current instance, field
+ and value and must return the (updated) value. The hook is run *after*
+ the optional *filter* has been applied.
+
+ :rtype: return type of *dict_factory*
+
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ .. versionadded:: 16.0.0 *dict_factory*
+ .. versionadded:: 16.1.0 *retain_collection_types*
+ .. versionadded:: 20.3.0 *value_serializer*
+ """
+ attrs = fields(inst.__class__)
+ rv = dict_factory()
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+
+ if value_serializer is not None:
+ v = value_serializer(inst, a, v)
+
+ if recurse is True:
+ if has(v.__class__):
+ rv[a.name] = asdict(
+ v,
+ True,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain_collection_types is True else list
+ rv[a.name] = cf(
+ [
+ _asdict_anything(
+ i,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+ )
+ for i in v
+ ]
+ )
+ elif isinstance(v, dict):
+ df = dict_factory
+ rv[a.name] = df(
+ (
+ _asdict_anything(
+ kk,
+ filter,
+ df,
+ retain_collection_types,
+ value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ filter,
+ df,
+ retain_collection_types,
+ value_serializer,
+ ),
+ )
+ for kk, vv in iteritems(v)
+ )
+ else:
+ rv[a.name] = v
+ else:
+ rv[a.name] = v
+ return rv
+
+
+def _asdict_anything(
+ val,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+):
+ """
+ ``asdict`` only works on attrs instances, this works on anything.
+ """
+ if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+ # Attrs class.
+ rv = asdict(
+ val,
+ True,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+ )
+ elif isinstance(val, (tuple, list, set, frozenset)):
+ cf = val.__class__ if retain_collection_types is True else list
+ rv = cf(
+ [
+ _asdict_anything(
+ i,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+ )
+ for i in val
+ ]
+ )
+ elif isinstance(val, dict):
+ df = dict_factory
+ rv = df(
+ (
+ _asdict_anything(
+ kk, filter, df, retain_collection_types, value_serializer
+ ),
+ _asdict_anything(
+ vv, filter, df, retain_collection_types, value_serializer
+ ),
+ )
+ for kk, vv in iteritems(val)
+ )
+ else:
+ rv = val
+ if value_serializer is not None:
+ rv = value_serializer(None, None, rv)
+
+ return rv
+
+
+def astuple(
+ inst,
+ recurse=True,
+ filter=None,
+ tuple_factory=tuple,
+ retain_collection_types=False,
+):
+ """
+ Return the ``attrs`` attribute values of *inst* as a tuple.
+
+ Optionally recurse into other ``attrs``-decorated classes.
+
+ :param inst: Instance of an ``attrs``-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ ``attrs``-decorated.
+    :param callable filter: A callable whose return value determines whether
+        an attribute or element is included (``True``) or dropped
+        (``False``). It is called with the `attr.Attribute` as the first
+        argument and the value as the second argument.
+    :param callable tuple_factory: A callable to produce tuples from. For
+        example, pass ``list`` to produce lists instead of tuples.
+    :param bool retain_collection_types: Do not convert to ``list``
+        or ``dict`` when encountering an attribute whose type is
+        ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
+        ``True``.
+
+ :rtype: return type of *tuple_factory*
+
+    :raise attr.exceptions.NotAnAttrsClassError: If *inst*'s class is not an
+        ``attrs`` class.
+
+ .. versionadded:: 16.2.0
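+
+    A minimal usage sketch (the ``Point`` class is illustrative only):
+
+    >>> import attr
+    >>> @attr.s
+    ... class Point(object):
+    ...     x = attr.ib()
+    ...     y = attr.ib()
+    >>> attr.astuple(Point(x=1, y=2))
+    (1, 2)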
+ """
+ attrs = fields(inst.__class__)
+ rv = []
+ retain = retain_collection_types # Very long. :/
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+ if recurse is True:
+ if has(v.__class__):
+ rv.append(
+ astuple(
+ v,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain is True else list
+ rv.append(
+ cf(
+ [
+ astuple(
+ j,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(j.__class__)
+ else j
+ for j in v
+ ]
+ )
+ )
+ elif isinstance(v, dict):
+ df = v.__class__ if retain is True else dict
+ rv.append(
+ df(
+ (
+ astuple(
+ kk,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(kk.__class__)
+ else kk,
+ astuple(
+ vv,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(vv.__class__)
+ else vv,
+ )
+ for kk, vv in iteritems(v)
+ )
+ )
+ else:
+ rv.append(v)
+ else:
+ rv.append(v)
+
+ return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+ """
+ Check whether *cls* is a class with ``attrs`` attributes.
+
+ :param type cls: Class to introspect.
+ :raise TypeError: If *cls* is not a class.
+
+ :rtype: bool
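+
+    A quick doctest-style illustration (``C`` is illustrative only):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C(object):
+    ...     pass
+    >>> attr.has(C), attr.has(object)
+    (True, False)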
+ """
+ return getattr(cls, "__attrs_attrs__", None) is not None
+
+
+def assoc(inst, **changes):
+ """
+ Copy *inst* and apply *changes*.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+    :raise attr.exceptions.AttrsAttributeNotFoundError: If a key in *changes*
+        is not an ``attrs`` attribute on *inst*'s class.
+    :raise attr.exceptions.NotAnAttrsClassError: If *inst*'s class is not an
+        ``attrs`` class.
+
+ .. deprecated:: 17.1.0
+ Use `evolve` instead.
+ """
+ import warnings
+
+ warnings.warn(
+ "assoc is deprecated and will be removed after 2018/01.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ new = copy.copy(inst)
+ attrs = fields(inst.__class__)
+ for k, v in iteritems(changes):
+ a = getattr(attrs, k, NOTHING)
+ if a is NOTHING:
+ raise AttrsAttributeNotFoundError(
+ "{k} is not an attrs attribute on {cl}.".format(
+ k=k, cl=new.__class__
+ )
+ )
+ _obj_setattr(new, k, v)
+ return new
+
+
+def evolve(inst, **changes):
+ """
+ Create a new instance, based on *inst* with *changes* applied.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+    :raise TypeError: If a keyword in *changes* doesn't match a parameter of
+        the class ``__init__``.
+    :raise attr.exceptions.NotAnAttrsClassError: If *inst*'s class is not an
+        ``attrs`` class.
+
+ .. versionadded:: 17.1.0
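+
+    A minimal usage sketch (``C`` is illustrative only):
+
+    >>> import attr
+    >>> @attr.s(frozen=True)
+    ... class C(object):
+    ...     x = attr.ib()
+    ...     y = attr.ib()
+    >>> attr.evolve(C(1, 2), y=3)
+    C(x=1, y=3)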
+ """
+ cls = inst.__class__
+ attrs = fields(cls)
+ for a in attrs:
+ if not a.init:
+ continue
+ attr_name = a.name # To deal with private attributes.
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
+ if init_name not in changes:
+ changes[init_name] = getattr(inst, attr_name)
+
+ return cls(**changes)
+
+
+def resolve_types(cls, globalns=None, localns=None, attribs=None):
+ """
+ Resolve any strings and forward annotations in type annotations.
+
+ This is only required if you need concrete types in `Attribute`'s *type*
+ field. In other words, you don't need to resolve your types if you only
+ use them for static type checking.
+
+ With no arguments, names will be looked up in the module in which the class
+ was created. If this is not what you want, e.g. if the name only exists
+ inside a method, you may pass *globalns* or *localns* to specify other
+ dictionaries in which to look up these names. See the docs of
+ `typing.get_type_hints` for more details.
+
+ :param type cls: Class to resolve.
+ :param Optional[dict] globalns: Dictionary containing global variables.
+ :param Optional[dict] localns: Dictionary containing local variables.
+ :param Optional[list] attribs: List of attribs for the given class.
+ This is necessary when calling from inside a ``field_transformer``
+ since *cls* is not an ``attrs`` class yet.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class and you didn't pass any attribs.
+ :raise NameError: If types cannot be resolved because of missing variables.
+
+ :returns: *cls* so you can use this function also as a class decorator.
+ Please note that you have to apply it **after** `attr.s`. That means
+ the decorator has to come in the line **before** `attr.s`.
+
+ .. versionadded:: 20.1.0
+ .. versionadded:: 21.1.0 *attribs*
+
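+    A minimal usage sketch (``Box`` is illustrative only; string annotations
+    and *auto_attribs* require Python 3.6+):
+
+    >>> import typing
+    >>> import attr
+    >>> @attr.s(auto_attribs=True)
+    ... class Box(object):
+    ...     content: "typing.List[int]"
+    >>> Box = attr.resolve_types(Box)
+    >>> attr.fields(Box).content.type
+    typing.List[int]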
+ """
+ try:
+ # Since calling get_type_hints is expensive we cache whether we've
+ # done it already.
+ cls.__attrs_types_resolved__
+ except AttributeError:
+ import typing
+
+ hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
+ for field in fields(cls) if attribs is None else attribs:
+ if field.name in hints:
+ # Since fields have been frozen we must work around it.
+ _obj_setattr(field, "type", hints[field.name])
+ cls.__attrs_types_resolved__ = True
+
+ # Return the class so you can use it as a decorator too.
+ return cls
diff --git a/lib/spack/external/attr/_make.py b/lib/spack/external/attr/_make.py
new file mode 100644
index 0000000000..a1912b1233
--- /dev/null
+++ b/lib/spack/external/attr/_make.py
@@ -0,0 +1,3052 @@
+from __future__ import absolute_import, division, print_function
+
+import copy
+import inspect
+import linecache
+import sys
+import threading
+import uuid
+import warnings
+
+from operator import itemgetter
+
+from . import _config, setters
+from ._compat import (
+ PY2,
+ PYPY,
+ isclass,
+ iteritems,
+ metadata_proxy,
+ new_class,
+ ordered_dict,
+ set_closure_cell,
+)
+from .exceptions import (
+ DefaultAlreadySetError,
+ FrozenInstanceError,
+ NotAnAttrsClassError,
+ PythonTooOldError,
+ UnannotatedAttributeError,
+)
+
+
+if not PY2:
+ import typing
+
+
+# This is used at least twice, so cache it here.
+_obj_setattr = object.__setattr__
+_init_converter_pat = "__attr_converter_%s"
+_init_factory_pat = "__attr_factory_{}"
+_tuple_property_pat = (
+ " {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
+)
+_classvar_prefixes = (
+ "typing.ClassVar",
+ "t.ClassVar",
+ "ClassVar",
+ "typing_extensions.ClassVar",
+)
+# we don't use a double-underscore prefix because that triggers
+# name mangling when trying to create a slot for the field
+# (when slots=True)
+_hash_cache_field = "_attrs_cached_hash"
+
+_empty_metadata_singleton = metadata_proxy({})
+
+# Unique object for unequivocal getattr() defaults.
+_sentinel = object()
+
+
+class _Nothing(object):
+ """
+ Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
+
+ ``_Nothing`` is a singleton. There is only ever one of it.
+
+ .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
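+
+    A quick doctest-style illustration:
+
+    >>> import attr
+    >>> attr.NOTHING
+    NOTHING
+    >>> bool(attr.NOTHING)
+    False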
+ """
+
+ _singleton = None
+
+ def __new__(cls):
+ if _Nothing._singleton is None:
+ _Nothing._singleton = super(_Nothing, cls).__new__(cls)
+ return _Nothing._singleton
+
+ def __repr__(self):
+ return "NOTHING"
+
+ def __bool__(self):
+ return False
+
+ def __len__(self):
+ return 0 # __bool__ for Python 2
+
+
+NOTHING = _Nothing()
+"""
+Sentinel to indicate the lack of a value when ``None`` is ambiguous.
+"""
+
+
+class _CacheHashWrapper(int):
+ """
+    An integer subclass that pickles / copies as None.
+
+ This is used for non-slots classes with ``cache_hash=True``, to avoid
+ serializing a potentially (even likely) invalid hash value. Since ``None``
+ is the default value for uncalculated hashes, whenever this is copied,
+ the copy's value for the hash should automatically reset.
+
+ See GH #613 for more details.
+ """
+
+ if PY2:
+        # For some reason `type(None)` isn't callable in Python 2, but we
+        # don't actually need a constructor for None objects; we just need
+        # any available function that returns None.
+ def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)):
+ return _none_constructor, _args
+
+ else:
+
+ def __reduce__(self, _none_constructor=type(None), _args=()):
+ return _none_constructor, _args
+
+
+def attrib(
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=None,
+ init=True,
+ metadata=None,
+ type=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+):
+ """
+ Create a new attribute on a class.
+
+ .. warning::
+
+ Does *not* do anything unless the class is also decorated with
+ `attr.s`!
+
+ :param default: A value that is used if an ``attrs``-generated ``__init__``
+ is used and no value is passed while instantiating or the attribute is
+ excluded using ``init=False``.
+
+ If the value is an instance of `Factory`, its callable will be
+ used to construct a new value (useful for mutable data types like lists
+ or dicts).
+
+ If a default is not set (or set manually to `attr.NOTHING`), a value
+ *must* be supplied when instantiating; otherwise a `TypeError`
+ will be raised.
+
+ The default can also be set using decorator notation as shown below.
+
+ :type default: Any value
+
+ :param callable factory: Syntactic sugar for
+ ``default=attr.Factory(factory)``.
+
+    :param validator: `callable` that is called by ``attrs``-generated
+        ``__init__`` methods after the instance has been initialized. It
+        receives the initialized instance, the `Attribute`, and the
+        passed value.
+
+ The return value is *not* inspected so the validator has to throw an
+ exception itself.
+
+ If a `list` is passed, its items are treated as validators and must
+ all pass.
+
+ Validators can be globally disabled and re-enabled using
+ `get_run_validators`.
+
+ The validator can also be set using decorator notation as shown below.
+
+ :type validator: `callable` or a `list` of `callable`\\ s.
+
+ :param repr: Include this attribute in the generated ``__repr__``
+ method. If ``True``, include the attribute; if ``False``, omit it. By
+ default, the built-in ``repr()`` function is used. To override how the
+ attribute value is formatted, pass a ``callable`` that takes a single
+ value and returns a string. Note that the resulting string is used
+ as-is, i.e. it will be used directly *instead* of calling ``repr()``
+ (the default).
+ :type repr: a `bool` or a `callable` to use a custom function.
+
+ :param eq: If ``True`` (default), include this attribute in the
+ generated ``__eq__`` and ``__ne__`` methods that check two instances
+ for equality. To override how the attribute value is compared,
+ pass a ``callable`` that takes a single value and returns the value
+ to be compared.
+ :type eq: a `bool` or a `callable`.
+
+    :param order: If ``True`` (default), include this attribute in the
+        generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+ To override how the attribute value is ordered,
+ pass a ``callable`` that takes a single value and returns the value
+ to be ordered.
+ :type order: a `bool` or a `callable`.
+
+ :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+ same value. Must not be mixed with *eq* or *order*.
+ :type cmp: a `bool` or a `callable`.
+
+    :param Optional[bool] hash: Include this attribute in the generated
+        ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
+        is the correct behavior according to the Python spec. Setting this
+        value to anything other than ``None`` is *discouraged*.
+    :param bool init: Include this attribute in the generated ``__init__``
+        method. It is possible to set this to ``False`` and set a default
+        value. In that case this attribute is unconditionally initialized
+        with the specified default value or factory.
+    :param callable converter: `callable` that is called by
+        ``attrs``-generated ``__init__`` methods to convert the attribute's
+        value to the desired format. It is given the passed-in value, and the
+        returned value will be used as the new value of the attribute. The
+        value is converted before being passed to the validator, if any.
+ :param metadata: An arbitrary mapping, to be used by third-party
+ components. See `extending_metadata`.
+ :param type: The type of the attribute. In Python 3.6 or greater, the
+ preferred method to specify the type is using a variable annotation
+ (see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
+ This argument is provided for backward compatibility.
+ Regardless of the approach used, the type will be stored on
+ ``Attribute.type``.
+
+ Please note that ``attrs`` doesn't do anything with this metadata by
+ itself. You can use it as part of your own code or for
+ `static type checking <types>`.
+ :param kw_only: Make this attribute keyword-only (Python 3+)
+ in the generated ``__init__`` (if ``init`` is ``False``, this
+ parameter is ignored).
+    :param on_setattr: Allows you to overwrite the *on_setattr* setting from
+        `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is
+        used.
+ Set to `attr.setters.NO_OP` to run **no** `setattr` hooks for this
+ attribute -- regardless of the setting in `attr.s`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attr.setters.NO_OP`
+
+ .. versionadded:: 15.2.0 *convert*
+ .. versionadded:: 16.3.0 *metadata*
+ .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+ .. versionchanged:: 17.1.0
+ *hash* is ``None`` and therefore mirrors *eq* by default.
+ .. versionadded:: 17.3.0 *type*
+ .. deprecated:: 17.4.0 *convert*
+ .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+ *convert* to achieve consistency with other noun-based arguments.
+ .. versionadded:: 18.1.0
+ ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+ .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+ .. versionchanged:: 21.1.0
+ *eq*, *order*, and *cmp* also accept a custom callable
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
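+
+    A minimal usage sketch showing *default* and *factory* (``C`` is
+    illustrative only):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C(object):
+    ...     x = attr.ib(default=42)
+    ...     y = attr.ib(factory=list)
+    >>> C()
+    C(x=42, y=[])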
+ """
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq, order, True
+ )
+
+ if hash is not None and hash is not True and hash is not False:
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+
+ if factory is not None:
+ if default is not NOTHING:
+ raise ValueError(
+ "The `default` and `factory` arguments are mutually "
+ "exclusive."
+ )
+ if not callable(factory):
+ raise ValueError("The `factory` argument must be a callable.")
+ default = Factory(factory)
+
+ if metadata is None:
+ metadata = {}
+
+ # Apply syntactic sugar by auto-wrapping.
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ if validator and isinstance(validator, (list, tuple)):
+ validator = and_(*validator)
+
+ if converter and isinstance(converter, (list, tuple)):
+ converter = pipe(*converter)
+
+ return _CountingAttr(
+ default=default,
+ validator=validator,
+ repr=repr,
+ cmp=None,
+ hash=hash,
+ init=init,
+ converter=converter,
+ metadata=metadata,
+ type=type,
+ kw_only=kw_only,
+ eq=eq,
+ eq_key=eq_key,
+ order=order,
+ order_key=order_key,
+ on_setattr=on_setattr,
+ )
+
+
+def _compile_and_eval(script, globs, locs=None, filename=""):
+ """
+ "Exec" the script with the given global (globs) and local (locs) variables.
+ """
+ bytecode = compile(script, filename, "exec")
+ eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs=None):
+ """
+ Create the method with the script given and return the method object.
+ """
+ locs = {}
+ if globs is None:
+ globs = {}
+
+ _compile_and_eval(script, globs, locs, filename)
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+ linecache.cache[filename] = (
+ len(script),
+ None,
+ script.splitlines(True),
+ filename,
+ )
+
+ return locs[name]
+
+
+def _make_attr_tuple_class(cls_name, attr_names):
+ """
+ Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+ The subclass is a bare tuple with properties for names.
+
+ class MyClassAttributes(tuple):
+ __slots__ = ()
+ x = property(itemgetter(0))
+ """
+ attr_class_name = "{}Attributes".format(cls_name)
+ attr_class_template = [
+ "class {}(tuple):".format(attr_class_name),
+ " __slots__ = ()",
+ ]
+ if attr_names:
+ for i, attr_name in enumerate(attr_names):
+ attr_class_template.append(
+ _tuple_property_pat.format(index=i, attr_name=attr_name)
+ )
+ else:
+ attr_class_template.append(" pass")
+ globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
+ _compile_and_eval("\n".join(attr_class_template), globs)
+ return globs[attr_class_name]
+
+
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+_Attributes = _make_attr_tuple_class(
+ "_Attributes",
+ [
+ # all attributes to build dunder methods for
+ "attrs",
+ # attributes that have been inherited
+ "base_attrs",
+ # map inherited attributes to their originating classes
+ "base_attrs_map",
+ ],
+)
+
+
+def _is_class_var(annot):
+ """
+ Check whether *annot* is a typing.ClassVar.
+
+ The string comparison hack is used to avoid evaluating all string
+ annotations which would put attrs-based classes at a performance
+ disadvantage compared to plain old classes.
+ """
+ annot = str(annot)
+
+ # Annotation can be quoted.
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
+ annot = annot[1:-1]
+
+ return annot.startswith(_classvar_prefixes)
+
+
+def _has_own_attribute(cls, attrib_name):
+ """
+ Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
+
+ Requires Python 3.
+ """
+ attr = getattr(cls, attrib_name, _sentinel)
+ if attr is _sentinel:
+ return False
+
+ for base_cls in cls.__mro__[1:]:
+ a = getattr(base_cls, attrib_name, None)
+ if attr is a:
+ return False
+
+ return True
+
+
+def _get_annotations(cls):
+ """
+ Get annotations for *cls*.
+ """
+ if _has_own_attribute(cls, "__annotations__"):
+ return cls.__annotations__
+
+ return {}
+
+
+def _counter_getter(e):
+ """
+ Key function for sorting to avoid re-creating a lambda for every class.
+ """
+ return e[1].counter
+
+
+def _collect_base_attrs(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in reversed(cls.__mro__[1:-1]):
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.inherited or a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ # For each name, only keep the freshest definition i.e. the furthest at the
+ # back. base_attr_map is fine because it gets overwritten with every new
+ # instance.
+ filtered = []
+ seen = set()
+ for a in reversed(base_attrs):
+ if a.name in seen:
+ continue
+ filtered.insert(0, a)
+ seen.add(a.name)
+
+ return filtered, base_attr_map
+
+
+def _collect_base_attrs_broken(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+
+ N.B. *taken_attr_names* will be mutated.
+
+ Adhere to the old incorrect behavior.
+
+    Notably it collects from the front and considers inherited attributes,
+    which leads to the buggy behavior reported in #428.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in cls.__mro__[1:-1]:
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ taken_attr_names.add(a.name)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+ cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+ """
+ Transform all `_CountingAttr`s on a class into `Attribute`s.
+
+ If *these* is passed, use that and don't look for them on the class.
+
+    If *collect_by_mro* is True, collect them in the correct MRO order,
+    otherwise use the old -- incorrect -- order. See #428.
+
+ Return an `_Attributes`.
+ """
+ cd = cls.__dict__
+ anns = _get_annotations(cls)
+
+ if these is not None:
+ ca_list = [(name, ca) for name, ca in iteritems(these)]
+
+ if not isinstance(these, ordered_dict):
+ ca_list.sort(key=_counter_getter)
+ elif auto_attribs is True:
+ ca_names = {
+ name
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ }
+ ca_list = []
+ annot_names = set()
+ for attr_name, type in anns.items():
+ if _is_class_var(type):
+ continue
+ annot_names.add(attr_name)
+ a = cd.get(attr_name, NOTHING)
+
+ if not isinstance(a, _CountingAttr):
+ if a is NOTHING:
+ a = attrib()
+ else:
+ a = attrib(default=a)
+ ca_list.append((attr_name, a))
+
+ unannotated = ca_names - annot_names
+ if len(unannotated) > 0:
+ raise UnannotatedAttributeError(
+ "The following `attr.ib`s lack a type annotation: "
+ + ", ".join(
+ sorted(unannotated, key=lambda n: cd.get(n).counter)
+ )
+ + "."
+ )
+ else:
+ ca_list = sorted(
+ (
+ (name, attr)
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ ),
+ key=lambda e: e[1].counter,
+ )
+
+ own_attrs = [
+ Attribute.from_counting_attr(
+ name=attr_name, ca=ca, type=anns.get(attr_name)
+ )
+ for attr_name, ca in ca_list
+ ]
+
+ if collect_by_mro:
+ base_attrs, base_attr_map = _collect_base_attrs(
+ cls, {a.name for a in own_attrs}
+ )
+ else:
+ base_attrs, base_attr_map = _collect_base_attrs_broken(
+ cls, {a.name for a in own_attrs}
+ )
+
+ attr_names = [a.name for a in base_attrs + own_attrs]
+
+ AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
+
+ if kw_only:
+ own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
+ base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
+
+ attrs = AttrsClass(base_attrs + own_attrs)
+
+ # Mandatory vs non-mandatory attr order only matters when they are part of
+ # the __init__ signature and when they aren't kw_only (which are moved to
+ # the end and can be mandatory or non-mandatory in any order, as they will
+ # be specified as keyword args anyway). Check the order of those attrs:
+ had_default = False
+ for a in (a for a in attrs if a.init is not False and a.kw_only is False):
+ if had_default is True and a.default is NOTHING:
+ raise ValueError(
+ "No mandatory attributes allowed after an attribute with a "
+ "default value or factory. Attribute in question: %r" % (a,)
+ )
+
+ if had_default is False and a.default is not NOTHING:
+ had_default = True
+
+ if field_transformer is not None:
+ attrs = field_transformer(cls, attrs)
+ return _Attributes((attrs, base_attrs, base_attr_map))
+
+
+if PYPY:
+
+ def _frozen_setattrs(self, name, value):
+ """
+ Attached to frozen classes as __setattr__.
+ """
+ if isinstance(self, BaseException) and name in (
+ "__cause__",
+ "__context__",
+ ):
+ BaseException.__setattr__(self, name, value)
+ return
+
+ raise FrozenInstanceError()
+
+
+else:
+
+ def _frozen_setattrs(self, name, value):
+ """
+ Attached to frozen classes as __setattr__.
+ """
+ raise FrozenInstanceError()
+
+
+def _frozen_delattrs(self, name):
+ """
+ Attached to frozen classes as __delattr__.
+ """
+ raise FrozenInstanceError()
+
+
+class _ClassBuilder(object):
+ """
+ Iteratively build *one* class.
+ """
+
+ __slots__ = (
+ "_attr_names",
+ "_attrs",
+ "_base_attr_map",
+ "_base_names",
+ "_cache_hash",
+ "_cls",
+ "_cls_dict",
+ "_delete_attribs",
+ "_frozen",
+ "_has_pre_init",
+ "_has_post_init",
+ "_is_exc",
+ "_on_setattr",
+ "_slots",
+ "_weakref_slot",
+ "_has_own_setattr",
+ "_has_custom_setattr",
+ )
+
+ def __init__(
+ self,
+ cls,
+ these,
+ slots,
+ frozen,
+ weakref_slot,
+ getstate_setstate,
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_custom_setattr,
+ field_transformer,
+ ):
+ attrs, base_attrs, base_map = _transform_attrs(
+ cls,
+ these,
+ auto_attribs,
+ kw_only,
+ collect_by_mro,
+ field_transformer,
+ )
+
+ self._cls = cls
+ self._cls_dict = dict(cls.__dict__) if slots else {}
+ self._attrs = attrs
+ self._base_names = set(a.name for a in base_attrs)
+ self._base_attr_map = base_map
+ self._attr_names = tuple(a.name for a in attrs)
+ self._slots = slots
+ self._frozen = frozen
+ self._weakref_slot = weakref_slot
+ self._cache_hash = cache_hash
+ self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
+ self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
+ self._delete_attribs = not bool(these)
+ self._is_exc = is_exc
+ self._on_setattr = on_setattr
+
+ self._has_custom_setattr = has_custom_setattr
+ self._has_own_setattr = False
+
+ self._cls_dict["__attrs_attrs__"] = self._attrs
+
+ if frozen:
+ self._cls_dict["__setattr__"] = _frozen_setattrs
+ self._cls_dict["__delattr__"] = _frozen_delattrs
+
+ self._has_own_setattr = True
+
+ if getstate_setstate:
+ (
+ self._cls_dict["__getstate__"],
+ self._cls_dict["__setstate__"],
+ ) = self._make_getstate_setstate()
+
+ def __repr__(self):
+ return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
+
+ def build_class(self):
+ """
+ Finalize class based on the accumulated configuration.
+
+ Builder cannot be used after calling this method.
+ """
+ if self._slots is True:
+ return self._create_slots_class()
+ else:
+ return self._patch_original_class()
+
+ def _patch_original_class(self):
+ """
+ Apply accumulated methods and return the class.
+ """
+ cls = self._cls
+ base_names = self._base_names
+
+ # Clean class of attribute definitions (`attr.ib()`s).
+ if self._delete_attribs:
+ for name in self._attr_names:
+ if (
+ name not in base_names
+ and getattr(cls, name, _sentinel) is not _sentinel
+ ):
+ try:
+ delattr(cls, name)
+ except AttributeError:
+ # This can happen if a base class defines a class
+ # variable and we want to set an attribute with the
+ # same name by using only a type annotation.
+ pass
+
+ # Attach our dunder methods.
+ for name, value in self._cls_dict.items():
+ setattr(cls, name, value)
+
+ # If we've inherited an attrs __setattr__ and don't write our own,
+ # reset it to object's.
+ if not self._has_own_setattr and getattr(
+ cls, "__attrs_own_setattr__", False
+ ):
+ cls.__attrs_own_setattr__ = False
+
+ if not self._has_custom_setattr:
+ cls.__setattr__ = object.__setattr__
+
+ return cls
+
+ def _create_slots_class(self):
+ """
+ Build and return a new class with a `__slots__` attribute.
+ """
+ cd = {
+ k: v
+ for k, v in iteritems(self._cls_dict)
+ if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
+ }
+
+        # If our class doesn't have its own implementation of __setattr__
+        # (either from the user or by us), check the bases: if one of them
+        # has an attrs-made __setattr__, it needs to be reset. We don't walk
+        # the MRO because we only care about our immediate base classes.
+        # XXX: This can be confused by subclassing a slotted attrs class with
+        # XXX: a non-attrs class and then subclassing the resulting class
+        # XXX: with an attrs class. See `test_slotted_confused` for details.
+        # XXX: For now that's OK with us.
+ if not self._has_own_setattr:
+ cd["__attrs_own_setattr__"] = False
+
+ if not self._has_custom_setattr:
+ for base_cls in self._cls.__bases__:
+ if base_cls.__dict__.get("__attrs_own_setattr__", False):
+ cd["__setattr__"] = object.__setattr__
+ break
+
+ # Traverse the MRO to collect existing slots
+ # and check for an existing __weakref__.
+ existing_slots = dict()
+ weakref_inherited = False
+ for base_cls in self._cls.__mro__[1:-1]:
+ if base_cls.__dict__.get("__weakref__", None) is not None:
+ weakref_inherited = True
+ existing_slots.update(
+ {
+ name: getattr(base_cls, name)
+ for name in getattr(base_cls, "__slots__", [])
+ }
+ )
+
+ base_names = set(self._base_names)
+
+ names = self._attr_names
+ if (
+ self._weakref_slot
+ and "__weakref__" not in getattr(self._cls, "__slots__", ())
+ and "__weakref__" not in names
+ and not weakref_inherited
+ ):
+ names += ("__weakref__",)
+
+ # We only add the names of attributes that aren't inherited.
+ # Setting __slots__ to inherited attributes wastes memory.
+ slot_names = [name for name in names if name not in base_names]
+        # There are slots for attributes from the current class
+        # that are defined in parent classes.
+        # As their descriptors may be overridden by a child class,
+        # we collect them here and update the class dict.
+ reused_slots = {
+ slot: slot_descriptor
+ for slot, slot_descriptor in iteritems(existing_slots)
+ if slot in slot_names
+ }
+ slot_names = [name for name in slot_names if name not in reused_slots]
+ cd.update(reused_slots)
+ if self._cache_hash:
+ slot_names.append(_hash_cache_field)
+ cd["__slots__"] = tuple(slot_names)
+
+ qualname = getattr(self._cls, "__qualname__", None)
+ if qualname is not None:
+ cd["__qualname__"] = qualname
+
+ # Create new class based on old class and our methods.
+ cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+ # The following is a fix for
+ # https://github.com/python-attrs/attrs/issues/102. On Python 3,
+ # if a method mentions `__class__` or uses the no-arg super(), the
+ # compiler will bake a reference to the class in the method itself
+ # as `method.__closure__`. Since we replace the class with a
+ # clone, we rewrite these references so it keeps working.
+ for item in cls.__dict__.values():
+ if isinstance(item, (classmethod, staticmethod)):
+ # Class- and staticmethods hide their functions inside.
+ # These might need to be rewritten as well.
+ closure_cells = getattr(item.__func__, "__closure__", None)
+ elif isinstance(item, property):
+ # Workaround for property `super()` shortcut (PY3-only).
+ # There is no universal way for other descriptors.
+ closure_cells = getattr(item.fget, "__closure__", None)
+ else:
+ closure_cells = getattr(item, "__closure__", None)
+
+ if not closure_cells: # Catch None or the empty list.
+ continue
+ for cell in closure_cells:
+ try:
+ match = cell.cell_contents is self._cls
+ except ValueError: # ValueError: Cell is empty
+ pass
+ else:
+ if match:
+ set_closure_cell(cell, cls)
+
+ return cls
+
+ def add_repr(self, ns):
+ self._cls_dict["__repr__"] = self._add_method_dunders(
+ _make_repr(self._attrs, ns=ns)
+ )
+ return self
+
+ def add_str(self):
+ repr = self._cls_dict.get("__repr__")
+ if repr is None:
+ raise ValueError(
+ "__str__ can only be generated if a __repr__ exists."
+ )
+
+ def __str__(self):
+ return self.__repr__()
+
+ self._cls_dict["__str__"] = self._add_method_dunders(__str__)
+ return self
+
+ def _make_getstate_setstate(self):
+ """
+ Create custom __setstate__ and __getstate__ methods.
+ """
+ # __weakref__ is not writable.
+ state_attr_names = tuple(
+ an for an in self._attr_names if an != "__weakref__"
+ )
+
+ def slots_getstate(self):
+ """
+ Automatically created by attrs.
+ """
+ return tuple(getattr(self, name) for name in state_attr_names)
+
+ hash_caching_enabled = self._cache_hash
+
+ def slots_setstate(self, state):
+ """
+ Automatically created by attrs.
+ """
+ __bound_setattr = _obj_setattr.__get__(self, Attribute)
+ for name, value in zip(state_attr_names, state):
+ __bound_setattr(name, value)
+
+ # The hash code cache is not included when the object is
+ # serialized, but it still needs to be initialized to None to
+ # indicate that the first call to __hash__ should be a cache
+ # miss.
+ if hash_caching_enabled:
+ __bound_setattr(_hash_cache_field, None)
+
+ return slots_getstate, slots_setstate
+
+ def make_unhashable(self):
+ self._cls_dict["__hash__"] = None
+ return self
+
+ def add_hash(self):
+ self._cls_dict["__hash__"] = self._add_method_dunders(
+ _make_hash(
+ self._cls,
+ self._attrs,
+ frozen=self._frozen,
+ cache_hash=self._cache_hash,
+ )
+ )
+
+ return self
+
+ def add_init(self):
+ self._cls_dict["__init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr is not None
+ and self._on_setattr is not setters.NO_OP,
+ attrs_init=False,
+ )
+ )
+
+ return self
+
+ def add_attrs_init(self):
+ self._cls_dict["__attrs_init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr is not None
+ and self._on_setattr is not setters.NO_OP,
+ attrs_init=True,
+ )
+ )
+
+ return self
+
+ def add_eq(self):
+ cd = self._cls_dict
+
+ cd["__eq__"] = self._add_method_dunders(
+ _make_eq(self._cls, self._attrs)
+ )
+ cd["__ne__"] = self._add_method_dunders(_make_ne())
+
+ return self
+
+ def add_order(self):
+ cd = self._cls_dict
+
+ cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
+ self._add_method_dunders(meth)
+ for meth in _make_order(self._cls, self._attrs)
+ )
+
+ return self
+
+ def add_setattr(self):
+ if self._frozen:
+ return self
+
+ sa_attrs = {}
+ for a in self._attrs:
+ on_setattr = a.on_setattr or self._on_setattr
+ if on_setattr and on_setattr is not setters.NO_OP:
+ sa_attrs[a.name] = a, on_setattr
+
+ if not sa_attrs:
+ return self
+
+ if self._has_custom_setattr:
+ # We need to write a __setattr__ but there already is one!
+ raise ValueError(
+ "Can't combine custom __setattr__ with on_setattr hooks."
+ )
+
+ # docstring comes from _add_method_dunders
+ def __setattr__(self, name, val):
+ try:
+ a, hook = sa_attrs[name]
+ except KeyError:
+ nval = val
+ else:
+ nval = hook(self, a, val)
+
+ _obj_setattr(self, name, nval)
+
+ self._cls_dict["__attrs_own_setattr__"] = True
+ self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+ self._has_own_setattr = True
+
+ return self
+
+ def _add_method_dunders(self, method):
+ """
+ Add __module__ and __qualname__ to a *method* if possible.
+ """
+ try:
+ method.__module__ = self._cls.__module__
+ except AttributeError:
+ pass
+
+ try:
+ method.__qualname__ = ".".join(
+ (self._cls.__qualname__, method.__name__)
+ )
+ except AttributeError:
+ pass
+
+ try:
+ method.__doc__ = "Method generated by attrs for class %s." % (
+ self._cls.__qualname__,
+ )
+ except AttributeError:
+ pass
+
+ return method
+
+
+_CMP_DEPRECATION = (
+ "The usage of `cmp` is deprecated and will be removed on or after "
+ "2021-06-01. Please use `eq` and `order` instead."
+)
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ return cmp, cmp
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq = default_eq
+
+ if order is None:
+ order = eq
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+ raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+ def decide_callable_or_boolean(value):
+ """
+ Decide whether a key function is used.
+ """
+ if callable(value):
+ value, key = True, value
+ else:
+ key = None
+ return value, key
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ cmp, cmp_key = decide_callable_or_boolean(cmp)
+ return cmp, cmp_key, cmp, cmp_key
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq, eq_key = default_eq, None
+ else:
+ eq, eq_key = decide_callable_or_boolean(eq)
+
+ if order is None:
+ order, order_key = eq, eq_key
+ else:
+ order, order_key = decide_callable_or_boolean(order)
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+ cls, flag, auto_detect, dunders, default=True
+):
+ """
+ Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
+    the same as passed into @attr.s, and *dunders* is a tuple of attribute
+    names whose presence signals that the user has implemented the methods
+    themselves.
+
+    Return *default* if no reason either for or against is found.
+
+ auto_detect must be False on Python 2.
+ """
+ if flag is True or flag is False:
+ return flag
+
+ if flag is None and auto_detect is False:
+ return default
+
+ # Logically, flag is None and auto_detect is True here.
+ for dunder in dunders:
+ if _has_own_attribute(cls, dunder):
+ return False
+
+ return default
+
+
+def attrs(
+ maybe_cls=None,
+ these=None,
+ repr_ns=None,
+ repr=None,
+ cmp=None,
+ hash=None,
+ init=None,
+ slots=False,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=False,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=False,
+ eq=None,
+ order=None,
+ auto_detect=False,
+ collect_by_mro=False,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+):
+ r"""
+ A class decorator that adds `dunder
+ <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
+ specified attributes using `attr.ib` or the *these* argument.
+
+ :param these: A dictionary of name to `attr.ib` mappings. This is
+ useful to avoid the definition of your attributes within the class body
+ because you can't (e.g. if you want to add ``__repr__`` methods to
+ Django models) or don't want to.
+
+ If *these* is not ``None``, ``attrs`` will *not* search the class body
+ for attributes and will *not* remove any attributes from it.
+
+ If *these* is an ordered dict (`dict` on Python 3.6+,
+ `collections.OrderedDict` otherwise), the order is deduced from
+ the order of the attributes inside *these*. Otherwise the order
+ of the definition of the attributes is used.
+
+ :type these: `dict` of `str` to `attr.ib`
+
+ :param str repr_ns: When using nested classes, there's no way in Python 2
+ to automatically detect that. Therefore it's possible to set the
+ namespace explicitly for a more meaningful ``repr`` output.
+ :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+ *order*, and *hash* arguments explicitly, assume they are set to
+ ``True`` **unless any** of the involved methods for one of the
+ arguments is implemented in the *current* class (i.e. it is *not*
+ inherited from some base class).
+
+ So for example by implementing ``__eq__`` on a class yourself,
+ ``attrs`` will deduce ``eq=False`` and will create *neither*
+ ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+ ``__ne__`` by default, so it *should* be enough to only implement
+ ``__eq__`` in most cases).
+
+ .. warning::
+
+ If you prevent ``attrs`` from creating the ordering methods for you
+ (``order=False``, e.g. by implementing ``__le__``), it becomes
+ *your* responsibility to make sure its ordering is sound. The best
+ way is to use the `functools.total_ordering` decorator.
+
+
+ Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+ *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+ *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
+ a `PythonTooOldError`.
+
+    :param bool repr: Create a ``__repr__`` method with a human readable
+        representation of ``attrs`` attributes.
+ :param bool str: Create a ``__str__`` method that is identical to
+ ``__repr__``. This is usually not necessary except for
+ `Exception`\ s.
+ :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+ and ``__ne__`` methods that check two instances for equality.
+
+ They compare the instances as if they were tuples of their ``attrs``
+ attributes if and only if the types of both classes are *identical*!
+ :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+ ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+ allow instances to be ordered. If ``None`` (default) mirror value of
+ *eq*.
+ :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+ and *order* to the same value. Must not be mixed with *eq* or *order*.
+    :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
+        is generated according to how *eq* and *frozen* are set.
+
+ 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
+ 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+ None, marking it unhashable (which it is).
+    3. If *eq* is False, ``__hash__`` will be left untouched meaning the
+       ``__hash__`` method of the base class will be used (if base class is
+       ``object``, this means it will fall back to id-based hashing).
+
+        Although not recommended, you can decide for yourself and force
+        ``attrs`` to create one (e.g. if the class is immutable even though
+        you didn't freeze it programmatically) by passing ``True``, or
+        prevent it by passing ``False``. Both of these cases are rather
+        special and should be used carefully.
+
+ See our documentation on `hashing`, Python's documentation on
+ `object.__hash__`, and the `GitHub issue that led to the default \
+ behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
+ details.
+ :param bool init: Create a ``__init__`` method that initializes the
+ ``attrs`` attributes. Leading underscores are stripped for the argument
+ name. If a ``__attrs_pre_init__`` method exists on the class, it will
+ be called before the class is initialized. If a ``__attrs_post_init__``
+ method exists on the class, it will be called after the class is fully
+ initialized.
+
+ If ``init`` is ``False``, an ``__attrs_init__`` method will be
+ injected instead. This allows you to define a custom ``__init__``
+ method that can do pre-init work such as ``super().__init__()``,
+ and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+ :param bool slots: Create a `slotted class <slotted classes>` that's more
+ memory-efficient. Slotted classes are generally superior to the default
+ dict classes, but have some gotchas you should know about, so we
+ encourage you to read the `glossary entry <slotted classes>`.
+ :param bool frozen: Make instances immutable after initialization. If
+ someone attempts to modify a frozen instance,
+ `attr.exceptions.FrozenInstanceError` is raised.
+
+ .. note::
+
+ 1. This is achieved by installing a custom ``__setattr__`` method
+ on your class, so you can't implement your own.
+
+ 2. True immutability is impossible in Python.
+
+        3. This *does* have a minor runtime performance `impact
+           <how-frozen>` when initializing new instances. In other words:
+           ``__init__`` is slightly slower with ``frozen=True``.
+
+ 4. If a class is frozen, you cannot modify ``self`` in
+ ``__attrs_post_init__`` or a self-written ``__init__``. You can
+ circumvent that limitation by using
+ ``object.__setattr__(self, "attribute_name", value)``.
+
+ 5. Subclasses of a frozen class are frozen too.
+
+ :param bool weakref_slot: Make instances weak-referenceable. This has no
+ effect unless ``slots`` is also enabled.
+ :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated
+ attributes (Python 3.6 and later only) from the class body.
+
+ In this case, you **must** annotate every field. If ``attrs``
+ encounters a field that is set to an `attr.ib` but lacks a type
+ annotation, an `attr.exceptions.UnannotatedAttributeError` is
+ raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+ want to set a type.
+
+ If you assign a value to those attributes (e.g. ``x: int = 42``), that
+ value becomes the default value like if it were passed using
+ ``attr.ib(default=42)``. Passing an instance of `Factory` also
+ works as expected in most cases (see warning below).
+
+ Attributes annotated as `typing.ClassVar`, and attributes that are
+ neither annotated nor set to an `attr.ib` are **ignored**.
+
+ .. warning::
+ For features that use the attribute name to create decorators (e.g.
+ `validators <validators>`), you still *must* assign `attr.ib` to
+ them. Otherwise Python will either not find the name or try to use
+ the default value to call e.g. ``validator`` on it.
+
+        These errors can be quite confusing and are probably the most common
+        bug report on our bug tracker.
+
+ .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
+ :param bool kw_only: Make all attributes keyword-only (Python 3+)
+ in the generated ``__init__`` (if ``init`` is ``False``, this
+ parameter is ignored).
+ :param bool cache_hash: Ensure that the object's hash code is computed
+ only once and stored on the object. If this is set to ``True``,
+ hashing must be either explicitly or implicitly enabled for this
+ class. If the hash code is cached, avoid any reassignments of
+ fields involved in hash code computation or mutations of the objects
+ those fields point to after object creation. If such changes occur,
+ the behavior of the object's hash code is undefined.
+    :param bool auto_exc: If the class subclasses `BaseException`
+        (which implicitly includes any subclass of any exception), the
+        following happens so that it behaves like a well-behaved Python
+        exception class:
+
+    - the values for *eq*, *order*, and *hash* are ignored and the
+      instances compare and hash by the instance's ids (N.B. ``attrs`` will
+      *not* remove existing implementations of ``__hash__`` or the equality
+      methods. It just won't add its own.),
+ - all attributes that are either passed into ``__init__`` or have a
+ default value are additionally available as a tuple in the ``args``
+ attribute,
+ - the value of *str* is ignored leaving ``__str__`` to base classes.
+    :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
+        collects attributes from base classes. The default behavior is
+        incorrect in certain cases of multiple inheritance. It should be on by
+        default but is kept off for backward compatibility.
+
+ See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
+ more details.
+
+ :param Optional[bool] getstate_setstate:
+ .. note::
+ This is usually only interesting for slotted classes and you should
+ probably just set *auto_detect* to `True`.
+
+ If `True`, ``__getstate__`` and
+ ``__setstate__`` are generated and attached to the class. This is
+ necessary for slotted classes to be pickleable. If left `None`, it's
+ `True` by default for slotted classes and ``False`` for dict classes.
+
+ If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+ and **either** ``__getstate__`` or ``__setstate__`` is detected directly
+ on the class (i.e. not inherited), it is set to `False` (this is usually
+ what you want).
+
+ :param on_setattr: A callable that is run whenever the user attempts to set
+ an attribute (either by assignment like ``i.x = 42`` or by using
+ `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
+ as validators: the instance, the attribute that is being modified, and
+ the new value.
+
+ If no exception is raised, the attribute is set to the return value of
+ the callable.
+
+ If a list of callables is passed, they're automatically wrapped in an
+ `attr.setters.pipe`.
+
+ :param Optional[callable] field_transformer:
+ A function that is called with the original class object and all
+ fields right before ``attrs`` finalizes the class. You can use
+ this, e.g., to automatically add converters or validators to
+ fields based on their types. See `transform-fields` for more details.
+
+ .. versionadded:: 16.0.0 *slots*
+ .. versionadded:: 16.1.0 *frozen*
+ .. versionadded:: 16.3.0 *str*
+ .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+ .. versionchanged:: 17.1.0
+ *hash* supports ``None`` as value which is also the default now.
+ .. versionadded:: 17.3.0 *auto_attribs*
+ .. versionchanged:: 18.1.0
+ If *these* is passed, no attributes are deleted from the class body.
+ .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+ .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+        `DeprecationWarning` if the classes compared are subclasses of
+        each other. ``__eq__`` and ``__ne__`` never tried to compare
+        subclasses to each other.
+ .. versionchanged:: 19.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+ subclasses comparable anymore.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionadded:: 18.2.0 *cache_hash*
+ .. versionadded:: 19.1.0 *auto_exc*
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *auto_detect*
+ .. versionadded:: 20.1.0 *collect_by_mro*
+ .. versionadded:: 20.1.0 *getstate_setstate*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionadded:: 20.3.0 *field_transformer*
+ .. versionchanged:: 21.1.0
+ ``init=False`` injects ``__attrs_init__``
+ .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
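+
+    A minimal usage sketch (``Coordinates`` is illustrative only):
+
+    >>> import attr
+    >>> @attr.s(frozen=True, slots=True)
+    ... class Coordinates(object):
+    ...     x = attr.ib()
+    ...     y = attr.ib()
+    >>> c = Coordinates(1, 2)
+    >>> c
+    Coordinates(x=1, y=2)
+    >>> c == Coordinates(1, 2)
+    True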
+ """
+ if auto_detect and PY2:
+ raise PythonTooOldError(
+ "auto_detect only works on Python 3 and later."
+ )
+
+ eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+ hash_ = hash # work around the lack of nonlocal
+
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ def wrap(cls):
+
+ if getattr(cls, "__class__", None) is None:
+ raise TypeError("attrs only works with new-style classes.")
+
+ is_frozen = frozen or _has_frozen_base_class(cls)
+ is_exc = auto_exc is True and issubclass(cls, BaseException)
+ has_own_setattr = auto_detect and _has_own_attribute(
+ cls, "__setattr__"
+ )
+
+ if has_own_setattr and is_frozen:
+ raise ValueError("Can't freeze a class with a custom __setattr__.")
+
+ builder = _ClassBuilder(
+ cls,
+ these,
+ slots,
+ is_frozen,
+ weakref_slot,
+ _determine_whether_to_implement(
+ cls,
+ getstate_setstate,
+ auto_detect,
+ ("__getstate__", "__setstate__"),
+ default=slots,
+ ),
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_own_setattr,
+ field_transformer,
+ )
+ if _determine_whether_to_implement(
+ cls, repr, auto_detect, ("__repr__",)
+ ):
+ builder.add_repr(repr_ns)
+ if str is True:
+ builder.add_str()
+
+ eq = _determine_whether_to_implement(
+ cls, eq_, auto_detect, ("__eq__", "__ne__")
+ )
+ if not is_exc and eq is True:
+ builder.add_eq()
+ if not is_exc and _determine_whether_to_implement(
+ cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
+ ):
+ builder.add_order()
+
+ builder.add_setattr()
+
+ if (
+ hash_ is None
+ and auto_detect is True
+ and _has_own_attribute(cls, "__hash__")
+ ):
+ hash = False
+ else:
+ hash = hash_
+ if hash is not True and hash is not False and hash is not None:
+            # Can't use `hash in (True, False, None)` because 1 == True,
+            # for example.
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+ elif hash is False or (hash is None and eq is False) or is_exc:
+            # Don't do anything. Should fall back to ``object``'s __hash__
+            # which is by id.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ elif hash is True or (
+ hash is None and eq is True and is_frozen is True
+ ):
+ # Build a __hash__ if told so, or if it's safe.
+ builder.add_hash()
+ else:
+ # Raise TypeError on attempts to hash.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ builder.make_unhashable()
+
+ if _determine_whether_to_implement(
+ cls, init, auto_detect, ("__init__",)
+ ):
+ builder.add_init()
+ else:
+ builder.add_attrs_init()
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " init must be True."
+ )
+
+ return builder.build_class()
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+ # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+if PY2:
+
+ def _has_frozen_base_class(cls):
+ """
+ Check whether *cls* has a frozen ancestor by looking at its
+ __setattr__.
+ """
+ return (
+ getattr(cls.__setattr__, "__module__", None)
+ == _frozen_setattrs.__module__
+ and cls.__setattr__.__name__ == _frozen_setattrs.__name__
+ )
+
+
+else:
+
+ def _has_frozen_base_class(cls):
+ """
+ Check whether *cls* has a frozen ancestor by looking at its
+ __setattr__.
+ """
+ return cls.__setattr__ == _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+ """
+ Create a "filename" suitable for a function being generated.
+ """
+ unique_id = uuid.uuid4()
+ extra = ""
+ count = 1
+
+ while True:
+ unique_filename = "<attrs generated {0} {1}.{2}{3}>".format(
+ func_name,
+ cls.__module__,
+ getattr(cls, "__qualname__", cls.__name__),
+ extra,
+ )
+ # To handle concurrency we essentially "reserve" our spot in
+ # the linecache with a dummy line. The caller can then
+ # set this value correctly.
+ cache_line = (1, None, (str(unique_id),), unique_filename)
+ if (
+ linecache.cache.setdefault(unique_filename, cache_line)
+ == cache_line
+ ):
+ return unique_filename
+
+ # Looks like this spot is taken. Try again.
+ count += 1
+ extra = "-{0}".format(count)
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+ attrs = tuple(
+ a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+ )
+
+ tab = " "
+
+ unique_filename = _generate_unique_filename(cls, "hash")
+ type_hash = hash(unique_filename)
+
+ hash_def = "def __hash__(self"
+ hash_func = "hash(("
+ closing_braces = "))"
+ if not cache_hash:
+ hash_def += "):"
+ else:
+ if not PY2:
+ hash_def += ", *"
+
+ hash_def += (
+ ", _cache_wrapper="
+ + "__import__('attr._make')._make._CacheHashWrapper):"
+ )
+ hash_func = "_cache_wrapper(" + hash_func
+ closing_braces += ")"
+
+ method_lines = [hash_def]
+
+ def append_hash_computation_lines(prefix, indent):
+ """
+ Generate the code for actually computing the hash code.
+        Below, this will either be returned directly or used to compute a
+        value which is then cached, depending on the value of cache_hash.
+ """
+
+ method_lines.extend(
+ [
+ indent + prefix + hash_func,
+ indent + " %d," % (type_hash,),
+ ]
+ )
+
+ for a in attrs:
+ method_lines.append(indent + " self.%s," % a.name)
+
+ method_lines.append(indent + " " + closing_braces)
+
+ if cache_hash:
+ method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
+ if frozen:
+ append_hash_computation_lines(
+ "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
+ )
+ method_lines.append(tab * 2 + ")") # close __setattr__
+ else:
+ append_hash_computation_lines(
+ "self.%s = " % _hash_cache_field, tab * 2
+ )
+ method_lines.append(tab + "return self.%s" % _hash_cache_field)
+ else:
+ append_hash_computation_lines("return ", tab)
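+
+    # With cache_hash=False the assembled source looks roughly like this
+    # (the type hash literal and the attribute lines vary per class):
+    #
+    #     def __hash__(self):
+    #         return hash((
+    #             <type hash>,
+    #             self.x,
+    #             ))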
+
+ script = "\n".join(method_lines)
+ return _make_method("__hash__", script, unique_filename)
+
+
+def _add_hash(cls, attrs):
+ """
+ Add a hash method to *cls*.
+ """
+ cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+ return cls
+
+
+def _make_ne():
+ """
+ Create __ne__ method.
+ """
+
+ def __ne__(self, other):
+ """
+ Check equality and either forward a NotImplemented or
+ return the result negated.
+ """
+ result = self.__eq__(other)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return not result
+
+ return __ne__
+
+
+def _make_eq(cls, attrs):
+ """
+ Create __eq__ method for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.eq]
+
+ unique_filename = _generate_unique_filename(cls, "eq")
+ lines = [
+ "def __eq__(self, other):",
+ " if other.__class__ is not self.__class__:",
+ " return NotImplemented",
+ ]
+
+    # We can't just do a big self.x == other.x and ... clause due to
+    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
+ globs = {}
+ if attrs:
+ lines.append(" return (")
+ others = [" ) == ("]
+ for a in attrs:
+ if a.eq_key:
+ cmp_name = "_%s_key" % (a.name,)
+ # Add the key function to the global namespace
+ # of the evaluated function.
+ globs[cmp_name] = a.eq_key
+ lines.append(
+ " %s(self.%s),"
+ % (
+ cmp_name,
+ a.name,
+ )
+ )
+ others.append(
+ " %s(other.%s),"
+ % (
+ cmp_name,
+ a.name,
+ )
+ )
+ else:
+ lines.append(" self.%s," % (a.name,))
+ others.append(" other.%s," % (a.name,))
+
+ lines += others + [" )"]
+ else:
+ lines.append(" return True")
+
+ script = "\n".join(lines)
+
+ return _make_method("__eq__", script, unique_filename, globs)
+
+
+def _make_order(cls, attrs):
+ """
+ Create ordering methods for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.order]
+
+ def attrs_to_tuple(obj):
+ """
+ Save us some typing.
+ """
+ return tuple(
+ key(value) if key else value
+ for value, key in (
+ (getattr(obj, a.name), a.order_key) for a in attrs
+ )
+ )
+
+ def __lt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) < attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __le__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) <= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __gt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) > attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __ge__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) >= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ return __lt__, __le__, __gt__, __ge__
+
+
+def _add_eq(cls, attrs=None):
+ """
+ Add equality methods to *cls* with *attrs*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__eq__ = _make_eq(cls, attrs)
+ cls.__ne__ = _make_ne()
+
+ return cls
+
+
+_already_repring = threading.local()
+
+
+def _make_repr(attrs, ns):
+ """
+ Make a repr method that includes relevant *attrs*, adding *ns* to the full
+ name.
+ """
+
+ # Figure out which attributes to include, and which function to use to
+ # format them. The a.repr value can be either bool or a custom callable.
+ attr_names_with_reprs = tuple(
+ (a.name, repr if a.repr is True else a.repr)
+ for a in attrs
+ if a.repr is not False
+ )
+
+ def __repr__(self):
+ """
+ Automatically created by attrs.
+ """
+ try:
+ working_set = _already_repring.working_set
+ except AttributeError:
+ working_set = set()
+ _already_repring.working_set = working_set
+
+ if id(self) in working_set:
+ return "..."
+ real_cls = self.__class__
+ if ns is None:
+ qualname = getattr(real_cls, "__qualname__", None)
+ if qualname is not None:
+ class_name = qualname.rsplit(">.", 1)[-1]
+ else:
+ class_name = real_cls.__name__
+ else:
+ class_name = ns + "." + real_cls.__name__
+
+        # Since 'self' remains on the stack (i.e. strongly referenced) for
+        # the duration of this call, it's safe to depend on id(...)
+        # stability, and we don't need to track the instance (and therefore
+        # worry about properties like weakref- or hash-ability).
+ working_set.add(id(self))
+ try:
+ result = [class_name, "("]
+ first = True
+ for name, attr_repr in attr_names_with_reprs:
+ if first:
+ first = False
+ else:
+ result.append(", ")
+ result.extend(
+ (name, "=", attr_repr(getattr(self, name, NOTHING)))
+ )
+ return "".join(result) + ")"
+ finally:
+ working_set.remove(id(self))
+
+ return __repr__
+
+
+def _add_repr(cls, ns=None, attrs=None):
+ """
+ Add a repr method to *cls*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__repr__ = _make_repr(attrs, ns)
+ return cls
+
+
+def fields(cls):
+ """
+ Return the tuple of ``attrs`` attributes for a class.
+
+ The tuple also allows accessing the fields by their names (see below for
+ examples).
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ :rtype: tuple (with name accessors) of `attr.Attribute`
+
+ .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
+ by name.
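+
+    For instance, a minimal sketch with an illustrative two-field class (the
+    names are arbitrary)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> [a.name for a in attr.fields(C)]
+        ['x', 'y']
+        >>> attr.fields(C).x is attr.fields(C)[0]
+        True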
+ """
+ if not isclass(cls):
+ raise TypeError("Passed object must be a class.")
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is None:
+ raise NotAnAttrsClassError(
+ "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+ )
+ return attrs
+
+
+def fields_dict(cls):
+ """
+ Return an ordered dictionary of ``attrs`` attributes for a class, whose
+ keys are the attribute names.
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+ class.
+
+ :rtype: an ordered dict where keys are attribute names and values are
+ `attr.Attribute`\\ s. This will be a `dict` if it's
+ naturally ordered like on Python 3.6+ or an
+ :class:`~collections.OrderedDict` otherwise.
+
+ .. versionadded:: 18.1.0
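+
+    A minimal sketch, using an illustrative class ``C``::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> attr.fields_dict(C)['y'].name
+        'y'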
+ """
+ if not isclass(cls):
+ raise TypeError("Passed object must be a class.")
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is None:
+ raise NotAnAttrsClassError(
+ "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+ )
+ return ordered_dict(((a.name, a) for a in attrs))
+
+
+def validate(inst):
+ """
+ Validate all attributes on *inst* that have a validator.
+
+    Lets all exceptions raised by validators propagate.
+
+ :param inst: Instance of a class with ``attrs`` attributes.
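+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib(validator=attr.validators.instance_of(int))
+        >>> c = C(1)
+        >>> c.x = "not an int"  # plain attr.s doesn't validate on setattr
+        >>> attr.validate(c)    # raises TypeError via the validator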
+ """
+ if _config._run_validators is False:
+ return
+
+ for a in fields(inst.__class__):
+ v = a.validator
+ if v is not None:
+ v(inst, a, getattr(inst, a.name))
+
+
+def _is_slot_cls(cls):
+ return "__slots__" in cls.__dict__
+
+
+def _is_slot_attr(a_name, base_attr_map):
+ """
+ Check if the attribute name comes from a slot class.
+ """
+ return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
+
+
+def _make_init(
+ cls,
+ attrs,
+ pre_init,
+ post_init,
+ frozen,
+ slots,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ has_global_on_setattr,
+ attrs_init,
+):
+ if frozen and has_global_on_setattr:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = cache_hash or frozen
+ filtered_attrs = []
+ attr_dict = {}
+ for a in attrs:
+ if not a.init and a.default is NOTHING:
+ continue
+
+ filtered_attrs.append(a)
+ attr_dict[a.name] = a
+
+ if a.on_setattr is not None:
+ if frozen is True:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = True
+ elif (
+ has_global_on_setattr and a.on_setattr is not setters.NO_OP
+ ) or _is_slot_attr(a.name, base_attr_map):
+ needs_cached_setattr = True
+
+ unique_filename = _generate_unique_filename(cls, "init")
+
+ script, globs, annotations = _attrs_to_init_script(
+ filtered_attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ needs_cached_setattr,
+ has_global_on_setattr,
+ attrs_init,
+ )
+ if cls.__module__ in sys.modules:
+ # This makes typing.get_type_hints(CLS.__init__) resolve string types.
+ globs.update(sys.modules[cls.__module__].__dict__)
+
+ globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
+
+ if needs_cached_setattr:
+ # Save the lookup overhead in __init__ if we need to circumvent
+ # setattr hooks.
+ globs["_cached_setattr"] = _obj_setattr
+
+ init = _make_method(
+ "__attrs_init__" if attrs_init else "__init__",
+ script,
+ unique_filename,
+ globs,
+ )
+ init.__annotations__ = annotations
+
+ return init
+
+
+def _setattr(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*.
+ """
+ return "_setattr('%s', %s)" % (attr_name, value_var)
+
+
+def _setattr_with_converter(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*, but run
+ its converter first.
+ """
+ return "_setattr('%s', %s(%s))" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+def _assign(attr_name, value, has_on_setattr):
+ """
+    Unless *attr_name* has an on_setattr hook, use normal assignment.
+    Otherwise delegate to _setattr.
+ """
+ if has_on_setattr:
+ return _setattr(attr_name, value, True)
+
+ return "self.%s = %s" % (attr_name, value)
+
+
+def _assign_with_converter(attr_name, value_var, has_on_setattr):
+ """
+    Unless *attr_name* has an on_setattr hook, use normal assignment after
+    conversion. Otherwise delegate to _setattr_with_converter.
+ """
+ if has_on_setattr:
+ return _setattr_with_converter(attr_name, value_var, True)
+
+ return "self.%s = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+if PY2:
+
+ def _unpack_kw_only_py2(attr_name, default=None):
+ """
+ Unpack *attr_name* from _kw_only dict.
+ """
+ if default is not None:
+ arg_default = ", %s" % default
+ else:
+ arg_default = ""
+ return "%s = _kw_only.pop('%s'%s)" % (
+ attr_name,
+ attr_name,
+ arg_default,
+ )
+
+ def _unpack_kw_only_lines_py2(kw_only_args):
+ """
+ Unpack all *kw_only_args* from _kw_only dict and handle errors.
+
+ Given a list of strings "{attr_name}" and "{attr_name}={default}"
+ generates list of lines of code that pop attrs from _kw_only dict and
+ raise TypeError similar to builtin if required attr is missing or
+ extra key is passed.
+
+ >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"])))
+ try:
+ a = _kw_only.pop('a')
+ b = _kw_only.pop('b', 42)
+ except KeyError as _key_error:
+ raise TypeError(
+ ...
+ if _kw_only:
+ raise TypeError(
+ ...
+ """
+ lines = ["try:"]
+ lines.extend(
+ " " + _unpack_kw_only_py2(*arg.split("="))
+ for arg in kw_only_args
+ )
+ lines += """\
+except KeyError as _key_error:
+ raise TypeError(
+ '__init__() missing required keyword-only argument: %s' % _key_error
+ )
+if _kw_only:
+ raise TypeError(
+ '__init__() got an unexpected keyword argument %r'
+ % next(iter(_kw_only))
+ )
+""".split(
+ "\n"
+ )
+ return lines
+
+
+def _attrs_to_init_script(
+ attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ needs_cached_setattr,
+ has_global_on_setattr,
+ attrs_init,
+):
+ """
+ Return a script of an initializer for *attrs* and a dict of globals.
+
+ The globals are expected by the generated script.
+
+ If *frozen* is True, we cannot set the attributes directly so we use
+ a cached ``object.__setattr__``.
+ """
+ lines = []
+ if pre_init:
+ lines.append("self.__attrs_pre_init__()")
+
+ if needs_cached_setattr:
+ lines.append(
+ # Circumvent the __setattr__ descriptor to save one lookup per
+ # assignment.
+ # Note _setattr will be used again below if cache_hash is True
+ "_setattr = _cached_setattr.__get__(self, self.__class__)"
+ )
+
+ if frozen is True:
+ if slots is True:
+ fmt_setter = _setattr
+ fmt_setter_with_converter = _setattr_with_converter
+ else:
+ # Dict frozen classes assign directly to __dict__.
+ # But only if the attribute doesn't come from an ancestor slot
+ # class.
+ # Note _inst_dict will be used again below if cache_hash is True
+ lines.append("_inst_dict = self.__dict__")
+
+ def fmt_setter(attr_name, value_var, has_on_setattr):
+ if _is_slot_attr(attr_name, base_attr_map):
+ return _setattr(attr_name, value_var, has_on_setattr)
+
+ return "_inst_dict['%s'] = %s" % (attr_name, value_var)
+
+ def fmt_setter_with_converter(
+ attr_name, value_var, has_on_setattr
+ ):
+ if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
+ return _setattr_with_converter(
+ attr_name, value_var, has_on_setattr
+ )
+
+ return "_inst_dict['%s'] = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+ else:
+ # Not frozen.
+ fmt_setter = _assign
+ fmt_setter_with_converter = _assign_with_converter
+
+ args = []
+ kw_only_args = []
+ attrs_to_validate = []
+
+ # This is a dictionary of names to validator and converter callables.
+ # Injecting this into __init__ globals lets us avoid lookups.
+ names_for_globals = {}
+ annotations = {"return": None}
+
+ for a in attrs:
+ if a.validator:
+ attrs_to_validate.append(a)
+
+ attr_name = a.name
+ has_on_setattr = a.on_setattr is not None or (
+ a.on_setattr is not setters.NO_OP and has_global_on_setattr
+ )
+ arg_name = a.name.lstrip("_")
+
+ has_factory = isinstance(a.default, Factory)
+ if has_factory and a.default.takes_self:
+ maybe_self = "self"
+ else:
+ maybe_self = ""
+
+ if a.init is False:
+ if has_factory:
+ init_factory_name = _init_factory_pat.format(a.name)
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + "(%s)" % (maybe_self,),
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ init_factory_name + "(%s)" % (maybe_self,),
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ "attr_dict['%s'].default" % (attr_name,),
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ "attr_dict['%s'].default" % (attr_name,),
+ has_on_setattr,
+ )
+ )
+ elif a.default is not NOTHING and not has_factory:
+ arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ elif has_factory:
+ arg = "%s=NOTHING" % (arg_name,)
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+ lines.append("if %s is not NOTHING:" % (arg_name,))
+
+ init_factory_name = _init_factory_pat.format(a.name)
+ if a.converter is not None:
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(
+ " " + fmt_setter(attr_name, arg_name, has_on_setattr)
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.kw_only:
+ kw_only_args.append(arg_name)
+ else:
+ args.append(arg_name)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ if a.init is True:
+ if a.type is not None and a.converter is None:
+ annotations[arg_name] = a.type
+ elif a.converter is not None and not PY2:
+ # Try to get the type from the converter.
+ sig = None
+ try:
+ sig = inspect.signature(a.converter)
+ except (ValueError, TypeError): # inspect failed
+ pass
+ if sig:
+ sig_params = list(sig.parameters.values())
+ if (
+ sig_params
+ and sig_params[0].annotation
+ is not inspect.Parameter.empty
+ ):
+ annotations[arg_name] = sig_params[0].annotation
+
+ if attrs_to_validate: # we can skip this if there are no validators.
+ names_for_globals["_config"] = _config
+ lines.append("if _config._run_validators is True:")
+ for a in attrs_to_validate:
+ val_name = "__attr_validator_" + a.name
+ attr_name = "__attr_" + a.name
+ lines.append(
+ " %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
+ )
+ names_for_globals[val_name] = a.validator
+ names_for_globals[attr_name] = a
+
+ if post_init:
+ lines.append("self.__attrs_post_init__()")
+
+    # Because the hash cache field is set only after __attrs_post_init__ is
+    # called, a crash will result if post-init tries to access the hash
+    # code. This seemed preferable to setting it beforehand, in which case
+    # alteration to field values during post-init combined with post-init
+    # accessing the hash code would result in silent bugs.
+ if cache_hash:
+ if frozen:
+ if slots:
+ # if frozen and slots, then _setattr defined above
+ init_hash_cache = "_setattr('%s', %s)"
+ else:
+ # if frozen and not slots, then _inst_dict defined above
+ init_hash_cache = "_inst_dict['%s'] = %s"
+ else:
+ init_hash_cache = "self.%s = %s"
+ lines.append(init_hash_cache % (_hash_cache_field, "None"))
+
+ # For exceptions we rely on BaseException.__init__ for proper
+ # initialization.
+ if is_exc:
+ vals = ",".join("self." + a.name for a in attrs if a.init)
+
+ lines.append("BaseException.__init__(self, %s)" % (vals,))
+
+ args = ", ".join(args)
+ if kw_only_args:
+ if PY2:
+ lines = _unpack_kw_only_lines_py2(kw_only_args) + lines
+
+ args += "%s**_kw_only" % (", " if args else "",) # leading comma
+ else:
+ args += "%s*, %s" % (
+ ", " if args else "", # leading comma
+ ", ".join(kw_only_args), # kw_only args
+ )
+ return (
+ """\
+def {init_name}(self, {args}):
+ {lines}
+""".format(
+ init_name=("__attrs_init__" if attrs_init else "__init__"),
+ args=args,
+ lines="\n ".join(lines) if lines else "pass",
+ ),
+ names_for_globals,
+ annotations,
+ )
+
+
+class Attribute(object):
+ """
+ *Read-only* representation of an attribute.
+
+ Instances of this class are frequently used for introspection purposes
+ like:
+
+ - `fields` returns a tuple of them.
+ - Validators get them passed as the first argument.
+ - The *field transformer* hook receives a list of them.
+
+ :attribute name: The name of the attribute.
+ :attribute inherited: Whether or not that attribute has been inherited from
+ a base class.
+
+    Plus *all* arguments of `attr.ib` (except for ``factory``, which is only
+    syntactic sugar for ``default=Factory(...)``).
+
+ .. versionadded:: 20.1.0 *inherited*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+ equality checks and hashing anymore.
+ .. versionadded:: 21.1.0 *eq_key* and *order_key*
+
+ For the full version history of the fields, see `attr.ib`.
+ """
+
+ __slots__ = (
+ "name",
+ "default",
+ "validator",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "type",
+ "converter",
+ "kw_only",
+ "inherited",
+ "on_setattr",
+ )
+
+ def __init__(
+ self,
+ name,
+ default,
+ validator,
+ repr,
+ cmp, # XXX: unused, remove along with other cmp code.
+ hash,
+ init,
+ inherited,
+ metadata=None,
+ type=None,
+ converter=None,
+ kw_only=False,
+ eq=None,
+ eq_key=None,
+ order=None,
+ order_key=None,
+ on_setattr=None,
+ ):
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq_key or eq, order_key or order, True
+ )
+
+ # Cache this descriptor here to speed things up later.
+ bound_setattr = _obj_setattr.__get__(self, Attribute)
+
+ # Despite the big red warning, people *do* instantiate `Attribute`
+ # themselves.
+ bound_setattr("name", name)
+ bound_setattr("default", default)
+ bound_setattr("validator", validator)
+ bound_setattr("repr", repr)
+ bound_setattr("eq", eq)
+ bound_setattr("eq_key", eq_key)
+ bound_setattr("order", order)
+ bound_setattr("order_key", order_key)
+ bound_setattr("hash", hash)
+ bound_setattr("init", init)
+ bound_setattr("converter", converter)
+ bound_setattr(
+ "metadata",
+ (
+ metadata_proxy(metadata)
+ if metadata
+ else _empty_metadata_singleton
+ ),
+ )
+ bound_setattr("type", type)
+ bound_setattr("kw_only", kw_only)
+ bound_setattr("inherited", inherited)
+ bound_setattr("on_setattr", on_setattr)
+
+ def __setattr__(self, name, value):
+ raise FrozenInstanceError()
+
+ @classmethod
+ def from_counting_attr(cls, name, ca, type=None):
+ # type holds the annotated value. deal with conflicts:
+ if type is None:
+ type = ca.type
+ elif ca.type is not None:
+ raise ValueError(
+ "Type annotation and type argument cannot both be present"
+ )
+ inst_dict = {
+ k: getattr(ca, k)
+ for k in Attribute.__slots__
+ if k
+ not in (
+ "name",
+ "validator",
+ "default",
+ "type",
+ "inherited",
+ ) # exclude methods and deprecated alias
+ }
+ return cls(
+ name=name,
+ validator=ca._validator,
+ default=ca._default,
+ type=type,
+ cmp=None,
+ inherited=False,
+ **inst_dict
+ )
+
+ @property
+ def cmp(self):
+ """
+ Simulate the presence of a cmp attribute and warn.
+ """
+ warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2)
+
+ return self.eq and self.order
+
+ # Don't use attr.evolve since fields(Attribute) doesn't work
+ def evolve(self, **changes):
+ """
+ Copy *self* and apply *changes*.
+
+ This works similarly to `attr.evolve` but that function does not work
+ with ``Attribute``.
+
+ It is mainly meant to be used for `transform-fields`.
+
+ .. versionadded:: 20.3.0
+ """
+ new = copy.copy(self)
+
+ new._setattrs(changes.items())
+
+ return new
+
+ # Don't use _add_pickle since fields(Attribute) doesn't work
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(
+ getattr(self, name) if name != "metadata" else dict(self.metadata)
+ for name in self.__slots__
+ )
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ self._setattrs(zip(self.__slots__, state))
+
+ def _setattrs(self, name_values_pairs):
+ bound_setattr = _obj_setattr.__get__(self, Attribute)
+ for name, value in name_values_pairs:
+ if name != "metadata":
+ bound_setattr(name, value)
+ else:
+ bound_setattr(
+ name,
+ metadata_proxy(value)
+ if value
+ else _empty_metadata_singleton,
+ )
+
+
+_a = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=(name != "metadata"),
+ init=True,
+ inherited=False,
+ )
+ for name in Attribute.__slots__
+]
+
+Attribute = _add_hash(
+ _add_eq(
+ _add_repr(Attribute, attrs=_a),
+ attrs=[a for a in _a if a.name != "inherited"],
+ ),
+ attrs=[a for a in _a if a.hash and a.name != "inherited"],
+)
+
+
+class _CountingAttr(object):
+ """
+ Intermediate representation of attributes that uses a counter to preserve
+ the order in which the attributes have been defined.
+
+    *Internal* data structure of the attrs library. Running into it is most
+    likely the result of a bug like a forgotten `@attr.s` decorator.
+ """
+
+ __slots__ = (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "_validator",
+ "converter",
+ "type",
+ "kw_only",
+ "on_setattr",
+ )
+ __attrs_attrs__ = tuple(
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=True,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ )
+ for name in (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "order",
+ "hash",
+ "init",
+ "on_setattr",
+ )
+ ) + (
+ Attribute(
+ name="metadata",
+ default=None,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=False,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ ),
+ )
+ cls_counter = 0
+
+ def __init__(
+ self,
+ default,
+ validator,
+ repr,
+ cmp,
+ hash,
+ init,
+ converter,
+ metadata,
+ type,
+ kw_only,
+ eq,
+ eq_key,
+ order,
+ order_key,
+ on_setattr,
+ ):
+ _CountingAttr.cls_counter += 1
+ self.counter = _CountingAttr.cls_counter
+ self._default = default
+ self._validator = validator
+ self.converter = converter
+ self.repr = repr
+ self.eq = eq
+ self.eq_key = eq_key
+ self.order = order
+ self.order_key = order_key
+ self.hash = hash
+ self.init = init
+ self.metadata = metadata
+ self.type = type
+ self.kw_only = kw_only
+ self.on_setattr = on_setattr
+
+ def validator(self, meth):
+ """
+ Decorator that adds *meth* to the list of validators.
+
+ Returns *meth* unchanged.
+
+ .. versionadded:: 17.1.0
+ """
+ if self._validator is None:
+ self._validator = meth
+ else:
+ self._validator = and_(self._validator, meth)
+ return meth
+
+ def default(self, meth):
+ """
+        Decorator that allows setting the default for an attribute.
+
+ Returns *meth* unchanged.
+
+ :raises DefaultAlreadySetError: If default has been set before.
+
+ .. versionadded:: 17.1.0
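+
+        A minimal sketch (the class, field, and method names are
+        arbitrary)::
+
+            >>> import attr
+            >>> @attr.s
+            ... class C(object):
+            ...     x = attr.ib()
+            ...     y = attr.ib()
+            ...     @y.default
+            ...     def _y_default(self):
+            ...         return self.x + 1
+            >>> C(1)
+            C(x=1, y=2)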
+ """
+ if self._default is not NOTHING:
+ raise DefaultAlreadySetError()
+
+ self._default = Factory(meth, takes_self=True)
+
+ return meth
+
+
+_CountingAttr = _add_eq(_add_repr(_CountingAttr))
+
+
+class Factory(object):
+ """
+ Stores a factory callable.
+
+ If passed as the default value to `attr.ib`, the factory is used to
+ generate a new value.
+
+ :param callable factory: A callable that takes either none or exactly one
+ mandatory positional argument depending on *takes_self*.
+ :param bool takes_self: Pass the partially initialized instance that is
+ being initialized as a positional argument.
+
+ .. versionadded:: 17.1.0 *takes_self*
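+
+    A minimal sketch::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib(default=attr.Factory(list))
+        >>> C()
+        C(x=[])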
+ """
+
+ __slots__ = ("factory", "takes_self")
+
+ def __init__(self, factory, takes_self=False):
+ """
+ `Factory` is part of the default machinery so if we want a default
+ value here, we have to implement it ourselves.
+ """
+ self.factory = factory
+ self.takes_self = takes_self
+
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(getattr(self, name) for name in self.__slots__)
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ for name, value in zip(self.__slots__, state):
+ setattr(self, name, value)
+
+
+_f = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=True,
+ init=True,
+ inherited=False,
+ )
+ for name in Factory.__slots__
+]
+
+Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
+
+
+def make_class(name, attrs, bases=(object,), **attributes_arguments):
+ """
+ A quick way to create a new class called *name* with *attrs*.
+
+ :param str name: The name for the new class.
+
+ :param attrs: A list of names or a dictionary of mappings of names to
+ attributes.
+
+ If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
+ `collections.OrderedDict` otherwise), the order is deduced from
+ the order of the names or attributes inside *attrs*. Otherwise the
+ order of the definition of the attributes is used.
+ :type attrs: `list` or `dict`
+
+ :param tuple bases: Classes that the new class will subclass.
+
+ :param attributes_arguments: Passed unmodified to `attr.s`.
+
+ :return: A new class with *attrs*.
+ :rtype: type
+
+ .. versionadded:: 17.1.0 *bases*
+ .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
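+
+    A minimal sketch (the class name is arbitrary)::
+
+        >>> import attr
+        >>> C1 = attr.make_class("C1", ["x", "y"])
+        >>> C1("foo", "bar")
+        C1(x='foo', y='bar')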
+ """
+ if isinstance(attrs, dict):
+ cls_dict = attrs
+ elif isinstance(attrs, (list, tuple)):
+ cls_dict = dict((a, attrib()) for a in attrs)
+ else:
+ raise TypeError("attrs argument must be a dict or a list.")
+
+ pre_init = cls_dict.pop("__attrs_pre_init__", None)
+ post_init = cls_dict.pop("__attrs_post_init__", None)
+ user_init = cls_dict.pop("__init__", None)
+
+ body = {}
+ if pre_init is not None:
+ body["__attrs_pre_init__"] = pre_init
+ if post_init is not None:
+ body["__attrs_post_init__"] = post_init
+ if user_init is not None:
+ body["__init__"] = user_init
+
+ type_ = new_class(name, bases, {}, lambda ns: ns.update(body))
+
+ # For pickling to work, the __module__ variable needs to be set to the
+    # frame where the class is created. Bypass this step in environments
+    # where sys._getframe is not defined (Jython, for example) or where it
+    # is not defined for arguments greater than 0 (IronPython).
+ try:
+ type_.__module__ = sys._getframe(1).f_globals.get(
+ "__name__", "__main__"
+ )
+ except (AttributeError, ValueError):
+ pass
+
+ # We do it here for proper warnings with meaningful stacklevel.
+ cmp = attributes_arguments.pop("cmp", None)
+ (
+ attributes_arguments["eq"],
+ attributes_arguments["order"],
+ ) = _determine_attrs_eq_order(
+ cmp,
+ attributes_arguments.get("eq"),
+ attributes_arguments.get("order"),
+ True,
+ )
+
+ return _attrs(these=cls_dict, **attributes_arguments)(type_)
+
+
+# These are required within this module, so we define them here and merely
+# import them into .validators / .converters.
+
+
+@attrs(slots=True, hash=True)
+class _AndValidator(object):
+ """
+ Compose many validators to a single one.
+ """
+
+ _validators = attrib()
+
+ def __call__(self, inst, attr, value):
+ for v in self._validators:
+ v(inst, attr, value)
+
+
+def and_(*validators):
+ """
+ A validator that composes multiple validators into one.
+
+ When called on a value, it runs all wrapped validators.
+
+ :param callables validators: Arbitrary number of validators.
+
+ .. versionadded:: 17.1.0
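+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> from attr.validators import and_, in_, instance_of
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib(validator=and_(instance_of(int), in_([1, 2, 3])))
+        >>> C(2)
+        C(x=2)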
+ """
+ vals = []
+ for validator in validators:
+ vals.extend(
+ validator._validators
+ if isinstance(validator, _AndValidator)
+ else [validator]
+ )
+
+ return _AndValidator(tuple(vals))
+
+
+def pipe(*converters):
+ """
+ A converter that composes multiple converters into one.
+
+ When called on a value, it runs all wrapped converters, returning the
+ *last* value.
+
+    Type annotations will be inferred from the wrapped converters'
+    annotations, if they have any.
+
+ :param callables converters: Arbitrary number of converters.
+
+ .. versionadded:: 20.1.0
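+
+    A minimal sketch (the converter name is arbitrary)::
+
+        >>> import attr
+        >>> to_int = attr.converters.pipe(str.strip, int)
+        >>> to_int("  42 ")
+        42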
+ """
+
+ def pipe_converter(val):
+ for converter in converters:
+ val = converter(val)
+
+ return val
+
+ if not PY2:
+ if not converters:
+ # If the converter list is empty, pipe_converter is the identity.
+ A = typing.TypeVar("A")
+ pipe_converter.__annotations__ = {"val": A, "return": A}
+ else:
+ # Get parameter type.
+ sig = None
+ try:
+ sig = inspect.signature(converters[0])
+ except (ValueError, TypeError): # inspect failed
+ pass
+ if sig:
+ params = list(sig.parameters.values())
+ if (
+ params
+ and params[0].annotation is not inspect.Parameter.empty
+ ):
+ pipe_converter.__annotations__["val"] = params[
+ 0
+ ].annotation
+ # Get return type.
+ sig = None
+ try:
+ sig = inspect.signature(converters[-1])
+ except (ValueError, TypeError): # inspect failed
+ pass
+ if sig and sig.return_annotation is not inspect.Signature().empty:
+ pipe_converter.__annotations__[
+ "return"
+ ] = sig.return_annotation
+
+ return pipe_converter
diff --git a/lib/spack/external/attr/_next_gen.py b/lib/spack/external/attr/_next_gen.py
new file mode 100644
index 0000000000..fab0af966a
--- /dev/null
+++ b/lib/spack/external/attr/_next_gen.py
@@ -0,0 +1,158 @@
+"""
+These are Python 3.6+-only and keyword-only APIs that call `attr.s` and
+`attr.ib` with different default values.
+"""
+
+from functools import partial
+
+from attr.exceptions import UnannotatedAttributeError
+
+from . import setters
+from ._make import NOTHING, _frozen_setattrs, attrib, attrs
+
+
+def define(
+ maybe_cls=None,
+ *,
+ these=None,
+ repr=None,
+ hash=None,
+ init=None,
+ slots=True,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=None,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=True,
+ eq=None,
+ order=False,
+ auto_detect=True,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+):
+ r"""
+    A class decorator like `attr.s` that calls it with different default
+    values. The only behavioral differences are the handling of the
+    *auto_attribs* option:
+
+ :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
+ exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
+
+ 1. If any attributes are annotated and no unannotated `attr.ib`\ s
+ are found, it assumes *auto_attribs=True*.
+ 2. Otherwise it assumes *auto_attribs=False* and tries to collect
+ `attr.ib`\ s.
+
+ and that mutable classes (``frozen=False``) validate on ``__setattr__``.
+
+ .. versionadded:: 20.1.0
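+
+    A minimal sketch (Python 3.6+; the class and field names are
+    illustrative)::
+
+        >>> import attr
+        >>> @attr.define
+        ... class Point:
+        ...     x: int
+        ...     y: int = 0
+        >>> Point(1)
+        Point(x=1, y=0)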
+ """
+
+ def do_it(cls, auto_attribs):
+ return attrs(
+ maybe_cls=cls,
+ these=these,
+ repr=repr,
+ hash=hash,
+ init=init,
+ slots=slots,
+ frozen=frozen,
+ weakref_slot=weakref_slot,
+ str=str,
+ auto_attribs=auto_attribs,
+ kw_only=kw_only,
+ cache_hash=cache_hash,
+ auto_exc=auto_exc,
+ eq=eq,
+ order=order,
+ auto_detect=auto_detect,
+ collect_by_mro=True,
+ getstate_setstate=getstate_setstate,
+ on_setattr=on_setattr,
+ field_transformer=field_transformer,
+ )
+
+ def wrap(cls):
+ """
+ Making this a wrapper ensures this code runs during class creation.
+
+ We also ensure that frozen-ness of classes is inherited.
+ """
+ nonlocal frozen, on_setattr
+
+ had_on_setattr = on_setattr not in (None, setters.NO_OP)
+
+ # By default, mutable classes validate on setattr.
+ if frozen is False and on_setattr is None:
+ on_setattr = setters.validate
+
+ # However, if we subclass a frozen class, we inherit the immutability
+ # and disable on_setattr.
+ for base_cls in cls.__bases__:
+ if base_cls.__setattr__ is _frozen_setattrs:
+ if had_on_setattr:
+ raise ValueError(
+ "Frozen classes can't use on_setattr "
+ "(frozen-ness was inherited)."
+ )
+
+ on_setattr = setters.NO_OP
+ break
+
+ if auto_attribs is not None:
+ return do_it(cls, auto_attribs)
+
+ try:
+ return do_it(cls, True)
+ except UnannotatedAttributeError:
+ return do_it(cls, False)
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+ # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
+
+mutable = define
+frozen = partial(define, frozen=True, on_setattr=None)
+
+
+def field(
+ *,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ hash=None,
+ init=True,
+ metadata=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+):
+ """
+ Identical to `attr.ib`, except keyword-only and with some arguments
+ removed.
+
+ .. versionadded:: 20.1.0
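+
+    A minimal sketch (Python 3.6+; names are illustrative)::
+
+        >>> import attr
+        >>> @attr.define
+        ... class C:
+        ...     x: int = attr.field(default=0)
+        >>> C()
+        C(x=0)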
+ """
+ return attrib(
+ default=default,
+ validator=validator,
+ repr=repr,
+ hash=hash,
+ init=init,
+ metadata=metadata,
+ converter=converter,
+ factory=factory,
+ kw_only=kw_only,
+ eq=eq,
+ order=order,
+ on_setattr=on_setattr,
+ )
diff --git a/lib/spack/external/attr/_version_info.py b/lib/spack/external/attr/_version_info.py
new file mode 100644
index 0000000000..014e78a1b4
--- /dev/null
+++ b/lib/spack/external/attr/_version_info.py
@@ -0,0 +1,85 @@
+from __future__ import absolute_import, division, print_function
+
+from functools import total_ordering
+
+from ._funcs import astuple
+from ._make import attrib, attrs
+
+
+@total_ordering
+@attrs(eq=False, order=False, slots=True, frozen=True)
+class VersionInfo(object):
+ """
+    A version object that can be compared to a tuple of length 1--4:
+
+ >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
+ True
+ >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
+ True
+ >>> vi = attr.VersionInfo(19, 2, 0, "final")
+ >>> vi < (19, 1, 1)
+ False
+ >>> vi < (19,)
+ False
+ >>> vi == (19, 2,)
+ True
+ >>> vi == (19, 2, 1)
+ False
+
+ .. versionadded:: 19.2
+ """
+
+ year = attrib(type=int)
+ minor = attrib(type=int)
+ micro = attrib(type=int)
+ releaselevel = attrib(type=str)
+
+ @classmethod
+ def _from_version_string(cls, s):
+ """
+        Parse *s* and return a ``VersionInfo``.
+ """
+ v = s.split(".")
+ if len(v) == 3:
+ v.append("final")
+
+ return cls(
+ year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
+ )
+
+ def _ensure_tuple(self, other):
+ """
+ Ensure *other* is a tuple of a valid length.
+
+ Returns a possibly transformed *other* and ourselves as a tuple of
+ the same length as *other*.
+ """
+
+ if self.__class__ is other.__class__:
+ other = astuple(other)
+
+ if not isinstance(other, tuple):
+ raise NotImplementedError
+
+ if not (1 <= len(other) <= 4):
+ raise NotImplementedError
+
+ return astuple(self)[: len(other)], other
+
+ def __eq__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ return us == them
+
+ def __lt__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+ # have to do anything special with releaselevel for now.
+ return us < them
diff --git a/lib/spack/external/attr/converters.py b/lib/spack/external/attr/converters.py
new file mode 100644
index 0000000000..2777db6d0a
--- /dev/null
+++ b/lib/spack/external/attr/converters.py
@@ -0,0 +1,111 @@
+"""
+Commonly useful converters.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from ._compat import PY2
+from ._make import NOTHING, Factory, pipe
+
+
+if not PY2:
+ import inspect
+ import typing
+
+
+__all__ = [
+ "pipe",
+ "optional",
+ "default_if_none",
+]
+
+
+def optional(converter):
+ """
+ A converter that allows an attribute to be optional. An optional attribute
+ is one which can be set to ``None``.
+
+    Type annotations will be inferred from the wrapped converter's
+    annotations, if it has any.
+
+ :param callable converter: the converter that is used for non-``None``
+ values.
+
+ .. versionadded:: 17.1.0
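+
+    A minimal sketch (the converter name is arbitrary)::
+
+        >>> import attr
+        >>> conv = attr.converters.optional(int)
+        >>> conv("42")
+        42
+        >>> conv(None) is None
+        True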
+ """
+
+ def optional_converter(val):
+ if val is None:
+ return None
+ return converter(val)
+
+ if not PY2:
+ sig = None
+ try:
+ sig = inspect.signature(converter)
+ except (ValueError, TypeError): # inspect failed
+ pass
+ if sig:
+ params = list(sig.parameters.values())
+ if params and params[0].annotation is not inspect.Parameter.empty:
+ optional_converter.__annotations__["val"] = typing.Optional[
+ params[0].annotation
+ ]
+ if sig.return_annotation is not inspect.Signature.empty:
+ optional_converter.__annotations__["return"] = typing.Optional[
+ sig.return_annotation
+ ]
+
+ return optional_converter
+
+
+def default_if_none(default=NOTHING, factory=None):
+ """
+    A converter that replaces ``None`` values with *default* or the result
+    of *factory*.
+
+    :param default: Value to be used if ``None`` is passed. Passing an
+        instance of `attr.Factory` is supported; however, the ``takes_self``
+        option is *not*.
+ :param callable factory: A callable that takes no parameters whose result
+ is used if ``None`` is passed.
+
+    :raises TypeError: If **neither** *default* nor *factory* is passed.
+ :raises TypeError: If **both** *default* and *factory* are passed.
+ :raises ValueError: If an instance of `attr.Factory` is passed with
+ ``takes_self=True``.
+
+ .. versionadded:: 18.2.0
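+
+    A minimal sketch (the converter name is arbitrary)::
+
+        >>> import attr
+        >>> conv = attr.converters.default_if_none("")
+        >>> conv(None)
+        ''
+        >>> conv("spam")
+        'spam'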
+ """
+ if default is NOTHING and factory is None:
+ raise TypeError("Must pass either `default` or `factory`.")
+
+ if default is not NOTHING and factory is not None:
+ raise TypeError(
+ "Must pass either `default` or `factory` but not both."
+ )
+
+ if factory is not None:
+ default = Factory(factory)
+
+ if isinstance(default, Factory):
+ if default.takes_self:
+ raise ValueError(
+ "`takes_self` is not supported by default_if_none."
+ )
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default.factory()
+
+ else:
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default
+
+ return default_if_none_converter
diff --git a/lib/spack/external/attr/exceptions.py b/lib/spack/external/attr/exceptions.py
new file mode 100644
index 0000000000..f6f9861bea
--- /dev/null
+++ b/lib/spack/external/attr/exceptions.py
@@ -0,0 +1,92 @@
+from __future__ import absolute_import, division, print_function
+
+
+class FrozenError(AttributeError):
+ """
+    An attempt has been made to modify a frozen/immutable instance or
+    attribute.
+
+ It mirrors the behavior of ``namedtuples`` by using the same error message
+ and subclassing `AttributeError`.
+
+ .. versionadded:: 20.1.0
+ """
+
+ msg = "can't set attribute"
+ args = [msg]
+
+
+class FrozenInstanceError(FrozenError):
+ """
+    An attempt has been made to modify a frozen instance.
+
+ .. versionadded:: 16.1.0
+ """
+
+
+class FrozenAttributeError(FrozenError):
+ """
+    An attempt has been made to modify a frozen attribute.
+
+ .. versionadded:: 20.1.0
+ """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+ """
+ An ``attrs`` function couldn't find an attribute that the user asked for.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class NotAnAttrsClassError(ValueError):
+ """
+ A non-``attrs`` class has been passed into an ``attrs`` function.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class DefaultAlreadySetError(RuntimeError):
+ """
+    A default has been set using ``attr.ib()`` and an attempt is made to
+    reset it using the decorator.
+
+ .. versionadded:: 17.1.0
+ """
+
+
+class UnannotatedAttributeError(RuntimeError):
+ """
+ A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
+ annotation.
+
+ .. versionadded:: 17.3.0
+ """
+
+
+class PythonTooOldError(RuntimeError):
+ """
+    An ``attrs`` feature that requires a newer Python version has been
+    used.
+
+ .. versionadded:: 18.2.0
+ """
+
+
+class NotCallableError(TypeError):
+ """
+    An ``attr.ib()`` requiring a callable has been set with a value
+ that is not callable.
+
+ .. versionadded:: 19.2.0
+ """
+
+ def __init__(self, msg, value):
+ super(TypeError, self).__init__(msg, value)
+ self.msg = msg
+ self.value = value
+
+ def __str__(self):
+ return str(self.msg)
diff --git a/lib/spack/external/attr/filters.py b/lib/spack/external/attr/filters.py
new file mode 100644
index 0000000000..dc47e8fa38
--- /dev/null
+++ b/lib/spack/external/attr/filters.py
@@ -0,0 +1,52 @@
+"""
+Commonly useful filters for `attr.asdict`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from ._compat import isclass
+from ._make import Attribute
+
+
+def _split_what(what):
+ """
+ Returns a tuple of `frozenset`s of classes and attributes.
+ """
+ return (
+ frozenset(cls for cls in what if isclass(cls)),
+ frozenset(cls for cls in what if isinstance(cls, Attribute)),
+ )
+
+
+def include(*what):
+ """
+ Whitelist *what*.
+
+ :param what: What to whitelist.
+ :type what: `list` of `type` or `attr.Attribute`\\ s
+
+ :rtype: `callable`
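+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> attr.asdict(C(1, "2"), filter=attr.filters.include(int))
+        {'x': 1}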
+ """
+ cls, attrs = _split_what(what)
+
+ def include_(attribute, value):
+ return value.__class__ in cls or attribute in attrs
+
+ return include_
+
+
+def exclude(*what):
+ """
+ Blacklist *what*.
+
+ :param what: What to blacklist.
+ :type what: `list` of classes or `attr.Attribute`\\ s.
+
+ :rtype: `callable`
+ """
+ cls, attrs = _split_what(what)
+
+ def exclude_(attribute, value):
+ return value.__class__ not in cls and attribute not in attrs
+
+ return exclude_
diff --git a/lib/spack/external/attr/setters.py b/lib/spack/external/attr/setters.py
new file mode 100644
index 0000000000..240014b3c1
--- /dev/null
+++ b/lib/spack/external/attr/setters.py
@@ -0,0 +1,77 @@
+"""
+Commonly used hooks for on_setattr.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from . import _config
+from .exceptions import FrozenAttributeError
+
+
+def pipe(*setters):
+ """
+ Run all *setters* and return the return value of the last one.
+
+ .. versionadded:: 20.1.0
+ """
+
+ def wrapped_pipe(instance, attrib, new_value):
+ rv = new_value
+
+ for setter in setters:
+ rv = setter(instance, attrib, rv)
+
+ return rv
+
+ return wrapped_pipe
+
+
+def frozen(_, __, ___):
+ """
+    Prevent an attribute from being modified.
+
+ .. versionadded:: 20.1.0
+ """
+ raise FrozenAttributeError()
+
+
+def validate(instance, attrib, new_value):
+ """
+ Run *attrib*'s validator on *new_value* if it has one.
+
+ .. versionadded:: 20.1.0
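+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s(on_setattr=attr.setters.validate)
+        ... class C(object):
+        ...     x = attr.ib(validator=attr.validators.instance_of(int))
+        >>> c = C(1)
+        >>> c.x = "2"  # raises TypeError from the instance_of validator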
+ """
+ if _config._run_validators is False:
+ return new_value
+
+ v = attrib.validator
+ if not v:
+ return new_value
+
+ v(instance, attrib, new_value)
+
+ return new_value
+
+
+def convert(instance, attrib, new_value):
+ """
+ Run *attrib*'s converter -- if it has one -- on *new_value* and return the
+ result.
+
+ .. versionadded:: 20.1.0
+ """
+ c = attrib.converter
+ if c:
+ return c(new_value)
+
+ return new_value
+
+
+NO_OP = object()
+"""
+Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
+
+Does not work in `pipe` or within lists.
+
+.. versionadded:: 20.1.0
+"""
diff --git a/lib/spack/external/attr/validators.py b/lib/spack/external/attr/validators.py
new file mode 100644
index 0000000000..b9a73054e9
--- /dev/null
+++ b/lib/spack/external/attr/validators.py
@@ -0,0 +1,379 @@
+"""
+Commonly useful validators.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ._make import _AndValidator, and_, attrib, attrs
+from .exceptions import NotCallableError
+
+
+__all__ = [
+ "and_",
+ "deep_iterable",
+ "deep_mapping",
+ "in_",
+ "instance_of",
+ "is_callable",
+ "matches_re",
+ "optional",
+ "provides",
+]
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator(object):
+ type = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not isinstance(value, self.type):
+ raise TypeError(
+ "'{name}' must be {type!r} (got {value!r} that is a "
+ "{actual!r}).".format(
+ name=attr.name,
+ type=self.type,
+ actual=value.__class__,
+ value=value,
+ ),
+ attr,
+ self.type,
+ value,
+ )
+
+ def __repr__(self):
+ return "<instance_of validator for type {type!r}>".format(
+ type=self.type
+ )
+
+
+def instance_of(type):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed
+    using `isinstance`, so it's also valid to pass a tuple of types).
+
+ :param type: The type to check for.
+ :type type: type or tuple of types
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attr.Attribute`), the expected type, and the value it
+ got.
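+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib(validator=attr.validators.instance_of(int))
+        >>> C(42)
+        C(x=42)
+        >>> C("42")  # raises TypeError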
+ """
+ return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator(object):
+ regex = attrib()
+ flags = attrib()
+ match_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.match_func(value):
+ raise ValueError(
+ "'{name}' must match regex {regex!r}"
+ " ({value!r} doesn't)".format(
+ name=attr.name, regex=self.regex.pattern, value=value
+ ),
+ attr,
+ self.regex,
+ value,
+ )
+
+ def __repr__(self):
+ return "<matches_re validator for pattern {regex!r}>".format(
+ regex=self.regex
+ )
+
+
+def matches_re(regex, flags=0, func=None):
+ r"""
+ A validator that raises `ValueError` if the initializer is called
+ with a string that doesn't match *regex*.
+
+ :param str regex: a regex string to match against
+ :param int flags: flags that will be passed to the underlying re function
+ (default 0)
+ :param callable func: which underlying `re` function to call (options
+ are `re.fullmatch`, `re.search`, `re.match`, default
+ is ``None`` which means either `re.fullmatch` or an emulation of
+ it on Python 2). For performance reasons, they won't be used directly
+ but on a pre-`re.compile`\ ed pattern.
+
+ .. versionadded:: 19.2.0
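+
+    A minimal sketch (names and pattern are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class User(object):
+        ...     email = attr.ib(
+        ...         validator=attr.validators.matches_re(r"[^@]+@[^@]+"))
+        >>> User("ann@example.com")
+        User(email='ann@example.com')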
+ """
+ fullmatch = getattr(re, "fullmatch", None)
+ valid_funcs = (fullmatch, None, re.search, re.match)
+ if func not in valid_funcs:
+ raise ValueError(
+ "'func' must be one of %s."
+ % (
+ ", ".join(
+ sorted(
+ e and e.__name__ or "None" for e in set(valid_funcs)
+ )
+ ),
+ )
+ )
+
+ pattern = re.compile(regex, flags)
+ if func is re.match:
+ match_func = pattern.match
+ elif func is re.search:
+ match_func = pattern.search
+ else:
+ if fullmatch:
+ match_func = pattern.fullmatch
+ else:
+ pattern = re.compile(r"(?:{})\Z".format(regex), flags)
+ match_func = pattern.match
+
+ return _MatchesReValidator(pattern, flags, match_func)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator(object):
+ interface = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.interface.providedBy(value):
+ raise TypeError(
+ "'{name}' must provide {interface!r} which {value!r} "
+ "doesn't.".format(
+ name=attr.name, interface=self.interface, value=value
+ ),
+ attr,
+ self.interface,
+ value,
+ )
+
+ def __repr__(self):
+ return "<provides validator for interface {interface!r}>".format(
+ interface=self.interface
+ )
+
+
+def provides(interface):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+    with an object that does not provide the requested *interface* (checks
+    are performed using ``interface.providedBy(value)``; see `zope.interface
+    <https://zopeinterface.readthedocs.io/en/latest/>`_).
+
+ :param interface: The interface to check for.
+ :type interface: ``zope.interface.Interface``
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attr.Attribute`), the expected interface, and the
+ value it got.
+ """
+ return _ProvidesValidator(interface)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator(object):
+ validator = attrib()
+
+ def __call__(self, inst, attr, value):
+ if value is None:
+ return
+
+ self.validator(inst, attr, value)
+
+ def __repr__(self):
+ return "<optional validator for {what} or None>".format(
+ what=repr(self.validator)
+ )
+
+
+def optional(validator):
+ """
+ A validator that makes an attribute optional. An optional attribute is one
+ which can be set to ``None`` in addition to satisfying the requirements of
+ the sub-validator.
+
+ :param validator: A validator (or a list of validators) that is used for
+ non-``None`` values.
+ :type validator: callable or `list` of callables.
+
+ .. versionadded:: 15.1.0
+ .. versionchanged:: 17.1.0 *validator* can be a list of validators.
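+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     x = attr.ib(
+        ...         validator=attr.validators.optional(
+        ...             attr.validators.instance_of(int)))
+        >>> C(None)
+        C(x=None)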
+ """
+ if isinstance(validator, list):
+ return _OptionalValidator(_AndValidator(validator))
+ return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator(object):
+ options = attrib()
+
+ def __call__(self, inst, attr, value):
+ try:
+ in_options = value in self.options
+ except TypeError: # e.g. `1 in "abc"`
+ in_options = False
+
+ if not in_options:
+ raise ValueError(
+ "'{name}' must be in {options!r} (got {value!r})".format(
+ name=attr.name, options=self.options, value=value
+ )
+ )
+
+ def __repr__(self):
+ return "<in_ validator with options {options!r}>".format(
+ options=self.options
+ )
+
+
+def in_(options):
+ """
+ A validator that raises a `ValueError` if the initializer is called
+ with a value that does not belong in the options provided. The check is
+ performed using ``value in options``.
+
+ :param options: Allowed options.
+ :type options: list, tuple, `enum.Enum`, ...
+
+ :raises ValueError: With a human readable error message, the attribute (of
+ type `attr.Attribute`), the expected options, and the value it
+ got.
+
+ .. versionadded:: 17.1.0
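+
+    A minimal sketch (the enum and class names are illustrative)::
+
+        >>> import attr, enum
+        >>> class State(enum.Enum):
+        ...     ON = "on"
+        ...     OFF = "off"
+        >>> @attr.s
+        ... class Machine(object):
+        ...     state = attr.ib(validator=attr.validators.in_(State))
+        >>> Machine(State.ON)
+        Machine(state=<State.ON: 'on'>)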
+ """
+ return _InValidator(options)
+
+
+@attrs(repr=False, slots=False, hash=True)
+class _IsCallableValidator(object):
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not callable(value):
+ message = (
+ "'{name}' must be callable "
+ "(got {value!r} that is a {actual!r})."
+ )
+ raise NotCallableError(
+ msg=message.format(
+ name=attr.name, value=value, actual=value.__class__
+ ),
+ value=value,
+ )
+
+ def __repr__(self):
+ return "<is_callable validator>"
+
+
+def is_callable():
+ """
+ A validator that raises a `attr.exceptions.NotCallableError` if the
+ initializer is called with a value for this particular attribute
+ that is not callable.
+
+ .. versionadded:: 19.1.0
+
+ :raises `attr.exceptions.NotCallableError`: With a human readable error
+ message containing the attribute (`attr.Attribute`) name,
+ and the value it got.
+ """
+ return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepIterable(object):
+ member_validator = attrib(validator=is_callable())
+ iterable_validator = attrib(
+ default=None, validator=optional(is_callable())
+ )
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.iterable_validator is not None:
+ self.iterable_validator(inst, attr, value)
+
+ for member in value:
+ self.member_validator(inst, attr, member)
+
+ def __repr__(self):
+ iterable_identifier = (
+ ""
+ if self.iterable_validator is None
+ else " {iterable!r}".format(iterable=self.iterable_validator)
+ )
+ return (
+ "<deep_iterable validator for{iterable_identifier}"
+ " iterables of {member!r}>"
+ ).format(
+ iterable_identifier=iterable_identifier,
+ member=self.member_validator,
+ )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+ """
+ A validator that performs deep validation of an iterable.
+
+ :param member_validator: Validator to apply to iterable members
+ :param iterable_validator: Validator to apply to iterable itself
+ (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
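+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     names = attr.ib(validator=attr.validators.deep_iterable(
+        ...         member_validator=attr.validators.instance_of(str),
+        ...         iterable_validator=attr.validators.instance_of(list),
+        ...     ))
+        >>> C(["a", "b"])
+        C(names=['a', 'b'])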
+ """
+ return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping(object):
+ key_validator = attrib(validator=is_callable())
+ value_validator = attrib(validator=is_callable())
+ mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.mapping_validator is not None:
+ self.mapping_validator(inst, attr, value)
+
+ for key in value:
+ self.key_validator(inst, attr, key)
+ self.value_validator(inst, attr, value[key])
+
+ def __repr__(self):
+ return (
+ "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+ ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+ """
+ A validator that performs deep validation of a dictionary.
+
+ :param key_validator: Validator to apply to dictionary keys
+ :param value_validator: Validator to apply to dictionary values
+ :param mapping_validator: Validator to apply to top-level mapping
+ attribute (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
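+
+    A minimal sketch (class and field names are illustrative)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C(object):
+        ...     env = attr.ib(validator=attr.validators.deep_mapping(
+        ...         key_validator=attr.validators.instance_of(str),
+        ...         value_validator=attr.validators.instance_of(str),
+        ...     ))
+        >>> C({"PATH": "/usr/bin"})
+        C(env={'PATH': '/usr/bin'})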
+ """
+ return _DeepMapping(key_validator, value_validator, mapping_validator)
diff --git a/lib/spack/external/ctest_log_parser.py b/lib/spack/external/ctest_log_parser.py
index 61cc6df029..2b2746003a 100644
--- a/lib/spack/external/ctest_log_parser.py
+++ b/lib/spack/external/ctest_log_parser.py
@@ -150,8 +150,6 @@ _error_exceptions = [
": note",
" ok",
"Note:",
- "makefile:",
- "Makefile:",
":[ \\t]+Where:",
"[^ :]:[0-9]+: Warning",
"------ Build started: .* ------",
@@ -189,8 +187,6 @@ _warning_exceptions = [
"/usr/.*/X11/XResource\\.h:[0-9]+: war.*: ANSI C\\+\\+ forbids declaration",
"WARNING 84 :",
"WARNING 47 :",
- "makefile:",
- "Makefile:",
"warning: Clock skew detected. Your build may be incomplete.",
"/usr/openwin/include/GL/[^:]+:",
"bind_at_load",
diff --git a/lib/spack/external/distro.py b/lib/spack/external/distro.py
index e3805de75f..7892741347 100644
--- a/lib/spack/external/distro.py
+++ b/lib/spack/external/distro.py
@@ -1,4 +1,4 @@
-# Copyright 2015,2016 Nir Cohen
+# Copyright 2015,2016,2017 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,32 +17,64 @@ The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
-It is a renewed alternative implementation for Python's original
+It is the recommended replacement for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
-3.5 deprecated this function, and Python 3.7 is expected to remove it
-altogether. Its predecessor function :py:func:`platform.dist` was already
-deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
-Still, there are many cases in which access to Linux distribution information
-is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
-more information.
+3.5 deprecated this function, and Python 3.8 removed it altogether. Its
+predecessor function :py:func:`platform.dist` was already deprecated since
+Python 2.6 and removed in Python 3.8. Still, there are many cases in which
+access to OS distribution information is needed. See `Python issue 1322
+<https://bugs.python.org/issue1322>`_ for more information.
"""
+import argparse
+import json
+import logging
import os
import re
-import sys
-import json
import shlex
-import logging
-import argparse
import subprocess
+import sys
+import warnings
+
+__version__ = "1.6.0"
+
+# Use `if False` to avoid an ImportError on Python 2. After dropping Python 2
+# support, can use typing.TYPE_CHECKING instead. See:
+# https://docs.python.org/3/library/typing.html#typing.TYPE_CHECKING
+if False: # pragma: nocover
+ from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+ TypedDict,
+ Union,
+ )
+
+ VersionDict = TypedDict(
+ "VersionDict", {"major": str, "minor": str, "build_number": str}
+ )
+ InfoDict = TypedDict(
+ "InfoDict",
+ {
+ "id": str,
+ "version": str,
+ "version_parts": VersionDict,
+ "like": str,
+ "codename": str,
+ },
+ )
-if not sys.platform.startswith('linux'):
- raise ImportError('Unsupported platform: {0}'.format(sys.platform))
-
-_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
-_OS_RELEASE_BASENAME = 'os-release'
+_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
+_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
+_OS_RELEASE_BASENAME = "os-release"
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
@@ -51,7 +83,9 @@ _OS_RELEASE_BASENAME = 'os-release'
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
-NORMALIZED_OS_ID = {}
+NORMALIZED_OS_ID = {
+ "ol": "oracle", # Oracle Linux
+}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
@@ -61,10 +95,11 @@ NORMALIZED_OS_ID = {}
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
- 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
- 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
- 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
- 'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode
+ "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4
+ "enterpriseenterpriseserver": "oracle", # Oracle Linux 5
+ "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation
+ "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server
+ "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode
}
#: Translation table for normalizing the distro ID derived from the file name
@@ -75,30 +110,40 @@ NORMALIZED_LSB_ID = {
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
- 'redhat': 'rhel', # RHEL 6.x, 7.x
+ "redhat": "rhel", # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
- r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
+ r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
+)
# Pattern for base file name of distro release file
-_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
- r'(\w+)[-_](release|version)$')
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
- 'debian_version',
- 'lsb-release',
- 'oem-release',
+ "debian_version",
+ "lsb-release",
+ "oem-release",
_OS_RELEASE_BASENAME,
- 'system-release'
+ "system-release",
+ "plesk-release",
+ "iredmail-release",
)
def linux_distribution(full_distribution_name=True):
+ # type: (bool) -> Tuple[str, str, str]
"""
- Return information about the current Linux distribution as a tuple
+ .. deprecated:: 1.6.0
+
+ :func:`distro.linux_distribution()` is deprecated. It should only be
+ used as a compatibility shim with Python's
+ :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
+ :func:`distro.version` and :func:`distro.name` instead.
+
+ Return information about the current OS distribution as a tuple
``(id_name, version, codename)`` with items as follows:
* ``id_name``: If *full_distribution_name* is false, the result of
@@ -114,22 +159,30 @@ def linux_distribution(full_distribution_name=True):
The data it returns may not exactly be the same, because it uses more data
sources than the original function, and that may lead to different data if
- the Linux distribution is not consistent across multiple data sources it
+ the OS distribution is not consistent across multiple data sources it
provides (there are indeed such distributions ...).
Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable value
- for a number of popular Linux distributions.
+ for a number of popular OS distributions.
"""
+ warnings.warn(
+ "distro.linux_distribution() is deprecated. It should only be used as a "
+ "compatibility shim with Python's platform.linux_distribution(). Please use "
+ "distro.id(), distro.version() and distro.name() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
return _distro.linux_distribution(full_distribution_name)
def id():
+ # type: () -> str
"""
- Return the distro ID of the current Linux distribution, as a
+ Return the distro ID of the current distribution, as a
machine-readable string.
- For a number of Linux distributions, the returned distro ID value is
+ For a number of OS distributions, the returned distro ID value is
*reliable*, in the sense that it is documented and that it does not change
across releases of the distribution.
@@ -162,6 +215,10 @@ def id():
"scientific" Scientific Linux
"slackware" Slackware
"xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ "midnightbsd" MidnightBSD
============== =========================================
If you have a need to get distros for reliable IDs added into this set,
@@ -191,7 +248,7 @@ def id():
* a normalization of the ID is performed, based upon
`normalization tables`_. The purpose of this normalization is to ensure
that the ID is as reliable as possible, even across incompatible changes
- in the Linux distributions. A common reason for an incompatible change is
+ in the OS distributions. A common reason for an incompatible change is
the addition of an os-release file, or the addition of the lsb_release
command, with ID values that differ from what was previously determined
from the distro release file name.
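As a rough illustration of the normalization described here, using the tables above and the same lowering/underscore step the ``id()`` implementation applies (shown later in this diff)::

    def normalize(distro_id, table):
        distro_id = distro_id.lower().replace(" ", "_")
        return table.get(distro_id, distro_id)

    normalize("RedHatEnterpriseServer", NORMALIZED_LSB_ID)  # -> "rhel"
    normalize("Ubuntu", NORMALIZED_LSB_ID)                  # -> "ubuntu" (no table entry)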
@@ -200,8 +257,9 @@ def id():
def name(pretty=False):
+ # type: (bool) -> str
"""
- Return the name of the current Linux distribution, as a human-readable
+ Return the name of the current OS distribution, as a human-readable
string.
If *pretty* is false, the name is returned without version or codename.
@@ -239,8 +297,9 @@ def name(pretty=False):
def version(pretty=False, best=False):
+ # type: (bool, bool) -> str
"""
- Return the version of the current Linux distribution, as a human-readable
+ Return the version of the current OS distribution, as a human-readable
string.
If *pretty* is false, the version is returned without codename (e.g.
@@ -283,8 +342,9 @@ def version(pretty=False, best=False):
def version_parts(best=False):
+ # type: (bool) -> Tuple[str, str, str]
"""
- Return the version of the current Linux distribution as a tuple
+ Return the version of the current OS distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
@@ -300,8 +360,9 @@ def version_parts(best=False):
def major_version(best=False):
+ # type: (bool) -> str
"""
- Return the major version of the current Linux distribution, as a string,
+ Return the major version of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The major version is the first
part of the dot-separated version string.
@@ -313,8 +374,9 @@ def major_version(best=False):
def minor_version(best=False):
+ # type: (bool) -> str
"""
- Return the minor version of the current Linux distribution, as a string,
+ Return the minor version of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The minor version is the second
part of the dot-separated version string.
@@ -326,8 +388,9 @@ def minor_version(best=False):
def build_number(best=False):
+ # type: (bool) -> str
"""
- Return the build number of the current Linux distribution, as a string,
+ Return the build number of the current OS distribution, as a string,
if provided.
Otherwise, the empty string is returned. The build number is the third part
of the dot-separated version string.
@@ -339,9 +402,10 @@ def build_number(best=False):
def like():
+ # type: () -> str
"""
Return a space-separated list of distro IDs of distributions that are
- closely related to the current Linux distribution in regards to packaging
+ closely related to the current OS distribution in regards to packaging
+ closely related to the current OS distribution with regard to packaging
and programming interfaces, for example distributions the current
distribution is a derivative from.
@@ -356,8 +420,9 @@ def like():
def codename():
+ # type: () -> str
"""
- Return the codename for the release of the current Linux distribution,
+ Return the codename for the release of the current OS distribution,
as a string.
If the distribution does not have a codename, an empty string is returned.
@@ -380,8 +445,9 @@ def codename():
def info(pretty=False, best=False):
+ # type: (bool, bool) -> InfoDict
"""
- Return certain machine-readable information items about the current Linux
+ Return certain machine-readable information items about the current OS
distribution in a dictionary, as shown in the following example:
.. sourcecode:: python
@@ -424,9 +490,10 @@ def info(pretty=False, best=False):
def os_release_info():
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information items
- from the os-release file data source of the current Linux distribution.
+ from the os-release file data source of the current OS distribution.
See `os-release file`_ for details about these information items.
"""
@@ -434,9 +501,10 @@ def os_release_info():
def lsb_release_info():
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information items
- from the lsb_release command data source of the current Linux distribution.
+ from the lsb_release command data source of the current OS distribution.
See `lsb_release command output`_ for details about these information
items.
@@ -445,19 +513,30 @@ def lsb_release_info():
def distro_release_info():
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information items
- from the distro release file data source of the current Linux distribution.
+ from the distro release file data source of the current OS distribution.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_info()
+def uname_info():
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the uname command data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
def os_release_attr(attribute):
+ # type: (str) -> str
"""
Return a single named information item from the os-release file data source
- of the current Linux distribution.
+ of the current OS distribution.
Parameters:
@@ -474,9 +553,10 @@ def os_release_attr(attribute):
def lsb_release_attr(attribute):
+ # type: (str) -> str
"""
Return a single named information item from the lsb_release command output
- data source of the current Linux distribution.
+ data source of the current OS distribution.
Parameters:
@@ -494,9 +574,10 @@ def lsb_release_attr(attribute):
def distro_release_attr(attribute):
+ # type: (str) -> str
"""
Return a single named information item from the distro release file
- data source of the current Linux distribution.
+ data source of the current OS distribution.
Parameters:
@@ -512,15 +593,55 @@ def distro_release_attr(attribute):
return _distro.distro_release_attr(attribute)
+def uname_attr(attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the uname command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+try:
+ from functools import cached_property
+except ImportError:
+ # Python < 3.8
+ class cached_property(object): # type: ignore
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+
+ def __init__(self, f):
+ # type: (Callable[[Any], Any]) -> None
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj, owner):
+ # type: (Any, Type[Any]) -> Any
+ assert obj is not None, "call {} on an instance".format(self._fname)
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
+
+
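A small sketch (hypothetical ``Probe`` class) of the memoization the fallback above provides: the first access stores the result in the instance ``__dict__``, so the non-data descriptor is bypassed on later accesses::

    class Probe(object):
        calls = 0

        @cached_property
        def data(self):
            Probe.calls += 1
            return {"id": "ubuntu"}

    p = Probe()
    p.data  # computes and caches; Probe.calls == 1
    p.data  # served from p.__dict__; Probe.calls is still 1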
class LinuxDistribution(object):
"""
- Provides information about a Linux distribution.
+ Provides information about an OS distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
- returns data about the current Linux distribution (i.e. the distro this
+ returns data about the current OS distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
@@ -530,10 +651,15 @@ class LinuxDistribution(object):
lsb_release command.
"""
- def __init__(self,
- include_lsb=True,
- os_release_file='',
- distro_release_file=''):
+ def __init__(
+ self,
+ include_lsb=True,
+ os_release_file="",
+ distro_release_file="",
+ include_uname=True,
+ root_dir=None,
+ ):
+ # type: (bool, str, str, bool, Optional[str]) -> None
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
@@ -567,6 +693,14 @@ class LinuxDistribution(object):
distro release file can be found, the data source for the distro
release file will be empty.
+ * ``include_uname`` (bool): Controls whether uname command output is
+ included as a data source. If the uname command is not available in
+ the program execution path, the data source for the uname command will
+ be empty.
+
+ * ``root_dir`` (string): The absolute path to the root directory to use
+ to find distro-related information files.
+
Public instance attributes:
* ``os_release_file`` (string): The path name of the
@@ -577,6 +711,13 @@ class LinuxDistribution(object):
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
@@ -589,33 +730,52 @@ class LinuxDistribution(object):
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
- self.os_release_file = os_release_file or \
- os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
- self.distro_release_file = distro_release_file or '' # updated later
- self._os_release_info = self._get_os_release_info()
- self._lsb_release_info = self._get_lsb_release_info() \
- if include_lsb else {}
- self._distro_release_info = self._get_distro_release_info()
+ self.root_dir = root_dir
+ self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
+ self.usr_lib_dir = (
+ os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
+ )
+
+ if os_release_file:
+ self.os_release_file = os_release_file
+ else:
+ etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
+ usr_lib_os_release_file = os.path.join(
+ self.usr_lib_dir, _OS_RELEASE_BASENAME
+ )
+
+ # NOTE: The idea is to respect order **and** have it set
+ # at all times for API backwards compatibility.
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
+ usr_lib_os_release_file
+ ):
+ self.os_release_file = etc_dir_os_release_file
+ else:
+ self.os_release_file = usr_lib_os_release_file
+
+ self.distro_release_file = distro_release_file or "" # updated later
+ self.include_lsb = include_lsb
+ self.include_uname = include_uname
def __repr__(self):
- """Return repr of all info
- """
- return \
- "LinuxDistribution(" \
- "os_release_file={0!r}, " \
- "distro_release_file={1!r}, " \
- "_os_release_info={2!r}, " \
- "_lsb_release_info={3!r}, " \
- "_distro_release_info={4!r})".format(
- self.os_release_file,
- self.distro_release_file,
- self._os_release_info,
- self._lsb_release_info,
- self._distro_release_info)
+ # type: () -> str
+ """Return repr of all info"""
+ return (
+ "LinuxDistribution("
+ "os_release_file={self.os_release_file!r}, "
+ "distro_release_file={self.distro_release_file!r}, "
+ "include_lsb={self.include_lsb!r}, "
+ "include_uname={self.include_uname!r}, "
+ "_os_release_info={self._os_release_info!r}, "
+ "_lsb_release_info={self._lsb_release_info!r}, "
+ "_distro_release_info={self._distro_release_info!r}, "
+ "_uname_info={self._uname_info!r})".format(self=self)
+ )
def linux_distribution(self, full_distribution_name=True):
+ # type: (bool) -> Tuple[str, str, str]
"""
- Return information about the Linux distribution that is compatible
+ Return information about the OS distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
@@ -624,101 +784,119 @@ class LinuxDistribution(object):
return (
self.name() if full_distribution_name else self.id(),
self.version(),
- self.codename()
+ self.codename(),
)
def id(self):
- """Return the distro ID of the Linux distribution, as a string.
+ # type: () -> str
+ """Return the distro ID of the OS distribution, as a string.
For details, see :func:`distro.id`.
"""
+
def normalize(distro_id, table):
- distro_id = distro_id.lower().replace(' ', '_')
+ # type: (str, Dict[str, str]) -> str
+ distro_id = distro_id.lower().replace(" ", "_")
return table.get(distro_id, distro_id)
- distro_id = self.os_release_attr('id')
+ distro_id = self.os_release_attr("id")
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
- distro_id = self.lsb_release_attr('distributor_id')
+ distro_id = self.lsb_release_attr("distributor_id")
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
- distro_id = self.distro_release_attr('id')
+ distro_id = self.distro_release_attr("id")
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
- return ''
+ distro_id = self.uname_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ""
def name(self, pretty=False):
+ # type: (bool) -> str
"""
- Return the name of the Linux distribution, as a string.
+ Return the name of the OS distribution, as a string.
For details, see :func:`distro.name`.
"""
- name = self.os_release_attr('name') \
- or self.lsb_release_attr('distributor_id') \
- or self.distro_release_attr('name')
+ name = (
+ self.os_release_attr("name")
+ or self.lsb_release_attr("distributor_id")
+ or self.distro_release_attr("name")
+ or self.uname_attr("name")
+ )
if pretty:
- name = self.os_release_attr('pretty_name') \
- or self.lsb_release_attr('description')
+ name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
+ "description"
+ )
if not name:
- name = self.distro_release_attr('name')
+ name = self.distro_release_attr("name") or self.uname_attr("name")
version = self.version(pretty=True)
if version:
- name = name + ' ' + version
- return name or ''
+ name = name + " " + version
+ return name or ""
def version(self, pretty=False, best=False):
+ # type: (bool, bool) -> str
"""
- Return the version of the Linux distribution, as a string.
+ Return the version of the OS distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
- self.os_release_attr('version_id'),
- self.lsb_release_attr('release'),
- self.distro_release_attr('version_id'),
- self._parse_distro_release_content(
- self.os_release_attr('pretty_name')).get('version_id', ''),
+ self.os_release_attr("version_id"),
+ self.lsb_release_attr("release"),
+ self.distro_release_attr("version_id"),
+ self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
+ "version_id", ""
+ ),
self._parse_distro_release_content(
- self.lsb_release_attr('description')).get('version_id', '')
+ self.lsb_release_attr("description")
+ ).get("version_id", ""),
+ self.uname_attr("release"),
]
- version = ''
+ version = ""
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
- if v.count(".") > version.count(".") or version == '':
+ if v.count(".") > version.count(".") or version == "":
version = v
else:
for v in versions:
- if v != '':
+ if v != "":
version = v
break
if pretty and version and self.codename():
- version = u'{0} ({1})'.format(version, self.codename())
+ version = "{0} ({1})".format(version, self.codename())
return version
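To make the ``best`` selection above concrete, take a hypothetical candidate list: the most precise (most dotted) version wins even when it appears later in priority order::

    versions = ["7", "7.9", "7.9.2009", ""]
    version = ""
    for v in versions:
        if v.count(".") > version.count(".") or version == "":
            version = v
    # version == "7.9.2009"; without best=True the first non-empty entry, "7", wins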
def version_parts(self, best=False):
+ # type: (bool) -> Tuple[str, str, str]
"""
- Return the version of the Linux distribution, as a tuple of version
+ Return the version of the OS distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
- version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+ version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
- return major, minor or '', build_number or ''
- return '', '', ''
+ return major, minor or "", build_number or ""
+ return "", "", ""
def major_version(self, best=False):
+ # type: (bool) -> str
"""
Return the major version number of the current distribution.
@@ -727,43 +905,54 @@ class LinuxDistribution(object):
return self.version_parts(best)[0]
def minor_version(self, best=False):
+ # type: (bool) -> str
"""
- Return the minor version number of the Linux distribution.
+ Return the minor version number of the current distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
+ # type: (bool) -> str
"""
- Return the build number of the Linux distribution.
+ Return the build number of the current distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
+ # type: () -> str
"""
- Return the IDs of distributions that are like the Linux distribution.
+ Return the IDs of distributions that are like the OS distribution.
For details, see :func:`distro.like`.
"""
- return self.os_release_attr('id_like') or ''
+ return self.os_release_attr("id_like") or ""
def codename(self):
+ # type: () -> str
"""
- Return the codename of the Linux distribution.
+ Return the codename of the OS distribution.
For details, see :func:`distro.codename`.
"""
- return self.os_release_attr('codename') \
- or self.lsb_release_attr('codename') \
- or self.distro_release_attr('codename') \
- or ''
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info["codename"]
+ except KeyError:
+ return (
+ self.lsb_release_attr("codename")
+ or self.distro_release_attr("codename")
+ or ""
+ )
def info(self, pretty=False, best=False):
+ # type: (bool, bool) -> InfoDict
"""
- Return certain machine-readable information about the Linux
+ Return certain machine-readable information about the OS
distribution.
For details, see :func:`distro.info`.
@@ -774,25 +963,27 @@ class LinuxDistribution(object):
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
- build_number=self.build_number(best)
+ build_number=self.build_number(best),
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information
- items from the os-release file data source of the Linux distribution.
+ items from the os-release file data source of the OS distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information
- items from the lsb_release command data source of the Linux
+ items from the lsb_release command data source of the OS
distribution.
For details, see :func:`distro.lsb_release_info`.
@@ -800,43 +991,69 @@ class LinuxDistribution(object):
return self._lsb_release_info
def distro_release_info(self):
+ # type: () -> Dict[str, str]
"""
Return a dictionary containing key-value pairs for the information
- items from the distro release file data source of the Linux
+ items from the distro release file data source of the OS
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
+ def uname_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
def os_release_attr(self, attribute):
+ # type: (str) -> str
"""
Return a single named information item from the os-release file data
- source of the Linux distribution.
+ source of the OS distribution.
For details, see :func:`distro.os_release_attr`.
"""
- return self._os_release_info.get(attribute, '')
+ return self._os_release_info.get(attribute, "")
def lsb_release_attr(self, attribute):
+ # type: (str) -> str
"""
Return a single named information item from the lsb_release command
- output data source of the Linux distribution.
+ output data source of the OS distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
- return self._lsb_release_info.get(attribute, '')
+ return self._lsb_release_info.get(attribute, "")
def distro_release_attr(self, attribute):
+ # type: (str) -> str
"""
Return a single named information item from the distro release file
- data source of the Linux distribution.
+ data source of the OS distribution.
For details, see :func:`distro.distro_release_attr`.
"""
- return self._distro_release_info.get(attribute, '')
+ return self._distro_release_info.get(attribute, "")
- def _get_os_release_info(self):
+ def uname_attr(self, attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_attr`.
+ """
+ return self._uname_info.get(attribute, "")
+
+ @cached_property
+ def _os_release_info(self):
+ # type: () -> Dict[str, str]
"""
Get the information items from the specified os-release file.
@@ -850,6 +1067,7 @@ class LinuxDistribution(object):
@staticmethod
def _parse_os_release_content(lines):
+ # type: (TextIO) -> Dict[str, str]
"""
Parse the lines of an os-release file.
@@ -874,7 +1092,7 @@ class LinuxDistribution(object):
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
- lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+ lexer.wordchars = lexer.wordchars.decode("iso-8859-1")
tokens = list(lexer)
for token in tokens:
@@ -884,61 +1102,59 @@ class LinuxDistribution(object):
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
- if '=' in token:
- k, v = token.split('=', 1)
- if isinstance(v, bytes):
- v = v.decode('utf-8')
+ if "=" in token:
+ k, v = token.split("=", 1)
props[k.lower()] = v
- if k == 'VERSION':
- # this handles cases in which the codename is in
- # the `(CODENAME)` (rhel, centos, fedora) format
- # or in the `, CODENAME` format (Ubuntu).
- codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
- if codename:
- codename = codename.group()
- codename = codename.strip('()')
- codename = codename.strip(',')
- codename = codename.strip()
- # codename appears within paranthese.
- props['codename'] = codename
- else:
- props['codename'] = ''
else:
# Ignore any tokens that are not variable assignments
pass
+
+ if "version_codename" in props:
+ # os-release added a version_codename field. Use that in
+ # preference to anything else. Note that some distros purposefully
+ # do not have code names. They should be setting
+ # version_codename=""
+ props["codename"] = props["version_codename"]
+ elif "ubuntu_codename" in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props["codename"] = props["ubuntu_codename"]
+ elif "version" in props:
+ # If there is no version_codename, parse it from the version
+ match = re.search(r"(\(\D+\))|,(\s+)?\D+", props["version"])
+ if match:
+ codename = match.group()
+ codename = codename.strip("()")
+ codename = codename.strip(",")
+ codename = codename.strip()
+ # codename appears within parentheses.
+ props["codename"] = codename
+
return props
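A hypothetical os-release fragment showing the codename precedence implemented above: an explicit VERSION_CODENAME beats the codename parsed out of VERSION::

    import io

    lines = io.StringIO(
        u'NAME="Ubuntu"\n'
        u'VERSION="20.04.2 LTS (Focal Fossa)"\n'
        u'VERSION_CODENAME=focal\n'
    )
    info = LinuxDistribution._parse_os_release_content(lines)
    info["codename"]  # -> "focal" (not "Focal Fossa" parsed from VERSION)
    info["name"]      # -> "Ubuntu"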
- def _get_lsb_release_info(self):
+ @cached_property
+ def _lsb_release_info(self):
+ # type: () -> Dict[str, str]
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
- cmd = 'lsb_release -a'
- process = subprocess.Popen(
- cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- stdout, stderr = process.communicate()
- stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
- code = process.returncode
- if code == 0:
- content = stdout.splitlines()
- return self._parse_lsb_release_content(content)
- elif code == 127: # Command not found
+ if not self.include_lsb:
return {}
- else:
- if sys.version_info[:2] >= (3, 5):
- raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
- elif sys.version_info[:2] >= (2, 7):
- raise subprocess.CalledProcessError(code, cmd, stdout)
- elif sys.version_info[:2] == (2, 6):
- raise subprocess.CalledProcessError(code, cmd)
+ with open(os.devnull, "wb") as devnull:
+ try:
+ cmd = ("lsb_release", "-a")
+ stdout = subprocess.check_output(cmd, stderr=devnull)
+ # Command not found or lsb_release returned error
+ except (OSError, subprocess.CalledProcessError):
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_lsb_release_content(content)
@staticmethod
def _parse_lsb_release_content(lines):
+ # type: (Iterable[str]) -> Dict[str, str]
"""
Parse the output of the lsb_release command.
@@ -953,16 +1169,62 @@ class LinuxDistribution(object):
"""
props = {}
for line in lines:
- line = line.decode('utf-8') if isinstance(line, bytes) else line
- kv = line.strip('\n').split(':', 1)
+ kv = line.strip("\n").split(":", 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
- props.update({k.replace(' ', '_').lower(): v.strip()})
+ props.update({k.replace(" ", "_").lower(): v.strip()})
+ return props
+
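Parsing is line-oriented; a hypothetical lsb_release output shows the key folding (spaces to underscores, lowercased)::

    LinuxDistribution._parse_lsb_release_content(
        ["Distributor ID:\tUbuntu", "Release:\t20.04", "Codename:\tfocal"]
    )
    # -> {"distributor_id": "Ubuntu", "release": "20.04", "codename": "focal"}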
+ @cached_property
+ def _uname_info(self):
+ # type: () -> Dict[str, str]
+ with open(os.devnull, "wb") as devnull:
+ try:
+ cmd = ("uname", "-rs")
+ stdout = subprocess.check_output(cmd, stderr=devnull)
+ except OSError:
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_uname_content(content)
+
+ @staticmethod
+ def _parse_uname_content(lines):
+ # type: (Sequence[str]) -> Dict[str, str]
+ props = {}
+ match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == "Linux":
+ return {}
+ props["id"] = name.lower()
+ props["name"] = name
+ props["release"] = version
return props
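Two hypothetical ``uname -rs`` lines showing the guard above: a bare Linux kernel version is discarded rather than mistaken for a distro version::

    LinuxDistribution._parse_uname_content(["FreeBSD 12.2-RELEASE"])
    # -> {"id": "freebsd", "name": "FreeBSD", "release": "12.2"}
    LinuxDistribution._parse_uname_content(["Linux 5.4.0-90-generic"])
    # -> {} (kernel version deliberately dropped)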
- def _get_distro_release_info(self):
+ @staticmethod
+ def _to_str(text):
+ # type: (Union[bytes, str]) -> str
+ encoding = sys.getfilesystemencoding()
+ encoding = "utf-8" if encoding == "ascii" else encoding
+
+ if sys.version_info[0] >= 3:
+ if isinstance(text, bytes):
+ return text.decode(encoding)
+ else:
+ if isinstance(text, unicode): # noqa
+ return text.encode(encoding)
+
+ return text
+
+ @cached_property
+ def _distro_release_info(self):
+ # type: () -> Dict[str, str]
"""
Get the information items from the specified distro release file.
@@ -972,20 +1234,21 @@ class LinuxDistribution(object):
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
- distro_info = self._parse_distro_release_file(
- self.distro_release_file)
+ distro_info = self._parse_distro_release_file(self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
- if match:
- distro_info['id'] = match.group(1)
+ if "name" in distro_info and "cloudlinux" in distro_info["name"].lower():
+ distro_info["id"] = "cloudlinux"
+ elif match:
+ distro_info["id"] = match.group(1)
return distro_info
else:
try:
- basenames = os.listdir(_UNIXCONFDIR)
+ basenames = os.listdir(self.etc_dir)
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
@@ -995,33 +1258,41 @@ class LinuxDistribution(object):
# sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`.
- basenames = ['SuSE-release',
- 'arch-release',
- 'base-release',
- 'centos-release',
- 'fedora-release',
- 'gentoo-release',
- 'mageia-release',
- 'manjaro-release',
- 'oracle-release',
- 'redhat-release',
- 'sl-release',
- 'slackware-version']
+ basenames = [
+ "SuSE-release",
+ "arch-release",
+ "base-release",
+ "centos-release",
+ "fedora-release",
+ "gentoo-release",
+ "mageia-release",
+ "mandrake-release",
+ "mandriva-release",
+ "mandrivalinux-release",
+ "manjaro-release",
+ "oracle-release",
+ "redhat-release",
+ "sl-release",
+ "slackware-version",
+ ]
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
- filepath = os.path.join(_UNIXCONFDIR, basename)
+ filepath = os.path.join(self.etc_dir, basename)
distro_info = self._parse_distro_release_file(filepath)
- if 'name' in distro_info:
+ if "name" in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
- distro_info['id'] = match.group(1)
+ distro_info["id"] = match.group(1)
+ if "cloudlinux" in distro_info["name"].lower():
+ distro_info["id"] = "cloudlinux"
return distro_info
return {}
def _parse_distro_release_file(self, filepath):
+ # type: (str) -> Dict[str, str]
"""
Parse a distro release file.
@@ -1040,11 +1311,12 @@ class LinuxDistribution(object):
except (OSError, IOError):
# Ignore not being able to read a specific, seemingly version
# related file.
- # See https://github.com/nir0s/distro/issues/162
+ # See https://github.com/python-distro/distro/issues/162
return {}
@staticmethod
def _parse_distro_release_content(line):
+ # type: (str) -> Dict[str, str]
"""
Parse a line from a distro release file.
@@ -1055,20 +1327,17 @@ class LinuxDistribution(object):
Returns:
A dictionary containing all information items.
"""
- if isinstance(line, bytes):
- line = line.decode('utf-8')
- matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
- line.strip()[::-1])
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
- distro_info['name'] = matches.group(3)[::-1]
+ distro_info["name"] = matches.group(3)[::-1]
if matches.group(2):
- distro_info['version_id'] = matches.group(2)[::-1]
+ distro_info["version_id"] = matches.group(2)[::-1]
if matches.group(1):
- distro_info['codename'] = matches.group(1)[::-1]
+ distro_info["codename"] = matches.group(1)[::-1]
elif line:
- distro_info['name'] = line.strip()
+ distro_info["name"] = line.strip()
return distro_info
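The reversed-pattern trick above in action on a typical release line (hypothetical input)::

    LinuxDistribution._parse_distro_release_content(
        "CentOS Linux release 7.9.2009 (Core)"
    )
    # -> {"name": "CentOS Linux", "version_id": "7.9.2009", "codename": "Core"}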
@@ -1076,27 +1345,42 @@ _distro = LinuxDistribution()
def main():
+ # type: () -> None
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
- parser = argparse.ArgumentParser(description="Linux distro info tool")
+ parser = argparse.ArgumentParser(description="OS distro info tool")
parser.add_argument(
- '--json',
- '-j',
- help="Output in machine readable format",
- action="store_true")
+ "--json", "-j", help="Output in machine readable format", action="store_true"
+ )
+
+ parser.add_argument(
+ "--root-dir",
+ "-r",
+ type=str,
+ dest="root_dir",
+ help="Path to the root filesystem directory (defaults to /)",
+ )
+
args = parser.parse_args()
+ if args.root_dir:
+ dist = LinuxDistribution(
+ include_lsb=False, include_uname=False, root_dir=args.root_dir
+ )
+ else:
+ dist = _distro
+
if args.json:
- logger.info(json.dumps(info(), indent=4, sort_keys=True))
+ logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
else:
- logger.info('Name: %s', name(pretty=True))
- distribution_version = version(pretty=True)
- logger.info('Version: %s', distribution_version)
- distribution_codename = codename()
- logger.info('Codename: %s', distribution_codename)
+ logger.info("Name: %s", dist.name(pretty=True))
+ distribution_version = dist.version(pretty=True)
+ logger.info("Version: %s", distribution_version)
+ distribution_codename = dist.codename()
+ logger.info("Codename: %s", distribution_codename)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
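The ``--root-dir`` option is useful for inspecting a mounted image; the programmatic equivalent (the path here is hypothetical) disables the host-side lsb_release and uname probes::

    dist = LinuxDistribution(
        include_lsb=False, include_uname=False, root_dir="/mnt/sysroot"
    )
    dist.info()  # reads only /mnt/sysroot/etc and /mnt/sysroot/usr/lib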
diff --git a/lib/spack/external/functools_backport.py b/lib/spack/external/functools_backport.py
deleted file mode 100644
index b3c913ffd7..0000000000
--- a/lib/spack/external/functools_backport.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Backport of Python 2.7's total_ordering.
-#
-
-def total_ordering(cls):
- """Class decorator that fills in missing ordering methods"""
- convert = {
- '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
- ('__le__', lambda self, other: self < other or self == other),
- ('__ge__', lambda self, other: not self < other)],
- '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
- ('__lt__', lambda self, other: self <= other and not self == other),
- ('__gt__', lambda self, other: not self <= other)],
- '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
- ('__ge__', lambda self, other: self > other or self == other),
- ('__le__', lambda self, other: not self > other)],
- '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
- ('__gt__', lambda self, other: self >= other and not self == other),
- ('__lt__', lambda self, other: not self >= other)]
- }
- roots = set(dir(cls)) & set(convert)
- if not roots:
- raise ValueError('must define at least one ordering operation: < > <= >=')
- root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
- for opname, opfunc in convert[root]:
- if opname not in roots:
- opfunc.__name__ = opname
- opfunc.__doc__ = getattr(int, opname).__doc__
- setattr(cls, opname, opfunc)
- return cls
-
-
-@total_ordering
-class reverse_order(object):
- """Helper for creating key functions.
-
- This is a wrapper that inverts the sense of the natural
- comparisons on the object.
- """
- def __init__(self, value):
- self.value = value
-
- def __eq__(self, other):
- return other.value == self.value
-
- def __lt__(self, other):
- return other.value < self.value
diff --git a/lib/spack/external/jinja2/LICENSE.rst b/lib/spack/external/jinja2/LICENSE.rst
new file mode 100644
index 0000000000..c37cae49ec
--- /dev/null
+++ b/lib/spack/external/jinja2/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/spack/external/jinja2/__init__.py b/lib/spack/external/jinja2/__init__.py
index 42aa763d57..f17866f6c4 100644
--- a/lib/spack/external/jinja2/__init__.py
+++ b/lib/spack/external/jinja2/__init__.py
@@ -1,83 +1,44 @@
# -*- coding: utf-8 -*-
+"""Jinja is a template engine written in pure Python. It provides a
+non-XML syntax that supports inline expressions and an optional
+sandboxed environment.
"""
- jinja2
- ~~~~~~
-
- Jinja2 is a template engine written in pure Python. It provides a
- Django inspired non-XML syntax but supports inline expressions and
- an optional sandboxed environment.
-
- Nutshell
- --------
-
- Here a small example of a Jinja2 template::
-
- {% extends 'base.html' %}
- {% block title %}Memberlist{% endblock %}
- {% block content %}
- <ul>
- {% for user in users %}
- <li><a href="{{ user.url }}">{{ user.username }}</a></li>
- {% endfor %}
- </ul>
- {% endblock %}
-
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-__docformat__ = 'restructuredtext en'
-__version__ = '2.10'
-
-# high level interface
-from jinja2.environment import Environment, Template
-
-# loaders
-from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
- DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
- ModuleLoader
-
-# bytecode caches
-from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
- MemcachedBytecodeCache
-
-# undefined types
-from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
- make_logging_undefined
-
-# exceptions
-from jinja2.exceptions import TemplateError, UndefinedError, \
- TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
- TemplateAssertionError, TemplateRuntimeError
-
-# decorators and public utilities
-from jinja2.filters import environmentfilter, contextfilter, \
- evalcontextfilter
-from jinja2.utils import Markup, escape, clear_caches, \
- environmentfunction, evalcontextfunction, contextfunction, \
- is_undefined, select_autoescape
-
-__all__ = [
- 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
- 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
- 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
- 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
- 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
- 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
- 'TemplateRuntimeError',
- 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
- 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
- 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
- 'select_autoescape',
-]
-
-
-def _patch_async():
- from jinja2.utils import have_async_gen
- if have_async_gen:
- from jinja2.asyncsupport import patch_all
- patch_all()
-
-
-_patch_async()
-del _patch_async
+from markupsafe import escape
+from markupsafe import Markup
+
+from .bccache import BytecodeCache
+from .bccache import FileSystemBytecodeCache
+from .bccache import MemcachedBytecodeCache
+from .environment import Environment
+from .environment import Template
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateError
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .filters import contextfilter
+from .filters import environmentfilter
+from .filters import evalcontextfilter
+from .loaders import BaseLoader
+from .loaders import ChoiceLoader
+from .loaders import DictLoader
+from .loaders import FileSystemLoader
+from .loaders import FunctionLoader
+from .loaders import ModuleLoader
+from .loaders import PackageLoader
+from .loaders import PrefixLoader
+from .runtime import ChainableUndefined
+from .runtime import DebugUndefined
+from .runtime import make_logging_undefined
+from .runtime import StrictUndefined
+from .runtime import Undefined
+from .utils import clear_caches
+from .utils import contextfunction
+from .utils import environmentfunction
+from .utils import evalcontextfunction
+from .utils import is_undefined
+from .utils import select_autoescape
+
+__version__ = "2.11.3"
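The re-exported surface stays the same from a caller's point of view; a minimal smoke test::

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({"hello": "Hello {{ name }}!"}))
    env.get_template("hello").render(name="Spack")  # -> "Hello Spack!"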
diff --git a/lib/spack/external/jinja2/_compat.py b/lib/spack/external/jinja2/_compat.py
index 61d85301a4..1f044954a0 100644
--- a/lib/spack/external/jinja2/_compat.py
+++ b/lib/spack/external/jinja2/_compat.py
@@ -1,22 +1,12 @@
# -*- coding: utf-8 -*-
-"""
- jinja2._compat
- ~~~~~~~~~~~~~~
-
- Some py2/py3 compatibility support based on a stripped down
- version of six so we don't have to depend on a specific version
- of it.
-
- :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
+# flake8: noqa
+import marshal
import sys
PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, 'pypy_translation_info')
+PYPY = hasattr(sys, "pypy_translation_info")
_identity = lambda x: x
-
if not PY2:
unichr = chr
range_type = range
@@ -30,6 +20,7 @@ if not PY2:
import pickle
from io import BytesIO, StringIO
+
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
@@ -46,6 +37,9 @@ if not PY2:
implements_to_string = _identity
encode_filename = _identity
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+
else:
unichr = unichr
text_type = unicode
@@ -59,11 +53,13 @@ else:
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
+
NativeStringIO = BytesIO
- exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+ exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
from itertools import imap, izip, ifilter
+
intern = intern
def implements_iterator(cls):
@@ -73,14 +69,25 @@ else:
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
return cls
def encode_filename(filename):
if isinstance(filename, unicode):
- return filename.encode('utf-8')
+ return filename.encode("utf-8")
return filename
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
@@ -90,10 +97,36 @@ def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
+
+ return type.__new__(metaclass, "temporary_class", (), {})
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
+
+
+try:
+ from collections import abc
+except ImportError:
+ import collections as abc
+
+
+try:
+ from os import fspath
+except ImportError:
+ try:
+ from pathlib import PurePath
+ except ImportError:
+ PurePath = None
+
+ def fspath(path):
+ if hasattr(path, "__fspath__"):
+ return path.__fspath__()
+
+ # Python 3.5 doesn't have __fspath__ yet, use str.
+ if PurePath is not None and isinstance(path, PurePath):
+ return str(path)
+
+ return path
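The fallback above mirrors ``os.fspath`` for interpreters that predate it (Python < 3.6); for example::

    from pathlib import PurePosixPath

    fspath(PurePosixPath("templates") / "base.html")  # -> "templates/base.html"
    fspath("already-a-string")                        # returned unchanged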
diff --git a/lib/spack/external/jinja2/_identifier.py b/lib/spack/external/jinja2/_identifier.py
index 2eac35d5c3..224d5449d1 100644
--- a/lib/spack/external/jinja2/_identifier.py
+++ b/lib/spack/external/jinja2/_identifier.py
@@ -1,2 +1,6 @@
+import re
+
# generated by scripts/generate_identifier_pattern.py
-pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
+pattern = re.compile(
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+)
diff --git a/lib/spack/external/jinja2/asyncfilters.py b/lib/spack/external/jinja2/asyncfilters.py
index 5c1f46d7fa..3d98dbcc00 100644
--- a/lib/spack/external/jinja2/asyncfilters.py
+++ b/lib/spack/external/jinja2/asyncfilters.py
@@ -1,12 +1,13 @@
from functools import wraps
-from jinja2.asyncsupport import auto_aiter
-from jinja2 import filters
+from . import filters
+from .asyncsupport import auto_aiter
+from .asyncsupport import auto_await
async def auto_to_seq(value):
seq = []
- if hasattr(value, '__aiter__'):
+ if hasattr(value, "__aiter__"):
async for item in value:
seq.append(item)
else:
@@ -16,8 +17,7 @@ async def auto_to_seq(value):
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(
- args, kwargs, modfunc, lookup_attr)
+ seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
@@ -26,14 +26,19 @@ async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
- if getattr(normal_filter, 'environmentfilter', False):
- is_async = lambda args: args[0].is_async
+ if getattr(normal_filter, "environmentfilter", False) is True:
+
+ def is_async(args):
+ return args[0].is_async
+
wrap_evalctx = False
else:
- if not getattr(normal_filter, 'evalcontextfilter', False) and \
- not getattr(normal_filter, 'contextfilter', False):
- wrap_evalctx = True
- is_async = lambda args: args[0].environment.is_async
+ has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
+ has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
+ wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
+
+ def is_async(args):
+ return args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
@@ -55,6 +60,7 @@ def dualfilter(normal_filter, async_filter):
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
+
return decorator
@@ -63,19 +69,22 @@ async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
- return environment.undefined('No first item, sequence was empty.')
+ return environment.undefined("No first item, sequence was empty.")
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
- return [filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(sorted(
- await auto_to_seq(value), key=expr), expr)]
+ return [
+ filters._GroupTuple(key, await auto_to_seq(values))
+ for key, values in filters.groupby(
+ sorted(await auto_to_seq(value), key=expr), expr
+ )
+ ]
@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u'', attribute=None):
+async def do_join(eval_ctx, value, d=u"", attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@@ -109,7 +118,7 @@ async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
- yield func(item)
+ yield await auto_await(func(item))
@asyncfiltervariant(filters.do_sum)
@@ -118,7 +127,10 @@ async def do_sum(environment, iterable, attribute=None, start=0):
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
- func = lambda x: x
+
+ def func(x):
+ return x
+
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@@ -130,17 +142,17 @@ async def do_slice(value, slices, fill_with=None):
ASYNC_FILTERS = {
- 'first': do_first,
- 'groupby': do_groupby,
- 'join': do_join,
- 'list': do_list,
+ "first": do_first,
+ "groupby": do_groupby,
+ "join": do_join,
+ "list": do_list,
# we intentionally do not support do_last because that would be
# ridiculous
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'map': do_map,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'sum': do_sum,
- 'slice': do_slice,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "map": do_map,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "sum": do_sum,
+ "slice": do_slice,
}
diff --git a/lib/spack/external/jinja2/asyncsupport.py b/lib/spack/external/jinja2/asyncsupport.py
index b1e7b5ce9a..78ba3739d8 100644
--- a/lib/spack/external/jinja2/asyncsupport.py
+++ b/lib/spack/external/jinja2/asyncsupport.py
@@ -1,29 +1,27 @@
# -*- coding: utf-8 -*-
+"""The code for async support. Importing this patches Jinja on supported
+Python versions.
"""
- jinja2.asyncsupport
- ~~~~~~~~~~~~~~~~~~~
-
- Has all the code for async support which is implemented as a patch
- for supported Python versions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
import asyncio
import inspect
from functools import update_wrapper
-from jinja2.utils import concat, internalcode, Markup
-from jinja2.environment import TemplateModule
-from jinja2.runtime import LoopContextBase, _last_iteration
+from markupsafe import Markup
+
+from .environment import TemplateModule
+from .runtime import LoopContext
+from .utils import concat
+from .utils import internalcode
+from .utils import missing
async def concat_async(async_gen):
rv = []
+
async def collect():
async for event in async_gen:
rv.append(event)
+
await collect()
return concat(rv)
@@ -34,10 +32,7 @@ async def generate_async(self, *args, **kwargs):
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
+ yield self.environment.handle_exception()
def wrap_generate_func(original_generate):
@@ -48,17 +43,18 @@ def wrap_generate_func(original_generate):
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
+
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
+
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
- raise RuntimeError('The environment was not created with async mode '
- 'enabled.')
+ raise RuntimeError("The environment was not created with async mode enabled.")
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
@@ -66,8 +62,7 @@ async def render_async(self, *args, **kwargs):
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
+ return self.environment.handle_exception()
def wrap_render_func(original_render):
@@ -76,6 +71,7 @@ def wrap_render_func(original_render):
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
+
return update_wrapper(render, original_render)
@@ -109,6 +105,7 @@ def wrap_macro_invoke(original_invoke):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
+
return update_wrapper(_invoke, original_invoke)
@@ -124,9 +121,9 @@ def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
- raise RuntimeError('Template module attribute is unavailable '
- 'in async mode')
+ raise RuntimeError("Template module attribute is unavailable in async mode")
return original_default_module(self)
+
return _get_default_module
@@ -139,30 +136,30 @@ async def make_module_async(self, vars=None, shared=False, locals=None):
def patch_template():
- from jinja2 import Template
+ from . import Template
+
Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(
- generate_async, Template.generate_async)
- Template.render_async = update_wrapper(
- render_async, Template.render_async)
+ Template.generate_async = update_wrapper(generate_async, Template.generate_async)
+ Template.render_async = update_wrapper(render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(
- Template._get_default_module)
+ Template._get_default_module = wrap_default_module(Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async)
+ make_module_async, Template.make_module_async
+ )
def patch_runtime():
- from jinja2.runtime import BlockReference, Macro
- BlockReference.__call__ = wrap_block_reference_call(
- BlockReference.__call__)
+ from .runtime import BlockReference, Macro
+
+ BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
- from jinja2.filters import FILTERS
- from jinja2.asyncfilters import ASYNC_FILTERS
+ from .filters import FILTERS
+ from .asyncfilters import ASYNC_FILTERS
+
FILTERS.update(ASYNC_FILTERS)
@@ -179,7 +176,7 @@ async def auto_await(value):
async def auto_aiter(iterable):
- if hasattr(iterable, '__aiter__'):
+ if hasattr(iterable, "__aiter__"):
async for item in iterable:
yield item
return
@@ -187,70 +184,81 @@ async def auto_aiter(iterable):
yield item
-class AsyncLoopContext(LoopContextBase):
-
- def __init__(self, async_iterator, undefined, after, length, recurse=None,
- depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._async_iterator = async_iterator
- self._after = after
- self._length = length
+class AsyncLoopContext(LoopContext):
+ _to_iterator = staticmethod(auto_aiter)
@property
- def length(self):
- if self._length is None:
- raise TypeError('Loop length for some iterators cannot be '
- 'lazily calculated in async mode')
+ async def length(self):
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = [x async for x in self._iterator]
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
return self._length
- def __aiter__(self):
- return AsyncLoopContextIterator(self)
+ @property
+ async def revindex0(self):
+ return await self.length - self.index
+ @property
+ async def revindex(self):
+ return await self.length - self.index0
+
+ async def _peek_next(self):
+ if self._after is not missing:
+ return self._after
+
+ try:
+ self._after = await self._iterator.__anext__()
+ except StopAsyncIteration:
+ self._after = missing
-class AsyncLoopContextIterator(object):
- __slots__ = ('context',)
+ return self._after
- def __init__(self, context):
- self.context = context
+ @property
+ async def last(self):
+ return await self._peek_next() is missing
+
+ @property
+ async def nextitem(self):
+ rv = await self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
def __aiter__(self):
return self
async def __anext__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopAsyncIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- try:
- ctx._after = await ctx._async_iterator.__anext__()
- except StopAsyncIteration:
- ctx._after = _last_iteration
- return ctx._current, ctx
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = await self._iterator.__anext__()
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- # Length is more complicated and less efficient in async mode. The
- # reason for this is that we cannot know if length will be used
- # upfront but because length is a property we cannot lazily execute it
- # later. This means that we need to buffer it up and measure :(
- #
- # We however only do this for actual iterators, not for async
- # iterators as blocking here does not seem like the best idea in the
- # world.
- try:
- length = len(iterable)
- except (TypeError, AttributeError):
- if not hasattr(iterable, '__aiter__'):
- iterable = tuple(iterable)
- length = len(iterable)
- else:
- length = None
- async_iterator = auto_aiter(iterable)
- try:
- after = await async_iterator.__anext__()
- except StopAsyncIteration:
- after = _last_iteration
- return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
- depth0)
+ import warnings
+
+ warnings.warn(
+ "This template must be recompiled with at least Jinja 2.11, or"
+ " it will fail in 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return AsyncLoopContext(iterable, undefined, recurse, depth0)
+
+
+patch_all()
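
For context on what the patching above provides, a usage sketch against the Jinja 2.11 API (the template string is illustrative): constructing the environment with enable_async=True imports this module, which installs the wrappers defined above and gives every template coroutine entry points.

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string("Hello {{ name }}!")

    async def main():
        # render_async is the coroutine attached by patch_template().
        print(await tmpl.render_async(name="async world"))

    asyncio.run(main())

The synchronous tmpl.render() keeps working: wrap_render_func() falls through to the original implementation when the environment is not async, and otherwise drives render_async on an event loop.
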
diff --git a/lib/spack/external/jinja2/bccache.py b/lib/spack/external/jinja2/bccache.py
index 080e527cab..9c0661030f 100644
--- a/lib/spack/external/jinja2/bccache.py
+++ b/lib/spack/external/jinja2/bccache.py
@@ -1,60 +1,37 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.bccache
- ~~~~~~~~~~~~~~
-
- This module implements the bytecode cache system Jinja is optionally
- using. This is useful if you have very complex template situations and
- the compiliation of all those templates slow down your application too
- much.
-
- Situations where this is useful are often forking web applications that
- are initialized on the first request.
+"""The optional bytecode cache system. This is useful if you have very
+complex template situations and the compilation of all those templates
+slows down your application too much.
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
+Situations where this is useful are often forking web applications that
+are initialized on the first request.
"""
-from os import path, listdir
+import errno
+import fnmatch
import os
-import sys
import stat
-import errno
-import marshal
+import sys
import tempfile
-import fnmatch
from hashlib import sha1
-from jinja2.utils import open_if_exists
-from jinja2._compat import BytesIO, pickle, PY2, text_type
-
-
-# marshal works better on 3.x, one hack less required
-if not PY2:
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-else:
-
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
-
-bc_version = 3
-
-# magic version used to only change with new jinja versions. With 2.6
-# we change this to also take Python version changes into account. The
-# reason for this is that Python tends to segfault if fed earlier bytecode
-# versions because someone thought it would be a good idea to reuse opcodes
-# or make Python incompatible with earlier versions.
-bc_magic = 'j2'.encode('ascii') + \
- pickle.dumps(bc_version, 2) + \
- pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
+from os import listdir
+from os import path
+
+from ._compat import BytesIO
+from ._compat import marshal_dump
+from ._compat import marshal_load
+from ._compat import pickle
+from ._compat import text_type
+from .utils import open_if_exists
+
+bc_version = 4
+# Magic bytes to identify Jinja bytecode cache files. Contains the
+# Python major and minor version to avoid loading incompatible bytecode
+# if a project upgrades its Python version.
+bc_magic = (
+ b"j2"
+ + pickle.dumps(bc_version, 2)
+ + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
+)
class Bucket(object):
@@ -98,7 +75,7 @@ class Bucket(object):
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
- raise TypeError('can\'t write empty bucket')
+ raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
@@ -140,7 +117,7 @@ class BytecodeCache(object):
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
- Jinja2.
+ Jinja.
"""
def load_bytecode(self, bucket):
@@ -158,24 +135,24 @@ class BytecodeCache(object):
raise NotImplementedError()
def clear(self):
- """Clears the cache. This method is not used by Jinja2 but should be
+ """Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
- hash = sha1(name.encode('utf-8'))
+ hash = sha1(name.encode("utf-8"))
if filename is not None:
- filename = '|' + filename
+ filename = "|" + filename
if isinstance(filename, text_type):
- filename = filename.encode('utf-8')
+ filename = filename.encode("utf-8")
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
- return sha1(source.encode('utf-8')).hexdigest()
+ return sha1(source.encode("utf-8")).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
@@ -210,7 +187,7 @@ class FileSystemBytecodeCache(BytecodeCache):
This bytecode cache supports clearing of the cache using the clear method.
"""
- def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+ def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
@@ -218,19 +195,21 @@ class FileSystemBytecodeCache(BytecodeCache):
def _get_default_cache_dir(self):
def _unsafe_dir():
- raise RuntimeError('Cannot determine safe temp directory. You '
- 'need to explicitly provide one.')
+ raise RuntimeError(
+ "Cannot determine safe temp directory. You "
+ "need to explicitly provide one."
+ )
tmpdir = tempfile.gettempdir()
# On Windows the temporary directory is user-specific unless
# explicitly forced otherwise. We can just use that.
- if os.name == 'nt':
+ if os.name == "nt":
return tmpdir
- if not hasattr(os, 'getuid'):
+ if not hasattr(os, "getuid"):
_unsafe_dir()
- dirname = '_jinja2-cache-%d' % os.getuid()
+ dirname = "_jinja2-cache-%d" % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
@@ -241,18 +220,22 @@ class FileSystemBytecodeCache(BytecodeCache):
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
_unsafe_dir()
return actual_dir
@@ -261,7 +244,7 @@ class FileSystemBytecodeCache(BytecodeCache):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+ f = open_if_exists(self._get_cache_filename(bucket), "rb")
if f is not None:
try:
bucket.load_bytecode(f)
@@ -269,7 +252,7 @@ class FileSystemBytecodeCache(BytecodeCache):
f.close()
def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), 'wb')
+ f = open(self._get_cache_filename(bucket), "wb")
try:
bucket.write_bytecode(f)
finally:
@@ -280,7 +263,8 @@ class FileSystemBytecodeCache(BytecodeCache):
# write access on the file system and the function does not exist
# normally.
from os import remove
- files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+
+ files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
for filename in files:
try:
remove(path.join(self.directory, filename))
@@ -296,9 +280,8 @@ class MemcachedBytecodeCache(BytecodeCache):
Libraries compatible with this class:
- - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- - `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
- - `cmemcache <http://gijsbert.org/cmemcache/>`_
+ - `cachelib <https://github.com/pallets/cachelib>`_
+ - `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
@@ -334,8 +317,13 @@ class MemcachedBytecodeCache(BytecodeCache):
`ignore_memcache_errors` parameter.
"""
- def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
- ignore_memcache_errors=True):
+ def __init__(
+ self,
+ client,
+ prefix="jinja2/bytecode/",
+ timeout=None,
+ ignore_memcache_errors=True,
+ ):
self.client = client
self.prefix = prefix
self.timeout = timeout
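
A usage sketch for the cache classes reworked above (loader path and template name are illustrative): handing the environment a FileSystemBytecodeCache makes it persist one "__jinja2_<key>.cache" file per template, keyed by the sha1 digest from get_cache_key(), so later processes skip recompilation.

    from jinja2 import Environment, FileSystemBytecodeCache, FileSystemLoader

    env = Environment(
        loader=FileSystemLoader("templates"),
        # With no directory argument, _get_default_cache_dir() picks a
        # per-user "_jinja2-cache-<uid>" directory under the system tempdir.
        bytecode_cache=FileSystemBytecodeCache(),
    )
    # The first load compiles and dumps marshalled bytecode through
    # Bucket.write_bytecode(); subsequent loads read it back instead.
    template = env.get_template("page.html")
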
diff --git a/lib/spack/external/jinja2/compiler.py b/lib/spack/external/jinja2/compiler.py
index d534a82739..63297b42c3 100644
--- a/lib/spack/external/jinja2/compiler.py
+++ b/lib/spack/external/jinja2/compiler.py
@@ -1,59 +1,62 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.compiler
- ~~~~~~~~~~~~~~~
-
- Compiles nodes into python code.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
+"""Compiles nodes from the parser into Python code."""
+from collections import namedtuple
+from functools import update_wrapper
from itertools import chain
-from copy import deepcopy
from keyword import iskeyword as is_python_keyword
-from functools import update_wrapper
-from jinja2 import nodes
-from jinja2.nodes import EvalContext
-from jinja2.visitor import NodeVisitor
-from jinja2.optimizer import Optimizer
-from jinja2.exceptions import TemplateAssertionError
-from jinja2.utils import Markup, concat, escape
-from jinja2._compat import range_type, text_type, string_types, \
- iteritems, NativeStringIO, imap, izip
-from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
- VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
+from markupsafe import escape
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import izip
+from ._compat import NativeStringIO
+from ._compat import range_type
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import TemplateAssertionError
+from .idtracking import Symbols
+from .idtracking import VAR_LOAD_ALIAS
+from .idtracking import VAR_LOAD_PARAMETER
+from .idtracking import VAR_LOAD_RESOLVE
+from .idtracking import VAR_LOAD_UNDEFINED
+from .nodes import EvalContext
+from .optimizer import Optimizer
+from .utils import concat
+from .visitor import NodeVisitor
operators = {
- 'eq': '==',
- 'ne': '!=',
- 'gt': '>',
- 'gteq': '>=',
- 'lt': '<',
- 'lteq': '<=',
- 'in': 'in',
- 'notin': 'not in'
+ "eq": "==",
+ "ne": "!=",
+ "gt": ">",
+ "gteq": ">=",
+ "lt": "<",
+ "lteq": "<=",
+ "in": "in",
+ "notin": "not in",
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
-if hasattr(dict, 'iteritems'):
- dict_item_iter = 'iteritems'
+if hasattr(dict, "iteritems"):
+ dict_item_iter = "iteritems"
else:
- dict_item_iter = 'items'
+ dict_item_iter = "items"
-code_features = ['division']
+code_features = ["division"]
# does this python version support generator stops? (PEP 0479)
try:
- exec('from __future__ import generator_stop')
- code_features.append('generator_stop')
+ exec("from __future__ import generator_stop")
+ code_features.append("generator_stop")
except SyntaxError:
pass
# does this python version support yield from?
try:
- exec('def f(): yield from x()')
+ exec("def f(): yield from x()")
except SyntaxError:
supports_yield_from = False
else:
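
The exec() probes in the hunk above are plain feature detection: try to compile a tiny snippet and record whether the running interpreter accepts the syntax. The same idiom as a standalone sketch:

    # Probe for "yield from" support the way the compiler module does;
    # the SyntaxError branch is taken on interpreters without PEP 380.
    try:
        exec("def _probe(): yield from ()")
        supports_yield_from = True
    except SyntaxError:
        supports_yield_from = False

    print(supports_yield_from)  # True on any Python 3.3 or later
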
@@ -68,17 +71,19 @@ def optimizeconst(f):
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
+
return update_wrapper(new_func, f)
-def generate(node, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
+def generate(
+ node, environment, name, filename, stream=None, defer_init=False, optimized=True
+):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
- raise TypeError('Can\'t compile non template nodes')
- generator = environment.code_generator_class(environment, name, filename,
- stream, defer_init,
- optimized)
+ raise TypeError("Can't compile non template nodes")
+ generator = environment.code_generator_class(
+ environment, name, filename, stream, defer_init, optimized
+ )
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
@@ -119,7 +124,6 @@ def find_undeclared(nodes, names):
class MacroRef(object):
-
def __init__(self, node):
self.node = node
self.accesses_caller = False
@@ -132,8 +136,7 @@ class Frame(object):
def __init__(self, eval_ctx, parent=None, level=None):
self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None,
- level=level)
+ self.symbols = Symbols(parent and parent.symbols or None, level=level)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
@@ -223,7 +226,7 @@ class UndeclaredNameVisitor(NodeVisitor):
self.undeclared = set()
def visit_Name(self, node):
- if node.ctx == 'load' and node.name in self.names:
+ if node.ctx == "load" and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
@@ -242,9 +245,9 @@ class CompilerExit(Exception):
class CodeGenerator(NodeVisitor):
-
- def __init__(self, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
+ def __init__(
+ self, environment, name, filename, stream=None, defer_init=False, optimized=True
+ ):
if stream is None:
stream = NativeStringIO()
self.environment = environment
@@ -306,7 +309,7 @@ class CodeGenerator(NodeVisitor):
self._param_def_block = []
# Tracks the current context.
- self._context_reference_stack = ['context']
+ self._context_reference_stack = ["context"]
# -- Various compilation helpers
@@ -317,30 +320,30 @@ class CodeGenerator(NodeVisitor):
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
- return 't_%d' % self._last_identifier
+ return "t_%d" % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
- self.writeline('%s = []' % frame.buffer)
+ self.writeline("%s = []" % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
- self.writeline('if context.eval_ctx.autoescape:')
+ self.writeline("if context.eval_ctx.autoescape:")
self.indent()
- self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
self.outdent()
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
- self.writeline('return concat(%s)' % frame.buffer)
+ self.writeline("return concat(%s)" % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
- self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
return
- self.writeline('return concat(%s)' % frame.buffer)
+ self.writeline("return concat(%s)" % frame.buffer)
def indent(self):
"""Indent by one."""
@@ -353,14 +356,14 @@ class CodeGenerator(NodeVisitor):
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
- self.writeline('yield ', node)
+ self.writeline("yield ", node)
else:
- self.writeline('%s.append(' % frame.buffer, node)
+ self.writeline("%s.append(" % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
- self.write(')')
+ self.write(")")
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
@@ -373,7 +376,7 @@ class CodeGenerator(NodeVisitor):
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
- self.writeline('pass')
+ self.writeline("pass")
for node in nodes:
self.visit(node, frame)
except CompilerExit:
@@ -383,14 +386,13 @@ class CodeGenerator(NodeVisitor):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
- self.stream.write('\n' * self._new_lines)
+ self.stream.write("\n" * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
- self.debug_info.append((self._write_debug_info,
- self.code_lineno))
+ self.debug_info.append((self._write_debug_info, self.code_lineno))
self._write_debug_info = None
self._first_write = False
- self.stream.write(' ' * self._indentation)
+ self.stream.write(" " * self._indentation)
self._new_lines = 0
self.stream.write(x)
@@ -410,7 +412,7 @@ class CodeGenerator(NodeVisitor):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
- error could occour. The extra keyword arguments should be given
+ error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
@@ -422,41 +424,41 @@ class CodeGenerator(NodeVisitor):
break
for arg in node.args:
- self.write(', ')
+ self.write(", ")
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
- self.write(', ')
+ self.write(", ")
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write(', %s=%s' % (key, value))
+ self.write(", %s=%s" % (key, value))
if node.dyn_args:
- self.write(', *')
+ self.write(", *")
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
- self.write(', **dict({')
+ self.write(", **dict({")
else:
- self.write(', **{')
+ self.write(", **{")
for kwarg in node.kwargs:
- self.write('%r: ' % kwarg.key)
+ self.write("%r: " % kwarg.key)
self.visit(kwarg.value, frame)
- self.write(', ')
+ self.write(", ")
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write('%r: %s, ' % (key, value))
+ self.write("%r: %s, " % (key, value))
if node.dyn_kwargs is not None:
- self.write('}, **')
+ self.write("}, **")
self.visit(node.dyn_kwargs, frame)
- self.write(')')
+ self.write(")")
else:
- self.write('}')
+ self.write("}")
elif node.dyn_kwargs is not None:
- self.write(', **')
+ self.write(", **")
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
@@ -464,13 +466,14 @@ class CodeGenerator(NodeVisitor):
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
- for dependency in 'filters', 'tests':
+ for dependency in "filters", "tests":
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
- self.writeline('%s = environment.%s[%r]' %
- (mapping[name], dependency, name))
+ self.writeline(
+ "%s = environment.%s[%r]" % (mapping[name], dependency, name)
+ )
def enter_frame(self, frame):
undefs = []
@@ -478,16 +481,15 @@ class CodeGenerator(NodeVisitor):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
- self.writeline('%s = %s(%r)' %
- (target, self.get_resolve_func(), param))
+ self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
elif action == VAR_LOAD_ALIAS:
- self.writeline('%s = %s' % (target, param))
+ self.writeline("%s = %s" % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
- raise NotImplementedError('unknown load instruction')
+ raise NotImplementedError("unknown load instruction")
if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
+ self.writeline("%s = missing" % " = ".join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
@@ -495,12 +497,12 @@ class CodeGenerator(NodeVisitor):
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
+ self.writeline("%s = missing" % " = ".join(undefs))
def func(self, name):
if self.environment.is_async:
- return 'async def %s' % name
- return 'def %s' % name
+ return "async def %s" % name
+ return "def %s" % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
@@ -512,16 +514,16 @@ class CodeGenerator(NodeVisitor):
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
- if arg.name == 'caller':
+ if arg.name == "caller":
explicit_caller = idx
- if arg.name in ('kwargs', 'varargs'):
+ if arg.name in ("kwargs", "varargs"):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
- undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
+ undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
- if 'caller' in undeclared:
- # In older Jinja2 versions there was a bug that allowed caller
+ if "caller" in undeclared:
+ # In older Jinja versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
@@ -531,23 +533,26 @@ class CodeGenerator(NodeVisitor):
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
- self.fail('When defining macros or call blocks the '
- 'special "caller" argument must be omitted '
- 'or be given a default.', node.lineno)
+ self.fail(
+ "When defining macros or call blocks the "
+ 'special "caller" argument must be omitted '
+ "or be given a default.",
+ node.lineno,
+ )
else:
- args.append(frame.symbols.declare_parameter('caller'))
+ args.append(frame.symbols.declare_parameter("caller"))
macro_ref.accesses_caller = True
- if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('kwargs'))
+ if "kwargs" in undeclared and "kwargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("kwargs"))
macro_ref.accesses_kwargs = True
- if 'varargs' in undeclared and not 'varargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('varargs'))
+ if "varargs" in undeclared and "varargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("varargs"))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
- self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
+ self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
self.indent()
self.buffer(frame)
@@ -556,17 +561,17 @@ class CodeGenerator(NodeVisitor):
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
- self.writeline('if %s is missing:' % ref)
+ self.writeline("if %s is missing:" % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
- self.writeline('%s = undefined(%r, name=%r)' % (
- ref,
- 'parameter %r was not provided' % arg.name,
- arg.name))
+ self.writeline(
+ "%s = undefined(%r, name=%r)"
+ % (ref, "parameter %r was not provided" % arg.name, arg.name)
+ )
else:
- self.writeline('%s = ' % ref)
+ self.writeline("%s = " % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
@@ -581,35 +586,46 @@ class CodeGenerator(NodeVisitor):
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
- arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
- name = getattr(macro_ref.node, 'name', None)
+ arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
- arg_tuple += ','
- self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
- 'context.eval_ctx.autoescape)' %
- (name, arg_tuple, macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs, macro_ref.accesses_caller))
+ arg_tuple += ","
+ self.write(
+ "Macro(environment, macro, %r, (%s), %r, %r, %r, "
+ "context.eval_ctx.autoescape)"
+ % (
+ name,
+ arg_tuple,
+ macro_ref.accesses_kwargs,
+ macro_ref.accesses_varargs,
+ macro_ref.accesses_caller,
+ )
+ )
def position(self, node):
"""Return a human readable position for the node."""
- rv = 'line %d' % node.lineno
+ rv = "line %d" % node.lineno
if self.name is not None:
- rv += ' in ' + repr(self.name)
+ rv += " in " + repr(self.name)
return rv
def dump_local_context(self, frame):
- return '{%s}' % ', '.join(
- '%r: %s' % (name, target) for name, target
- in iteritems(frame.symbols.dump_stores()))
+ return "{%s}" % ", ".join(
+ "%r: %s" % (name, target)
+ for name, target in iteritems(frame.symbols.dump_stores())
+ )
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
- self.writeline('resolve = context.resolve_or_missing')
- self.writeline('undefined = environment.undefined')
- self.writeline('if 0: yield None')
+ self.writeline("resolve = context.resolve_or_missing")
+ self.writeline("undefined = environment.undefined")
+ # always use the standard Undefined class for the implicit else of
+ # conditional expressions
+ self.writeline("cond_expr_undefined = Undefined")
+ self.writeline("if 0: yield None")
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
@@ -642,12 +658,12 @@ class CodeGenerator(NodeVisitor):
def get_resolve_func(self):
target = self._context_reference_stack[-1]
- if target == 'context':
- return 'resolve'
- return '%s.resolve' % target
+ if target == "context":
+ return "resolve"
+ return "%s.resolve" % target
def derive_context(self, frame):
- return '%s.derived(%s)' % (
+ return "%s.derived(%s)" % (
self.get_context_ref(),
self.dump_local_context(frame),
)
@@ -669,44 +685,48 @@ class CodeGenerator(NodeVisitor):
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
- public_names = [x for x in vars if x[:1] != '_']
+ public_names = [x for x in vars if x[:1] != "_"]
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
- self.writeline('context.vars[%r] = %s' % (name, ref))
+ self.writeline("context.vars[%r] = %s" % (name, ref))
else:
- self.writeline('context.vars.update({')
+ self.writeline("context.vars.update({")
for idx, name in enumerate(vars):
if idx:
- self.write(', ')
+ self.write(", ")
ref = frame.symbols.ref(name)
- self.write('%r: %s' % (name, ref))
- self.write('})')
+ self.write("%r: %s" % (name, ref))
+ self.write("})")
if public_names:
if len(public_names) == 1:
- self.writeline('context.exported_vars.add(%r)' %
- public_names[0])
+ self.writeline("context.exported_vars.add(%r)" % public_names[0])
else:
- self.writeline('context.exported_vars.update((%s))' %
- ', '.join(imap(repr, public_names)))
+ self.writeline(
+ "context.exported_vars.update((%s))"
+ % ", ".join(imap(repr, public_names))
+ )
# -- Statement Visitors
def visit_Template(self, node, frame=None):
- assert frame is None, 'no root frame allowed'
+ assert frame is None, "no root frame allowed"
eval_ctx = EvalContext(self.environment, self.name)
- from jinja2.runtime import __all__ as exported
- self.writeline('from __future__ import %s' % ', '.join(code_features))
- self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+ from .runtime import exported
+
+ self.writeline("from __future__ import %s" % ", ".join(code_features))
+ self.writeline("from jinja2.runtime import " + ", ".join(exported))
if self.environment.is_async:
- self.writeline('from jinja2.asyncsupport import auto_await, '
- 'auto_aiter, make_async_loop_context')
+ self.writeline(
+ "from jinja2.asyncsupport import auto_await, "
+ "auto_aiter, AsyncLoopContext"
+ )
# if we want a deferred initialization we cannot move the
# environment into a local name
- envenv = not self.defer_init and ', environment=environment' or ''
+ envenv = not self.defer_init and ", environment=environment" or ""
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
@@ -715,7 +735,7 @@ class CodeGenerator(NodeVisitor):
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
- self.fail('block %r defined twice' % block.name, block.lineno)
+ self.fail("block %r defined twice" % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
@@ -723,32 +743,32 @@ class CodeGenerator(NodeVisitor):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
- if '.' in imp:
- module, obj = imp.rsplit('.', 1)
- self.writeline('from %s import %s as %s' %
- (module, obj, alias))
+ if "." in imp:
+ module, obj = imp.rsplit(".", 1)
+ self.writeline("from %s import %s as %s" % (module, obj, alias))
else:
- self.writeline('import %s as %s' % (imp, alias))
+ self.writeline("import %s as %s" % (imp, alias))
# add the load name
- self.writeline('name = %r' % self.name)
+ self.writeline("name = %r" % self.name)
# generate the root render function.
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('root'), envenv), extra=1)
+ self.writeline(
+ "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
+ )
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
- if 'self' in find_undeclared(node.body, ('self',)):
- ref = frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
+ if "self" in find_undeclared(node.body, ("self",)):
+ ref = frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
- self.writeline('parent_template = None')
+ self.writeline("parent_template = None")
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
@@ -759,39 +779,42 @@ class CodeGenerator(NodeVisitor):
if have_extends:
if not self.has_known_extends:
self.indent()
- self.writeline('if parent_template is not None:')
+ self.writeline("if parent_template is not None:")
self.indent()
if supports_yield_from and not self.environment.is_async:
- self.writeline('yield from parent_template.'
- 'root_render_func(context)')
+ self.writeline("yield from parent_template.root_render_func(context)")
else:
- self.writeline('%sfor event in parent_template.'
- 'root_render_func(context):' %
- (self.environment.is_async and 'async ' or ''))
+ self.writeline(
+ "%sfor event in parent_template."
+ "root_render_func(context):"
+ % (self.environment.is_async and "async " or "")
+ )
self.indent()
- self.writeline('yield event')
+ self.writeline("yield event")
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('block_' + name), envenv),
- block, 1)
+ self.writeline(
+ "%s(context, missing=missing%s):"
+ % (self.func("block_" + name), envenv),
+ block,
+ 1,
+ )
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
- undeclared = find_undeclared(block.body, ('self', 'super'))
- if 'self' in undeclared:
- ref = block_frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- if 'super' in undeclared:
- ref = block_frame.symbols.declare_parameter('super')
- self.writeline('%s = context.super(%r, '
- 'block_%s)' % (ref, name, name))
+ undeclared = find_undeclared(block.body, ("self", "super"))
+ if "self" in undeclared:
+ ref = block_frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
+ if "super" in undeclared:
+ ref = block_frame.symbols.declare_parameter("super")
+ self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
@@ -800,13 +823,15 @@ class CodeGenerator(NodeVisitor):
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
- self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
- for x in self.blocks),
- extra=1)
+ self.writeline(
+ "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
+ extra=1,
+ )
# add a function that returns the debug info
- self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
- in self.debug_info))
+ self.writeline(
+ "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
+ )
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
@@ -817,7 +842,7 @@ class CodeGenerator(NodeVisitor):
if self.has_known_extends:
return
if self.extends_so_far > 0:
- self.writeline('if parent_template is None:')
+ self.writeline("if parent_template is None:")
self.indent()
level += 1
@@ -826,16 +851,22 @@ class CodeGenerator(NodeVisitor):
else:
context = self.get_context_ref()
- if supports_yield_from and not self.environment.is_async and \
- frame.buffer is None:
- self.writeline('yield from context.blocks[%r][0](%s)' % (
- node.name, context), node)
+ if (
+ supports_yield_from
+ and not self.environment.is_async
+ and frame.buffer is None
+ ):
+ self.writeline(
+ "yield from context.blocks[%r][0](%s)" % (node.name, context), node
+ )
else:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in context.blocks[%r][0](%s):' % (
- loop, node.name, context), node)
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
+ node,
+ )
self.indent()
- self.simple_write('event', frame)
+ self.simple_write("event", frame)
self.outdent()
self.outdent(level)
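
Before the visit_Extends hunks below, a short illustration of the user-facing behavior this block and inheritance codegen implements (template names and contents are illustrative):

    from jinja2 import DictLoader, Environment

    env = Environment(
        loader=DictLoader(
            {
                "base.html": "{% block body %}base{% endblock %}",
                "child.html": (
                    "{% extends 'base.html' %}"
                    "{% block body %}child ({{ super() }}){% endblock %}"
                ),
            }
        )
    )
    # context.blocks and context.super(), wired up by the generated
    # code shown here, are what make super() resolve to the parent block.
    print(env.get_template("child.html").render())  # -> child (base)
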
@@ -843,8 +874,7 @@ class CodeGenerator(NodeVisitor):
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
- self.fail('cannot use extend from a non top-level scope',
- node.lineno)
+ self.fail("cannot use extend from a non top-level scope", node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
@@ -856,10 +886,9 @@ class CodeGenerator(NodeVisitor):
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
- self.writeline('if parent_template is not None:')
+ self.writeline("if parent_template is not None:")
self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'extended multiple times')
+ self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
@@ -868,14 +897,14 @@ class CodeGenerator(NodeVisitor):
else:
self.outdent()
- self.writeline('parent_template = environment.get_template(', node)
+ self.writeline("parent_template = environment.get_template(", node)
self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- self.writeline('for name, parent_block in parent_template.'
- 'blocks.%s():' % dict_item_iter)
+ self.write(", %r)" % self.name)
+ self.writeline(
+ "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
+ )
self.indent()
- self.writeline('context.blocks.setdefault(name, []).'
- 'append(parent_block)')
+ self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
self.outdent()
# if this extends statement was in the root level we can take
@@ -890,52 +919,56 @@ class CodeGenerator(NodeVisitor):
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
- self.writeline('try:')
+ self.writeline("try:")
self.indent()
- func_name = 'get_or_select_template'
+ func_name = "get_or_select_template"
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
- func_name = 'get_template'
+ func_name = "get_template"
elif isinstance(node.template.value, (tuple, list)):
- func_name = 'select_template'
+ func_name = "select_template"
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
- func_name = 'select_template'
+ func_name = "select_template"
- self.writeline('template = environment.%s(' % func_name, node)
+ self.writeline("template = environment.%s(" % func_name, node)
self.visit(node.template, frame)
- self.write(', %r)' % self.name)
+ self.write(", %r)" % self.name)
if node.ignore_missing:
self.outdent()
- self.writeline('except TemplateNotFound:')
+ self.writeline("except TemplateNotFound:")
self.indent()
- self.writeline('pass')
+ self.writeline("pass")
self.outdent()
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
skip_event_yield = False
if node.with_context:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in template.root_render_func('
- 'template.new_context(context.get_all(), True, '
- '%s)):' % (loop, self.dump_local_context(frame)))
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in template.root_render_func("
+ "template.new_context(context.get_all(), True, "
+ "%s)):" % (loop, self.dump_local_context(frame))
+ )
elif self.environment.is_async:
- self.writeline('for event in (await '
- 'template._get_default_module_async())'
- '._body_stream:')
+ self.writeline(
+ "for event in (await "
+ "template._get_default_module_async())"
+ "._body_stream:"
+ )
else:
if supports_yield_from:
- self.writeline('yield from template._get_default_module()'
- '._body_stream')
+ self.writeline("yield from template._get_default_module()._body_stream")
skip_event_yield = True
else:
- self.writeline('for event in template._get_default_module()'
- '._body_stream:')
+ self.writeline(
+ "for event in template._get_default_module()._body_stream:"
+ )
if not skip_event_yield:
self.indent()
- self.simple_write('event', frame)
+ self.simple_write("event", frame)
self.outdent()
if node.ignore_missing:
@@ -943,40 +976,50 @@ class CodeGenerator(NodeVisitor):
def visit_Import(self, node, frame):
"""Visit regular imports."""
- self.writeline('%s = ' % frame.symbols.ref(node.target), node)
+ self.writeline("%s = " % frame.symbols.ref(node.target), node)
if frame.toplevel:
- self.write('context.vars[%r] = ' % node.target)
+ self.write("context.vars[%r] = " % node.target)
if self.environment.is_async:
- self.write('await ')
- self.write('environment.get_template(')
+ self.write("await ")
+ self.write("environment.get_template(")
self.visit(node.template, frame)
- self.write(', %r).' % self.name)
+ self.write(", %r)." % self.name)
if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
elif self.environment.is_async:
- self.write('_get_default_module_async()')
+ self.write("_get_default_module_async()")
else:
- self.write('_get_default_module()')
- if frame.toplevel and not node.target.startswith('_'):
- self.writeline('context.exported_vars.discard(%r)' % node.target)
+ self.write("_get_default_module()")
+ if frame.toplevel and not node.target.startswith("_"):
+ self.writeline("context.exported_vars.discard(%r)" % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
- self.write('included_template = %senvironment.get_template('
- % (self.environment.is_async and 'await ' or ''))
+ self.write(
+ "included_template = %senvironment.get_template("
+ % (self.environment.is_async and "await " or "")
+ )
self.visit(node.template, frame)
- self.write(', %r).' % self.name)
+ self.write(", %r)." % self.name)
if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
elif self.environment.is_async:
- self.write('_get_default_module_async()')
+ self.write("_get_default_module_async()")
else:
- self.write('_get_default_module()')
+ self.write("_get_default_module()")
var_names = []
discarded_names = []
@@ -985,41 +1028,51 @@ class CodeGenerator(NodeVisitor):
name, alias = name
else:
alias = name
- self.writeline('%s = getattr(included_template, '
- '%r, missing)' % (frame.symbols.ref(alias), name))
- self.writeline('if %s is missing:' % frame.symbols.ref(alias))
+ self.writeline(
+ "%s = getattr(included_template, "
+ "%r, missing)" % (frame.symbols.ref(alias), name)
+ )
+ self.writeline("if %s is missing:" % frame.symbols.ref(alias))
self.indent()
- self.writeline('%s = undefined(%r %% '
- 'included_template.__name__, '
- 'name=%r)' %
- (frame.symbols.ref(alias),
- 'the template %%r (imported on %s) does '
- 'not export the requested name %s' % (
- self.position(node),
- repr(name)
- ), name))
+ self.writeline(
+ "%s = undefined(%r %% "
+ "included_template.__name__, "
+ "name=%r)"
+ % (
+ frame.symbols.ref(alias),
+ "the template %%r (imported on %s) does "
+ "not export the requested name %s"
+ % (self.position(node), repr(name)),
+ name,
+ )
+ )
self.outdent()
if frame.toplevel:
var_names.append(alias)
- if not alias.startswith('_'):
+ if not alias.startswith("_"):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
- self.writeline('context.vars[%r] = %s' %
- (name, frame.symbols.ref(name)))
+ self.writeline(
+ "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
+ )
else:
- self.writeline('context.vars.update({%s})' % ', '.join(
- '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
- ))
+ self.writeline(
+ "context.vars.update({%s})"
+ % ", ".join(
+ "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
+ )
+ )
if discarded_names:
if len(discarded_names) == 1:
- self.writeline('context.exported_vars.discard(%r)' %
- discarded_names[0])
+ self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
else:
- self.writeline('context.exported_vars.difference_'
- 'update((%s))' % ', '.join(imap(repr, discarded_names)))
+ self.writeline(
+ "context.exported_vars.difference_"
+ "update((%s))" % ", ".join(imap(repr, discarded_names))
+ )
def visit_For(self, node, frame):
loop_frame = frame.inner()
@@ -1029,35 +1082,35 @@ class CodeGenerator(NodeVisitor):
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
- extended_loop = node.recursive or 'loop' in \
- find_undeclared(node.iter_child_nodes(
- only=('body',)), ('loop',))
+ extended_loop = node.recursive or "loop" in find_undeclared(
+ node.iter_child_nodes(only=("body",)), ("loop",)
+ )
loop_ref = None
if extended_loop:
- loop_ref = loop_frame.symbols.declare_parameter('loop')
+ loop_ref = loop_frame.symbols.declare_parameter("loop")
- loop_frame.symbols.analyze_node(node, for_branch='body')
+ loop_frame.symbols.analyze_node(node, for_branch="body")
if node.else_:
- else_frame.symbols.analyze_node(node, for_branch='else')
+ else_frame.symbols.analyze_node(node, for_branch="else")
if node.test:
loop_filter_func = self.temporary_identifier()
- test_frame.symbols.analyze_node(node, for_branch='test')
- self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
+ test_frame.symbols.analyze_node(node, for_branch="test")
+ self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and 'async for ' or 'for ')
+ self.writeline(self.environment.is_async and "async for " or "for ")
self.visit(node.target, loop_frame)
- self.write(' in ')
- self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
- self.write(':')
+ self.write(" in ")
+ self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
+ self.write(":")
self.indent()
- self.writeline('if ', node.test)
+ self.writeline("if ", node.test)
self.visit(node.test, test_frame)
- self.write(':')
+ self.write(":")
self.indent()
- self.writeline('yield ')
+ self.writeline("yield ")
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
@@ -1066,8 +1119,9 @@ class CodeGenerator(NodeVisitor):
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
- self.writeline('%s(reciter, loop_render_func, depth=0):' %
- self.func('loop'), node)
+ self.writeline(
+ "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
+ )
self.indent()
self.buffer(loop_frame)
@@ -1077,57 +1131,60 @@ class CodeGenerator(NodeVisitor):
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
- self.writeline('%s = missing' % loop_ref)
+ self.writeline("%s = missing" % loop_ref)
for name in node.find_all(nodes.Name):
- if name.ctx == 'store' and name.name == 'loop':
- self.fail('Can\'t assign to special loop variable '
- 'in for-loop target', name.lineno)
+ if name.ctx == "store" and name.name == "loop":
+ self.fail(
+ "Can't assign to special loop variable in for-loop target",
+ name.lineno,
+ )
if node.else_:
iteration_indicator = self.temporary_identifier()
- self.writeline('%s = 1' % iteration_indicator)
+ self.writeline("%s = 1" % iteration_indicator)
- self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
+ self.writeline(self.environment.is_async and "async for " or "for ", node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
- self.write(', %s in await make_async_loop_context(' % loop_ref)
+ self.write(", %s in AsyncLoopContext(" % loop_ref)
else:
- self.write(', %s in LoopContext(' % loop_ref)
+ self.write(", %s in LoopContext(" % loop_ref)
else:
- self.write(' in ')
+ self.write(" in ")
if node.test:
- self.write('%s(' % loop_filter_func)
+ self.write("%s(" % loop_filter_func)
if node.recursive:
- self.write('reciter')
+ self.write("reciter")
else:
if self.environment.is_async and not extended_loop:
- self.write('auto_aiter(')
+ self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
- self.write(')')
+ self.write(")")
if node.test:
- self.write(')')
+ self.write(")")
if node.recursive:
- self.write(', undefined, loop_render_func, depth):')
+ self.write(", undefined, loop_render_func, depth):")
else:
- self.write(extended_loop and ', undefined):' or ':')
+ self.write(extended_loop and ", undefined):" or ":")
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
- self.writeline('%s = 0' % iteration_indicator)
+ self.writeline("%s = 0" % iteration_indicator)
self.outdent()
- self.leave_frame(loop_frame, with_python_scope=node.recursive
- and not node.else_)
+ self.leave_frame(
+ loop_frame, with_python_scope=node.recursive and not node.else_
+ )
if node.else_:
- self.writeline('if %s:' % iteration_indicator)
+ self.writeline("if %s:" % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
@@ -1141,33 +1198,33 @@ class CodeGenerator(NodeVisitor):
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
- self.write('await ')
- self.write('loop(')
+ self.write("await ")
+ self.write("loop(")
if self.environment.is_async:
- self.write('auto_aiter(')
+ self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async:
- self.write(')')
- self.write(', loop)')
+ self.write(")")
+ self.write(", loop)")
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
- self.writeline('if ', node)
+ self.writeline("if ", node)
self.visit(node.test, if_frame)
- self.write(':')
+ self.write(":")
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
- self.writeline('elif ', elif_)
+ self.writeline("elif ", elif_)
self.visit(elif_.test, if_frame)
- self.write(':')
+ self.write(":")
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
@@ -1176,16 +1233,15 @@ class CodeGenerator(NodeVisitor):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
- if not node.name.startswith('_'):
- self.write('context.exported_vars.add(%r)' % node.name)
- ref = frame.symbols.ref(node.name)
- self.writeline('context.vars[%r] = ' % node.name)
- self.write('%s = ' % frame.symbols.ref(node.name))
+ if not node.name.startswith("_"):
+ self.write("context.exported_vars.add(%r)" % node.name)
+ self.writeline("context.vars[%r] = " % node.name)
+ self.write("%s = " % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
- self.writeline('caller = ')
+ self.writeline("caller = ")
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
@@ -1206,10 +1262,10 @@ class CodeGenerator(NodeVisitor):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
- for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
+ for target, expr in izip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
- self.write(' = ')
+ self.write(" = ")
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
@@ -1218,156 +1274,187 @@ class CodeGenerator(NodeVisitor):
self.newline(node)
self.visit(node.node, frame)
- def visit_Output(self, node, frame):
- # if we have a known extends statement, we don't output anything
- # if we are in a require_output_check section
- if self.has_known_extends and frame.require_output_check:
- return
+ _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
+ #: The default finalize function if the environment isn't configured
+ #: with one. Or if the environment has one, this is called on that
+ #: function's output for constants.
+ _default_finalize = text_type
+ _finalize = None
+
+ def _make_finalize(self):
+ """Build the finalize function to be used on constants and at
+ runtime. Cached so it's only created once for all output nodes.
+
+ Returns a ``namedtuple`` with the following attributes:
+
+ ``const``
+ A function to finalize constant data at compile time.
+
+ ``src``
+ Source code to output around nodes to be evaluated at
+ runtime.
+ """
+ if self._finalize is not None:
+ return self._finalize
+
+ finalize = default = self._default_finalize
+ src = None
- allow_constant_finalize = True
if self.environment.finalize:
- func = self.environment.finalize
- if getattr(func, 'contextfunction', False) or \
- getattr(func, 'evalcontextfunction', False):
- allow_constant_finalize = False
- elif getattr(func, 'environmentfunction', False):
- finalize = lambda x: text_type(
- self.environment.finalize(self.environment, x))
- else:
- finalize = lambda x: text_type(self.environment.finalize(x))
+ src = "environment.finalize("
+ env_finalize = self.environment.finalize
+
+ def finalize(value):
+ return default(env_finalize(value))
+
+ if getattr(env_finalize, "contextfunction", False) is True:
+ src += "context, "
+ finalize = None # noqa: F811
+ elif getattr(env_finalize, "evalcontextfunction", False) is True:
+ src += "context.eval_ctx, "
+ finalize = None
+ elif getattr(env_finalize, "environmentfunction", False) is True:
+ src += "environment, "
+
+ def finalize(value):
+ return default(env_finalize(self.environment, value))
+
+ self._finalize = self._FinalizeInfo(finalize, src)
+ return self._finalize
+
+ def _output_const_repr(self, group):
+ """Given a group of constant values converted from ``Output``
+ child nodes, produce a string to write to the template module
+ source.
+ """
+ return repr(concat(group))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ """Try to optimize a child of an ``Output`` node by trying to
+ convert it to constant, finalized data at compile time.
+
+ If :exc:`Impossible` is raised, the node is not constant and
+ will be evaluated at runtime. Any other exception will also be
+ evaluated at runtime for easier debugging.
+ """
+ const = node.as_const(frame.eval_ctx)
+
+ if frame.eval_ctx.autoescape:
+ const = escape(const)
+
+ # Template data doesn't go through finalize.
+ if isinstance(node, nodes.TemplateData):
+ return text_type(const)
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ """Output extra source code before visiting a child of an
+ ``Output`` node.
+ """
+ if frame.eval_ctx.volatile:
+ self.write("(escape if context.eval_ctx.autoescape else to_string)(")
+ elif frame.eval_ctx.autoescape:
+ self.write("escape(")
else:
- finalize = text_type
+ self.write("to_string(")
+
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ """Output extra source code after visiting a child of an
+ ``Output`` node.
+ """
+ self.write(")")
+
+ if finalize.src is not None:
+ self.write(")")
- # if we are inside a frame that requires output checking, we do so
- outdent_later = False
+ def visit_Output(self, node, frame):
+ # If an extends is active, don't render outside a block.
if frame.require_output_check:
- self.writeline('if parent_template is None:')
+ # A top-level extends is known to exist at compile time.
+ if self.has_known_extends:
+ return
+
+ self.writeline("if parent_template is None:")
self.indent()
- outdent_later = True
- # try to evaluate as many chunks as possible into a static
- # string at compile time.
+ finalize = self._make_finalize()
body = []
+
+ # Evaluate constants at compile time if possible. Each item in
+ # body will be either a list of static data or a node to be
+ # evaluated at runtime.
for child in node.nodes:
try:
- if not allow_constant_finalize:
+ if not (
+ # If the finalize function requires runtime context,
+ # constants can't be evaluated at compile time.
+ finalize.const
+ # Unless it's basic template data that won't be
+ # finalized anyway.
+ or isinstance(child, nodes.TemplateData)
+ ):
raise nodes.Impossible()
- const = child.as_const(frame.eval_ctx)
- except nodes.Impossible:
- body.append(child)
- continue
- # the frame can't be volatile here, because otherwise the
- # as_const() function would raise an Impossible exception
- # at that point.
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
- const = finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node
- # at runtime for easier debugging
+
+ const = self._output_child_to_const(child, frame, finalize)
+ except (nodes.Impossible, Exception):
+ # The node was not constant and needs to be evaluated at
+ # runtime. Or another error was raised, which is easier
+ # to debug at runtime.
body.append(child)
continue
+
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
- # if we have less than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
+ if frame.buffer is not None:
+ if len(body) == 1:
+ self.writeline("%s.append(" % frame.buffer)
+ else:
+ self.writeline("%s.extend((" % frame.buffer)
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ # A group of constant data to join and output.
+ val = self._output_const_repr(item)
+
+ if frame.buffer is None:
+ self.writeline("yield " + val)
else:
- self.writeline('%s.extend((' % frame.buffer)
- self.indent()
- for item in body:
- if isinstance(item, list):
- val = repr(concat(item))
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
+ self.writeline(val + ",")
+ else:
+ if frame.buffer is None:
+ self.writeline("yield ", item)
else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
- close = 1
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape'
- ' else to_string)(')
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- else:
- self.write('to_string(')
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- "contextfunction", False):
- self.write('context, ')
- close += 1
- self.visit(item, frame)
- self.write(')' * close)
- if frame.buffer is not None:
- self.write(',')
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
+ self.newline(item)
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
- for item in body:
- if isinstance(item, list):
- format.append(concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
- for argument in arguments:
- self.newline(argument)
- close = 0
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape else'
- ' to_string)(')
- close += 1
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- close += 1
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- 'contextfunction', False):
- self.write('context, ')
- elif getattr(self.environment.finalize,
- 'evalcontextfunction', False):
- self.write('context.eval_ctx, ')
- elif getattr(self.environment.finalize,
- 'environmentfunction', False):
- self.write('environment, ')
- close += 1
- self.visit(argument, frame)
- self.write(')' * close + ', ')
+ # A node to be evaluated at runtime.
+ self._output_child_pre(item, frame, finalize)
+ self.visit(item, frame)
+ self._output_child_post(item, frame, finalize)
+
+ if frame.buffer is not None:
+ self.write(",")
+
+ if frame.buffer is not None:
self.outdent()
- self.writeline(')')
+ self.writeline(")" if len(body) == 1 else "))")
- if outdent_later:
+ if frame.require_output_check:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
- self.write(' = ')
+ self.write(" = ")
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
@@ -1384,20 +1471,19 @@ class CodeGenerator(NodeVisitor):
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
- self.write(' = (Markup if context.eval_ctx.autoescape '
- 'else identity)(')
+ self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
- self.write('concat(%s)' % block_frame.buffer)
- self.write(')')
+ self.write("concat(%s)" % block_frame.buffer)
+ self.write(")")
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
- if node.ctx == 'store' and frame.toplevel:
+ if node.ctx == "store" and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
@@ -1405,12 +1491,17 @@ class CodeGenerator(NodeVisitor):
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
- if node.ctx == 'load':
+ if node.ctx == "load":
load = frame.symbols.find_load(ref)
- if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
- not self.parameter_is_undeclared(ref)):
- self.write('(undefined(name=%r) if %s is missing else %s)' %
- (node.name, ref, ref))
+ if not (
+ load is not None
+ and load[0] == VAR_LOAD_PARAMETER
+ and not self.parameter_is_undeclared(ref)
+ ):
+ self.write(
+ "(undefined(name=%r) if %s is missing else %s)"
+ % (node.name, ref, ref)
+ )
return
self.write(ref)
@@ -1420,12 +1511,14 @@ class CodeGenerator(NodeVisitor):
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
- self.writeline('if not isinstance(%s, Namespace):' % ref)
+ self.writeline("if not isinstance(%s, Namespace):" % ref)
self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'cannot assign attribute on non-namespace object')
+ self.writeline(
+ "raise TemplateRuntimeError(%r)"
+ % "cannot assign attribute on non-namespace object"
+ )
self.outdent()
- self.writeline('%s[%r]' % (ref, node.attr))
+ self.writeline("%s[%r]" % (ref, node.attr))
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
@@ -1438,230 +1531,256 @@ class CodeGenerator(NodeVisitor):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
- self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
- % node.data)
+ self.write(
+ "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
+ )
def visit_Tuple(self, node, frame):
- self.write('(')
+ self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item, frame)
- self.write(idx == 0 and ',)' or ')')
+ self.write(idx == 0 and ",)" or ")")
def visit_List(self, node, frame):
- self.write('[')
+ self.write("[")
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item, frame)
- self.write(']')
+ self.write("]")
def visit_Dict(self, node, frame):
- self.write('{')
+ self.write("{")
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item.key, frame)
- self.write(': ')
+ self.write(": ")
self.visit(item.value, frame)
- self.write('}')
+ self.write("}")
- def binop(operator, interceptable=True):
+ def binop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_binops:
- self.write('environment.call_binop(context, %r, ' % operator)
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_binops
+ ):
+ self.write("environment.call_binop(context, %r, " % operator)
self.visit(node.left, frame)
- self.write(', ')
+ self.write(", ")
self.visit(node.right, frame)
else:
- self.write('(')
+ self.write("(")
self.visit(node.left, frame)
- self.write(' %s ' % operator)
+ self.write(" %s " % operator)
self.visit(node.right, frame)
- self.write(')')
+ self.write(")")
+
return visitor
- def uaop(operator, interceptable=True):
+ def uaop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_unops:
- self.write('environment.call_unop(context, %r, ' % operator)
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_unops
+ ):
+ self.write("environment.call_unop(context, %r, " % operator)
self.visit(node.node, frame)
else:
- self.write('(' + operator)
+ self.write("(" + operator)
self.visit(node.node, frame)
- self.write(')')
+ self.write(")")
+
return visitor
- visit_Add = binop('+')
- visit_Sub = binop('-')
- visit_Mul = binop('*')
- visit_Div = binop('/')
- visit_FloorDiv = binop('//')
- visit_Pow = binop('**')
- visit_Mod = binop('%')
- visit_And = binop('and', interceptable=False)
- visit_Or = binop('or', interceptable=False)
- visit_Pos = uaop('+')
- visit_Neg = uaop('-')
- visit_Not = uaop('not ', interceptable=False)
+ visit_Add = binop("+")
+ visit_Sub = binop("-")
+ visit_Mul = binop("*")
+ visit_Div = binop("/")
+ visit_FloorDiv = binop("//")
+ visit_Pow = binop("**")
+ visit_Mod = binop("%")
+ visit_And = binop("and", interceptable=False)
+ visit_Or = binop("or", interceptable=False)
+ visit_Pos = uaop("+")
+ visit_Neg = uaop("-")
+ visit_Not = uaop("not ", interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
- func_name = '(context.eval_ctx.volatile and' \
- ' markup_join or unicode_join)'
+ func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
elif frame.eval_ctx.autoescape:
- func_name = 'markup_join'
+ func_name = "markup_join"
else:
- func_name = 'unicode_join'
- self.write('%s((' % func_name)
+ func_name = "unicode_join"
+ self.write("%s((" % func_name)
for arg in node.nodes:
self.visit(arg, frame)
- self.write(', ')
- self.write('))')
+ self.write(", ")
+ self.write("))")
@optimizeconst
def visit_Compare(self, node, frame):
+ self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
+ self.write(")")
def visit_Operand(self, node, frame):
- self.write(' %s ' % operators[node.op])
+ self.write(" %s " % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
- self.write('environment.getattr(')
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getattr(")
self.visit(node.node, frame)
- self.write(', %r)' % node.attr)
+ self.write(", %r)" % node.attr)
+
+ if self.environment.is_async:
+ self.write("))")
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
- self.write('[')
+ self.write("[")
self.visit(node.arg, frame)
- self.write(']')
+ self.write("]")
else:
- self.write('environment.getitem(')
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getitem(")
self.visit(node.node, frame)
- self.write(', ')
+ self.write(", ")
self.visit(node.arg, frame)
- self.write(')')
+ self.write(")")
+
+ if self.environment.is_async:
+ self.write("))")
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
- self.write(':')
+ self.write(":")
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
- self.write(':')
+ self.write(":")
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
- self.write('await auto_await(')
- self.write(self.filters[node.name] + '(')
+ self.write("await auto_await(")
+ self.write(self.filters[node.name] + "(")
func = self.environment.filters.get(node.name)
if func is None:
- self.fail('no filter named %r' % node.name, node.lineno)
- if getattr(func, 'contextfilter', False):
- self.write('context, ')
- elif getattr(func, 'evalcontextfilter', False):
- self.write('context.eval_ctx, ')
- elif getattr(func, 'environmentfilter', False):
- self.write('environment, ')
+ self.fail("no filter named %r" % node.name, node.lineno)
+ if getattr(func, "contextfilter", False) is True:
+ self.write("context, ")
+ elif getattr(func, "evalcontextfilter", False) is True:
+ self.write("context.eval_ctx, ")
+ elif getattr(func, "environmentfilter", False) is True:
+ self.write("environment, ")
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
- self.write('(context.eval_ctx.autoescape and'
- ' Markup(concat(%s)) or concat(%s))' %
- (frame.buffer, frame.buffer))
+ self.write(
+ "(context.eval_ctx.autoescape and"
+ " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
+ )
elif frame.eval_ctx.autoescape:
- self.write('Markup(concat(%s))' % frame.buffer)
+ self.write("Markup(concat(%s))" % frame.buffer)
else:
- self.write('concat(%s)' % frame.buffer)
+ self.write("concat(%s)" % frame.buffer)
self.signature(node, frame)
- self.write(')')
+ self.write(")")
if self.environment.is_async:
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + '(')
+ self.write(self.tests[node.name] + "(")
if node.name not in self.environment.tests:
- self.fail('no test named %r' % node.name, node.lineno)
+ self.fail("no test named %r" % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
- self.write('undefined(%r)' % ('the inline if-'
- 'expression on %s evaluated to false and '
- 'no else section was defined.' % self.position(node)))
-
- self.write('(')
+ self.write(
+ "cond_expr_undefined(%r)"
+ % (
+ "the inline if-"
+ "expression on %s evaluated to false and "
+ "no else section was defined." % self.position(node)
+ )
+ )
+
+ self.write("(")
self.visit(node.expr1, frame)
- self.write(' if ')
+ self.write(" if ")
self.visit(node.test, frame)
- self.write(' else ')
+ self.write(" else ")
write_expr2()
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
- self.write('await auto_await(')
+ self.write("await auto_await(")
if self.environment.sandboxed:
- self.write('environment.call(context, ')
+ self.write("environment.call(context, ")
else:
- self.write('context.call(')
+ self.write("context.call(")
self.visit(node.node, frame)
- extra_kwargs = forward_caller and {'caller': 'caller'} or None
+ extra_kwargs = forward_caller and {"caller": "caller"} or None
self.signature(node, frame, extra_kwargs)
- self.write(')')
+ self.write(")")
if self.environment.is_async:
- self.write(')')
+ self.write(")")
def visit_Keyword(self, node, frame):
- self.write(node.key + '=')
+ self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
- self.write('Markup(')
+ self.write("Markup(")
self.visit(node.expr, frame)
- self.write(')')
+ self.write(")")
def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write('(context.eval_ctx.autoescape and Markup or identity)(')
+ self.write("(context.eval_ctx.autoescape and Markup or identity)(")
self.visit(node.expr, frame)
- self.write(')')
+ self.write(")")
def visit_EnvironmentAttribute(self, node, frame):
- self.write('environment.' + node.name)
+ self.write("environment." + node.name)
def visit_ExtensionAttribute(self, node, frame):
- self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+ self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
@@ -1670,13 +1789,16 @@ class CodeGenerator(NodeVisitor):
self.write(node.name)
def visit_ContextReference(self, node, frame):
- self.write('context')
+ self.write("context")
+
+ def visit_DerivedContextReference(self, node, frame):
+ self.write(self.derive_context(frame))
def visit_Continue(self, node, frame):
- self.writeline('continue', node)
+ self.writeline("continue", node)
def visit_Break(self, node, frame):
- self.writeline('break', node)
+ self.writeline("break", node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
@@ -1687,8 +1809,8 @@ class CodeGenerator(NodeVisitor):
def visit_OverlayScope(self, node, frame):
ctx = self.temporary_identifier()
- self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
- self.writeline('%s.vars = ' % ctx)
+ self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
+ self.writeline("%s.vars = " % ctx)
self.visit(node.context, frame)
self.push_context_reference(ctx)
@@ -1701,7 +1823,7 @@ class CodeGenerator(NodeVisitor):
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
- self.writeline('context.eval_ctx.%s = ' % keyword.key)
+ self.writeline("context.eval_ctx.%s = " % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
@@ -1713,9 +1835,9 @@ class CodeGenerator(NodeVisitor):
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
- self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
+ self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
- self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
+ self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
diff --git a/lib/spack/external/jinja2/constants.py b/lib/spack/external/jinja2/constants.py
index 11efd1ed15..bf7f2ca721 100644
--- a/lib/spack/external/jinja2/constants.py
+++ b/lib/spack/external/jinja2/constants.py
@@ -1,17 +1,6 @@
# -*- coding: utf-8 -*-
-"""
- jinja.constants
- ~~~~~~~~~~~~~~~
-
- Various constants.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-
#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u'''\
+LOREM_IPSUM_WORDS = u"""\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
@@ -29,4 +18,4 @@ ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-viverra volutpat vulputate'''
+viverra volutpat vulputate"""
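Only the quoting of LOREM_IPSUM_WORDS changes here; the word list still feeds the
lipsum() global (via generate_lorem_ipsum in jinja2.utils). A quick usage sketch,
assuming a stock jinja2 install:

    from jinja2 import Environment

    env = Environment()
    # One plain-text paragraph of 5-12 words drawn from LOREM_IPSUM_WORDS.
    print(env.from_string("{{ lipsum(n=1, html=False, min=5, max=12) }}").render())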
diff --git a/lib/spack/external/jinja2/debug.py b/lib/spack/external/jinja2/debug.py
index b61139f0cd..5d8aec31d0 100644
--- a/lib/spack/external/jinja2/debug.py
+++ b/lib/spack/external/jinja2/debug.py
@@ -1,372 +1,268 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.debug
- ~~~~~~~~~~~~
-
- Implements the debug interface for Jinja. This module does some pretty
- ugly stuff with the Python traceback system in order to achieve tracebacks
- with correct line numbers, locals and contents.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
import sys
-import traceback
-from types import TracebackType, CodeType
-from jinja2.utils import missing, internal_code
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2._compat import iteritems, reraise, PY2
+from types import CodeType
-# on pypy we can take advantage of transparent proxies
-try:
- from __pypy__ import tproxy
-except ImportError:
- tproxy = None
+from . import TemplateSyntaxError
+from ._compat import PYPY
+from .utils import internal_code
+from .utils import missing
-# how does the raise helper look like?
-try:
- exec("raise TypeError, 'foo'")
-except SyntaxError:
- raise_helper = 'raise __jinja_exception__[1]'
-except TypeError:
- raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
+def rewrite_traceback_stack(source=None):
+ """Rewrite the current exception to replace any tracebacks from
+ within compiled template code with tracebacks that look like they
+ came from the template source.
+ This must be called within an ``except`` block.
-class TracebackFrameProxy(object):
- """Proxies a traceback frame."""
+ :param source: For ``TemplateSyntaxError``, the original source if
+ known.
+ :return: A :meth:`sys.exc_info` tuple that can be re-raised.
+ """
+ exc_type, exc_value, tb = sys.exc_info()
- def __init__(self, tb):
- self.tb = tb
- self._tb_next = None
+ if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
+ exc_value.translated = True
+ exc_value.source = source
- @property
- def tb_next(self):
- return self._tb_next
+ try:
+ # Remove the old traceback on Python 3, otherwise the frames
+ # from the compiler still show up.
+ exc_value.with_traceback(None)
+ except AttributeError:
+ pass
- def set_next(self, next):
- if tb_set_next is not None:
- try:
- tb_set_next(self.tb, next and next.tb or None)
- except Exception:
- # this function can fail due to all the hackery it does
- # on various python implementations. We just catch errors
- # down and ignore them if necessary.
- pass
- self._tb_next = next
-
- @property
- def is_jinja_frame(self):
- return '__jinja_template__' in self.tb.tb_frame.f_globals
-
- def __getattr__(self, name):
- return getattr(self.tb, name)
-
-
-def make_frame_proxy(frame):
- proxy = TracebackFrameProxy(frame)
- if tproxy is None:
- return proxy
- def operation_handler(operation, *args, **kwargs):
- if operation in ('__getattribute__', '__getattr__'):
- return getattr(proxy, args[0])
- elif operation == '__setattr__':
- proxy.__setattr__(*args, **kwargs)
- else:
- return getattr(proxy, operation)(*args, **kwargs)
- return tproxy(TracebackType, operation_handler)
-
-
-class ProcessedTraceback(object):
- """Holds a Jinja preprocessed traceback for printing or reraising."""
-
- def __init__(self, exc_type, exc_value, frames):
- assert frames, 'no frames for this traceback?'
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.frames = frames
-
- # newly concatenate the frames (which are proxies)
- prev_tb = None
- for tb in self.frames:
- if prev_tb is not None:
- prev_tb.set_next(tb)
- prev_tb = tb
- prev_tb.set_next(None)
-
- def render_as_text(self, limit=None):
- """Return a string with the traceback."""
- lines = traceback.format_exception(self.exc_type, self.exc_value,
- self.frames[0], limit=limit)
- return ''.join(lines).rstrip()
-
- def render_as_html(self, full=False):
- """Return a unicode string with the traceback as rendered HTML."""
- from jinja2.debugrenderer import render_traceback
- return u'%s\n\n<!--\n%s\n-->' % (
- render_traceback(self, full=full),
- self.render_as_text().decode('utf-8', 'replace')
+ # Outside of runtime, so the frame isn't executing template
+ # code, but it still needs to point at the template.
+ tb = fake_traceback(
+ exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
)
-
- @property
- def is_template_syntax_error(self):
- """`True` if this is a template syntax error."""
- return isinstance(self.exc_value, TemplateSyntaxError)
-
- @property
- def exc_info(self):
- """Exception info tuple with a proxy around the frame objects."""
- return self.exc_type, self.exc_value, self.frames[0]
-
- @property
- def standard_exc_info(self):
- """Standard python exc_info for re-raising"""
- tb = self.frames[0]
- # the frame will be an actual traceback (or transparent proxy) if
- # we are on pypy or a python implementation with support for tproxy
- if type(tb) is not TracebackType:
- tb = tb.tb
- return self.exc_type, self.exc_value, tb
-
-
-def make_traceback(exc_info, source_hint=None):
- """Creates a processed traceback object from the exc_info."""
- exc_type, exc_value, tb = exc_info
- if isinstance(exc_value, TemplateSyntaxError):
- exc_info = translate_syntax_error(exc_value, source_hint)
- initial_skip = 0
else:
- initial_skip = 1
- return translate_exception(exc_info, initial_skip)
-
-
-def translate_syntax_error(error, source=None):
- """Rewrites a syntax error to please traceback systems."""
- error.source = source
- error.translated = True
- exc_info = (error.__class__, error, None)
- filename = error.filename
- if filename is None:
- filename = '<unknown>'
- return fake_exc_info(exc_info, filename, error.lineno)
+ # Skip the frame for the render function.
+ tb = tb.tb_next
+ stack = []
-def translate_exception(exc_info, initial_skip=0):
- """If passed an exc_info it will automatically rewrite the exceptions
- all the way down to the correct line numbers and frames.
- """
- tb = exc_info[2]
- frames = []
-
- # skip some internal frames if wanted
- for x in range(initial_skip):
- if tb is not None:
- tb = tb.tb_next
- initial_tb = tb
-
+ # Build the stack of traceback object, replacing any in template
+ # code with the source file and line information.
while tb is not None:
- # skip frames decorated with @internalcode. These are internal
- # calls we can't avoid and that are useless in template debugging
- # output.
+ # Skip frames decorated with @internalcode. These are internal
+ # calls that aren't useful in template debugging output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
- # save a reference to the next frame if we override the current
- # one with a faked one.
- next = tb.tb_next
+ template = tb.tb_frame.f_globals.get("__jinja_template__")
- # fake template exceptions
- template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
- tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
- lineno)[2]
+ fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+ stack.append(fake_tb)
+ else:
+ stack.append(tb)
- frames.append(make_frame_proxy(tb))
- tb = next
+ tb = tb.tb_next
- # if we don't have any exceptions in the frames left, we have to
- # reraise it unchanged.
- # XXX: can we backup here? when could this happen?
- if not frames:
- reraise(exc_info[0], exc_info[1], exc_info[2])
+ tb_next = None
- return ProcessedTraceback(exc_info[0], exc_info[1], frames)
+ # Assign tb_next in reverse to avoid circular references.
+ for tb in reversed(stack):
+ tb_next = tb_set_next(tb, tb_next)
+ return exc_type, exc_value, tb_next
-def get_jinja_locals(real_locals):
- ctx = real_locals.get('context')
- if ctx:
- locals = ctx.get_all().copy()
+
+def fake_traceback(exc_value, tb, filename, lineno):
+ """Produce a new traceback object that looks like it came from the
+ template source instead of the compiled code. The filename, line
+ number, and location name will point to the template, and the local
+ variables will be the current template context.
+
+ :param exc_value: The original exception to be re-raised to create
+ the new traceback.
+ :param tb: The original traceback to get the local variables and
+ code info from.
+ :param filename: The template filename.
+ :param lineno: The line number in the template source.
+ """
+ if tb is not None:
+ # Replace the real locals with the context that would be
+ # available at that point in the template.
+ locals = get_template_locals(tb.tb_frame.f_locals)
+ locals.pop("__jinja_exception__", None)
else:
locals = {}
+ globals = {
+ "__name__": filename,
+ "__file__": filename,
+ "__jinja_exception__": exc_value,
+ }
+ # Raise an exception at the correct line number.
+ code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
+
+ # Build a new code object that points to the template file and
+ # replaces the location with a block name.
+ try:
+ location = "template"
+
+ if tb is not None:
+ function = tb.tb_frame.f_code.co_name
+
+ if function == "root":
+ location = "top-level template code"
+ elif function.startswith("block_"):
+ location = 'block "%s"' % function[6:]
+
+ # Collect arguments for the new code object. CodeType only
+ # accepts positional arguments, and arguments were inserted in
+ # new Python versions.
+ code_args = []
+
+ for attr in (
+ "argcount",
+ "posonlyargcount", # Python 3.8
+ "kwonlyargcount", # Python 3
+ "nlocals",
+ "stacksize",
+ "flags",
+ "code", # codestring
+ "consts", # constants
+ "names",
+ "varnames",
+ ("filename", filename),
+ ("name", location),
+ "firstlineno",
+ "lnotab",
+ "freevars",
+ "cellvars",
+ ):
+ if isinstance(attr, tuple):
+ # Replace with given value.
+ code_args.append(attr[1])
+ continue
+
+ try:
+ # Copy original value if it exists.
+ code_args.append(getattr(code, "co_" + attr))
+ except AttributeError:
+ # Some arguments were added later.
+ continue
+
+ code = CodeType(*code_args)
+ except Exception:
+ # Some environments such as Google App Engine don't support
+ # modifying code objects.
+ pass
+
+ # Execute the new code, which is guaranteed to raise, and return
+ # the new traceback without this frame.
+ try:
+ exec(code, globals, locals)
+ except BaseException:
+ return sys.exc_info()[2].tb_next
+
+
+def get_template_locals(real_locals):
+ """Based on the runtime locals, get the context that would be
+ available at that point in the template.
+ """
+ # Start with the current template context.
+ ctx = real_locals.get("context")
+
+ if ctx:
+ data = ctx.get_all().copy()
+ else:
+ data = {}
+
+ # Might be in a derived context that only sets local variables
+ # rather than pushing a context. Local variables follow the scheme
+ # l_depth_name. Find the highest-depth local that has a value for
+ # each name.
local_overrides = {}
- for name, value in iteritems(real_locals):
- if not name.startswith('l_') or value is missing:
+ for name, value in real_locals.items():
+ if not name.startswith("l_") or value is missing:
+ # Not a template variable, or no longer relevant.
continue
+
try:
- _, depth, name = name.split('_', 2)
+ _, depth, name = name.split("_", 2)
depth = int(depth)
except ValueError:
continue
+
cur_depth = local_overrides.get(name, (-1,))[0]
+
if cur_depth < depth:
local_overrides[name] = (depth, value)
- for name, (_, value) in iteritems(local_overrides):
+ # Modify the context with any derived context.
+ for name, (_, value) in local_overrides.items():
if value is missing:
- locals.pop(name, None)
+ data.pop(name, None)
else:
- locals[name] = value
+ data[name] = value
- return locals
+ return data
-def fake_exc_info(exc_info, filename, lineno):
- """Helper for `translate_exception`."""
- exc_type, exc_value, tb = exc_info
+if sys.version_info >= (3, 7):
+ # tb_next is directly assignable as of Python 3.7
+ def tb_set_next(tb, tb_next):
+ tb.tb_next = tb_next
+ return tb
- # figure the real context out
- if tb is not None:
- locals = get_jinja_locals(tb.tb_frame.f_locals)
- # if there is a local called __jinja_exception__, we get
- # rid of it to not break the debug functionality.
- locals.pop('__jinja_exception__', None)
- else:
- locals = {}
-
- # assemble fake globals we need
- globals = {
- '__name__': filename,
- '__file__': filename,
- '__jinja_exception__': exc_info[:2],
-
- # we don't want to keep the reference to the template around
- # to not cause circular dependencies, but we mark it as Jinja
- # frame for the ProcessedTraceback
- '__jinja_template__': None
- }
-
- # and fake the exception
- code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
-
- # if it's possible, change the name of the code. This won't work
- # on some python environments such as google appengine
+elif PYPY:
+ # PyPy might have special support, and won't work with ctypes.
try:
- if tb is None:
- location = 'template'
- else:
- function = tb.tb_frame.f_code.co_name
- if function == 'root':
- location = 'top-level template code'
- elif function.startswith('block_'):
- location = 'block "%s"' % function[6:]
- else:
- location = 'template'
-
- if PY2:
- code = CodeType(0, code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- else:
- code = CodeType(0, code.co_kwonlyargcount,
- code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- except Exception as e:
- pass
+ import tputil
+ except ImportError:
+ # Without tproxy support, use the original traceback.
+ def tb_set_next(tb, tb_next):
+ return tb
- # execute the code and catch the new traceback
- try:
- exec(code, globals, locals)
- except:
- exc_info = sys.exc_info()
- new_tb = exc_info[2].tb_next
+ else:
+ # With tproxy support, create a proxy around the traceback that
+ # returns the new tb_next.
+ def tb_set_next(tb, tb_next):
+ def controller(op):
+ if op.opname == "__getattribute__" and op.args[0] == "tb_next":
+ return tb_next
- # return without this frame
- return exc_info[:2] + (new_tb,)
+ return op.delegate()
+ return tputil.make_proxy(controller, obj=tb)
-def _init_ugly_crap():
- """This function implements a few ugly things so that we can patch the
- traceback objects. The function returned allows resetting `tb_next` on
- any python traceback object. Do not attempt to use this on non cpython
- interpreters
- """
- import ctypes
- from types import TracebackType
- if PY2:
- # figure out size of _Py_ssize_t for Python 2:
- if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
- _Py_ssize_t = ctypes.c_int64
- else:
- _Py_ssize_t = ctypes.c_int
- else:
- # platform ssize_t on Python 3
- _Py_ssize_t = ctypes.c_ssize_t
+else:
+ # Use ctypes to assign tb_next at the C level since it's read-only
+ # from Python.
+ import ctypes
- # regular python
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- # python with trace
- if hasattr(sys, 'getobjects'):
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('_ob_next', ctypes.POINTER(_PyObject)),
- ('_ob_prev', ctypes.POINTER(_PyObject)),
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
+ class _CTraceback(ctypes.Structure):
+ _fields_ = [
+ # Extra PyObject slots when compiled with Py_TRACE_REFS.
+ ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
+ # Only care about tb_next as an object, not a traceback.
+ ("tb_next", ctypes.py_object),
]
- class _Traceback(_PyObject):
- pass
- _Traceback._fields_ = [
- ('tb_next', ctypes.POINTER(_Traceback)),
- ('tb_frame', ctypes.POINTER(_PyObject)),
- ('tb_lasti', ctypes.c_int),
- ('tb_lineno', ctypes.c_int)
- ]
-
- def tb_set_next(tb, next):
- """Set the tb_next attribute of a traceback object."""
- if not (isinstance(tb, TracebackType) and
- (next is None or isinstance(next, TracebackType))):
- raise TypeError('tb_set_next arguments must be traceback objects')
- obj = _Traceback.from_address(id(tb))
- if tb.tb_next is not None:
- old = _Traceback.from_address(id(tb.tb_next))
- old.ob_refcnt -= 1
- if next is None:
- obj.tb_next = ctypes.POINTER(_Traceback)()
- else:
- next = _Traceback.from_address(id(next))
- next.ob_refcnt += 1
- obj.tb_next = ctypes.pointer(next)
+ def tb_set_next(tb, tb_next):
+ c_tb = _CTraceback.from_address(id(tb))
- return tb_set_next
+ # Clear out the old tb_next.
+ if tb.tb_next is not None:
+ c_tb_next = ctypes.py_object(tb.tb_next)
+ c_tb.tb_next = ctypes.py_object()
+ ctypes.pythonapi.Py_DecRef(c_tb_next)
+ # Assign the new tb_next.
+ if tb_next is not None:
+ c_tb_next = ctypes.py_object(tb_next)
+ ctypes.pythonapi.Py_IncRef(c_tb_next)
+ c_tb.tb_next = c_tb_next
-# try to get a tb_set_next implementation if we don't have transparent
-# proxies.
-tb_set_next = None
-if tproxy is None:
- try:
- tb_set_next = _init_ugly_crap()
- except:
- pass
- del _init_ugly_crap
+ return tb
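The rewritten debug module drops the tproxy/ProcessedTraceback machinery in favor of
building real traceback objects and stitching them together with the version-appropriate
tb_set_next. One piece that is easy to exercise directly is get_template_locals(),
which resolves the l_<depth>_<name> locals scheme described above; a minimal sketch
with synthetic runtime locals (the dict contents are made up for illustration):

    from jinja2.debug import get_template_locals
    from jinja2.utils import missing

    runtime_locals = {
        "l_0_item": "outer",      # depth-0 binding
        "l_1_item": "inner",      # deeper binding wins
        "l_1_gone": missing,      # missing values are ignored
        "not_a_template_var": 42  # non-l_ names are skipped
    }
    print(get_template_locals(runtime_locals))
    # -> {'item': 'inner'}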
diff --git a/lib/spack/external/jinja2/defaults.py b/lib/spack/external/jinja2/defaults.py
index 7c93dec0ae..8e0e7d7710 100644
--- a/lib/spack/external/jinja2/defaults.py
+++ b/lib/spack/external/jinja2/defaults.py
@@ -1,56 +1,44 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.defaults
- ~~~~~~~~~~~~~~~
-
- Jinja default filters and tags.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import range_type
-from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
-
+from ._compat import range_type
+from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
+from .tests import TESTS as DEFAULT_TESTS # noqa: F401
+from .utils import Cycler
+from .utils import generate_lorem_ipsum
+from .utils import Joiner
+from .utils import Namespace
# defaults for the parser / lexer
-BLOCK_START_STRING = '{%'
-BLOCK_END_STRING = '%}'
-VARIABLE_START_STRING = '{{'
-VARIABLE_END_STRING = '}}'
-COMMENT_START_STRING = '{#'
-COMMENT_END_STRING = '#}'
+BLOCK_START_STRING = "{%"
+BLOCK_END_STRING = "%}"
+VARIABLE_START_STRING = "{{"
+VARIABLE_END_STRING = "}}"
+COMMENT_START_STRING = "{#"
+COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = '\n'
+NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False
-
# default filters, tests and namespace
-from jinja2.filters import FILTERS as DEFAULT_FILTERS
-from jinja2.tests import TESTS as DEFAULT_TESTS
+
DEFAULT_NAMESPACE = {
- 'range': range_type,
- 'dict': dict,
- 'lipsum': generate_lorem_ipsum,
- 'cycler': Cycler,
- 'joiner': Joiner,
- 'namespace': Namespace
+ "range": range_type,
+ "dict": dict,
+ "lipsum": generate_lorem_ipsum,
+ "cycler": Cycler,
+ "joiner": Joiner,
+ "namespace": Namespace,
}
-
# default policies
DEFAULT_POLICIES = {
- 'compiler.ascii_str': True,
- 'urlize.rel': 'noopener',
- 'urlize.target': None,
- 'truncate.leeway': 5,
- 'json.dumps_function': None,
- 'json.dumps_kwargs': {'sort_keys': True},
- 'ext.i18n.trimmed': False,
+ "compiler.ascii_str": True,
+ "urlize.rel": "noopener",
+ "urlize.target": None,
+ "truncate.leeway": 5,
+ "json.dumps_function": None,
+ "json.dumps_kwargs": {"sort_keys": True},
+ "ext.i18n.trimmed": False,
}
-
-
-# export all constants
-__all__ = tuple(x for x in locals().keys() if x.isupper())
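The policy defaults above are copied onto each Environment as env.policies, so a
single environment can override them without affecting others; a small sketch,
assuming a stock jinja2 install:

    from jinja2 import Environment

    env = Environment()
    print(env.policies["truncate.leeway"])  # 5, from DEFAULT_POLICIES
    env.policies["truncate.leeway"] = 0     # opt out of the leeway slack
    # With leeway 0 an 11-character string exceeds truncate(9) and is cut;
    # with the default leeway of 5 it would be returned unchanged.
    print(env.from_string("{{ 'hello world'|truncate(9) }}").render())
    # -> hello...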
diff --git a/lib/spack/external/jinja2/environment.py b/lib/spack/external/jinja2/environment.py
index 549d9afab4..8430390eea 100644
--- a/lib/spack/external/jinja2/environment.py
+++ b/lib/spack/external/jinja2/environment.py
@@ -1,60 +1,83 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.environment
- ~~~~~~~~~~~~~~~~~~
-
- Provides a class that holds runtime and parsing time options.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""Classes for managing templates and their runtime and compile time
+options.
"""
import os
import sys
import weakref
-from functools import reduce, partial
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
- DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.lexer import get_lexer, TokenStream
-from jinja2.parser import Parser
-from jinja2.nodes import EvalContext
-from jinja2.compiler import generate, CodeGenerator
-from jinja2.runtime import Undefined, new_context, Context
-from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
- TemplatesNotFound, TemplateRuntimeError
-from jinja2.utils import import_string, LRUCache, Markup, missing, \
- concat, consume, internalcode, have_async_gen
-from jinja2._compat import imap, ifilter, string_types, iteritems, \
- text_type, reraise, implements_iterator, implements_to_string, \
- encode_filename, PY2, PYPY
-
+from functools import partial
+from functools import reduce
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import encode_filename
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import PYPY
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import generate
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import DEFAULT_FILTERS
+from .defaults import DEFAULT_NAMESPACE
+from .defaults import DEFAULT_POLICIES
+from .defaults import DEFAULT_TESTS
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .lexer import get_lexer
+from .lexer import TokenStream
+from .nodes import EvalContext
+from .parser import Parser
+from .runtime import Context
+from .runtime import new_context
+from .runtime import Undefined
+from .utils import concat
+from .utils import consume
+from .utils import have_async_gen
+from .utils import import_string
+from .utils import internalcode
+from .utils import LRUCache
+from .utils import missing
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
-# the function to create jinja traceback objects. This is dynamically
-# imported on the first exception in the exception handler.
-_make_traceback = None
+def get_spontaneous_environment(cls, *args):
+ """Return a new spontaneous environment. A spontaneous environment
+ is used for templates created directly rather than through an
+ existing environment.
-def get_spontaneous_environment(*args):
- """Return a new spontaneous environment. A spontaneous environment is an
- unnamed and unaccessible (in theory) environment that is used for
- templates generated from a string and not from the file system.
+ :param cls: Environment class to create.
+ :param args: Positional arguments passed to environment.
"""
+ key = (cls, args)
+
try:
- env = _spontaneous_environments.get(args)
- except TypeError:
- return Environment(*args)
- if env is not None:
+ return _spontaneous_environments[key]
+ except KeyError:
+ _spontaneous_environments[key] = env = cls(*args)
+ env.shared = True
return env
- _spontaneous_environments[args] = env = Environment(*args)
- env.shared = True
- return env
def create_cache(size):
@@ -93,20 +116,25 @@ def fail_for_missing_callable(string, name):
try:
name._fail_with_undefined_error()
except Exception as e:
- msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
+ msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
raise TemplateRuntimeError(msg)
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
- assert issubclass(environment.undefined, Undefined), 'undefined must ' \
- 'be a subclass of undefined because filters depend on it.'
- assert environment.block_start_string != \
- environment.variable_start_string != \
- environment.comment_start_string, 'block, variable and comment ' \
- 'start strings must be different'
- assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
- 'newline_sequence set to unknown line ending string.'
+ assert issubclass(
+ environment.undefined, Undefined
+ ), "undefined must be a subclass of undefined because filters depend on it."
+ assert (
+ environment.block_start_string
+ != environment.variable_start_string
+ != environment.comment_start_string
+ ), "block, variable and comment start strings must be different"
+ assert environment.newline_sequence in (
+ "\r",
+ "\r\n",
+ "\n",
+ ), "newline_sequence set to unknown line ending string."
return environment
@@ -191,7 +219,7 @@ class Environment(object):
`autoescape`
If set to ``True`` the XML/HTML autoescaping feature is enabled by
default. For more details about autoescaping see
- :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
+ :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return ``True`` or ``False`` depending on autoescape should be
enabled by default.
@@ -249,10 +277,6 @@ class Environment(object):
#: must not be modified
shared = False
- #: these are currently EXPERIMENTAL undocumented features.
- exception_handler = None
- exception_formatter = None
-
#: the class that is used for code generation. See
#: :class:`~jinja2.compiler.CodeGenerator` for more information.
code_generator_class = CodeGenerator
@@ -261,29 +285,31 @@ class Environment(object):
#: :class:`~jinja2.runtime.Context` for more information.
context_class = Context
- def __init__(self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False):
+ def __init__(
+ self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None,
+ enable_async=False,
+ ):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
@@ -334,6 +360,9 @@ class Environment(object):
self.enable_async = enable_async
self.is_async = self.enable_async and have_async_gen
+ if self.is_async:
+ # runs patch_all() to enable async support
+ from . import asyncsupport # noqa: F401
_environment_sanity_check(self)
@@ -353,15 +382,28 @@ class Environment(object):
if not hasattr(self, key):
setattr(self, key, value)
- def overlay(self, block_start_string=missing, block_end_string=missing,
- variable_start_string=missing, variable_end_string=missing,
- comment_start_string=missing, comment_end_string=missing,
- line_statement_prefix=missing, line_comment_prefix=missing,
- trim_blocks=missing, lstrip_blocks=missing,
- extensions=missing, optimized=missing,
- undefined=missing, finalize=missing, autoescape=missing,
- loader=missing, cache_size=missing, auto_reload=missing,
- bytecode_cache=missing):
+ def overlay(
+ self,
+ block_start_string=missing,
+ block_end_string=missing,
+ variable_start_string=missing,
+ variable_end_string=missing,
+ comment_start_string=missing,
+ comment_end_string=missing,
+ line_statement_prefix=missing,
+ line_comment_prefix=missing,
+ trim_blocks=missing,
+ lstrip_blocks=missing,
+ extensions=missing,
+ optimized=missing,
+ undefined=missing,
+ finalize=missing,
+ autoescape=missing,
+ loader=missing,
+ cache_size=missing,
+ auto_reload=missing,
+ bytecode_cache=missing,
+ ):
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
@@ -374,7 +416,7 @@ class Environment(object):
through.
"""
args = dict(locals())
- del args['self'], args['cache_size'], args['extensions']
+ del args["self"], args["cache_size"], args["extensions"]
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
@@ -402,8 +444,7 @@ class Environment(object):
def iter_extensions(self):
"""Iterates over the extensions by priority."""
- return iter(sorted(self.extensions.values(),
- key=lambda x: x.priority))
+ return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
@@ -435,8 +476,9 @@ class Environment(object):
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
- def call_filter(self, name, value, args=None, kwargs=None,
- context=None, eval_ctx=None):
+ def call_filter(
+ self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
+ ):
"""Invokes a filter on a value the same way the compiler does it.
Note that on Python 3 this might return a coroutine in case the
@@ -448,21 +490,22 @@ class Environment(object):
"""
func = self.filters.get(name)
if func is None:
- fail_for_missing_callable('no filter named %r', name)
+ fail_for_missing_callable("no filter named %r", name)
args = [value] + list(args or ())
- if getattr(func, 'contextfilter', False):
+ if getattr(func, "contextfilter", False) is True:
if context is None:
- raise TemplateRuntimeError('Attempted to invoke context '
- 'filter without context')
+ raise TemplateRuntimeError(
+ "Attempted to invoke context filter without context"
+ )
args.insert(0, context)
- elif getattr(func, 'evalcontextfilter', False):
+ elif getattr(func, "evalcontextfilter", False) is True:
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
- elif getattr(func, 'environmentfilter', False):
+ elif getattr(func, "environmentfilter", False) is True:
args.insert(0, self)
return func(*args, **(kwargs or {}))
@@ -473,7 +516,7 @@ class Environment(object):
"""
func = self.tests.get(name)
if func is None:
- fail_for_missing_callable('no test named %r', name)
+ fail_for_missing_callable("no test named %r", name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
@@ -483,14 +526,13 @@ class Environment(object):
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
- If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+ If you are :ref:`developing Jinja extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
+ self.handle_exception(source=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
@@ -510,16 +552,18 @@ class Environment(object):
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
+ self.handle_exception(source=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
- return reduce(lambda s, e: e.preprocess(s, name, filename),
- self.iter_extensions(), text_type(source))
+ return reduce(
+ lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(),
+ text_type(source),
+ )
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
@@ -539,8 +583,14 @@ class Environment(object):
.. versionadded:: 2.5
"""
- return generate(source, self, name, filename, defer_init=defer_init,
- optimized=self.optimized)
+ return generate(
+ source,
+ self,
+ name,
+ filename,
+ defer_init=defer_init,
+ optimized=self.optimized,
+ )
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
@@ -548,11 +598,10 @@ class Environment(object):
.. versionadded:: 2.5
"""
- return compile(source, filename, 'exec')
+ return compile(source, filename, "exec")
@internalcode
- def compile(self, source, name=None, filename=None, raw=False,
- defer_init=False):
+ def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
@@ -577,18 +626,16 @@ class Environment(object):
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
- source = self._generate(source, name, filename,
- defer_init=defer_init)
+ source = self._generate(source, name, filename, defer_init=defer_init)
if raw:
return source
if filename is None:
- filename = '<template>'
+ filename = "<template>"
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source_hint)
+ self.handle_exception(source=source_hint)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
@@ -618,26 +665,32 @@ class Environment(object):
.. versionadded:: 2.1
"""
- parser = Parser(self, source, state='variable')
- exc_info = None
+ parser = Parser(self, source, state="variable")
try:
expr = parser.parse_expression()
if not parser.stream.eos:
- raise TemplateSyntaxError('chunk after expression',
- parser.stream.current.lineno,
- None, None)
+ raise TemplateSyntaxError(
+ "chunk after expression", parser.stream.current.lineno, None, None
+ )
expr.set_environment(self)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- if exc_info is not None:
- self.handle_exception(exc_info, source_hint=source)
- body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+ if sys.exc_info() is not None:
+ self.handle_exception(source=source)
+
+ body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
- def compile_templates(self, target, extensions=None, filter_func=None,
- zip='deflated', log_function=None,
- ignore_errors=True, py_compile=False):
+ def compile_templates(
+ self,
+ target,
+ extensions=None,
+ filter_func=None,
+ zip="deflated",
+ log_function=None,
+ ignore_errors=True,
+ py_compile=False,
+ ):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
@@ -660,42 +713,52 @@ class Environment(object):
.. versionadded:: 2.4
"""
- from jinja2.loaders import ModuleLoader
+ from .loaders import ModuleLoader
if log_function is None:
- log_function = lambda x: None
+
+ def log_function(x):
+ pass
if py_compile:
if not PY2 or PYPY:
- from warnings import warn
- warn(Warning('py_compile has no effect on pypy or Python 3'))
+ import warnings
+
+ warnings.warn(
+ "'py_compile=True' has no effect on PyPy or Python"
+ " 3 and will be removed in version 3.0",
+ DeprecationWarning,
+ stacklevel=2,
+ )
py_compile = False
else:
import imp
import marshal
- py_header = imp.get_magic() + \
- u'\xff\xff\xff\xff'.encode('iso-8859-15')
+
+ py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
- py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
+ py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
- def write_file(filename, data, mode):
+ def write_file(filename, data):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
- f = open(os.path.join(target, filename), mode)
- try:
+ if isinstance(data, text_type):
+ data = data.encode("utf8")
+
+ with open(os.path.join(target, filename), "wb") as f:
f.write(data)
- finally:
- f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
- zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
- stored=ZIP_STORED)[zip])
+
+ zip_file = ZipFile(
+ target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
+ )
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
@@ -717,18 +780,16 @@ class Environment(object):
if py_compile:
c = self._compile(code, encode_filename(filename))
- write_file(filename + 'c', py_header +
- marshal.dumps(c), 'wb')
- log_function('Byte-compiled "%s" as %s' %
- (name, filename + 'c'))
+ write_file(filename + "c", py_header + marshal.dumps(c))
+ log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
else:
- write_file(filename, code, 'w')
+ write_file(filename, code)
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
- log_function('Finished compiling templates')
+ log_function("Finished compiling templates")
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
@@ -746,38 +807,29 @@ class Environment(object):
.. versionadded:: 2.4
"""
- x = self.loader.list_templates()
+ names = self.loader.list_templates()
+
if extensions is not None:
if filter_func is not None:
- raise TypeError('either extensions or filter_func '
- 'can be passed, but not both')
- filter_func = lambda x: '.' in x and \
- x.rsplit('.', 1)[1] in extensions
+ raise TypeError(
+ "either extensions or filter_func can be passed, but not both"
+ )
+
+ def filter_func(x):
+ return "." in x and x.rsplit(".", 1)[1] in extensions
+
if filter_func is not None:
- x = list(ifilter(filter_func, x))
- return x
+ names = [name for name in names if filter_func(name)]
+
+ return names
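Usage sketch for the rewritten list_templates() (loader path illustrative); note the code above rejects passing both arguments:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("templates"))
    # Either a list of extensions ...
    html_and_txt = env.list_templates(extensions=["html", "txt"])
    # ... or an arbitrary predicate, but not both at once (TypeError).
    partials = env.list_templates(filter_func=lambda n: n.startswith("_"))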
- def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
+ def handle_exception(self, source=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
- global _make_traceback
- if exc_info is None:
- exc_info = sys.exc_info()
-
- # the debugging module is imported when it's used for the first time.
- # we're doing a lot of stuff there and for applications that do not
- # get any exceptions in template rendering there is no need to load
- # all of that.
- if _make_traceback is None:
- from jinja2.debug import make_traceback as _make_traceback
- traceback = _make_traceback(exc_info, source_hint)
- if rendered and self.exception_formatter is not None:
- return self.exception_formatter(traceback)
- if self.exception_handler is not None:
- self.exception_handler(traceback)
- exc_type, exc_value, tb = traceback.standard_exc_info
- reraise(exc_type, exc_value, tb)
+ from .debug import rewrite_traceback_stack
+
+ reraise(*rewrite_traceback_stack(source=source))
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
@@ -794,12 +846,13 @@ class Environment(object):
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
- raise TypeError('no loader for this environment specified')
+ raise TypeError("no loader for this environment specified")
cache_key = (weakref.ref(self.loader), name)
if self.cache is not None:
template = self.cache.get(cache_key)
- if template is not None and (not self.auto_reload or
- template.is_up_to_date):
+ if template is not None and (
+ not self.auto_reload or template.is_up_to_date
+ ):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
@@ -835,15 +888,24 @@ class Environment(object):
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
- .. versionadded:: 2.3
+ .. versionchanged:: 2.11
+ If names is :class:`Undefined`, an :exc:`UndefinedError` is
+ raised instead. If no templates were found and names
+ contains :class:`Undefined`, the message is more helpful.
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
+
+ .. versionadded:: 2.3
"""
+ if isinstance(names, Undefined):
+ names._fail_with_undefined_error()
+
if not names:
- raise TemplatesNotFound(message=u'Tried to select from an empty list '
- u'of templates.')
+ raise TemplatesNotFound(
+ message=u"Tried to select from an empty list " u"of templates."
+ )
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
@@ -852,20 +914,19 @@ class Environment(object):
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
- except TemplateNotFound:
+ except (TemplateNotFound, UndefinedError):
pass
raise TemplatesNotFound(names)
@internalcode
- def get_or_select_template(self, template_name_or_list,
- parent=None, globals=None):
+ def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
- if isinstance(template_name_or_list, string_types):
+ if isinstance(template_name_or_list, (string_types, Undefined)):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
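A sketch of the selection behavior changed above; the template names are illustrative:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("templates"))
    # Each name is tried in order; TemplateNotFound and, as of this change,
    # UndefinedError are swallowed until the list is exhausted, after which
    # TemplatesNotFound is raised with a more helpful message.
    page = env.select_template(["theme/page.html", "page.html"])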
@@ -916,32 +977,57 @@ class Template(object):
StopIteration
"""
- def __new__(cls, source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False):
+ #: Type of environment to create when creating a template directly
+ #: rather than through an existing environment.
+ environment_class = Environment
+
+ def __new__(
+ cls,
+ source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ enable_async=False,
+ ):
env = get_spontaneous_environment(
- block_start_string, block_end_string, variable_start_string,
- variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, line_comment_prefix, trim_blocks,
- lstrip_blocks, newline_sequence, keep_trailing_newline,
- frozenset(extensions), optimized, undefined, finalize, autoescape,
- None, 0, False, None, enable_async)
+ cls.environment_class,
+ block_start_string,
+ block_end_string,
+ variable_start_string,
+ variable_end_string,
+ comment_start_string,
+ comment_end_string,
+ line_statement_prefix,
+ line_comment_prefix,
+ trim_blocks,
+ lstrip_blocks,
+ newline_sequence,
+ keep_trailing_newline,
+ frozenset(extensions),
+ optimized,
+ undefined,
+ finalize,
+ autoescape,
+ None,
+ 0,
+ False,
+ None,
+ enable_async,
+ )
return env.from_string(source, template_class=cls)
@classmethod
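Sketch of constructing a Template directly; the new environment_class hook decides which class get_spontaneous_environment instantiates:

    from jinja2 import Template

    # A shared, spontaneously created Environment is used behind the scenes.
    t = Template("Hello {{ name }}!", trim_blocks=True)
    assert t.render(name="Spack") == "Hello Spack!"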
@@ -949,10 +1035,7 @@ class Template(object):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
- namespace = {
- 'environment': environment,
- '__file__': code.co_filename
- }
+ namespace = {"environment": environment, "__file__": code.co_filename}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
@@ -972,21 +1055,21 @@ class Template(object):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
- t.name = namespace['name']
- t.filename = namespace['__file__']
- t.blocks = namespace['blocks']
+ t.name = namespace["name"]
+ t.filename = namespace["__file__"]
+ t.blocks = namespace["blocks"]
# render function and module
- t.root_render_func = namespace['root']
+ t.root_render_func = namespace["root"]
t._module = None
# debug and loader helpers
- t._debug_info = namespace['debug_info']
+ t._debug_info = namespace["debug_info"]
t._uptodate = None
# store the reference
- namespace['environment'] = environment
- namespace['__jinja_template__'] = t
+ namespace["environment"] = environment
+ namespace["__jinja_template__"] = t
return t
@@ -1004,8 +1087,7 @@ class Template(object):
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
+ self.environment.handle_exception()
def render_async(self, *args, **kwargs):
"""This works similar to :meth:`render` but returns a coroutine
@@ -1017,8 +1099,9 @@ class Template(object):
await template.render_async(knights='that say nih; asynchronously')
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
@@ -1039,29 +1122,28 @@ class Template(object):
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
+ yield self.environment.handle_exception()
def generate_async(self, *args, **kwargs):
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
- is passed as it to the context without adding the globals.
+ is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
- return new_context(self.environment, self.name, self.blocks,
- vars, shared, self.globals, locals)
+ return new_context(
+ self.environment, self.name, self.blocks, vars, shared, self.globals, locals
+ )
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
@@ -1074,13 +1156,14 @@ class Template(object):
def make_module_async(self, vars=None, shared=False, locals=None):
"""As template module creation can invoke template code for
- asynchronous exections this method must be used instead of the
+ asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
@internalcode
def _get_default_module(self):
@@ -1124,15 +1207,16 @@ class Template(object):
@property
def debug_info(self):
"""The debug info mapping."""
- return [tuple(imap(int, x.split('='))) for x in
- self._debug_info.split('&')]
+ if self._debug_info:
+ return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
+ return []
def __repr__(self):
if self.name is None:
- name = 'memory:%x' % id(self)
+ name = "memory:%x" % id(self)
else:
name = repr(self.name)
- return '<%s %s>' % (self.__class__.__name__, name)
+ return "<%s %s>" % (self.__class__.__name__, name)
@implements_to_string
@@ -1145,10 +1229,12 @@ class TemplateModule(object):
def __init__(self, template, context, body_stream=None):
if body_stream is None:
if context.environment.is_async:
- raise RuntimeError('Async mode requires a body stream '
- 'to be passed to a template module. Use '
- 'the async methods of the API you are '
- 'using.')
+ raise RuntimeError(
+ "Async mode requires a body stream "
+ "to be passed to a template module. Use "
+ "the async methods of the API you are "
+ "using."
+ )
body_stream = list(template.root_render_func(context))
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
@@ -1162,10 +1248,10 @@ class TemplateModule(object):
def __repr__(self):
if self.__name__ is None:
- name = 'memory:%x' % id(self)
+ name = "memory:%x" % id(self)
else:
name = repr(self.__name__)
- return '<%s %s>' % (self.__class__.__name__, name)
+ return "<%s %s>" % (self.__class__.__name__, name)
class TemplateExpression(object):
@@ -1181,7 +1267,7 @@ class TemplateExpression(object):
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
- rv = context.vars['result']
+ rv = context.vars["result"]
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@@ -1203,7 +1289,7 @@ class TemplateStream(object):
self._gen = gen
self.disable_buffering()
- def dump(self, fp, encoding=None, errors='strict'):
+ def dump(self, fp, encoding=None, errors="strict"):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written; if you want to encode
before writing, specify an `encoding`.
@@ -1215,15 +1301,15 @@ class TemplateStream(object):
close = False
if isinstance(fp, string_types):
if encoding is None:
- encoding = 'utf-8'
- fp = open(fp, 'wb')
+ encoding = "utf-8"
+ fp = open(fp, "wb")
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
- if hasattr(fp, 'writelines'):
+ if hasattr(fp, "writelines"):
fp.writelines(iterable)
else:
for item in iterable:
@@ -1259,7 +1345,7 @@ class TemplateStream(object):
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
- raise ValueError('buffer size too small')
+ raise ValueError("buffer size too small")
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
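A sketch of the streaming API touched above (output path illustrative):

    from jinja2 import Environment

    env = Environment()
    stream = env.from_string("{% for i in range(3) %}{{ i }} {% endfor %}").stream()
    # Buffer five render events before yielding; sizes <= 1 raise ValueError.
    stream.enable_buffering(size=5)
    # dump() opens a path in binary mode and encodes with UTF-8 by default.
    stream.dump("out.txt")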
diff --git a/lib/spack/external/jinja2/exceptions.py b/lib/spack/external/jinja2/exceptions.py
index c018a33e32..0bf2003e30 100644
--- a/lib/spack/external/jinja2/exceptions.py
+++ b/lib/spack/external/jinja2/exceptions.py
@@ -1,23 +1,18 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.exceptions
- ~~~~~~~~~~~~~~~~~
-
- Jinja exceptions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import imap, text_type, PY2, implements_to_string
+from ._compat import imap
+from ._compat import implements_to_string
+from ._compat import PY2
+from ._compat import text_type
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
+
def __init__(self, message=None):
if message is not None:
- message = text_type(message).encode('utf-8')
+ message = text_type(message).encode("utf-8")
Exception.__init__(self, message)
@property
@@ -25,11 +20,13 @@ class TemplateError(Exception):
if self.args:
message = self.args[0]
if message is not None:
- return message.decode('utf-8', 'replace')
+ return message.decode("utf-8", "replace")
def __unicode__(self):
- return self.message or u''
+ return self.message or u""
+
else:
+
def __init__(self, message=None):
Exception.__init__(self, message)
@@ -43,16 +40,28 @@ class TemplateError(Exception):
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
- """Raised if a template does not exist."""
+ """Raised if a template does not exist.
+
+ .. versionchanged:: 2.11
+ If the given name is :class:`Undefined` and no message was
+ provided, an :exc:`UndefinedError` is raised.
+ """
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
- IOError.__init__(self)
+ IOError.__init__(self, name)
+
if message is None:
+ from .runtime import Undefined
+
+ if isinstance(name, Undefined):
+ name._fail_with_undefined_error()
+
message = name
+
self.message = message
self.name = name
self.templates = [name]
@@ -66,13 +75,28 @@ class TemplatesNotFound(TemplateNotFound):
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
+ .. versionchanged:: 2.11
+ If a name in the list of names is :class:`Undefined`, a message
+ about it being undefined is shown rather than the empty string.
+
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
- message = u'none of the templates given were found: ' + \
- u', '.join(imap(text_type, names))
+ from .runtime import Undefined
+
+ parts = []
+
+ for name in names:
+ if isinstance(name, Undefined):
+ parts.append(name._undefined_message)
+ else:
+ parts.append(name)
+
+ message = u"none of the templates given were found: " + u", ".join(
+ imap(text_type, parts)
+ )
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@@ -98,11 +122,11 @@ class TemplateSyntaxError(TemplateError):
return self.message
# otherwise attach some stuff
- location = 'line %d' % self.lineno
+ location = "line %d" % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
- lines = [self.message, ' ' + location]
+ lines = [self.message, " " + location]
# if the source is set, add the line to the output
if self.source is not None:
@@ -111,9 +135,16 @@ class TemplateSyntaxError(TemplateError):
except IndexError:
line = None
if line:
- lines.append(' ' + line.strip())
+ lines.append(" " + line.strip())
+
+ return u"\n".join(lines)
- return u'\n'.join(lines)
+ def __reduce__(self):
+ # https://bugs.python.org/issue1692335 Exceptions that take
+ # multiple required arguments have problems with pickling.
+ # Without this, raises TypeError: __init__() missing 1 required
+ # positional argument: 'lineno'
+ return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
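The new __reduce__ makes the four-argument exception picklable; a quick check:

    import pickle

    from jinja2 import Environment, TemplateSyntaxError

    env = Environment()
    try:
        env.parse("{% if %}")  # deliberately broken template
    except TemplateSyntaxError as exc:
        clone = pickle.loads(pickle.dumps(exc))  # works thanks to __reduce__
        assert clone.lineno == exc.lineno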
diff --git a/lib/spack/external/jinja2/ext.py b/lib/spack/external/jinja2/ext.py
index 0734a84f73..9141be4dac 100644
--- a/lib/spack/external/jinja2/ext.py
+++ b/lib/spack/external/jinja2/ext.py
@@ -1,42 +1,49 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.ext
- ~~~~~~~~~~
-
- Jinja extensions allow to add custom tags similar to the way django custom
- tags work. By default two example extensions exist: an i18n and a cache
- extension.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
+"""Extension API for adding custom tags and behavior."""
+import pprint
import re
-
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.environment import Environment
-from jinja2.runtime import concat
-from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.utils import contextfunction, import_string, Markup
-from jinja2._compat import with_metaclass, string_types, iteritems
-
+from sys import version_info
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import with_metaclass
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .nodes import ContextReference
+from .runtime import concat
+from .utils import contextfunction
+from .utils import import_string
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
+
+_ws_re = re.compile(r"\s*\n\s*")
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- rv.identifier = rv.__module__ + '.' + rv.__name__
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
+ rv.identifier = rv.__module__ + "." + rv.__name__
return rv
@@ -91,10 +98,6 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
-
- In the `ext` folder of the Jinja2 source distribution there is a file
- called `inlinegettext.py` which implements a filter that utilizes this
- method.
"""
return stream
@@ -116,8 +119,9 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
- def call_method(self, name, args=None, kwargs=None, dyn_args=None,
- dyn_kwargs=None, lineno=None):
+ def call_method(
+ self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
+ ):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
@@ -125,13 +129,19 @@ class Extension(with_metaclass(ExtensionRegistry, object)):
args = []
if kwargs is None:
kwargs = []
- return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
- dyn_args, dyn_kwargs, lineno=lineno)
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve('gettext'), *args, **kwargs)
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func):
@@ -140,24 +150,31 @@ def _make_new_gettext(func):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping
return rv % variables
+
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault('num', __num)
+ variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
return rv % variables
+
return ngettext
class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja2."""
- tags = set(['trans'])
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
@@ -168,30 +185,28 @@ class InternationalizationExtension(Extension):
def __init__(self, environment):
Extension.__init__(self, environment)
- environment.globals['_'] = _gettext_alias
+ environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
- newstyle_gettext=False
+ newstyle_gettext=False,
)
def _install(self, translations, newstyle=None):
- gettext = getattr(translations, 'ugettext', None)
+ gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
- ngettext = getattr(translations, 'ungettext', None)
+ ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
- lambda x: x,
- lambda s, p, n: (n != 1 and (p,) or (s,))[0],
- newstyle
+ lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
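Sketch of the install hooks registered via environment.extend in __init__ above:

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.i18n"])
    # install_null_translations() wires gettext/ngettext to the pass-through
    # callables above; useful before real message catalogs exist.
    env.install_null_translations(newstyle=True)
    assert env.from_string("{% trans %}Hello{% endtrans %}").render() == "Hello"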
@@ -200,13 +215,10 @@ class InternationalizationExtension(Extension):
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(
- gettext=gettext,
- ngettext=ngettext
- )
+ self.environment.globals.update(gettext=gettext, ngettext=ngettext)
def _uninstall(self, translations):
- for key in 'gettext', 'ngettext':
+ for key in "gettext", "ngettext":
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
@@ -226,41 +238,44 @@ class InternationalizationExtension(Extension):
plural_expr_assignment = None
variables = {}
trimmed = None
- while parser.stream.current.type != 'block_end':
+ while parser.stream.current.type != "block_end":
if variables:
- parser.stream.expect('comma')
+ parser.stream.expect("comma")
# skip colon for python compatibility
- if parser.stream.skip_if('colon'):
+ if parser.stream.skip_if("colon"):
break
- name = parser.stream.expect('name')
+ name = parser.stream.expect("name")
if name.value in variables:
- parser.fail('translatable variable %r defined twice.' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "translatable variable %r defined twice." % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
# expressions
- if parser.stream.current.type == 'assign':
+ if parser.stream.current.type == "assign":
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
- trimmed = name.value == 'trimmed'
+ elif trimmed is None and name.value in ("trimmed", "notrimmed"):
+ trimmed = name.value == "trimmed"
continue
else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
+ variables[name.value] = var = nodes.Name(name.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
- plural_expr = nodes.Name('_trans', 'load')
+ plural_expr = nodes.Name("_trans", "load")
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
- nodes.Name('_trans', 'store'), var)
+ nodes.Name("_trans", "store"), var
+ )
else:
plural_expr = var
- num_called_num = name.value == 'num'
+ num_called_num = name.value == "num"
- parser.stream.expect('block_end')
+ parser.stream.expect("block_end")
plural = None
have_plural = False
@@ -271,22 +286,24 @@ class InternationalizationExtension(Extension):
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
- num_called_num = singular_names[0] == 'num'
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
+ if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
- if parser.stream.current.type != 'block_end':
- name = parser.stream.expect('name')
+ if parser.stream.current.type != "block_end":
+ name = parser.stream.expect("name")
if name.value not in variables:
- parser.fail('unknown variable %r for pluralization' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "unknown variable %r for pluralization" % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
plural_expr = variables[name.value]
- num_called_num = name.value == 'num'
- parser.stream.expect('block_end')
+ num_called_num = name.value == "num"
+ parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
@@ -296,88 +313,97 @@ class InternationalizationExtension(Extension):
# register free names as simple name expressions
for var in referenced:
if var not in variables:
- variables[var] = nodes.Name(var, 'load')
+ variables[var] = nodes.Name(var, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
- parser.fail('pluralize without variables', lineno)
+ parser.fail("pluralize without variables", lineno)
if trimmed is None:
- trimmed = self.environment.policies['ext.i18n.trimmed']
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
- node = self._make_node(singular, plural, variables, plural_expr,
- bool(referenced),
- num_called_num and have_plural)
+ node = self._make_node(
+ singular,
+ plural,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
- def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
- return _ws_re.sub(' ', string.strip())
+ def _trim_whitespace(self, string, _ws_re=_ws_re):
+ return _ws_re.sub(" ", string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
- if parser.stream.current.type == 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
- elif parser.stream.current.type == 'variable_begin':
+ elif parser.stream.current.type == "variable_begin":
next(parser.stream)
- name = parser.stream.expect('name').value
+ name = parser.stream.expect("name").value
referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type == 'block_begin':
+ buf.append("%%(%s)s" % name)
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
next(parser.stream)
- if parser.stream.current.test('name:endtrans'):
+ if parser.stream.current.test("name:endtrans"):
break
- elif parser.stream.current.test('name:pluralize'):
+ elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
- parser.fail('a translatable section can have only one '
- 'pluralize section')
- parser.fail('control structures in translatable sections are '
- 'not allowed')
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ parser.fail(
+ "control structures in translatable sections are not allowed"
+ )
elif parser.stream.eos:
- parser.fail('unclosed translation block')
+ parser.fail("unclosed translation block")
else:
- assert False, 'internal parser error'
+ raise RuntimeError("internal parser error")
return referenced, concat(buf)
- def _make_node(self, singular, plural, variables, plural_expr,
- vars_referenced, num_called_num):
+ def _make_node(
+ self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
+ ):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace('%%', '%')
+ singular = singular.replace("%%", "%")
if plural:
- plural = plural.replace('%%', '%')
+ plural = plural.replace("%%", "%")
# singular only:
if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
+ gettext = nodes.Name("gettext", "load")
+ node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
# singular and plural
else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
+ ngettext = nodes.Name("ngettext", "load")
+ node = nodes.Call(
+ ngettext,
+ [nodes.Const(singular), nodes.Const(plural), plural_expr],
+ [],
+ None,
+ None,
+ )
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
@@ -386,7 +412,7 @@ class InternationalizationExtension(Extension):
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
- if num_called_num and key == 'num':
+ if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
@@ -396,18 +422,24 @@ class InternationalizationExtension(Extension):
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
- node = nodes.Mod(node, nodes.Dict([
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]))
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
return nodes.Output([node])
class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja2 that works like the print statement just
+ """Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
- tags = set(['do'])
+
+ tags = set(["do"])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
@@ -417,11 +449,12 @@ class ExprStmtExtension(Extension):
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
- tags = set(['break', 'continue'])
+
+ tags = set(["break", "continue"])
def parse(self, parser):
token = next(parser.stream)
- if token.value == 'break':
+ if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
@@ -434,8 +467,50 @@ class AutoEscapeExtension(Extension):
pass
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
- babel_style=True):
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+ <pre>{% debug %}</pre>
+
+ .. code-block:: text
+
+ {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
+ ...,
+ 'namespace': <class 'jinja2.utils.Namespace'>},
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser):
+ lineno = parser.stream.expect("name:debug").lineno
+ context = ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context):
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ if version_info[:2] >= (3, 4):
+ return pprint.pformat(result, depth=3, compact=True)
+ else:
+ return pprint.pformat(result, depth=3)
+
+
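Usage sketch for the new extension; it must be enabled explicitly, and the `debug` alias added at the bottom of this file makes the short import string work:

    from jinja2 import Environment

    env = Environment(extensions=["jinja2.ext.debug"])
    # Renders a pprint dump of the context plus sorted filter and test names.
    print(env.from_string("{% debug %}").render(answer=42))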
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style, meaning non-string
parameters as well as keyword arguments are returned as `None`. This
@@ -471,19 +546,20 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
continue
strings = []
for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, string_types):
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
- for arg in node.kwargs:
+ for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
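A quick sketch of extract_from_ast in babel style:

    from jinja2 import Environment
    from jinja2.ext import extract_from_ast

    env = Environment(extensions=["jinja2.ext.i18n"])
    ast = env.parse("{{ gettext('Hello World') }}")
    # Babel-style matches are (lineno, function name, message) tuples.
    assert list(extract_from_ast(ast)) == [(1, "gettext", "Hello World")]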
@@ -517,9 +593,10 @@ class _CommentFinder(object):
def find_backwards(self, offset):
try:
- for _, token_type, token_value in \
- reversed(self.tokens[self.offset:offset]):
- if token_type in ('comment', 'linecomment'):
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
@@ -533,7 +610,7 @@ class _CommentFinder(object):
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
@@ -545,7 +622,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
- try to find the best preceeding comment that begins with one of the
+ try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
@@ -568,7 +645,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
(comments will be empty currently)
"""
extensions = set()
- for extension in options.get('extensions', '').split(','):
+ for extension in options.get("extensions", "").split(","):
extension = extension.strip()
if not extension:
continue
@@ -577,38 +654,37 @@ def babel_extract(fileobj, keywords, comment_tags, options):
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in \
- ('1', 'on', 'yes', 'true')
+ return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
- silent = getbool(options, 'silent', True)
+ silent = getbool(options, "silent", True)
environment = Environment(
- options.get('block_start_string', BLOCK_START_STRING),
- options.get('block_end_string', BLOCK_END_STRING),
- options.get('variable_start_string', VARIABLE_START_STRING),
- options.get('variable_end_string', VARIABLE_END_STRING),
- options.get('comment_start_string', COMMENT_START_STRING),
- options.get('comment_end_string', COMMENT_END_STRING),
- options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
- options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
- getbool(options, 'trim_blocks', TRIM_BLOCKS),
- getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+ options.get("block_start_string", BLOCK_START_STRING),
+ options.get("block_end_string", BLOCK_END_STRING),
+ options.get("variable_start_string", VARIABLE_START_STRING),
+ options.get("variable_end_string", VARIABLE_END_STRING),
+ options.get("comment_start_string", COMMENT_START_STRING),
+ options.get("comment_end_string", COMMENT_END_STRING),
+ options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
- getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+ getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
- auto_reload=False
+ auto_reload=False,
)
- if getbool(options, 'trimmed'):
- environment.policies['ext.i18n.trimmed'] = True
- if getbool(options, 'newstyle_gettext'):
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True
- source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError as e:
+ except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
@@ -625,3 +701,4 @@ do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
+debug = DebugExtension
diff --git a/lib/spack/external/jinja2/filters.py b/lib/spack/external/jinja2/filters.py
index 267ddddaa0..74b108dcec 100644
--- a/lib/spack/external/jinja2/filters.py
+++ b/lib/spack/external/jinja2/filters.py
@@ -1,29 +1,31 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.filters
- ~~~~~~~~~~~~~~
-
- Bundled jinja filters.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
+"""Built-in template filters used with the ``|`` operator."""
import math
import random
+import re
import warnings
-
-from itertools import groupby, chain
from collections import namedtuple
-from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
- unicode_urlencode, htmlsafe_json_dumps
-from jinja2.runtime import Undefined
-from jinja2.exceptions import FilterArgumentError
-from jinja2._compat import imap, string_types, text_type, iteritems, PY2
+from itertools import chain
+from itertools import groupby
+
+from markupsafe import escape
+from markupsafe import Markup
+from markupsafe import soft_unicode
+from ._compat import abc
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import FilterArgumentError
+from .runtime import Undefined
+from .utils import htmlsafe_json_dumps
+from .utils import pformat
+from .utils import unicode_urlencode
+from .utils import urlize
-_word_re = re.compile(r'\w+', re.UNICODE)
-_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
+_word_re = re.compile(r"\w+", re.UNICODE)
+_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
def contextfilter(f):
@@ -59,23 +61,21 @@ def ignore_case(value):
return value.lower() if isinstance(value, string_types) else value
-def make_attrgetter(environment, attribute, postprocess=None):
+def make_attrgetter(environment, attribute, postprocess=None, default=None):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
- if attribute is None:
- attribute = []
- elif isinstance(attribute, string_types):
- attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
- else:
- attribute = [attribute]
+ attribute = _prepare_attribute_parts(attribute)
def attrgetter(item):
for part in attribute:
item = environment.getitem(item, part)
+ if default and isinstance(item, Undefined):
+ item = default
+
if postprocess is not None:
item = postprocess(item)
@@ -84,32 +84,84 @@ def make_attrgetter(environment, attribute, postprocess=None):
return attrgetter
+def make_multi_attrgetter(environment, attribute, postprocess=None):
+ """Returns a callable that looks up the given comma separated
+ attributes from a passed object with the rules of the environment.
+ Dots are allowed to access attributes of each attribute. Integer
+ parts in paths are looked up as integers.
+
+ The value returned by the returned callable is a list of extracted
+ attribute values.
+
+ Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
+ """
+ attribute_parts = (
+ attribute.split(",") if isinstance(attribute, string_types) else [attribute]
+ )
+ attribute = [
+ _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
+ ]
+
+ def attrgetter(item):
+ items = [None] * len(attribute)
+ for i, attribute_part in enumerate(attribute):
+ item_i = item
+ for part in attribute_part:
+ item_i = environment.getitem(item_i, part)
+
+ if postprocess is not None:
+ item_i = postprocess(item_i)
+
+ items[i] = item_i
+ return items
+
+ return attrgetter
+
+
+def _prepare_attribute_parts(attr):
+ if attr is None:
+ return []
+ elif isinstance(attr, string_types):
+ return [int(x) if x.isdigit() else x for x in attr.split(".")]
+ else:
+ return [attr]
+
+
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
- if hasattr(value, '__html__'):
+ if hasattr(value, "__html__"):
value = value.__html__()
return escape(text_type(value))
def do_urlencode(value):
- """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
- dictionaries and regular strings as well as pairwise iterables.
+ """Quote data for use in a URL path or query using UTF-8.
+
+ Basic wrapper around :func:`urllib.parse.quote` when given a
+ string, or :func:`urllib.parse.urlencode` for a dict or iterable.
+
+ :param value: Data to quote. A string will be quoted directly. A
+ dict or iterable of ``(key, value)`` pairs will be joined as a
+ query string.
+
+ When given a string, "/" is not quoted. HTTP servers treat "/" and
+ "%2F" equivalently in paths. If you need quoted slashes, use the
+ ``|replace("/", "%2F")`` filter.
.. versionadded:: 2.7
"""
- itemiter = None
- if isinstance(value, dict):
- itemiter = iteritems(value)
- elif not isinstance(value, string_types):
- try:
- itemiter = iter(value)
- except TypeError:
- pass
- if itemiter is None:
+ if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
return unicode_urlencode(value)
- return u'&'.join(unicode_urlencode(k) + '=' +
- unicode_urlencode(v, for_qs=True)
- for k, v in itemiter)
+
+ if isinstance(value, dict):
+ items = iteritems(value)
+ else:
+ items = iter(value)
+
+ return u"&".join(
+ "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
+ for k, v in items
+ )
@evalcontextfilter
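Behavior sketch for the rewritten urlencode filter:

    from jinja2 import Environment

    env = Environment()
    # Strings are quoted directly and "/" is left alone; dicts and iterables
    # of pairs become query strings with keys and values fully quoted.
    assert env.from_string("{{ 'a b/c'|urlencode }}").render() == "a%20b/c"
    assert env.from_string("{{ {'q': 'a b'}|urlencode }}").render() == "q=a%20b"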
@@ -132,8 +184,11 @@ def do_replace(eval_ctx, s, old, new, count=None):
count = -1
if not eval_ctx.autoescape:
return text_type(s).replace(text_type(old), text_type(new), count)
- if hasattr(old, '__html__') or hasattr(new, '__html__') and \
- not hasattr(s, '__html__'):
+ if (
+ hasattr(old, "__html__")
+ or hasattr(new, "__html__")
+ and not hasattr(s, "__html__")
+ ):
s = escape(s)
else:
s = soft_unicode(s)
@@ -174,13 +229,13 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
- rv = u' '.join(
+ rv = u" ".join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in iteritems(d)
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
- rv = u' ' + rv
+ rv = u" " + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
@@ -197,39 +252,40 @@ def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
- return ''.join(
- [item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
- if item])
+ return "".join(
+ [
+ item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_unicode(s))
+ if item
+ ]
+ )
-def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
+def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
- {% for item in mydict|dictsort %}
+ {% for key, value in mydict|dictsort %}
sort the dict by key, case insensitive
- {% for item in mydict|dictsort(reverse=true) %}
+ {% for key, value in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
- {% for item in mydict|dictsort(true) %}
+ {% for key, value in mydict|dictsort(true) %}
sort the dict by key, case sensitive
- {% for item in mydict|dictsort(false, 'value') %}
+ {% for key, value in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
- if by == 'key':
+ if by == "key":
pos = 0
- elif by == 'value':
+ elif by == "value":
pos = 1
else:
- raise FilterArgumentError(
- 'You can only sort by either "key" or "value"'
- )
+ raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
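The docstring fix above matters because dictsort yields pairs; a sketch:

    from jinja2 import Environment

    env = Environment()
    out = env.from_string(
        "{% for key, value in d|dictsort %}{{ key }}={{ value }} {% endfor %}"
    ).render(d={"b": 2, "a": 1})
    assert out == "a=1 b=2 "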
@@ -243,48 +299,62 @@ def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
@environmentfilter
-def do_sort(
- environment, value, reverse=False, case_sensitive=False, attribute=None
-):
- """Sort an iterable. Per default it sorts ascending, if you pass it
- true as first argument it will reverse the sorting.
+def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
+ """Sort an iterable using Python's :func:`sorted`.
+
+ .. sourcecode:: jinja
+
+ {% for city in cities|sort %}
+ ...
+ {% endfor %}
- If the iterable is made of strings the third parameter can be used to
- control the case sensitiveness of the comparison which is disabled by
- default.
+ :param reverse: Sort descending instead of ascending.
+ :param case_sensitive: When sorting strings, sort upper and lower
+ case separately.
+ :param attribute: When sorting objects or dicts, an attribute or
+ key to sort by. Can use dot notation like ``"address.city"``.
+ Can be a list of attributes like ``"age,name"``.
+
+ The sort is stable; it does not change the relative order of
+ elements that compare equal. This makes it possible to chain
+ sorts on different attributes and orderings.
.. sourcecode:: jinja
- {% for item in iterable|sort %}
+ {% for user in users|sort(attribute="name")
+ |sort(reverse=true, attribute="age") %}
...
{% endfor %}
- It is also possible to sort by an attribute (for example to sort
- by the date of an object) by specifying the `attribute` parameter:
+ As a shortcut to chaining when the direction is the same for all
+ attributes, pass a comma-separated list of attributes.
.. sourcecode:: jinja
- {% for item in iterable|sort(attribute='date') %}
+ {% for user in users|sort(attribute="age,name") %}
...
{% endfor %}
+ .. versionchanged:: 2.11.0
+ The ``attribute`` parameter can be a comma separated list of
+ attributes, e.g. ``"age,name"``.
+
.. versionchanged:: 2.6
- The `attribute` parameter was added.
+ The ``attribute`` parameter was added.
"""
- key_func = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
+ key_func = make_multi_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return sorted(value, key=key_func, reverse=reverse)
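
A minimal sketch of the stable-sort chaining described above, with hypothetical data:

    users = [
        {"name": "b", "age": 30},
        {"name": "a", "age": 30},
        {"name": "c", "age": 20},
    ]
    by_name = sorted(users, key=lambda u: u["name"])
    # age descending; equal ages keep the name order from `by_name`
    sorted(by_name, key=lambda u: u["age"], reverse=True)
    # -> a/30, b/30, c/20
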
@environmentfilter
def do_unique(environment, value, case_sensitive=False, attribute=None):
- """Returns a list of unique items from the the given iterable.
+ """Returns a list of unique items from the given iterable.
.. sourcecode:: jinja
- {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
@@ -294,8 +364,7 @@ def do_unique(environment, value, case_sensitive=False, attribute=None):
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
seen = set()
@@ -313,11 +382,10 @@ def _min_or_max(environment, value, func, case_sensitive, attribute):
try:
first = next(it)
except StopIteration:
- return environment.undefined('No aggregated item, sequence was empty.')
+ return environment.undefined("No aggregated item, sequence was empty.")
key_func = make_attrgetter(
- environment, attribute,
- ignore_case if not case_sensitive else None
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return func(chain([first], it), key=key_func)
@@ -332,7 +400,7 @@ def do_min(environment, value, case_sensitive=False, attribute=None):
-> 1
:param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the max value of this attribute.
+ :param attribute: Get the object with the min value of this attribute.
"""
return _min_or_max(environment, value, min, case_sensitive, attribute)
@@ -352,7 +420,7 @@ def do_max(environment, value, case_sensitive=False, attribute=None):
return _min_or_max(environment, value, max, case_sensitive, attribute)
-def do_default(value, default_value=u'', boolean=False):
+def do_default(value, default_value=u"", boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
@@ -368,6 +436,12 @@ def do_default(value, default_value=u'', boolean=False):
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
+
+ .. versionchanged:: 2.11
+ It's now possible to configure the :class:`~jinja2.Environment` with
+ :class:`~jinja2.ChainableUndefined` to make the `default` filter work
+ on nested elements and attributes that may contain undefined values
+ in the chain without getting an :exc:`~jinja2.UndefinedError`.
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
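
A minimal sketch of the 2.11 behavior described above, assuming Jinja 2.11+ where ``ChainableUndefined`` is exported from the top-level package:

    from jinja2 import ChainableUndefined, Environment

    env = Environment(undefined=ChainableUndefined)
    tmpl = env.from_string('{{ data.missing.attr | default("n/a") }}')
    tmpl.render()  # 'n/a' instead of raising UndefinedError
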
@@ -375,7 +449,7 @@ def do_default(value, default_value=u'', boolean=False):
@evalcontextfilter
-def do_join(eval_ctx, value, d=u'', attribute=None):
+def do_join(eval_ctx, value, d=u"", attribute=None):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string by
default; you can define it with the optional parameter:
@@ -400,17 +474,17 @@ def do_join(eval_ctx, value, d=u'', attribute=None):
if attribute is not None:
value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
- # no automatic escaping? joining is a lot eaiser then
+ # no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return text_type(d).join(imap(text_type, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
- if not hasattr(d, '__html__'):
+ if not hasattr(d, "__html__"):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
- if hasattr(item, '__html__'):
+ if hasattr(item, "__html__"):
do_escape = True
else:
value[idx] = text_type(item)
@@ -435,16 +509,25 @@ def do_first(environment, seq):
try:
return next(iter(seq))
except StopIteration:
- return environment.undefined('No first item, sequence was empty.')
+ return environment.undefined("No first item, sequence was empty.")
@environmentfilter
def do_last(environment, seq):
- """Return the last item of a sequence."""
+ """
+ Return the last item of a sequence.
+
+ Note: Does not work with generators. You may want to explicitly
+ convert it to a list:
+
+ .. sourcecode:: jinja
+
+ {{ data | selectattr('name', '==', 'Jinja') | list | last }}
+ """
try:
return next(iter(reversed(seq)))
except StopIteration:
- return environment.undefined('No last item, sequence was empty.')
+ return environment.undefined("No last item, sequence was empty.")
@contextfilter
@@ -453,7 +536,7 @@ def do_random(context, seq):
try:
return random.choice(seq)
except IndexError:
- return context.environment.undefined('No random item, sequence was empty.')
+ return context.environment.undefined("No random item, sequence was empty.")
def do_filesizeformat(value, binary=False):
@@ -465,25 +548,25 @@ def do_filesizeformat(value, binary=False):
bytes = float(value)
base = binary and 1024 or 1000
prefixes = [
- (binary and 'KiB' or 'kB'),
- (binary and 'MiB' or 'MB'),
- (binary and 'GiB' or 'GB'),
- (binary and 'TiB' or 'TB'),
- (binary and 'PiB' or 'PB'),
- (binary and 'EiB' or 'EB'),
- (binary and 'ZiB' or 'ZB'),
- (binary and 'YiB' or 'YB')
+ (binary and "KiB" or "kB"),
+ (binary and "MiB" or "MB"),
+ (binary and "GiB" or "GB"),
+ (binary and "TiB" or "TB"),
+ (binary and "PiB" or "PB"),
+ (binary and "EiB" or "EB"),
+ (binary and "ZiB" or "ZB"),
+ (binary and "YiB" or "YB"),
]
if bytes == 1:
- return '1 Byte'
+ return "1 Byte"
elif bytes < base:
- return '%d Bytes' % bytes
+ return "%d Bytes" % bytes
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
- return '%.1f %s' % ((base * bytes / unit), prefix)
- return '%.1f %s' % ((base * bytes / unit), prefix)
+ return "%.1f %s" % ((base * bytes / unit), prefix)
+ return "%.1f %s" % ((base * bytes / unit), prefix)
def do_pprint(value, verbose=False):
@@ -496,8 +579,9 @@ def do_pprint(value, verbose=False):
@evalcontextfilter
-def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
- target=None, rel=None):
+def do_urlize(
+ eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
+):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
@@ -520,22 +604,20 @@ def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
The *target* parameter was added.
"""
policies = eval_ctx.environment.policies
- rel = set((rel or '').split() or [])
+ rel = set((rel or "").split() or [])
if nofollow:
- rel.add('nofollow')
- rel.update((policies['urlize.rel'] or '').split())
+ rel.add("nofollow")
+ rel.update((policies["urlize.rel"] or "").split())
if target is None:
- target = policies['urlize.target']
- rel = ' '.join(sorted(rel)) or None
+ target = policies["urlize.target"]
+ rel = " ".join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
-def do_indent(
- s, width=4, first=False, blank=False, indentfirst=None
-):
+def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
@@ -549,22 +631,31 @@ def do_indent(
Rename the ``indentfirst`` argument to ``first``.
"""
if indentfirst is not None:
- warnings.warn(DeprecationWarning(
- 'The "indentfirst" argument is renamed to "first".'
- ), stacklevel=2)
+ warnings.warn(
+ "The 'indentfirst' argument is renamed to 'first' and will"
+ " be removed in version 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
first = indentfirst
- s += u'\n' # this quirk is necessary for splitlines method
- indention = u' ' * width
+ indention = u" " * width
+ newline = u"\n"
+
+ if isinstance(s, Markup):
+ indention = Markup(indention)
+ newline = Markup(newline)
+
+ s += newline # this quirk is necessary for splitlines method
if blank:
- rv = (u'\n' + indention).join(s.splitlines())
+ rv = (newline + indention).join(s.splitlines())
else:
lines = s.splitlines()
rv = lines.pop(0)
if lines:
- rv += u'\n' + u'\n'.join(
+ rv += newline + newline.join(
indention + line if line else line for line in lines
)
@@ -575,7 +666,7 @@ def do_indent(
@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
+def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
@@ -596,46 +687,81 @@ def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
{{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-> "foo bar..."
- The default leeway on newer Jinja2 versions is 5 and was 0 before but
+ The default leeway on newer Jinja versions is 5 (it was 0 before) and
can be reconfigured globally.
"""
if leeway is None:
- leeway = env.policies['truncate.leeway']
- assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
- assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
+ leeway = env.policies["truncate.leeway"]
+ assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
+ assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
if len(s) <= length + leeway:
return s
if killwords:
- return s[:length - len(end)] + end
- result = s[:length - len(end)].rsplit(' ', 1)[0]
+ return s[: length - len(end)] + end
+ result = s[: length - len(end)].rsplit(" ", 1)[0]
return result + end
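
To make the leeway rule above concrete, a sketch using the docstring's example string:

    s = "foo bar baz qux"  # len(s) == 15
    # default leeway 5: 15 <= 11 + 5, so truncate(s, 11) returns s unchanged
    # leeway 0: 15 > 11, so s[:11 - 3] == "foo bar " is cut back to the
    # last space and "..." appended  -> "foo bar..."
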
@environmentfilter
-def do_wordwrap(environment, s, width=79, break_long_words=True,
- wrapstring=None):
+def do_wordwrap(
+ environment,
+ s,
+ width=79,
+ break_long_words=True,
+ wrapstring=None,
+ break_on_hyphens=True,
+):
+ """Wrap a string to the given width. Existing newlines are treated
+ as paragraphs to be wrapped separately.
+
+ :param s: Original text to wrap.
+ :param width: Maximum length of wrapped lines.
+ :param break_long_words: If a word is longer than ``width``, break
+ it across lines.
+ :param break_on_hyphens: If a word contains hyphens, it may be split
+ across lines.
+ :param wrapstring: String to join each wrapped line. Defaults to
+ :attr:`Environment.newline_sequence`.
+
+ .. versionchanged:: 2.11
+ Existing newlines are treated as paragraphs wrapped separately.
+
+ .. versionchanged:: 2.11
+ Added the ``break_on_hyphens`` parameter.
+
+ .. versionchanged:: 2.7
+ Added the ``wrapstring`` parameter.
"""
- Return a copy of the string passed to the filter wrapped after
- ``79`` characters. You can override this default using the first
- parameter. If you set the second parameter to `false` Jinja will not
- split words apart if they are longer than `width`. By default, the newlines
- will be the default newlines for the environment, but this can be changed
- using the wrapstring keyword argument.
- .. versionadded:: 2.7
- Added support for the `wrapstring` parameter.
- """
+ import textwrap
+
if not wrapstring:
wrapstring = environment.newline_sequence
- import textwrap
- return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
- replace_whitespace=False,
- break_long_words=break_long_words))
+
+ # textwrap.wrap doesn't consider existing newlines when wrapping.
+ # If the string has a newline before width, wrap will still insert
+ # a newline at width, resulting in a short line. Instead, split and
+ # wrap each paragraph individually.
+ return wrapstring.join(
+ [
+ wrapstring.join(
+ textwrap.wrap(
+ line,
+ width=width,
+ expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words,
+ break_on_hyphens=break_on_hyphens,
+ )
+ )
+ for line in s.splitlines()
+ ]
+ )
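
The per-paragraph strategy the comment above describes can be sketched without the filter machinery (plain stdlib):

    import textwrap

    text = "short\nthis is a much longer paragraph"
    "\n".join(
        "\n".join(textwrap.wrap(line, width=12))
        for line in text.splitlines()
    )
    # -> 'short\nthis is a\nmuch longer\nparagraph'
    # "short" keeps its own line; only the long paragraph is re-wrapped
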
def do_wordcount(s):
"""Count the words in that string."""
- return len(_word_re.findall(s))
+ return len(_word_re.findall(soft_unicode(s)))
def do_int(value, default=0, base=10):
@@ -671,29 +797,40 @@ def do_float(value, default=0.0):
def do_format(value, *args, **kwargs):
- """
- Apply python string formatting on an object:
+ """Apply the given values to a `printf-style`_ format string, like
+ ``string % values``.
.. sourcecode:: jinja
- {{ "%s - %s"|format("Hello?", "Foo!") }}
- -> Hello? - Foo!
+ {{ "%s, %s!"|format(greeting, name) }}
+ Hello, World!
+
+ In most cases it should be more convenient and efficient to use the
+ ``%`` operator or :meth:`str.format`.
+
+ .. code-block:: text
+
+ {{ "%s, %s!" % (greeting, name) }}
+ {{ "{}, {}!".format(greeting, name) }}
+
+ .. _printf-style: https://docs.python.org/library/stdtypes.html
+ #printf-style-string-formatting
"""
if args and kwargs:
- raise FilterArgumentError('can\'t handle positional and keyword '
- 'arguments at the same time')
+ raise FilterArgumentError(
+ "can't handle positional and keyword arguments at the same time"
+ )
return soft_unicode(value) % (kwargs or args)
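
The ``kwargs or args`` expression above works because Python's ``%`` operator accepts either a tuple or a mapping; a quick sketch:

    "%s, %s!" % ("Hello", "World")   # 'Hello, World!'
    "%(name)s!" % {"name": "World"}  # 'World!'
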
-def do_trim(value):
- """Strip leading and trailing whitespace."""
- return soft_unicode(value).strip()
+def do_trim(value, chars=None):
+ """Strip leading and trailing characters, by default whitespace."""
+ return soft_unicode(value).strip(chars)
def do_striptags(value):
- """Strip SGML/XML tags and replace adjacent whitespace by one space.
- """
- if hasattr(value, '__html__'):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space."""
+ if hasattr(value, "__html__"):
value = value.__html__()
return Markup(text_type(value)).striptags()
@@ -705,7 +842,7 @@ def do_slice(value, slices, fill_with=None):
.. sourcecode:: html+jinja
- <div class="columwrapper">
+ <div class="columnwrapper">
{%- for column in items|slice(3) %}
<ul class="column-{{ loop.index }}">
{%- for item in column %}
@@ -765,7 +902,7 @@ def do_batch(value, linecount, fill_with=None):
yield tmp
-def do_round(value, precision=0, method='common'):
+def do_round(value, precision=0, method="common"):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
@@ -791,9 +928,9 @@ def do_round(value, precision=0, method='common'):
{{ 42.55|round|int }}
-> 43
"""
- if not method in ('common', 'ceil', 'floor'):
- raise FilterArgumentError('method must be common, ceil or floor')
- if method == 'common':
+ if method not in {"common", "ceil", "floor"}:
+ raise FilterArgumentError("method must be common, ceil or floor")
+ if method == "common":
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
@@ -804,52 +941,51 @@ def do_round(value, precision=0, method='common'):
# we do not want to accidentally expose an auto generated repr in case
# people start to print this out in comments or something similar for
# debugging.
-_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
+_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
_GroupTuple.__repr__ = tuple.__repr__
_GroupTuple.__str__ = tuple.__str__
+
@environmentfilter
def do_groupby(environment, value, attribute):
- """Group a sequence of objects by a common attribute.
+ """Group a sequence of objects by an attribute using Python's
+ :func:`itertools.groupby`. The attribute can use dot notation for
+ nested access, like ``"address.city"``. Unlike Python's ``groupby``,
+ the values are sorted first so only one group is returned for each
+ unique value.
- If you for example have a list of dicts or objects that represent persons
- with `gender`, `first_name` and `last_name` attributes and you want to
- group all users by genders you can do something like the following
- snippet:
+ For example, a list of ``User`` objects with a ``city`` attribute
+ can be rendered in groups. In this example, ``grouper`` refers to
+ the ``city`` value of the group.
.. sourcecode:: html+jinja
- <ul>
- {% for group in persons|groupby('gender') %}
- <li>{{ group.grouper }}<ul>
- {% for person in group.list %}
- <li>{{ person.first_name }} {{ person.last_name }}</li>
- {% endfor %}</ul></li>
- {% endfor %}
- </ul>
+ <ul>{% for city, items in users|groupby("city") %}
+ <li>{{ city }}
+ <ul>{% for user in items %}
+ <li>{{ user.name }}
+ {% endfor %}</ul>
+ </li>
+ {% endfor %}</ul>
- Additionally it's possible to use tuple unpacking for the grouper and
- list:
+ ``groupby`` yields namedtuples of ``(grouper, list)``, which
+ can be used instead of the tuple unpacking above. ``grouper`` is the
+ value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
- <ul>
- {% for grouper, list in persons|groupby('gender') %}
- ...
- {% endfor %}
- </ul>
-
- As you can see the item we're grouping by is stored in the `grouper`
- attribute and the `list` contains all the objects that have this grouper
- in common.
+ <ul>{% for group in users|groupby("city") %}
+ <li>{{ group.grouper }}: {{ group.list|join(", ") }}
+ {% endfor %}</ul>
.. versionchanged:: 2.6
- It's now possible to use dotted notation to group by the child
- attribute of another attribute.
+ The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(environment, attribute)
- return [_GroupTuple(key, list(values)) for key, values
- in groupby(sorted(value, key=expr), expr)]
+ return [
+ _GroupTuple(key, list(values))
+ for key, values in groupby(sorted(value, key=expr), expr)
+ ]
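
A minimal sketch of the sort-then-group pattern above, with hypothetical tuples:

    from itertools import groupby
    from operator import itemgetter

    rows = [("b", 1), ("a", 2), ("b", 3)]
    key = itemgetter(0)
    [(k, list(v)) for k, v in groupby(sorted(rows, key=key), key)]
    # -> [('a', [('a', 2)]), ('b', [('b', 1), ('b', 3)])]
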
@environmentfilter
@@ -906,7 +1042,7 @@ def do_reverse(value):
rv.reverse()
return rv
except TypeError:
- raise FilterArgumentError('argument must be iterable')
+ raise FilterArgumentError("argument must be iterable")
@environmentfilter
@@ -927,8 +1063,9 @@ def do_attr(environment, obj, name):
except AttributeError:
pass
else:
- if environment.sandboxed and not \
- environment.is_safe_attribute(obj, name, value):
+ if environment.sandboxed and not environment.is_safe_attribute(
+ obj, name, value
+ ):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@@ -947,6 +1084,13 @@ def do_map(*args, **kwargs):
Users on this page: {{ users|map(attribute='username')|join(', ') }}
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
+
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
@@ -955,6 +1099,17 @@ def do_map(*args, **kwargs):
Users on this page: {{ titles|map('lower')|join(', ') }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u.username for u in users)
+ (u.username or "Anonymous" for u in users)
+ (do_lower(x) for x in titles)
+
+ .. versionchanged:: 2.11.0
+ Added the ``default`` parameter.
+
.. versionadded:: 2.7
"""
seq, func = prepare_map(args, kwargs)
@@ -980,6 +1135,13 @@ def do_select(*args, **kwargs):
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if test_odd(n))
+ (n for n in numbers if test_divisibleby(n, 3))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, False)
@@ -998,6 +1160,12 @@ def do_reject(*args, **kwargs):
{{ numbers|reject("odd") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if not test_odd(n))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, False)
@@ -1019,6 +1187,13 @@ def do_selectattr(*args, **kwargs):
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u for u in users if u.is_active)
+ (u for u in users if test_none(u.email))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, True)
@@ -1038,6 +1213,13 @@ def do_rejectattr(*args, **kwargs):
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u for u in users if not u.is_active)
+ (u for u in users if not test_none(u.email))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, True)
@@ -1070,32 +1252,38 @@ def do_tojson(eval_ctx, value, indent=None):
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
- dumper = policies['json.dumps_function']
- options = policies['json.dumps_kwargs']
+ dumper = policies["json.dumps_function"]
+ options = policies["json.dumps_kwargs"]
if indent is not None:
options = dict(options)
- options['indent'] = indent
+ options["indent"] = indent
return htmlsafe_json_dumps(value, dumper=dumper, **options)
def prepare_map(args, kwargs):
context = args[0]
seq = args[1]
+ default = None
- if len(args) == 2 and 'attribute' in kwargs:
- attribute = kwargs.pop('attribute')
+ if len(args) == 2 and "attribute" in kwargs:
+ attribute = kwargs.pop("attribute")
+ default = kwargs.pop("default", None)
if kwargs:
- raise FilterArgumentError('Unexpected keyword argument %r' %
- next(iter(kwargs)))
- func = make_attrgetter(context.environment, attribute)
+ raise FilterArgumentError(
+ "Unexpected keyword argument %r" % next(iter(kwargs))
+ )
+ func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
- raise FilterArgumentError('map requires a filter argument')
- func = lambda item: context.environment.call_filter(
- name, item, args, kwargs, context=context)
+ raise FilterArgumentError("map requires a filter argument")
+
+ def func(item):
+ return context.environment.call_filter(
+ name, item, args, kwargs, context=context
+ )
return seq, func
@@ -1107,18 +1295,22 @@ def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
try:
attr = args[2]
except LookupError:
- raise FilterArgumentError('Missing parameter for attribute name')
+ raise FilterArgumentError("Missing parameter for attribute name")
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
- transfunc = lambda x: x
+
+ def transfunc(x):
+ return x
try:
name = args[2 + off]
- args = args[3 + off:]
- func = lambda item: context.environment.call_test(
- name, item, args, kwargs)
+ args = args[3 + off :]
+
+ def func(item):
+ return context.environment.call_test(name, item, args, kwargs)
+
except LookupError:
func = bool
@@ -1134,57 +1326,57 @@ def select_or_reject(args, kwargs, modfunc, lookup_attr):
FILTERS = {
- 'abs': abs,
- 'attr': do_attr,
- 'batch': do_batch,
- 'capitalize': do_capitalize,
- 'center': do_center,
- 'count': len,
- 'd': do_default,
- 'default': do_default,
- 'dictsort': do_dictsort,
- 'e': escape,
- 'escape': escape,
- 'filesizeformat': do_filesizeformat,
- 'first': do_first,
- 'float': do_float,
- 'forceescape': do_forceescape,
- 'format': do_format,
- 'groupby': do_groupby,
- 'indent': do_indent,
- 'int': do_int,
- 'join': do_join,
- 'last': do_last,
- 'length': len,
- 'list': do_list,
- 'lower': do_lower,
- 'map': do_map,
- 'min': do_min,
- 'max': do_max,
- 'pprint': do_pprint,
- 'random': do_random,
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'replace': do_replace,
- 'reverse': do_reverse,
- 'round': do_round,
- 'safe': do_mark_safe,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'slice': do_slice,
- 'sort': do_sort,
- 'string': soft_unicode,
- 'striptags': do_striptags,
- 'sum': do_sum,
- 'title': do_title,
- 'trim': do_trim,
- 'truncate': do_truncate,
- 'unique': do_unique,
- 'upper': do_upper,
- 'urlencode': do_urlencode,
- 'urlize': do_urlize,
- 'wordcount': do_wordcount,
- 'wordwrap': do_wordwrap,
- 'xmlattr': do_xmlattr,
- 'tojson': do_tojson,
+ "abs": abs,
+ "attr": do_attr,
+ "batch": do_batch,
+ "capitalize": do_capitalize,
+ "center": do_center,
+ "count": len,
+ "d": do_default,
+ "default": do_default,
+ "dictsort": do_dictsort,
+ "e": escape,
+ "escape": escape,
+ "filesizeformat": do_filesizeformat,
+ "first": do_first,
+ "float": do_float,
+ "forceescape": do_forceescape,
+ "format": do_format,
+ "groupby": do_groupby,
+ "indent": do_indent,
+ "int": do_int,
+ "join": do_join,
+ "last": do_last,
+ "length": len,
+ "list": do_list,
+ "lower": do_lower,
+ "map": do_map,
+ "min": do_min,
+ "max": do_max,
+ "pprint": do_pprint,
+ "random": do_random,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "replace": do_replace,
+ "reverse": do_reverse,
+ "round": do_round,
+ "safe": do_mark_safe,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "slice": do_slice,
+ "sort": do_sort,
+ "string": soft_unicode,
+ "striptags": do_striptags,
+ "sum": do_sum,
+ "title": do_title,
+ "trim": do_trim,
+ "truncate": do_truncate,
+ "unique": do_unique,
+ "upper": do_upper,
+ "urlencode": do_urlencode,
+ "urlize": do_urlize,
+ "wordcount": do_wordcount,
+ "wordwrap": do_wordwrap,
+ "xmlattr": do_xmlattr,
+ "tojson": do_tojson,
}
diff --git a/lib/spack/external/jinja2/idtracking.py b/lib/spack/external/jinja2/idtracking.py
index 491bfe0836..9a0d838017 100644
--- a/lib/spack/external/jinja2/idtracking.py
+++ b/lib/spack/external/jinja2/idtracking.py
@@ -1,11 +1,10 @@
-from jinja2.visitor import NodeVisitor
-from jinja2._compat import iteritems
+from ._compat import iteritems
+from .visitor import NodeVisitor
-
-VAR_LOAD_PARAMETER = 'param'
-VAR_LOAD_RESOLVE = 'resolve'
-VAR_LOAD_ALIAS = 'alias'
-VAR_LOAD_UNDEFINED = 'undefined'
+VAR_LOAD_PARAMETER = "param"
+VAR_LOAD_RESOLVE = "resolve"
+VAR_LOAD_ALIAS = "alias"
+VAR_LOAD_UNDEFINED = "undefined"
def find_symbols(nodes, parent_symbols=None):
@@ -23,7 +22,6 @@ def symbols_for_node(node, parent_symbols=None):
class Symbols(object):
-
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
@@ -41,7 +39,7 @@ class Symbols(object):
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
- ident = 'l_%d_%s' % (self.level, name)
+ ident = "l_%d_%s" % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
@@ -62,8 +60,10 @@ class Symbols(object):
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
- raise AssertionError('Tried to resolve a name to a reference that '
- 'was unknown to the frame (%r)' % name)
+ raise AssertionError(
+ "Tried to resolve a name to a reference that "
+ "was unknown to the frame (%r)" % name
+ )
return rv
def copy(self):
@@ -118,7 +118,7 @@ class Symbols(object):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
- assert target is not None, 'should not happen'
+ assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
@@ -149,7 +149,6 @@ class Symbols(object):
class RootVisitor(NodeVisitor):
-
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
@@ -157,35 +156,39 @@ class RootVisitor(NodeVisitor):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
- visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
- visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
- _simple_visit
+ visit_Template = (
+ visit_Block
+ ) = (
+ visit_Macro
+ ) = (
+ visit_FilterBlock
+ ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
- for child in node.iter_child_nodes(exclude=('call',)):
+ for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
- def visit_For(self, node, for_branch='body', **kwargs):
- if for_branch == 'body':
+ def visit_For(self, node, for_branch="body", **kwargs):
+ if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
- elif for_branch == 'else':
+ elif for_branch == "else":
branch = node.else_
- elif for_branch == 'test':
+ elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
- raise RuntimeError('Unknown for branch')
+ raise RuntimeError("Unknown for branch")
for item in branch or ():
self.sym_visitor.visit(item)
@@ -196,8 +199,9 @@ class RootVisitor(NodeVisitor):
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
- raise NotImplementedError('Cannot find symbols for %r' %
- node.__class__.__name__)
+ raise NotImplementedError(
+ "Cannot find symbols for %r" % node.__class__.__name__
+ )
class FrameSymbolVisitor(NodeVisitor):
@@ -208,11 +212,11 @@ class FrameSymbolVisitor(NodeVisitor):
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
- if store_as_param or node.ctx == 'param':
+ if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
- elif node.ctx == 'store':
+ elif node.ctx == "store":
self.symbols.store(node.name)
- elif node.ctx == 'load':
+ elif node.ctx == "load":
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
diff --git a/lib/spack/external/jinja2/lexer.py b/lib/spack/external/jinja2/lexer.py
index 6fd135dd5b..552356a12d 100644
--- a/lib/spack/external/jinja2/lexer.py
+++ b/lib/spack/external/jinja2/lexer.py
@@ -1,185 +1,194 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.lexer
- ~~~~~~~~~~~~
-
- This module implements a Jinja / Python combination lexer. The
- `Lexer` class provided by this module is used to do some preprocessing
- for Jinja.
-
- On the one hand it filters out invalid operators like the bitshift
- operators we don't allow in templates. On the other hand it separates
- template code and python code in expressions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
+is used to do some preprocessing. It filters out invalid operators like
+the bitshift operators we don't allow in templates. It separates
+template code and python code in expressions.
"""
import re
+from ast import literal_eval
from collections import deque
from operator import itemgetter
-from jinja2._compat import implements_iterator, intern, iteritems, text_type
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2.utils import LRUCache
+from ._compat import implements_iterator
+from ._compat import intern
+from ._compat import iteritems
+from ._compat import text_type
+from .exceptions import TemplateSyntaxError
+from .utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
-whitespace_re = re.compile(r'\s+', re.U)
-string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
- r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
-integer_re = re.compile(r'\d+')
+whitespace_re = re.compile(r"\s+", re.U)
+newline_re = re.compile(r"(\r\n|\r|\n)")
+string_re = re.compile(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
+)
+integer_re = re.compile(r"(\d+_)*\d+")
+float_re = re.compile(
+ r"""
+ (?<!\.) # doesn't start with a .
+ (\d+_)*\d+ # digits, possibly _ separated
+ (
+ (\.(\d+_)*\d+)? # optional fractional part
+ e[+\-]?(\d+_)*\d+ # exponent part
+ |
+ \.(\d+_)*\d+ # required fractional part
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
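
The reworked patterns accept PEP 515 style underscore separators; the wrapper later strips them before converting, so older Pythons work too. A quick sketch:

    import re

    integer_re = re.compile(r"(\d+_)*\d+")
    bool(integer_re.match("1_000"))   # True
    int("1_000".replace("_", ""))     # 1000 on any supported Python
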
try:
# check if this Python supports Unicode identifiers
- compile('föö', '<unknown>', 'eval')
+ compile("föö", "<unknown>", "eval")
except SyntaxError:
- # no Unicode support, use ASCII identifiers
- name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
+ # Python 2, no Unicode support, use ASCII identifiers
+ name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
check_ident = False
else:
- # Unicode support, build a pattern to match valid characters, and set flag
- # to use str.isidentifier to validate during lexing
- from jinja2 import _identifier
- name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
- check_ident = True
- # remove the pattern from memory after building the regex
- import sys
- del sys.modules['jinja2._identifier']
- import jinja2
- del jinja2._identifier
- del _identifier
+ # Unicode support, import generated re pattern and set flag to use
+ # str.isidentifier to validate during lexing.
+ from ._identifier import pattern as name_re
-float_re = re.compile(r'(?<!\.)\d+\.\d+')
-newline_re = re.compile(r'(\r\n|\r|\n)')
+ check_ident = True
# intern the tokens and keep references to them
-TOKEN_ADD = intern('add')
-TOKEN_ASSIGN = intern('assign')
-TOKEN_COLON = intern('colon')
-TOKEN_COMMA = intern('comma')
-TOKEN_DIV = intern('div')
-TOKEN_DOT = intern('dot')
-TOKEN_EQ = intern('eq')
-TOKEN_FLOORDIV = intern('floordiv')
-TOKEN_GT = intern('gt')
-TOKEN_GTEQ = intern('gteq')
-TOKEN_LBRACE = intern('lbrace')
-TOKEN_LBRACKET = intern('lbracket')
-TOKEN_LPAREN = intern('lparen')
-TOKEN_LT = intern('lt')
-TOKEN_LTEQ = intern('lteq')
-TOKEN_MOD = intern('mod')
-TOKEN_MUL = intern('mul')
-TOKEN_NE = intern('ne')
-TOKEN_PIPE = intern('pipe')
-TOKEN_POW = intern('pow')
-TOKEN_RBRACE = intern('rbrace')
-TOKEN_RBRACKET = intern('rbracket')
-TOKEN_RPAREN = intern('rparen')
-TOKEN_SEMICOLON = intern('semicolon')
-TOKEN_SUB = intern('sub')
-TOKEN_TILDE = intern('tilde')
-TOKEN_WHITESPACE = intern('whitespace')
-TOKEN_FLOAT = intern('float')
-TOKEN_INTEGER = intern('integer')
-TOKEN_NAME = intern('name')
-TOKEN_STRING = intern('string')
-TOKEN_OPERATOR = intern('operator')
-TOKEN_BLOCK_BEGIN = intern('block_begin')
-TOKEN_BLOCK_END = intern('block_end')
-TOKEN_VARIABLE_BEGIN = intern('variable_begin')
-TOKEN_VARIABLE_END = intern('variable_end')
-TOKEN_RAW_BEGIN = intern('raw_begin')
-TOKEN_RAW_END = intern('raw_end')
-TOKEN_COMMENT_BEGIN = intern('comment_begin')
-TOKEN_COMMENT_END = intern('comment_end')
-TOKEN_COMMENT = intern('comment')
-TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
-TOKEN_LINESTATEMENT_END = intern('linestatement_end')
-TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
-TOKEN_LINECOMMENT_END = intern('linecomment_end')
-TOKEN_LINECOMMENT = intern('linecomment')
-TOKEN_DATA = intern('data')
-TOKEN_INITIAL = intern('initial')
-TOKEN_EOF = intern('eof')
+TOKEN_ADD = intern("add")
+TOKEN_ASSIGN = intern("assign")
+TOKEN_COLON = intern("colon")
+TOKEN_COMMA = intern("comma")
+TOKEN_DIV = intern("div")
+TOKEN_DOT = intern("dot")
+TOKEN_EQ = intern("eq")
+TOKEN_FLOORDIV = intern("floordiv")
+TOKEN_GT = intern("gt")
+TOKEN_GTEQ = intern("gteq")
+TOKEN_LBRACE = intern("lbrace")
+TOKEN_LBRACKET = intern("lbracket")
+TOKEN_LPAREN = intern("lparen")
+TOKEN_LT = intern("lt")
+TOKEN_LTEQ = intern("lteq")
+TOKEN_MOD = intern("mod")
+TOKEN_MUL = intern("mul")
+TOKEN_NE = intern("ne")
+TOKEN_PIPE = intern("pipe")
+TOKEN_POW = intern("pow")
+TOKEN_RBRACE = intern("rbrace")
+TOKEN_RBRACKET = intern("rbracket")
+TOKEN_RPAREN = intern("rparen")
+TOKEN_SEMICOLON = intern("semicolon")
+TOKEN_SUB = intern("sub")
+TOKEN_TILDE = intern("tilde")
+TOKEN_WHITESPACE = intern("whitespace")
+TOKEN_FLOAT = intern("float")
+TOKEN_INTEGER = intern("integer")
+TOKEN_NAME = intern("name")
+TOKEN_STRING = intern("string")
+TOKEN_OPERATOR = intern("operator")
+TOKEN_BLOCK_BEGIN = intern("block_begin")
+TOKEN_BLOCK_END = intern("block_end")
+TOKEN_VARIABLE_BEGIN = intern("variable_begin")
+TOKEN_VARIABLE_END = intern("variable_end")
+TOKEN_RAW_BEGIN = intern("raw_begin")
+TOKEN_RAW_END = intern("raw_end")
+TOKEN_COMMENT_BEGIN = intern("comment_begin")
+TOKEN_COMMENT_END = intern("comment_end")
+TOKEN_COMMENT = intern("comment")
+TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
+TOKEN_LINESTATEMENT_END = intern("linestatement_end")
+TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
+TOKEN_LINECOMMENT_END = intern("linecomment_end")
+TOKEN_LINECOMMENT = intern("linecomment")
+TOKEN_DATA = intern("data")
+TOKEN_INITIAL = intern("initial")
+TOKEN_EOF = intern("eof")
# bind operators to token types
operators = {
- '+': TOKEN_ADD,
- '-': TOKEN_SUB,
- '/': TOKEN_DIV,
- '//': TOKEN_FLOORDIV,
- '*': TOKEN_MUL,
- '%': TOKEN_MOD,
- '**': TOKEN_POW,
- '~': TOKEN_TILDE,
- '[': TOKEN_LBRACKET,
- ']': TOKEN_RBRACKET,
- '(': TOKEN_LPAREN,
- ')': TOKEN_RPAREN,
- '{': TOKEN_LBRACE,
- '}': TOKEN_RBRACE,
- '==': TOKEN_EQ,
- '!=': TOKEN_NE,
- '>': TOKEN_GT,
- '>=': TOKEN_GTEQ,
- '<': TOKEN_LT,
- '<=': TOKEN_LTEQ,
- '=': TOKEN_ASSIGN,
- '.': TOKEN_DOT,
- ':': TOKEN_COLON,
- '|': TOKEN_PIPE,
- ',': TOKEN_COMMA,
- ';': TOKEN_SEMICOLON
+ "+": TOKEN_ADD,
+ "-": TOKEN_SUB,
+ "/": TOKEN_DIV,
+ "//": TOKEN_FLOORDIV,
+ "*": TOKEN_MUL,
+ "%": TOKEN_MOD,
+ "**": TOKEN_POW,
+ "~": TOKEN_TILDE,
+ "[": TOKEN_LBRACKET,
+ "]": TOKEN_RBRACKET,
+ "(": TOKEN_LPAREN,
+ ")": TOKEN_RPAREN,
+ "{": TOKEN_LBRACE,
+ "}": TOKEN_RBRACE,
+ "==": TOKEN_EQ,
+ "!=": TOKEN_NE,
+ ">": TOKEN_GT,
+ ">=": TOKEN_GTEQ,
+ "<": TOKEN_LT,
+ "<=": TOKEN_LTEQ,
+ "=": TOKEN_ASSIGN,
+ ".": TOKEN_DOT,
+ ":": TOKEN_COLON,
+ "|": TOKEN_PIPE,
+ ",": TOKEN_COMMA,
+ ";": TOKEN_SEMICOLON,
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
-assert len(operators) == len(reverse_operators), 'operators dropped'
-operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
- sorted(operators, key=lambda x: -len(x))))
-
-ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
- TOKEN_COMMENT_END, TOKEN_WHITESPACE,
- TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
- TOKEN_LINECOMMENT])
-ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
- TOKEN_COMMENT, TOKEN_LINECOMMENT])
+assert len(operators) == len(reverse_operators), "operators dropped"
+operator_re = re.compile(
+ "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
+)
+
+ignored_tokens = frozenset(
+ [
+ TOKEN_COMMENT_BEGIN,
+ TOKEN_COMMENT,
+ TOKEN_COMMENT_END,
+ TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN,
+ TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT,
+ ]
+)
+ignore_if_empty = frozenset(
+ [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
+)
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
- TOKEN_COMMENT_BEGIN: 'begin of comment',
- TOKEN_COMMENT_END: 'end of comment',
- TOKEN_COMMENT: 'comment',
- TOKEN_LINECOMMENT: 'comment',
- TOKEN_BLOCK_BEGIN: 'begin of statement block',
- TOKEN_BLOCK_END: 'end of statement block',
- TOKEN_VARIABLE_BEGIN: 'begin of print statement',
- TOKEN_VARIABLE_END: 'end of print statement',
- TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
- TOKEN_LINESTATEMENT_END: 'end of line statement',
- TOKEN_DATA: 'template data / text',
- TOKEN_EOF: 'end of template'
+ TOKEN_COMMENT_BEGIN: "begin of comment",
+ TOKEN_COMMENT_END: "end of comment",
+ TOKEN_COMMENT: "comment",
+ TOKEN_LINECOMMENT: "comment",
+ TOKEN_BLOCK_BEGIN: "begin of statement block",
+ TOKEN_BLOCK_END: "end of statement block",
+ TOKEN_VARIABLE_BEGIN: "begin of print statement",
+ TOKEN_VARIABLE_END: "end of print statement",
+ TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
+ TOKEN_LINESTATEMENT_END: "end of line statement",
+ TOKEN_DATA: "template data / text",
+ TOKEN_EOF: "end of template",
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
- if token.type == 'name':
+ if token.type == TOKEN_NAME:
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
- if ':' in expr:
- type, value = expr.split(':', 1)
- if type == 'name':
+ if ":" in expr:
+ type, value = expr.split(":", 1)
+ if type == TOKEN_NAME:
return value
else:
type = expr
@@ -197,21 +206,39 @@ def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
- (len(environment.comment_start_string), 'comment',
- e(environment.comment_start_string)),
- (len(environment.block_start_string), 'block',
- e(environment.block_start_string)),
- (len(environment.variable_start_string), 'variable',
- e(environment.variable_start_string))
+ (
+ len(environment.comment_start_string),
+ TOKEN_COMMENT_BEGIN,
+ e(environment.comment_start_string),
+ ),
+ (
+ len(environment.block_start_string),
+ TOKEN_BLOCK_BEGIN,
+ e(environment.block_start_string),
+ ),
+ (
+ len(environment.variable_start_string),
+ TOKEN_VARIABLE_BEGIN,
+ e(environment.variable_start_string),
+ ),
]
if environment.line_statement_prefix is not None:
- rules.append((len(environment.line_statement_prefix), 'linestatement',
- r'^[ \t\v]*' + e(environment.line_statement_prefix)))
+ rules.append(
+ (
+ len(environment.line_statement_prefix),
+ TOKEN_LINESTATEMENT_BEGIN,
+ r"^[ \t\v]*" + e(environment.line_statement_prefix),
+ )
+ )
if environment.line_comment_prefix is not None:
- rules.append((len(environment.line_comment_prefix), 'linecomment',
- r'(?:^|(?<=\S))[^\S\r\n]*' +
- e(environment.line_comment_prefix)))
+ rules.append(
+ (
+ len(environment.line_comment_prefix),
+ TOKEN_LINECOMMENT_BEGIN,
+ r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
+ )
+ )
return [x[1:] for x in sorted(rules, reverse=True)]
@@ -231,6 +258,7 @@ class Failure(object):
class Token(tuple):
"""Token class."""
+
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
@@ -240,7 +268,7 @@ class Token(tuple):
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
- elif self.type == 'name':
+ elif self.type == "name":
return self.value
return self.type
@@ -253,8 +281,8 @@ class Token(tuple):
# passed an iterable of non-interned strings.
if self.type == expr:
return True
- elif ':' in expr:
- return expr.split(':', 1) == [self.type, self.value]
+ elif ":" in expr:
+ return expr.split(":", 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
@@ -265,11 +293,7 @@ class Token(tuple):
return False
def __repr__(self):
- return 'Token(%r, %r, %r)' % (
- self.lineno,
- self.type,
- self.value
- )
+ return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
@implements_iterator
@@ -306,7 +330,7 @@ class TokenStream(object):
self.name = name
self.filename = filename
self.closed = False
- self.current = Token(1, TOKEN_INITIAL, '')
+ self.current = Token(1, TOKEN_INITIAL, "")
next(self)
def __iter__(self):
@@ -314,9 +338,13 @@ class TokenStream(object):
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
+
__nonzero__ = __bool__ # py2
- eos = property(lambda x: not x, doc="Are we at the end of the stream?")
+ @property
+ def eos(self):
+ """Are we at the end of the stream?"""
+ return not self
def push(self, token):
"""Push a token back to the stream."""
@@ -332,7 +360,7 @@ class TokenStream(object):
def skip(self, n=1):
"""Got n tokens ahead."""
- for x in range(n):
+ for _ in range(n):
next(self)
def next_if(self, expr):
@@ -363,7 +391,7 @@ class TokenStream(object):
def close(self):
"""Close the stream."""
- self.current = Token(self.current.lineno, TOKEN_EOF, '')
+ self.current = Token(self.current.lineno, TOKEN_EOF, "")
self._iter = None
self.closed = True
@@ -374,14 +402,18 @@ class TokenStream(object):
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
- raise TemplateSyntaxError('unexpected end of template, '
- 'expected %r.' % expr,
- self.current.lineno,
- self.name, self.filename)
- raise TemplateSyntaxError("expected token %r, got %r" %
- (expr, describe_token(self.current)),
- self.current.lineno,
- self.name, self.filename)
+ raise TemplateSyntaxError(
+ "unexpected end of template, expected %r." % expr,
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+ raise TemplateSyntaxError(
+ "expected token %r, got %r" % (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
try:
return self.current
finally:
@@ -390,18 +422,20 @@ class TokenStream(object):
def get_lexer(environment):
"""Return a lexer which is probably cached."""
- key = (environment.block_start_string,
- environment.block_end_string,
- environment.variable_start_string,
- environment.variable_end_string,
- environment.comment_start_string,
- environment.comment_end_string,
- environment.line_statement_prefix,
- environment.line_comment_prefix,
- environment.trim_blocks,
- environment.lstrip_blocks,
- environment.newline_sequence,
- environment.keep_trailing_newline)
+ key = (
+ environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline,
+ )
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
@@ -409,6 +443,19 @@ def get_lexer(environment):
return lexer
+class OptionalLStrip(tuple):
+ """A special tuple for marking a point in the state that can have
+ lstrip applied.
+ """
+
+ __slots__ = ()
+
+ # Even though it looks like a no-op, creating instances fails
+ # without this.
+ def __new__(cls, *members, **kwargs):
+ return super(OptionalLStrip, cls).__new__(cls, members)
+
+
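
A sketch of why the ``__new__`` override above is needed, using a hypothetical ``Tagged`` marker built the same way:

    class Tagged(tuple):
        __slots__ = ()

        def __new__(cls, *members, **kwargs):
            return super(Tagged, cls).__new__(cls, members)

    Tagged("data", "#bygroup")  # ('data', '#bygroup'), isinstance-checkable
    # plain tuple("data", "#bygroup") raises TypeError (at most 1 argument),
    # so subclassing alone would not allow this constructor form
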
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class; usually you don't have to do that.
@@ -419,9 +466,11 @@ class Lexer(object):
def __init__(self, environment):
# shortcuts
- c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
+ def c(x):
+ return re.compile(x, re.M | re.S)
+
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
@@ -429,7 +478,7 @@ class Lexer(object):
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
- (operator_re, TOKEN_OPERATOR, None)
+ (operator_re, TOKEN_OPERATOR, None),
]
# assemble the root lexing rule. because "|" is ungreedy
@@ -441,108 +490,120 @@ class Lexer(object):
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
- block_suffix_re = environment.trim_blocks and '\\n?' or ''
-
- # strip leading spaces if lstrip_blocks is enabled
- prefix_re = {}
- if environment.lstrip_blocks:
- # use '{%+' to manually disable lstrip_blocks behavior
- no_lstrip_re = e('+')
- # detect overlap between block and variable or comment strings
- block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
- # make sure we don't mistake a block for a variable or a comment
- m = block_diff.match(environment.comment_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
- m = block_diff.match(environment.variable_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
-
- # detect overlap between comment and variable strings
- comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
- m = comment_diff.match(environment.variable_start_string)
- no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
-
- lstrip_re = r'^[ \t]*'
- block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
- lstrip_re,
- e(environment.block_start_string),
- no_lstrip_re,
- e(environment.block_start_string),
- )
- comment_prefix_re = r'%s%s%s|%s\+?' % (
- lstrip_re,
- e(environment.comment_start_string),
- no_variable_re,
- e(environment.comment_start_string),
- )
- prefix_re['block'] = block_prefix_re
- prefix_re['comment'] = comment_prefix_re
- else:
- block_prefix_re = '%s' % e(environment.block_start_string)
+ block_suffix_re = environment.trim_blocks and "\\n?" or ""
+
+ # If lstrip is enabled, it should not be applied if there is any
+ # non-whitespace between the newline and block.
+ self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
- 'root': [
+ "root": [
# directives
- (c('(.*?)(?:%s)' % '|'.join(
- [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string)
- )] + [
- r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
- for n, r in root_tag_rules
- ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
+ (
+ c(
+ "(.*?)(?:%s)"
+ % "|".join(
+ [
+ r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ )
+ ]
+ + [
+ r"(?P<%s>%s(\-|\+|))" % (n, r)
+ for n, r in root_tag_rules
+ ]
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, "#bygroup"),
+ "#bygroup",
+ ),
# data
- (c('.+'), TOKEN_DATA, None)
+ (c(".+"), TOKEN_DATA, None),
],
# comments
TOKEN_COMMENT_BEGIN: [
- (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
- e(environment.comment_end_string),
- e(environment.comment_end_string),
- block_suffix_re
- )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
- (c('(.)'), (Failure('Missing end of comment tag'),), None)
+ (
+ c(
+ r"(.*?)((?:\-%s\s*|%s)%s)"
+ % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re,
+ )
+ ),
+ (TOKEN_COMMENT, TOKEN_COMMENT_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of comment tag"),), None),
],
# blocks
TOKEN_BLOCK_BEGIN: [
- (c(r'(?:\-%s\s*|%s)%s' % (
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), TOKEN_BLOCK_END, '#pop'),
- ] + tag_rules,
+ (
+ c(
+ r"(?:\-%s\s*|%s)%s"
+ % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ TOKEN_BLOCK_END,
+ "#pop",
+ ),
+ ]
+ + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
- (c(r'\-%s\s*|%s' % (
- e(environment.variable_end_string),
- e(environment.variable_end_string)
- )), TOKEN_VARIABLE_END, '#pop')
- ] + tag_rules,
+ (
+ c(
+ r"\-%s\s*|%s"
+ % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string),
+ )
+ ),
+ TOKEN_VARIABLE_END,
+ "#pop",
+ )
+ ]
+ + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
- (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
- (c('(.)'), (Failure('Missing end of raw directive'),), None)
+ (
+ c(
+ r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of raw directive"),), None),
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
- (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
- ] + tag_rules,
+ (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
+ ]
+ + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
- (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
- TOKEN_LINECOMMENT_END), '#pop')
- ]
+ (
+ c(r"(.*?)()(?=\n|$)"),
+ (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
+ "#pop",
+ )
+ ],
}
def _normalize_newlines(self, value):
@@ -550,8 +611,7 @@ class Lexer(object):
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
- """Calls tokeniter + tokenize and wraps it in a token stream.
- """
+ """Calls tokeniter + tokenize and wraps it in a token stream."""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
@@ -562,37 +622,40 @@ class Lexer(object):
for lineno, token, value in stream:
if token in ignored_tokens:
continue
- elif token == 'linestatement_begin':
- token = 'block_begin'
- elif token == 'linestatement_end':
- token = 'block_end'
+ elif token == TOKEN_LINESTATEMENT_BEGIN:
+ token = TOKEN_BLOCK_BEGIN
+ elif token == TOKEN_LINESTATEMENT_END:
+ token = TOKEN_BLOCK_END
# we are not interested in those tokens in the parser
- elif token in ('raw_begin', 'raw_end'):
+ elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
continue
- elif token == 'data':
+ elif token == TOKEN_DATA:
value = self._normalize_newlines(value)
- elif token == 'keyword':
+ elif token == "keyword":
token = value
- elif token == 'name':
+ elif token == TOKEN_NAME:
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
- 'Invalid character in identifier',
- lineno, name, filename)
- elif token == 'string':
+ "Invalid character in identifier", lineno, name, filename
+ )
+ elif token == TOKEN_STRING:
# try to unescape string
try:
- value = self._normalize_newlines(value[1:-1]) \
- .encode('ascii', 'backslashreplace') \
- .decode('unicode-escape')
+ value = (
+ self._normalize_newlines(value[1:-1])
+ .encode("ascii", "backslashreplace")
+ .decode("unicode-escape")
+ )
except Exception as e:
- msg = str(e).split(':')[-1].strip()
+ msg = str(e).split(":")[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
- elif token == 'integer':
- value = int(value)
- elif token == 'float':
- value = float(value)
- elif token == 'operator':
+ elif token == TOKEN_INTEGER:
+ value = int(value.replace("_", ""))
+ elif token == TOKEN_FLOAT:
+ # remove all "_" first to support more Python versions
+ value = literal_eval(value.replace("_", ""))
+ elif token == TOKEN_OPERATOR:
token = operators[value]
yield Token(lineno, token, value)
@@ -603,23 +666,23 @@ class Lexer(object):
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
- for newline in ('\r\n', '\r', '\n'):
+ for newline in ("\r\n", "\r", "\n"):
if source.endswith(newline):
- lines.append('')
+ lines.append("")
break
- source = '\n'.join(lines)
+ source = "\n".join(lines)
pos = 0
lineno = 1
- stack = ['root']
- if state is not None and state != 'root':
- assert state in ('variable', 'block'), 'invalid state'
- stack.append(state + '_begin')
- else:
- state = 'root'
+ stack = ["root"]
+ if state is not None and state != "root":
+ assert state in ("variable", "block"), "invalid state"
+ stack.append(state + "_begin")
statetokens = self.rules[stack[-1]]
source_length = len(source)
-
balancing_stack = []
+ lstrip_unless_re = self.lstrip_unless_re
+ newlines_stripped = 0
+ line_starting = True
while 1:
# tokenizer loop
@@ -633,13 +696,48 @@ class Lexer(object):
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
- if balancing_stack and \
- tokens in ('variable_end', 'block_end',
- 'linestatement_end'):
+ if balancing_stack and tokens in (
+ TOKEN_VARIABLE_END,
+ TOKEN_BLOCK_END,
+ TOKEN_LINESTATEMENT_END,
+ ):
continue
# tuples support more options
if isinstance(tokens, tuple):
+ groups = m.groups()
+
+ if isinstance(tokens, OptionalLStrip):
+ # Rule supports lstrip. Match will look like
+ # text, block type, whitespace control, type, control, ...
+ text = groups[0]
+
+ # Skipping the text and first type, every other group is the
+ # whitespace control for each type. One of the groups will be
+ # -, +, or empty string instead of None.
+ strip_sign = next(g for g in groups[2::2] if g is not None)
+
+ if strip_sign == "-":
+ # Strip all whitespace between the text and the tag.
+ stripped = text.rstrip()
+ newlines_stripped = text[len(stripped) :].count("\n")
+ groups = (stripped,) + groups[1:]
+ elif (
+ # Not marked for preserving whitespace.
+ strip_sign != "+"
+ # lstrip is enabled.
+ and lstrip_unless_re is not None
+ # Not a variable expression.
+ and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
+ ):
+ # The start of text between the last newline and the tag.
+ l_pos = text.rfind("\n") + 1
+ if l_pos > 0 or line_starting:
+ # If there's only whitespace between the newline and the
+ # tag, strip it.
+ if not lstrip_unless_re.search(text, l_pos):
+ groups = (text[:l_pos],) + groups[1:]
+
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
@@ -647,51 +745,57 @@ class Lexer(object):
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
- elif token == '#bygroup':
+ elif token == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
- lineno += value.count('\n')
+ lineno += value.count("\n")
break
else:
- raise RuntimeError('%r wanted to resolve '
- 'the token dynamically'
- ' but no group matched'
- % regex)
+ raise RuntimeError(
+ "%r wanted to resolve "
+ "the token dynamically"
+ " but no group matched" % regex
+ )
# normal group
else:
- data = m.group(idx + 1)
+ data = groups[idx]
if data or token not in ignore_if_empty:
yield lineno, token, data
- lineno += data.count('\n')
+ lineno += data.count("\n") + newlines_stripped
+ newlines_stripped = 0
            # strings as tokens are just yielded as-is.
else:
data = m.group()
# update brace/parentheses balance
- if tokens == 'operator':
- if data == '{':
- balancing_stack.append('}')
- elif data == '(':
- balancing_stack.append(')')
- elif data == '[':
- balancing_stack.append(']')
- elif data in ('}', ')', ']'):
+ if tokens == TOKEN_OPERATOR:
+ if data == "{":
+ balancing_stack.append("}")
+ elif data == "(":
+ balancing_stack.append(")")
+ elif data == "[":
+ balancing_stack.append("]")
+ elif data in ("}", ")", "]"):
if not balancing_stack:
- raise TemplateSyntaxError('unexpected \'%s\'' %
- data, lineno, name,
- filename)
+ raise TemplateSyntaxError(
+ "unexpected '%s'" % data, lineno, name, filename
+ )
expected_op = balancing_stack.pop()
if expected_op != data:
- raise TemplateSyntaxError('unexpected \'%s\', '
- 'expected \'%s\'' %
- (data, expected_op),
- lineno, name,
- filename)
+ raise TemplateSyntaxError(
+ "unexpected '%s', "
+ "expected '%s'" % (data, expected_op),
+ lineno,
+ name,
+ filename,
+ )
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
- lineno += data.count('\n')
+ lineno += data.count("\n")
+
+ line_starting = m.group()[-1:] == "\n"
# fetch new position into new variable so that we can check
            # if there is an internal parsing error which would result
@@ -701,19 +805,20 @@ class Lexer(object):
# handle state changes
if new_state is not None:
# remove the uppermost state
- if new_state == '#pop':
+ if new_state == "#pop":
stack.pop()
# resolve the new state by group checking
- elif new_state == '#bygroup':
+ elif new_state == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
- raise RuntimeError('%r wanted to resolve the '
- 'new state dynamically but'
- ' no group matched' %
- regex)
+ raise RuntimeError(
+ "%r wanted to resolve the "
+ "new state dynamically but"
+ " no group matched" % regex
+ )
# direct state name given
else:
stack.append(new_state)
@@ -722,8 +827,9 @@ class Lexer(object):
            # this means a loop without a break condition, avoid that and
            # raise an error
elif pos2 == pos:
- raise RuntimeError('%r yielded empty string without '
- 'stack change' % regex)
+ raise RuntimeError(
+ "%r yielded empty string without stack change" % regex
+ )
            # publish new position and start again
pos = pos2
break
@@ -734,6 +840,9 @@ class Lexer(object):
if pos >= source_length:
return
# something went wrong
- raise TemplateSyntaxError('unexpected char %r at %d' %
- (source[pos], pos), lineno,
- name, filename)
+ raise TemplateSyntaxError(
+ "unexpected char %r at %d" % (source[pos], pos),
+ lineno,
+ name,
+ filename,
+ )
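The OptionalLStrip branch added above implements whitespace control in the tokenizer: a trailing "-" on a tag strips the whitespace before it (counting the removed newlines so reported line numbers stay correct), "+" preserves it, and otherwise lstrip_blocks-style stripping removes pure indentation between the last newline and a block tag. A rough standalone rendition of just that last decision, under the assumption that lstrip_unless_re behaves like a search for non-blank characters (apply_lstrip is hypothetical):

    import re

    lstrip_unless_re = re.compile(r"[^ \t]")

    def apply_lstrip(text, line_starting=False):
        # Position just after the last newline in the text before the tag.
        l_pos = text.rfind("\n") + 1
        if l_pos > 0 or line_starting:
            # Strip only if the tag's line contains nothing but blanks.
            if not lstrip_unless_re.search(text, l_pos):
                return text[:l_pos]
        return text

    assert apply_lstrip("before\n    ") == "before\n"      # indentation dropped
    assert apply_lstrip("before\n  x ") == "before\n  x "  # kept: 'x' precedes tag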
diff --git a/lib/spack/external/jinja2/loaders.py b/lib/spack/external/jinja2/loaders.py
index 4c79793760..457c4b59a7 100644
--- a/lib/spack/external/jinja2/loaders.py
+++ b/lib/spack/external/jinja2/loaders.py
@@ -1,22 +1,21 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.loaders
- ~~~~~~~~~~~~~~
-
- Jinja loader classes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""API and implementations for loading templates from different data
+sources.
"""
import os
import sys
import weakref
-from types import ModuleType
-from os import path
from hashlib import sha1
-from jinja2.exceptions import TemplateNotFound
-from jinja2.utils import open_if_exists, internalcode
-from jinja2._compat import string_types, iteritems
+from os import path
+from types import ModuleType
+
+from ._compat import abc
+from ._compat import fspath
+from ._compat import iteritems
+from ._compat import string_types
+from .exceptions import TemplateNotFound
+from .utils import internalcode
+from .utils import open_if_exists
def split_template_path(template):
@@ -24,12 +23,14 @@ def split_template_path(template):
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
- for piece in template.split('/'):
- if path.sep in piece \
- or (path.altsep and path.altsep in piece) or \
- piece == path.pardir:
+ for piece in template.split("/"):
+ if (
+ path.sep in piece
+ or (path.altsep and path.altsep in piece)
+ or piece == path.pardir
+ ):
raise TemplateNotFound(template)
- elif piece and piece != '.':
+ elif piece and piece != ".":
pieces.append(piece)
return pieces
@@ -86,15 +87,16 @@ class BaseLoader(object):
the template will be reloaded.
"""
if not self.has_source_access:
- raise RuntimeError('%s cannot provide access to the source' %
- self.__class__.__name__)
+ raise RuntimeError(
+ "%s cannot provide access to the source" % self.__class__.__name__
+ )
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
- raise TypeError('this loader cannot iterate over all templates')
+ raise TypeError("this loader cannot iterate over all templates")
@internalcode
def load(self, environment, name, globals=None):
@@ -131,8 +133,9 @@ class BaseLoader(object):
bucket.code = code
bcc.set_bucket(bucket)
- return environment.template_class.from_code(environment, code,
- globals, uptodate)
+ return environment.template_class.from_code(
+ environment, code, globals, uptodate
+ )
class FileSystemLoader(BaseLoader):
@@ -153,14 +156,20 @@ class FileSystemLoader(BaseLoader):
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
- .. versionchanged:: 2.8+
- The *followlinks* parameter was added.
+ .. versionchanged:: 2.8
+ The ``followlinks`` parameter was added.
"""
- def __init__(self, searchpath, encoding='utf-8', followlinks=False):
- if isinstance(searchpath, string_types):
+ def __init__(self, searchpath, encoding="utf-8", followlinks=False):
+ if not isinstance(searchpath, abc.Iterable) or isinstance(
+ searchpath, string_types
+ ):
searchpath = [searchpath]
- self.searchpath = list(searchpath)
+
+ # In Python 3.5, os.path.join doesn't support Path. This can be
+ # simplified to list(searchpath) when Python 3.5 is dropped.
+ self.searchpath = [fspath(p) for p in searchpath]
+
self.encoding = encoding
self.followlinks = followlinks
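With the abc.Iterable check and fspath conversion above, FileSystemLoader accepts a single string, a single pathlib.Path, or any iterable of either. A small usage sketch (the directory is a placeholder; constructing the loader does not touch the filesystem):

    from pathlib import Path
    from jinja2 import Environment, FileSystemLoader

    # A bare Path is not an abc.Iterable, so it is wrapped in a list and
    # each entry is converted with fspath() before use.
    loader = FileSystemLoader(Path("/path/to/templates"))
    env = Environment(loader=loader)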
@@ -183,6 +192,7 @@ class FileSystemLoader(BaseLoader):
return path.getmtime(filename) == mtime
except OSError:
return False
+
return contents, filename, uptodate
raise TemplateNotFound(template)
@@ -190,12 +200,14 @@ class FileSystemLoader(BaseLoader):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
- for dirpath, dirnames, filenames in walk_dir:
+ for dirpath, _, filenames in walk_dir:
for filename in filenames:
- template = os.path.join(dirpath, filename) \
- [len(searchpath):].strip(os.path.sep) \
- .replace(os.path.sep, '/')
- if template[:2] == './':
+ template = (
+ os.path.join(dirpath, filename)[len(searchpath) :]
+ .strip(os.path.sep)
+ .replace(os.path.sep, "/")
+ )
+ if template[:2] == "./":
template = template[2:]
if template not in found:
found.add(template)
@@ -217,10 +229,11 @@ class PackageLoader(BaseLoader):
from the file system and not a zip file.
"""
- def __init__(self, package_name, package_path='templates',
- encoding='utf-8'):
- from pkg_resources import DefaultProvider, ResourceManager, \
- get_provider
+ def __init__(self, package_name, package_path="templates", encoding="utf-8"):
+ from pkg_resources import DefaultProvider
+ from pkg_resources import get_provider
+ from pkg_resources import ResourceManager
+
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
@@ -230,14 +243,17 @@ class PackageLoader(BaseLoader):
def get_source(self, environment, template):
pieces = split_template_path(template)
- p = '/'.join((self.package_path,) + tuple(pieces))
+ p = "/".join((self.package_path,) + tuple(pieces))
+
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
+
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
+
def uptodate():
try:
return path.getmtime(filename) == mtime
@@ -249,19 +265,24 @@ class PackageLoader(BaseLoader):
def list_templates(self):
path = self.package_path
- if path[:2] == './':
+
+ if path[:2] == "./":
path = path[2:]
- elif path == '.':
- path = ''
+ elif path == ".":
+ path = ""
+
offset = len(path)
results = []
+
def _walk(path):
for filename in self.provider.resource_listdir(path):
- fullname = path + '/' + filename
+ fullname = path + "/" + filename
+
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
- results.append(fullname[offset:].lstrip('/'))
+ results.append(fullname[offset:].lstrip("/"))
+
_walk(path)
results.sort()
return results
@@ -334,7 +355,7 @@ class PrefixLoader(BaseLoader):
by loading ``'app2/index.html'`` the file from the second.
"""
- def __init__(self, mapping, delimiter='/'):
+ def __init__(self, mapping, delimiter="/"):
self.mapping = mapping
self.delimiter = delimiter
@@ -434,19 +455,20 @@ class ModuleLoader(BaseLoader):
has_source_access = False
def __init__(self, path):
- package_name = '_jinja2_module_templates_%x' % id(self)
+ package_name = "_jinja2_module_templates_%x" % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
- if isinstance(path, string_types):
+
+ if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
path = [path]
- else:
- path = list(path)
- mod.__path__ = path
- sys.modules[package_name] = weakref.proxy(mod,
- lambda x: sys.modules.pop(package_name, None))
+ mod.__path__ = [fspath(p) for p in path]
+
+ sys.modules[package_name] = weakref.proxy(
+ mod, lambda x: sys.modules.pop(package_name, None)
+ )
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
@@ -456,20 +478,20 @@ class ModuleLoader(BaseLoader):
@staticmethod
def get_template_key(name):
- return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
+ return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
def get_module_filename(name):
- return ModuleLoader.get_template_key(name) + '.py'
+ return ModuleLoader.get_template_key(name) + ".py"
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
- module = '%s.%s' % (self.package_name, key)
+ module = "%s.%s" % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
- mod = __import__(module, None, None, ['root'])
+ mod = __import__(module, None, None, ["root"])
except ImportError:
raise TemplateNotFound(name)
@@ -478,4 +500,5 @@ class ModuleLoader(BaseLoader):
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
- environment, mod.__dict__, globals)
+ environment, mod.__dict__, globals
+ )
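ModuleLoader keeps the only strong reference to its fake package: the sys.modules entry is a weakref.proxy whose callback pops it, so garbage-collecting the loader unloads the module. A self-contained sketch of that trick, deterministic under CPython's reference counting (the module name and class are illustrative):

    import sys
    import weakref

    class _FakeModule(object):
        def __init__(self, name):
            self.__name__ = name

    name = "_demo_module_%x" % id(object())
    mod = _FakeModule(name)
    # sys.modules holds only a weak proxy; the callback evicts the entry
    # when the real object dies.
    sys.modules[name] = weakref.proxy(mod, lambda ref: sys.modules.pop(name, None))

    assert name in sys.modules
    del mod  # last strong reference gone -> entry removed
    assert name not in sys.modules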
diff --git a/lib/spack/external/jinja2/meta.py b/lib/spack/external/jinja2/meta.py
index 7421914f77..3795aace59 100644
--- a/lib/spack/external/jinja2/meta.py
+++ b/lib/spack/external/jinja2/meta.py
@@ -1,25 +1,18 @@
# -*- coding: utf-8 -*-
+"""Functions that expose information about templates that might be
+interesting for introspection.
"""
- jinja2.meta
- ~~~~~~~~~~~
-
- This module implements various functions that exposes information about
- templates that might be interesting for various kinds of applications.
-
- :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.compiler import CodeGenerator
-from jinja2._compat import string_types, iteritems
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from .compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
- CodeGenerator.__init__(self, environment, '<introspection>',
- '<introspection>')
+ CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
self.undeclared_identifiers = set()
def write(self, x):
@@ -29,7 +22,7 @@ class TrackingCodeGenerator(CodeGenerator):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
- if action == 'resolve':
+ if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
@@ -72,8 +65,9 @@ def find_referenced_templates(ast):
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
- for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
- nodes.Include)):
+ for node in ast.find_all(
+ (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
+ ):
if not isinstance(node.template, nodes.Const):
            # a tuple with some non-const values in it
if isinstance(node.template, (nodes.Tuple, nodes.List)):
@@ -96,8 +90,9 @@ def find_referenced_templates(ast):
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
- elif isinstance(node, nodes.Include) and \
- isinstance(node.template.value, (tuple, list)):
+ elif isinstance(node, nodes.Include) and isinstance(
+ node.template.value, (tuple, list)
+ ):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
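With the `param not in self.environment.globals` guard added above, names supplied by the environment itself (range, dict, cycler, and so on in the default namespace) are no longer reported as undeclared. A usage sketch, assuming an environment with the default globals:

    from jinja2 import Environment, meta

    env = Environment()
    ast = env.parse("{{ range(n) }} {% include 'header.html' %}")

    # 'range' resolves from env.globals, so only 'n' is undeclared.
    print(meta.find_undeclared_variables(ast))        # {'n'}
    print(list(meta.find_referenced_templates(ast)))  # ['header.html']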
diff --git a/lib/spack/external/jinja2/nativetypes.py b/lib/spack/external/jinja2/nativetypes.py
index fe17e4138d..a9ead4e2bb 100644
--- a/lib/spack/external/jinja2/nativetypes.py
+++ b/lib/spack/external/jinja2/nativetypes.py
@@ -1,19 +1,23 @@
-import sys
from ast import literal_eval
-from itertools import islice, chain
-from jinja2 import nodes
-from jinja2._compat import text_type
-from jinja2.compiler import CodeGenerator, has_safe_repr
-from jinja2.environment import Environment, Template
-from jinja2.utils import concat, escape
+from itertools import chain
+from itertools import islice
+
+from . import nodes
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import has_safe_repr
+from .environment import Environment
+from .environment import Template
def native_concat(nodes):
- """Return a native Python type from the list of compiled nodes. If the
- result is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
+ """Return a native Python type from the list of compiled nodes. If
+ the result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
+ the string is returned.
+
+ :param nodes: Iterable of nodes to concatenate.
"""
head = list(islice(nodes, 2))
@@ -21,200 +25,70 @@ def native_concat(nodes):
return None
if len(head) == 1:
- out = head[0]
+ raw = head[0]
else:
- out = u''.join([text_type(v) for v in chain(head, nodes)])
+ raw = u"".join([text_type(v) for v in chain(head, nodes)])
try:
- return literal_eval(out)
+ return literal_eval(raw)
except (ValueError, SyntaxError, MemoryError):
- return out
+ return raw
class NativeCodeGenerator(CodeGenerator):
- """A code generator which avoids injecting ``to_string()`` calls around the
- internal code Jinja uses to render templates.
+ """A code generator which renders Python types by not adding
+ ``to_string()`` around output nodes.
"""
- def visit_Output(self, node, frame):
- """Same as :meth:`CodeGenerator.visit_Output`, but do not call
- ``to_string`` on output nodes in generated code.
- """
- if self.has_known_extends and frame.require_output_check:
- return
-
- finalize = self.environment.finalize
- finalize_context = getattr(finalize, 'contextfunction', False)
- finalize_eval = getattr(finalize, 'evalcontextfunction', False)
- finalize_env = getattr(finalize, 'environmentfunction', False)
-
- if finalize is not None:
- if finalize_context or finalize_eval:
- const_finalize = None
- elif finalize_env:
- def const_finalize(x):
- return finalize(self.environment, x)
- else:
- const_finalize = finalize
- else:
- def const_finalize(x):
- return x
-
- # If we are inside a frame that requires output checking, we do so.
- outdent_later = False
-
- if frame.require_output_check:
- self.writeline('if parent_template is None:')
- self.indent()
- outdent_later = True
-
- # Try to evaluate as many chunks as possible into a static string at
- # compile time.
- body = []
-
- for child in node.nodes:
- try:
- if const_finalize is None:
- raise nodes.Impossible()
-
- const = child.as_const(frame.eval_ctx)
- if not has_safe_repr(const):
- raise nodes.Impossible()
- except nodes.Impossible:
- body.append(child)
- continue
-
- # the frame can't be volatile here, because otherwise the as_const
- # function would raise an Impossible exception at that point
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
-
- const = const_finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node at runtime
- # for easier debugging
- body.append(child)
- continue
-
- if body and isinstance(body[-1], list):
- body[-1].append(const)
- else:
- body.append([const])
-
- # if we have less than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
- else:
- self.writeline('%s.extend((' % frame.buffer)
-
- self.indent()
-
- for item in body:
- if isinstance(item, list):
- val = repr(native_concat(item))
-
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
- else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
-
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
-
- close += 1
-
- self.visit(item, frame)
-
- if close > 0:
- self.write(')' * close)
-
- if frame.buffer is not None:
- self.write(',')
-
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
-
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
-
- for item in body:
- if isinstance(item, list):
- format.append(native_concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
-
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
-
- for argument in arguments:
- self.newline(argument)
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
- elif finalize_eval:
- self.write('context.eval_ctx, ')
- elif finalize_env:
- self.write('environment, ')
-
- close += 1
-
- self.visit(argument, frame)
- self.write(')' * close + ', ')
-
- self.outdent()
- self.writeline(')')
+ @staticmethod
+ def _default_finalize(value):
+ return value
+
+ def _output_const_repr(self, group):
+ return repr(u"".join([text_type(v) for v in group]))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ const = node.as_const(frame.eval_ctx)
+
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
- if outdent_later:
- self.outdent()
+ if isinstance(node, nodes.TemplateData):
+ return const
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(")")
+
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
class NativeTemplate(Template):
+ environment_class = NativeEnvironment
+
def render(self, *args, **kwargs):
- """Render the template to produce a native Python type. If the result
- is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
+ """Render the template to produce a native Python type. If the
+ result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed
+ with :func:`ast.literal_eval`, the parsed value is returned.
+ Otherwise, the string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
except Exception:
- exc_info = sys.exc_info()
+ return self.environment.handle_exception()
- return self.environment.handle_exception(exc_info, True)
-
-class NativeEnvironment(Environment):
- """An environment that renders templates to native Python types."""
-
- code_generator_class = NativeCodeGenerator
- template_class = NativeTemplate
+NativeEnvironment.template_class = NativeTemplate
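The net effect of the rewritten module is that NativeEnvironment renders to native Python objects instead of strings whenever the result parses as a literal. A usage sketch:

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    result = env.from_string("{{ x + y }}").render(x=4, y=2)
    print(result, type(result))  # 6 <class 'int'>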
diff --git a/lib/spack/external/jinja2/nodes.py b/lib/spack/external/jinja2/nodes.py
index 4d9a01ad8b..95bd614a14 100644
--- a/lib/spack/external/jinja2/nodes.py
+++ b/lib/spack/external/jinja2/nodes.py
@@ -1,54 +1,39 @@
# -*- coding: utf-8 -*-
+"""AST nodes generated by the parser for the compiler. Also provides
+some node tree helper functions used by the parser and compiler in order
+to normalize nodes.
"""
- jinja2.nodes
- ~~~~~~~~~~~~
-
- This module implements additional nodes derived from the ast base node.
-
- It also provides some node tree helper functions like `in_lineno` and
- `get_nodes` used by the parser and translator in order to normalize
- python and jinja nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import types
import operator
-
from collections import deque
-from jinja2.utils import Markup
-from jinja2._compat import izip, with_metaclass, text_type, PY2
-
-#: the types we support for context functions
-_context_function_types = (types.FunctionType, types.MethodType)
+from markupsafe import Markup
+from ._compat import izip
+from ._compat import PY2
+from ._compat import text_type
+from ._compat import with_metaclass
_binop_to_func = {
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod,
- '+': operator.add,
- '-': operator.sub
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ "+": operator.add,
+ "-": operator.sub,
}
-_uaop_to_func = {
- 'not': operator.not_,
- '+': operator.pos,
- '-': operator.neg
-}
+_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
_cmpop_to_func = {
- 'eq': operator.eq,
- 'ne': operator.ne,
- 'gt': operator.gt,
- 'gteq': operator.ge,
- 'lt': operator.lt,
- 'lteq': operator.le,
- 'in': lambda a, b: a in b,
- 'notin': lambda a, b: a not in b
+ "eq": operator.eq,
+ "ne": operator.ne,
+ "gt": operator.gt,
+ "gteq": operator.ge,
+ "lt": operator.lt,
+ "lteq": operator.le,
+ "in": lambda a, b: a in b,
+ "notin": lambda a, b: a not in b,
}
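These operator tables are what the as_const() methods below use to fold constant expressions at compile time. A minimal sketch building a node by hand; the environment argument is only needed so as_const() can consult sandbox settings:

    from jinja2 import Environment, nodes

    env = Environment()
    # _binop_to_func["+"] is operator.add, so two Consts fold immediately.
    node = nodes.Add(nodes.Const(1), nodes.Const(2), lineno=1, environment=env)
    assert node.as_const() == 3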
@@ -61,16 +46,16 @@ class NodeType(type):
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
- def __new__(cls, name, bases, d):
- for attr in 'fields', 'attributes':
+ def __new__(mcs, name, bases, d):
+ for attr in "fields", "attributes":
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
- assert len(bases) == 1, 'multiple inheritance not allowed'
- assert len(storage) == len(set(storage)), 'layout conflict'
+ assert len(bases) == 1, "multiple inheritance not allowed"
+ assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
- d.setdefault('abstract', False)
- return type.__new__(cls, name, bases, d)
+ d.setdefault("abstract", False)
+ return type.__new__(mcs, name, bases, d)
class EvalContext(object):
@@ -97,15 +82,17 @@ class EvalContext(object):
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
- raise RuntimeError('if no eval context is passed, the '
- 'node must have an attached '
- 'environment.')
+ raise RuntimeError(
+ "if no eval context is passed, the "
+ "node must have an attached "
+ "environment."
+ )
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
- """Baseclass for all Jinja2 nodes. There are a number of nodes available
+ """Baseclass for all Jinja nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
@@ -120,30 +107,32 @@ class Node(with_metaclass(NodeType, object)):
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
+
fields = ()
- attributes = ('lineno', 'environment')
+ attributes = ("lineno", "environment")
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
- raise TypeError('abstract nodes are not instanciable')
+ raise TypeError("abstract nodes are not instantiable")
if fields:
if len(fields) != len(self.fields):
if not self.fields:
- raise TypeError('%r takes 0 arguments' %
- self.__class__.__name__)
- raise TypeError('%r takes 0 or %d argument%s' % (
- self.__class__.__name__,
- len(self.fields),
- len(self.fields) != 1 and 's' or ''
- ))
+ raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
+ raise TypeError(
+ "%r takes 0 or %d argument%s"
+ % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and "s" or "",
+ )
+ )
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
- raise TypeError('unknown attribute %r' %
- next(iter(attributes)))
+ raise TypeError("unknown attribute %r" % next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
@@ -153,9 +142,11 @@ class Node(with_metaclass(NodeType, object)):
should be sets or tuples of field names.
"""
for name in self.fields:
- if (exclude is only is None) or \
- (exclude is not None and name not in exclude) or \
- (only is not None and name in only):
+ if (
+ (exclude is only is None)
+ or (exclude is not None and name not in exclude)
+ or (only is not None and name in only)
+ ):
try:
yield name, getattr(self, name)
except AttributeError:
@@ -166,7 +157,7 @@ class Node(with_metaclass(NodeType, object)):
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
- for field, item in self.iter_fields(exclude, only):
+ for _, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
@@ -200,7 +191,7 @@ class Node(with_metaclass(NodeType, object)):
todo = deque([self])
while todo:
node = todo.popleft()
- if 'ctx' in node.fields:
+ if "ctx" in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
@@ -210,7 +201,7 @@ class Node(with_metaclass(NodeType, object)):
todo = deque([self])
while todo:
node = todo.popleft()
- if 'lineno' in node.attributes:
+ if "lineno" in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
@@ -226,8 +217,9 @@ class Node(with_metaclass(NodeType, object)):
return self
def __eq__(self, other):
- return type(self) is type(other) and \
- tuple(self.iter_fields()) == tuple(other.iter_fields())
+ return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
+ other.iter_fields()
+ )
def __ne__(self, other):
return not self.__eq__(other)
@@ -236,10 +228,9 @@ class Node(with_metaclass(NodeType, object)):
__hash__ = object.__hash__
def __repr__(self):
- return '%s(%s)' % (
+ return "%s(%s)" % (
self.__class__.__name__,
- ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
- arg in self.fields)
+ ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
)
def dump(self):
@@ -248,37 +239,39 @@ class Node(with_metaclass(NodeType, object)):
buf.append(repr(node))
return
- buf.append('nodes.%s(' % node.__class__.__name__)
+ buf.append("nodes.%s(" % node.__class__.__name__)
if not node.fields:
- buf.append(')')
+ buf.append(")")
return
for idx, field in enumerate(node.fields):
if idx:
- buf.append(', ')
+ buf.append(", ")
value = getattr(node, field)
if isinstance(value, list):
- buf.append('[')
+ buf.append("[")
for idx, item in enumerate(value):
if idx:
- buf.append(', ')
+ buf.append(", ")
_dump(item)
- buf.append(']')
+ buf.append("]")
else:
_dump(value)
- buf.append(')')
+ buf.append(")")
+
buf = []
_dump(self)
- return ''.join(buf)
-
+ return "".join(buf)
class Stmt(Node):
"""Base node for all statements."""
+
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
+
abstract = True
@@ -286,19 +279,22 @@ class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
- fields = ('body',)
+
+ fields = ("body",)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
- fields = ('nodes',)
+
+ fields = ("nodes",)
class Extends(Stmt):
"""Represents an extends statement."""
- fields = ('template',)
+
+ fields = ("template",)
class For(Stmt):
@@ -309,12 +305,14 @@ class For(Stmt):
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
- fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+ fields = ("target", "iter", "body", "else_", "test", "recursive")
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
- fields = ('test', 'body', 'elif_', 'else_')
+
+ fields = ("test", "body", "elif_", "else_")
class Macro(Stmt):
@@ -322,19 +320,22 @@ class Macro(Stmt):
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
- fields = ('name', 'args', 'defaults', 'body')
+
+ fields = ("name", "args", "defaults", "body")
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
- fields = ('call', 'args', 'defaults', 'body')
+
+ fields = ("call", "args", "defaults", "body")
class FilterBlock(Stmt):
"""Node for filter sections."""
- fields = ('body', 'filter')
+
+ fields = ("body", "filter")
class With(Stmt):
@@ -343,22 +344,26 @@ class With(Stmt):
.. versionadded:: 2.9.3
"""
- fields = ('targets', 'values', 'body')
+
+ fields = ("targets", "values", "body")
class Block(Stmt):
"""A node that represents a block."""
- fields = ('name', 'body', 'scoped')
+
+ fields = ("name", "body", "scoped")
class Include(Stmt):
"""A node that represents the include tag."""
- fields = ('template', 'with_context', 'ignore_missing')
+
+ fields = ("template", "with_context", "ignore_missing")
class Import(Stmt):
"""A node that represents the import tag."""
- fields = ('template', 'target', 'with_context')
+
+ fields = ("template", "target", "with_context")
class FromImport(Stmt):
@@ -372,26 +377,31 @@ class FromImport(Stmt):
The list of names may contain tuples if aliases are wanted.
"""
- fields = ('template', 'names', 'with_context')
+
+ fields = ("template", "names", "with_context")
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
- fields = ('node',)
+
+ fields = ("node",)
class Assign(Stmt):
"""Assigns an expression to a target."""
- fields = ('target', 'node')
+
+ fields = ("target", "node")
class AssignBlock(Stmt):
"""Assigns a block to a target."""
- fields = ('target', 'filter', 'body')
+
+ fields = ("target", "filter", "body")
class Expr(Node):
"""Baseclass for all expressions."""
+
abstract = True
def as_const(self, eval_ctx=None):
@@ -414,15 +424,18 @@ class Expr(Node):
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
- fields = ('left', 'right')
+
+ fields = ("left", "right")
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_binops:
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_binops
+ ):
raise Impossible()
f = _binop_to_func[self.operator]
try:
@@ -433,15 +446,18 @@ class BinExpr(Expr):
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
- fields = ('node',)
+
+ fields = ("node",)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_unops:
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_unops
+ ):
raise Impossible()
f = _uaop_to_func[self.operator]
try:
@@ -458,16 +474,17 @@ class Name(Expr):
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
- fields = ('name', 'ctx')
+
+ fields = ("name", "ctx")
def can_assign(self):
- return self.name not in ('true', 'false', 'none',
- 'True', 'False', 'None')
+ return self.name not in ("true", "false", "none", "True", "False", "None")
class NSRef(Expr):
"""Reference to a namespace value assignment"""
- fields = ('name', 'attr')
+
+ fields = ("name", "attr")
def can_assign(self):
# We don't need any special checks here; NSRef assignments have a
@@ -479,6 +496,7 @@ class NSRef(Expr):
class Literal(Expr):
"""Baseclass for literals."""
+
abstract = True
@@ -488,14 +506,18 @@ class Const(Literal):
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) are
    supported.
"""
- fields = ('value',)
+
+ fields = ("value",)
def as_const(self, eval_ctx=None):
rv = self.value
- if PY2 and type(rv) is text_type and \
- self.environment.policies['compiler.ascii_str']:
+ if (
+ PY2
+ and type(rv) is text_type
+ and self.environment.policies["compiler.ascii_str"]
+ ):
try:
- rv = rv.encode('ascii')
+ rv = rv.encode("ascii")
except UnicodeError:
pass
return rv
@@ -507,6 +529,7 @@ class Const(Literal):
an `Impossible` exception.
"""
from .compiler import has_safe_repr
+
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
@@ -514,7 +537,8 @@ class Const(Literal):
class TemplateData(Literal):
"""A constant template string."""
- fields = ('data',)
+
+ fields = ("data",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -530,7 +554,8 @@ class Tuple(Literal):
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
- fields = ('items', 'ctx')
+
+ fields = ("items", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -545,7 +570,8 @@ class Tuple(Literal):
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
- fields = ('items',)
+
+ fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -556,7 +582,8 @@ class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
- fields = ('items',)
+
+ fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -565,7 +592,8 @@ class Dict(Literal):
class Pair(Helper):
"""A key, value pair for dicts."""
- fields = ('key', 'value')
+
+ fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -574,7 +602,8 @@ class Pair(Helper):
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
- fields = ('key', 'value')
+
+ fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -585,7 +614,8 @@ class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
- fields = ('test', 'expr1', 'expr2')
+
+ fields = ("test", "expr1", "expr2")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -626,7 +656,7 @@ class Filter(Expr):
filtered. Buffers are created by macros and filter blocks.
"""
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -636,28 +666,27 @@ class Filter(Expr):
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
- # call in a list beause it is assuming we are talking about the
+ # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
- if filter_ is None or getattr(filter_, 'contextfilter', False):
+ if filter_ is None or getattr(filter_, "contextfilter", False) is True:
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
- if (
- eval_ctx.environment.is_async
- and getattr(filter_, 'asyncfiltervariant', False)
+ if eval_ctx.environment.is_async and getattr(
+ filter_, "asyncfiltervariant", False
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
- if getattr(filter_, 'evalcontextfilter', False):
+ if getattr(filter_, "evalcontextfilter", False) is True:
args.insert(0, eval_ctx)
- elif getattr(filter_, 'environmentfilter', False):
+ elif getattr(filter_, "environmentfilter", False) is True:
args.insert(0, self.environment)
try:
@@ -671,7 +700,7 @@ class Test(Expr):
rest of the fields are the same as for :class:`Call`.
"""
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
test = self.environment.tests.get(self.name)
@@ -696,20 +725,23 @@ class Call(Expr):
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
- fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
- fields = ('node', 'arg', 'ctx')
+
+ fields = ("node", "arg", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- if self.ctx != 'load':
+ if self.ctx != "load":
raise Impossible()
try:
- return self.environment.getitem(self.node.as_const(eval_ctx),
- self.arg.as_const(eval_ctx))
+ return self.environment.getitem(
+ self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
+ )
except Exception:
raise Impossible()
@@ -721,15 +753,15 @@ class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
- fields = ('node', 'attr', 'ctx')
+
+ fields = ("node", "attr", "ctx")
def as_const(self, eval_ctx=None):
- if self.ctx != 'load':
+ if self.ctx != "load":
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
- return self.environment.getattr(self.node.as_const(eval_ctx),
- self.attr)
+ return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
except Exception:
raise Impossible()
@@ -741,14 +773,17 @@ class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
- fields = ('start', 'stop', 'step')
+
+ fields = ("start", "stop", "step")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
+
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
+
return slice(const(self.start), const(self.stop), const(self.step))
@@ -756,82 +791,103 @@ class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
- fields = ('nodes',)
+
+ fields = ("nodes",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+ return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
- fields = ('expr', 'ops')
+
+ fields = ("expr", "ops")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
+
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
+
+ if not result:
+ return False
+
value = new_value
except Exception:
raise Impossible()
+
return result
class Operand(Helper):
"""Holds an operator and an expression."""
- fields = ('op', 'expr')
+
+ fields = ("op", "expr")
+
if __debug__:
- Operand.__doc__ += '\nThe following operators are available: ' + \
- ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
- set(_uaop_to_func) | set(_cmpop_to_func)))
+ Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
+ sorted(
+ "``%s``" % x
+ for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
+ )
+ )
class Mul(BinExpr):
"""Multiplies the left with the right node."""
- operator = '*'
+
+ operator = "*"
class Div(BinExpr):
"""Divides the left by the right node."""
- operator = '/'
+
+ operator = "/"
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
- operator = '//'
+
+ operator = "//"
class Add(BinExpr):
"""Add the left to the right node."""
- operator = '+'
+
+ operator = "+"
class Sub(BinExpr):
"""Subtract the right from the left node."""
- operator = '-'
+
+ operator = "-"
class Mod(BinExpr):
"""Left modulo right."""
- operator = '%'
+
+ operator = "%"
class Pow(BinExpr):
"""Left to the power of right."""
- operator = '**'
+
+ operator = "**"
class And(BinExpr):
"""Short circuited AND."""
- operator = 'and'
+
+ operator = "and"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
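The early `return False` added to Compare.as_const above makes constant folding of chained comparisons match Python semantics: a op b op c short-circuits on the first false link, where the old code returned only the last link's result. A quick check through the public API:

    from jinja2 import Environment

    env = Environment()
    # 1 > 2 < 3 is (1 > 2) and (2 < 3), i.e. False. Before this fix the
    # folded constant was the last link's result, True.
    assert env.from_string("{{ 1 > 2 < 3 }}").render() == "False"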
@@ -840,7 +896,8 @@ class And(BinExpr):
class Or(BinExpr):
"""Short circuited OR."""
- operator = 'or'
+
+ operator = "or"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -849,17 +906,20 @@ class Or(BinExpr):
class Not(UnaryExpr):
"""Negate the expression."""
- operator = 'not'
+
+ operator = "not"
class Neg(UnaryExpr):
"""Make the expression negative."""
- operator = '-'
+
+ operator = "-"
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
- operator = '+'
+
+ operator = "+"
# Helpers for extensions
@@ -869,7 +929,8 @@ class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
- fields = ('name',)
+
+ fields = ("name",)
class ExtensionAttribute(Expr):
@@ -879,7 +940,8 @@ class ExtensionAttribute(Expr):
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
- fields = ('identifier', 'name')
+
+ fields = ("identifier", "name")
class ImportedName(Expr):
@@ -888,7 +950,8 @@ class ImportedName(Expr):
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
- fields = ('importname',)
+
+ fields = ("importname",)
class InternalName(Expr):
@@ -898,16 +961,20 @@ class InternalName(Expr):
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
- fields = ('name',)
+
+ fields = ("name",)
def __init__(self):
- raise TypeError('Can\'t create internal names. Use the '
- '`free_identifier` method on a parser.')
+ raise TypeError(
+ "Can't create internal names. Use the "
+ "`free_identifier` method on a parser."
+ )
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
- fields = ('expr',)
+
+ fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -920,7 +987,8 @@ class MarkSafeIfAutoescape(Expr):
.. versionadded:: 2.5
"""
- fields = ('expr',)
+
+ fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -942,6 +1010,20 @@ class ContextReference(Expr):
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
+
+ This is basically equivalent to using the
+ :func:`~jinja2.contextfunction` decorator when using the
+ high-level API, which causes a reference to the context to be passed
+ as the first argument to a function.
+ """
+
+
+class DerivedContextReference(Expr):
+ """Return the current template context including locals. Behaves
+ exactly like :class:`ContextReference`, but includes local
+ variables, such as from a ``for`` loop.
+
+ .. versionadded:: 2.11
"""
@@ -955,7 +1037,8 @@ class Break(Stmt):
class Scope(Stmt):
"""An artificial scope."""
- fields = ('body',)
+
+ fields = ("body",)
class OverlayScope(Stmt):
@@ -971,7 +1054,8 @@ class OverlayScope(Stmt):
.. versionadded:: 2.10
"""
- fields = ('context', 'body')
+
+ fields = ("context", "body")
class EvalContextModifier(Stmt):
@@ -982,7 +1066,8 @@ class EvalContextModifier(Stmt):
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
- fields = ('options',)
+
+ fields = ("options",)
class ScopedEvalContextModifier(EvalContextModifier):
@@ -990,10 +1075,14 @@ class ScopedEvalContextModifier(EvalContextModifier):
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
- fields = ('body',)
+
+ fields = ("body",)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
- raise TypeError('can\'t create custom node types')
-NodeType.__new__ = staticmethod(_failing_new); del _failing_new
+ raise TypeError("can't create custom node types")
+
+
+NodeType.__new__ = staticmethod(_failing_new)
+del _failing_new
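Because NodeType.__new__ is swapped for _failing_new at import time, defining a Node subclass outside this module fails at class-creation time. A quick demonstration:

    from jinja2 import nodes

    try:
        class MyNode(nodes.Stmt):  # metaclass NodeType runs _failing_new
            fields = ("x",)
    except TypeError as exc:
        print(exc)  # can't create custom node types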
diff --git a/lib/spack/external/jinja2/optimizer.py b/lib/spack/external/jinja2/optimizer.py
index 65ab3ceb71..7bc78c4524 100644
--- a/lib/spack/external/jinja2/optimizer.py
+++ b/lib/spack/external/jinja2/optimizer.py
@@ -1,23 +1,15 @@
# -*- coding: utf-8 -*-
+"""The optimizer tries to constant fold expressions and modify the AST
+in place so that it should be faster to evaluate.
+
+Because the AST does not contain all the scoping information and the
+compiler has to find that out, we cannot do all the optimizations we
+want. For example, loop unrolling doesn't work because unrolled loops
+would have a different scope. The solution would be a second syntax tree
+that stored the scoping rules.
"""
- jinja2.optimizer
- ~~~~~~~~~~~~~~~~
-
- The jinja optimizer is currently trying to constant fold a few expressions
- and modify the AST in place so that it should be easier to evaluate it.
-
- Because the AST does not contain all the scoping information and the
- compiler has to find that out, we cannot do all the optimizations we
- want. For example loop unrolling doesn't work because unrolled loops would
- have a different scoping.
-
- The solution would be a second syntax tree that has the scoping rules stored.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2 import nodes
-from jinja2.visitor import NodeTransformer
+from . import nodes
+from .visitor import NodeTransformer
def optimize(node, environment):
@@ -28,22 +20,22 @@ def optimize(node, environment):
class Optimizer(NodeTransformer):
-
def __init__(self, environment):
self.environment = environment
- def fold(self, node, eval_ctx=None):
- """Do constant folding."""
- node = self.generic_visit(node)
- try:
- return nodes.Const.from_untrusted(node.as_const(eval_ctx),
- lineno=node.lineno,
- environment=self.environment)
- except nodes.Impossible:
- return node
-
- visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
- visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
- visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
- visit_Filter = visit_Test = visit_CondExpr = fold
- del fold
+ def generic_visit(self, node, *args, **kwargs):
+ node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
+
+ # Do constant folding. Some other nodes besides Expr have
+ # as_const, but folding them causes errors later on.
+ if isinstance(node, nodes.Expr):
+ try:
+ return nodes.Const.from_untrusted(
+ node.as_const(args[0] if args else None),
+ lineno=node.lineno,
+ environment=self.environment,
+ )
+ except nodes.Impossible:
+ pass
+
+ return node
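The per-operator visit_* aliases collapse into a single generic_visit override that tries as_const() on every Expr node once its children have been visited. A usage sketch running the optimizer directly:

    from jinja2 import Environment, nodes
    from jinja2.optimizer import optimize

    env = Environment()
    ast = env.parse("{{ 1 + 2 * 3 }}")
    optimized = optimize(ast, env)

    # The Output node's child is now one folded constant.
    output = next(optimized.find_all(nodes.Output))
    print(output.nodes)  # [Const(value=7)]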
diff --git a/lib/spack/external/jinja2/parser.py b/lib/spack/external/jinja2/parser.py
index ed00d9708e..d5881066f7 100644
--- a/lib/spack/external/jinja2/parser.py
+++ b/lib/spack/external/jinja2/parser.py
@@ -1,41 +1,46 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.parser
- ~~~~~~~~~~~~~
-
- Implements the template parser.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
-from jinja2.lexer import describe_token, describe_token_expr
-from jinja2._compat import imap
-
-
-_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
- 'macro', 'include', 'from', 'import',
- 'set', 'with', 'autoescape'])
-_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+"""Parse tokens from the lexer into nodes for the compiler."""
+from . import nodes
+from ._compat import imap
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .lexer import describe_token
+from .lexer import describe_token_expr
+
+_statement_keywords = frozenset(
+ [
+ "for",
+ "if",
+ "block",
+ "extends",
+ "print",
+ "macro",
+ "include",
+ "from",
+ "import",
+ "set",
+ "with",
+ "autoescape",
+ ]
+)
+_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
_math_nodes = {
- 'add': nodes.Add,
- 'sub': nodes.Sub,
- 'mul': nodes.Mul,
- 'div': nodes.Div,
- 'floordiv': nodes.FloorDiv,
- 'mod': nodes.Mod,
+ "add": nodes.Add,
+ "sub": nodes.Sub,
+ "mul": nodes.Mul,
+ "div": nodes.Div,
+ "floordiv": nodes.FloorDiv,
+ "mod": nodes.Mod,
}
class Parser(object):
- """This is the central parsing class Jinja2 uses. It's passed to
+ """This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
- def __init__(self, environment, source, name=None, filename=None,
- state=None):
+ def __init__(self, environment, source, name=None, filename=None, state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
@@ -63,31 +68,37 @@ class Parser(object):
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
- currently_looking = ' or '.join(
- "'%s'" % describe_token_expr(expr)
- for expr in end_token_stack[-1])
+ currently_looking = " or ".join(
+ "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
+ )
else:
currently_looking = None
if name is None:
- message = ['Unexpected end of template.']
+ message = ["Unexpected end of template."]
else:
- message = ['Encountered unknown tag \'%s\'.' % name]
+ message = ["Encountered unknown tag '%s'." % name]
if currently_looking:
if name is not None and name in expected:
- message.append('You probably made a nesting mistake. Jinja '
- 'is expecting this tag, but currently looking '
- 'for %s.' % currently_looking)
+ message.append(
+ "You probably made a nesting mistake. Jinja "
+ "is expecting this tag, but currently looking "
+ "for %s." % currently_looking
+ )
else:
- message.append('Jinja was looking for the following tags: '
- '%s.' % currently_looking)
+ message.append(
+ "Jinja was looking for the following tags: "
+ "%s." % currently_looking
+ )
if self._tag_stack:
- message.append('The innermost block that needs to be '
- 'closed is \'%s\'.' % self._tag_stack[-1])
+ message.append(
+ "The innermost block that needs to be "
+ "closed is '%s'." % self._tag_stack[-1]
+ )
- self.fail(' '.join(message), lineno)
+ self.fail(" ".join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
@@ -105,7 +116,7 @@ class Parser(object):
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
- if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+ if self.stream.current.type in ("variable_end", "block_end", "rparen"):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
@@ -115,22 +126,22 @@ class Parser(object):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
- nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+ nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
- if token.type != 'name':
- self.fail('tag name expected', token.lineno)
+ if token.type != "name":
+ self.fail("tag name expected", token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
- return getattr(self, 'parse_' + self.stream.current.value)()
- if token.value == 'call':
+ return getattr(self, "parse_" + self.stream.current.value)()
+ if token.value == "call":
return self.parse_call_block()
- if token.value == 'filter':
+ if token.value == "filter":
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
@@ -157,16 +168,16 @@ class Parser(object):
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
- self.stream.skip_if('colon')
+ self.stream.skip_if("colon")
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
- self.stream.expect('block_end')
+ self.stream.expect("block_end")
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
- if self.stream.current.type == 'eof':
+ if self.stream.current.type == "eof":
self.fail_eof(end_tokens)
if drop_needle:
@@ -177,50 +188,47 @@ class Parser(object):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
- if self.stream.skip_if('assign'):
+ if self.stream.skip_if("assign"):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
- body = self.parse_statements(('name:endset',),
- drop_needle=True)
+ body = self.parse_statements(("name:endset",), drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
- lineno = self.stream.expect('name:for').lineno
- target = self.parse_assign_target(extra_end_rules=('name:in',))
- self.stream.expect('name:in')
- iter = self.parse_tuple(with_condexpr=False,
- extra_end_rules=('name:recursive',))
+ lineno = self.stream.expect("name:for").lineno
+ target = self.parse_assign_target(extra_end_rules=("name:in",))
+ self.stream.expect("name:in")
+ iter = self.parse_tuple(
+ with_condexpr=False, extra_end_rules=("name:recursive",)
+ )
test = None
- if self.stream.skip_if('name:if'):
+ if self.stream.skip_if("name:if"):
test = self.parse_expression()
- recursive = self.stream.skip_if('name:recursive')
- body = self.parse_statements(('name:endfor', 'name:else'))
- if next(self.stream).value == 'endfor':
+ recursive = self.stream.skip_if("name:recursive")
+ body = self.parse_statements(("name:endfor", "name:else"))
+ if next(self.stream).value == "endfor":
else_ = []
else:
- else_ = self.parse_statements(('name:endfor',), drop_needle=True)
- return nodes.For(target, iter, body, else_, test,
- recursive, lineno=lineno)
+ else_ = self.parse_statements(("name:endfor",), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
- node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+ node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
- node.body = self.parse_statements(('name:elif', 'name:else',
- 'name:endif'))
+ node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
node.elif_ = []
node.else_ = []
token = next(self.stream)
- if token.test('name:elif'):
+ if token.test("name:elif"):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
- elif token.test('name:else'):
- result.else_ = self.parse_statements(('name:endif',),
- drop_needle=True)
+ elif token.test("name:else"):
+ result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
break
return result
@@ -228,45 +236,42 @@ class Parser(object):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
- while self.stream.current.type != 'block_end':
- lineno = self.stream.current.lineno
+ while self.stream.current.type != "block_end":
if targets:
- self.stream.expect('comma')
+ self.stream.expect("comma")
target = self.parse_assign_target()
- target.set_ctx('param')
+ target.set_ctx("param")
targets.append(target)
- self.stream.expect('assign')
+ self.stream.expect("assign")
values.append(self.parse_expression())
node.targets = targets
node.values = values
- node.body = self.parse_statements(('name:endwith',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endwith",), drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
- node.options = [
- nodes.Keyword('autoescape', self.parse_expression())
- ]
- node.body = self.parse_statements(('name:endautoescape',),
- drop_needle=True)
+ node.options = [nodes.Keyword("autoescape", self.parse_expression())]
+ node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
- node.name = self.stream.expect('name').value
- node.scoped = self.stream.skip_if('name:scoped')
+ node.name = self.stream.expect("name").value
+ node.scoped = self.stream.skip_if("name:scoped")
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
- if self.stream.current.type == 'sub':
- self.fail('Block names in Jinja have to be valid Python '
- 'identifiers and may not contain hyphens, use an '
- 'underscore instead.')
-
- node.body = self.parse_statements(('name:endblock',), drop_needle=True)
- self.stream.skip_if('name:' + node.name)
+ if self.stream.current.type == "sub":
+ self.fail(
+ "Block names in Jinja have to be valid Python "
+ "identifiers and may not contain hyphens, use an "
+ "underscore instead."
+ )
+
+ node.body = self.parse_statements(("name:endblock",), drop_needle=True)
+ self.stream.skip_if("name:" + node.name)
return node
def parse_extends(self):
@@ -275,9 +280,10 @@ class Parser(object):
return node
def parse_import_context(self, node, default):
- if self.stream.current.test_any('name:with', 'name:without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
+ if self.stream.current.test_any(
+ "name:with", "name:without"
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
self.stream.skip()
else:
node.with_context = default
@@ -286,8 +292,9 @@ class Parser(object):
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- if self.stream.current.test('name:ignore') and \
- self.stream.look().test('name:missing'):
+ if self.stream.current.test("name:ignore") and self.stream.look().test(
+ "name:missing"
+ ):
node.ignore_missing = True
self.stream.skip(2)
else:
@@ -297,67 +304,71 @@ class Parser(object):
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect('name:as')
+ self.stream.expect("name:as")
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect('name:import')
+ self.stream.expect("name:import")
node.names = []
def parse_context():
- if self.stream.current.value in ('with', 'without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
+ if self.stream.current.value in (
+ "with",
+ "without",
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
return False
while 1:
if node.names:
- self.stream.expect('comma')
- if self.stream.current.type == 'name':
+ self.stream.expect("comma")
+ if self.stream.current.type == "name":
if parse_context():
break
target = self.parse_assign_target(name_only=True)
- if target.name.startswith('_'):
- self.fail('names starting with an underline can not '
- 'be imported', target.lineno,
- exc=TemplateAssertionError)
- if self.stream.skip_if('name:as'):
+ if target.name.startswith("_"):
+ self.fail(
+ "names starting with an underline can not be imported",
+ target.lineno,
+ exc=TemplateAssertionError,
+ )
+ if self.stream.skip_if("name:as"):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
- if parse_context() or self.stream.current.type != 'comma':
+ if parse_context() or self.stream.current.type != "comma":
break
else:
- self.stream.expect('name')
- if not hasattr(node, 'with_context'):
+ self.stream.expect("name")
+ if not hasattr(node, "with_context"):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
- self.stream.expect('lparen')
- while self.stream.current.type != 'rparen':
+ self.stream.expect("lparen")
+ while self.stream.current.type != "rparen":
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
arg = self.parse_assign_target(name_only=True)
- arg.set_ctx('param')
- if self.stream.skip_if('assign'):
+ arg.set_ctx("param")
+ if self.stream.skip_if("assign"):
defaults.append(self.parse_expression())
elif defaults:
- self.fail('non-default argument follows default argument')
+ self.fail("non-default argument follows default argument")
args.append(arg)
- self.stream.expect('rparen')
+ self.stream.expect("rparen")
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
- if self.stream.current.type == 'lparen':
+ if self.stream.current.type == "lparen":
self.parse_signature(node)
else:
node.args = []
@@ -365,37 +376,40 @@ class Parser(object):
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
- self.fail('expected call', node.lineno)
- node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+ self.fail("expected call", node.lineno)
+ node.body = self.parse_statements(("name:endcall",), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
- node.body = self.parse_statements(('name:endfilter',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
- node.body = self.parse_statements(('name:endmacro',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
- while self.stream.current.type != 'block_end':
+ while self.stream.current.type != "block_end":
if node.nodes:
- self.stream.expect('comma')
+ self.stream.expect("comma")
node.nodes.append(self.parse_expression())
return node
- def parse_assign_target(self, with_tuple=True, name_only=False,
- extra_end_rules=None, with_namespace=False):
- """Parse an assignment target. As Jinja2 allows assignments to
+ def parse_assign_target(
+ self,
+ with_tuple=True,
+ name_only=False,
+ extra_end_rules=None,
+ with_namespace=False,
+ ):
+ """Parse an assignment target. As Jinja allows assignments to
tuples, this function can parse all allowed assignment targets. By
default assignments to tuples are parsed; that can be disabled, however,
by setting `with_tuple` to `False`. If only assignments to names are
@@ -403,24 +417,26 @@ class Parser(object):
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
- if with_namespace and self.stream.look().type == 'dot':
- token = self.stream.expect('name')
+ if with_namespace and self.stream.look().type == "dot":
+ token = self.stream.expect("name")
next(self.stream) # dot
- attr = self.stream.expect('name')
+ attr = self.stream.expect("name")
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
- token = self.stream.expect('name')
- target = nodes.Name(token.value, 'store', lineno=token.lineno)
+ token = self.stream.expect("name")
+ target = nodes.Name(token.value, "store", lineno=token.lineno)
else:
if with_tuple:
- target = self.parse_tuple(simplified=True,
- extra_end_rules=extra_end_rules)
+ target = self.parse_tuple(
+ simplified=True, extra_end_rules=extra_end_rules
+ )
else:
target = self.parse_primary()
- target.set_ctx('store')
+ target.set_ctx("store")
if not target.can_assign():
- self.fail('can\'t assign to %r' % target.__class__.
- __name__.lower(), target.lineno)
+ self.fail(
+ "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
+ )
return target
def parse_expression(self, with_condexpr=True):
@@ -435,9 +451,9 @@ class Parser(object):
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
- while self.stream.skip_if('name:if'):
+ while self.stream.skip_if("name:if"):
expr2 = self.parse_or()
- if self.stream.skip_if('name:else'):
+ if self.stream.skip_if("name:else"):
expr3 = self.parse_condexpr()
else:
expr3 = None
@@ -448,7 +464,7 @@ class Parser(object):
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
- while self.stream.skip_if('name:or'):
+ while self.stream.skip_if("name:or"):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
@@ -457,14 +473,14 @@ class Parser(object):
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
- while self.stream.skip_if('name:and'):
+ while self.stream.skip_if("name:and"):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
- if self.stream.current.test('name:not'):
+ if self.stream.current.test("name:not"):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
@@ -478,12 +494,13 @@ class Parser(object):
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
- elif self.stream.skip_if('name:in'):
- ops.append(nodes.Operand('in', self.parse_math1()))
- elif (self.stream.current.test('name:not') and
- self.stream.look().test('name:in')):
+ elif self.stream.skip_if("name:in"):
+ ops.append(nodes.Operand("in", self.parse_math1()))
+ elif self.stream.current.test("name:not") and self.stream.look().test(
+ "name:in"
+ ):
self.stream.skip(2)
- ops.append(nodes.Operand('notin', self.parse_math1()))
+ ops.append(nodes.Operand("notin", self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
@@ -494,7 +511,7 @@ class Parser(object):
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
- while self.stream.current.type in ('add', 'sub'):
+ while self.stream.current.type in ("add", "sub"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
@@ -505,7 +522,7 @@ class Parser(object):
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
- while self.stream.current.type == 'tilde':
+ while self.stream.current.type == "tilde":
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
@@ -515,7 +532,7 @@ class Parser(object):
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
- while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
+ while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
@@ -526,7 +543,7 @@ class Parser(object):
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
- while self.stream.current.type == 'pow':
+ while self.stream.current.type == "pow":
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
@@ -536,10 +553,10 @@ class Parser(object):
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
- if token_type == 'sub':
+ if token_type == "sub":
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
- elif token_type == 'add':
+ elif token_type == "add":
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
@@ -551,40 +568,44 @@ class Parser(object):
def parse_primary(self):
token = self.stream.current
- if token.type == 'name':
- if token.value in ('true', 'false', 'True', 'False'):
- node = nodes.Const(token.value in ('true', 'True'),
- lineno=token.lineno)
- elif token.value in ('none', 'None'):
+ if token.type == "name":
+ if token.value in ("true", "false", "True", "False"):
+ node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
+ elif token.value in ("none", "None"):
node = nodes.Const(None, lineno=token.lineno)
else:
- node = nodes.Name(token.value, 'load', lineno=token.lineno)
+ node = nodes.Name(token.value, "load", lineno=token.lineno)
next(self.stream)
- elif token.type == 'string':
+ elif token.type == "string":
next(self.stream)
buf = [token.value]
lineno = token.lineno
- while self.stream.current.type == 'string':
+ while self.stream.current.type == "string":
buf.append(self.stream.current.value)
next(self.stream)
- node = nodes.Const(''.join(buf), lineno=lineno)
- elif token.type in ('integer', 'float'):
+ node = nodes.Const("".join(buf), lineno=lineno)
+ elif token.type in ("integer", "float"):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
- elif token.type == 'lparen':
+ elif token.type == "lparen":
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
- self.stream.expect('rparen')
- elif token.type == 'lbracket':
+ self.stream.expect("rparen")
+ elif token.type == "lbracket":
node = self.parse_list()
- elif token.type == 'lbrace':
+ elif token.type == "lbrace":
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
- def parse_tuple(self, simplified=False, with_condexpr=True,
- extra_end_rules=None, explicit_parentheses=False):
+ def parse_tuple(
+ self,
+ simplified=False,
+ with_condexpr=True,
+ extra_end_rules=None,
+ explicit_parentheses=False,
+ ):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma, a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
@@ -609,16 +630,19 @@ class Parser(object):
elif with_condexpr:
parse = self.parse_expression
else:
- parse = lambda: self.parse_expression(with_condexpr=False)
+
+ def parse():
+ return self.parse_expression(with_condexpr=False)
+
args = []
is_tuple = False
while 1:
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
- if self.stream.current.type == 'comma':
+ if self.stream.current.type == "comma":
is_tuple = True
else:
break
@@ -633,46 +657,48 @@ class Parser(object):
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
- self.fail('Expected an expression, got \'%s\'' %
- describe_token(self.stream.current))
+ self.fail(
+ "Expected an expression, got '%s'"
+ % describe_token(self.stream.current)
+ )
- return nodes.Tuple(args, 'load', lineno=lineno)
+ return nodes.Tuple(args, "load", lineno=lineno)
def parse_list(self):
- token = self.stream.expect('lbracket')
+ token = self.stream.expect("lbracket")
items = []
- while self.stream.current.type != 'rbracket':
+ while self.stream.current.type != "rbracket":
if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbracket':
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbracket":
break
items.append(self.parse_expression())
- self.stream.expect('rbracket')
+ self.stream.expect("rbracket")
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
- token = self.stream.expect('lbrace')
+ token = self.stream.expect("lbrace")
items = []
- while self.stream.current.type != 'rbrace':
+ while self.stream.current.type != "rbrace":
if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbrace':
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbrace":
break
key = self.parse_expression()
- self.stream.expect('colon')
+ self.stream.expect("colon")
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
- self.stream.expect('rbrace')
+ self.stream.expect("rbrace")
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == 'dot' or token_type == 'lbracket':
+ if token_type == "dot" or token_type == "lbracket":
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == 'lparen':
+ elif token_type == "lparen":
node = self.parse_call(node)
else:
break
@@ -681,13 +707,13 @@ class Parser(object):
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == 'pipe':
+ if token_type == "pipe":
node = self.parse_filter(node)
- elif token_type == 'name' and self.stream.current.value == 'is':
+ elif token_type == "name" and self.stream.current.value == "is":
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == 'lparen':
+ elif token_type == "lparen":
node = self.parse_call(node)
else:
break
@@ -695,53 +721,54 @@ class Parser(object):
def parse_subscript(self, node):
token = next(self.stream)
- if token.type == 'dot':
+ if token.type == "dot":
attr_token = self.stream.current
next(self.stream)
- if attr_token.type == 'name':
- return nodes.Getattr(node, attr_token.value, 'load',
- lineno=token.lineno)
- elif attr_token.type != 'integer':
- self.fail('expected name or number', attr_token.lineno)
+ if attr_token.type == "name":
+ return nodes.Getattr(
+ node, attr_token.value, "load", lineno=token.lineno
+ )
+ elif attr_token.type != "integer":
+ self.fail("expected name or number", attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
- if token.type == 'lbracket':
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ if token.type == "lbracket":
args = []
- while self.stream.current.type != 'rbracket':
+ while self.stream.current.type != "rbracket":
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
args.append(self.parse_subscribed())
- self.stream.expect('rbracket')
+ self.stream.expect("rbracket")
if len(args) == 1:
arg = args[0]
else:
- arg = nodes.Tuple(args, 'load', lineno=token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
- self.fail('expected subscript expression', self.lineno)
+ arg = nodes.Tuple(args, "load", lineno=token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ self.fail("expected subscript expression", token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
next(self.stream)
args = [None]
else:
node = self.parse_expression()
- if self.stream.current.type != 'colon':
+ if self.stream.current.type != "colon":
return node
next(self.stream)
args = [node]
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
args.append(None)
- elif self.stream.current.type not in ('rbracket', 'comma'):
+ elif self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
next(self.stream)
- if self.stream.current.type not in ('rbracket', 'comma'):
+ if self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
@@ -751,7 +778,7 @@ class Parser(object):
return nodes.Slice(lineno=lineno, *args)
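For reference, `parse_subscribed` is the piece that turns colon-separated
subscripts into the arguments of a `nodes.Slice`. A minimal sketch of the
resulting template behavior, assuming a stock jinja2.Environment:

    from jinja2 import Environment

    env = Environment()
    # each colon-separated part of the subscript becomes one Slice argument
    print(env.from_string("{{ items[1:3] }} {{ items[::2] }}").render(items=[0, 1, 2, 3]))
    # -> [1, 2] [0, 2]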
def parse_call(self, node):
- token = self.stream.expect('lparen')
+ token = self.stream.expect("lparen")
args = []
kwargs = []
dyn_args = dyn_kwargs = None
@@ -759,91 +786,100 @@ class Parser(object):
def ensure(expr):
if not expr:
- self.fail('invalid syntax for function call expression',
- token.lineno)
+ self.fail("invalid syntax for function call expression", token.lineno)
- while self.stream.current.type != 'rparen':
+ while self.stream.current.type != "rparen":
if require_comma:
- self.stream.expect('comma')
+ self.stream.expect("comma")
# support for trailing comma
- if self.stream.current.type == 'rparen':
+ if self.stream.current.type == "rparen":
break
- if self.stream.current.type == 'mul':
+ if self.stream.current.type == "mul":
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
- elif self.stream.current.type == 'pow':
+ elif self.stream.current.type == "pow":
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
- ensure(dyn_args is None and dyn_kwargs is None)
- if self.stream.current.type == 'name' and \
- self.stream.look().type == 'assign':
+ if (
+ self.stream.current.type == "name"
+ and self.stream.look().type == "assign"
+ ):
+ # Parsing a kwarg
+ ensure(dyn_kwargs is None)
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
- kwargs.append(nodes.Keyword(key, value,
- lineno=value.lineno))
+ kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
else:
- ensure(not kwargs)
+ # Parsing an arg
+ ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
args.append(self.parse_expression())
require_comma = True
- self.stream.expect('rparen')
+ self.stream.expect("rparen")
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
- return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
- lineno=token.lineno)
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
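The reordered `ensure` checks above encode Python-like call syntax: `*args`
and `**kwargs` may each appear at most once, keyword arguments may still
follow `*args`, but a plain positional argument may not follow a keyword
argument. A hedged sketch of the behavior, assuming a stock
jinja2.Environment:

    from jinja2 import Environment

    env = Environment()
    # positional before keyword parses and renders as expected
    print(env.from_string("{{ f(1, b=2) }}").render(f=lambda a, b: a + b))  # -> 3
    # "{{ f(b=2, 1) }}" instead raises a TemplateSyntaxError:
    # "invalid syntax for function call expression"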
def parse_filter(self, node, start_inline=False):
- while self.stream.current.type == 'pipe' or start_inline:
+ while self.stream.current.type == "pipe" or start_inline:
if not start_inline:
next(self.stream)
- token = self.stream.expect('name')
+ token = self.stream.expect("name")
name = token.value
- while self.stream.current.type == 'dot':
+ while self.stream.current.type == "dot":
next(self.stream)
- name += '.' + self.stream.expect('name').value
- if self.stream.current.type == 'lparen':
+ name += "." + self.stream.expect("name").value
+ if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
- node = nodes.Filter(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
+ node = nodes.Filter(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
- if self.stream.current.test('name:not'):
+ if self.stream.current.test("name:not"):
next(self.stream)
negated = True
else:
negated = False
- name = self.stream.expect('name').value
- while self.stream.current.type == 'dot':
+ name = self.stream.expect("name").value
+ while self.stream.current.type == "dot":
next(self.stream)
- name += '.' + self.stream.expect('name').value
+ name += "." + self.stream.expect("name").value
dyn_args = dyn_kwargs = None
kwargs = []
- if self.stream.current.type == 'lparen':
+ if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- elif (self.stream.current.type in ('name', 'string', 'integer',
- 'float', 'lparen', 'lbracket',
- 'lbrace') and not
- self.stream.current.test_any('name:else', 'name:or',
- 'name:and')):
- if self.stream.current.test('name:is'):
- self.fail('You cannot chain multiple tests with is')
- args = [self.parse_primary()]
+ elif self.stream.current.type in (
+ "name",
+ "string",
+ "integer",
+ "float",
+ "lparen",
+ "lbracket",
+ "lbrace",
+ ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
+ if self.stream.current.test("name:is"):
+ self.fail("You cannot chain multiple tests with is")
+ arg_node = self.parse_primary()
+ arg_node = self.parse_postfix(arg_node)
+ args = [arg_node]
else:
args = []
- node = nodes.Test(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
+ node = nodes.Test(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
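Because the test argument is now routed through `parse_postfix`, it may
itself carry attribute access or subscripts. A small sketch of what this
parses (the names are illustrative), assuming a stock jinja2.Environment:

    from jinja2 import Environment

    env = Environment()
    # "factors[0]" is parsed as the argument of the "divisibleby" test
    print(env.from_string("{{ 10 is divisibleby factors[0] }}").render(factors=[5]))
    # -> True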
@@ -865,29 +901,29 @@ class Parser(object):
try:
while self.stream:
token = self.stream.current
- if token.type == 'data':
+ if token.type == "data":
if token.value:
- add_data(nodes.TemplateData(token.value,
- lineno=token.lineno))
+ add_data(nodes.TemplateData(token.value, lineno=token.lineno))
next(self.stream)
- elif token.type == 'variable_begin':
+ elif token.type == "variable_begin":
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
- self.stream.expect('variable_end')
- elif token.type == 'block_begin':
+ self.stream.expect("variable_end")
+ elif token.type == "block_begin":
flush_data()
next(self.stream)
- if end_tokens is not None and \
- self.stream.current.test_any(*end_tokens):
+ if end_tokens is not None and self.stream.current.test_any(
+ *end_tokens
+ ):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
- self.stream.expect('block_end')
+ self.stream.expect("block_end")
else:
- raise AssertionError('internal parsing error')
+ raise AssertionError("internal parsing error")
flush_data()
finally:
diff --git a/lib/spack/external/jinja2/runtime.py b/lib/spack/external/jinja2/runtime.py
index 52dfeaebd6..3ad7968624 100644
--- a/lib/spack/external/jinja2/runtime.py
+++ b/lib/spack/external/jinja2/runtime.py
@@ -1,43 +1,62 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.runtime
- ~~~~~~~~~~~~~~
-
- Runtime helpers.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
+"""The runtime functions and state used by compiled templates."""
import sys
-
from itertools import chain
from types import MethodType
-from jinja2.nodes import EvalContext, _context_function_types
-from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
- internalcode, object_type_repr, evalcontextfunction, Namespace
-from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
- TemplateNotFound
-from jinja2._compat import imap, text_type, iteritems, \
- implements_iterator, implements_to_string, string_types, PY2, \
- with_metaclass
-
+from markupsafe import escape # noqa: F401
+from markupsafe import Markup
+from markupsafe import soft_unicode
+
+from ._compat import abc
+from ._compat import imap
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import with_metaclass
+from .exceptions import TemplateNotFound # noqa: F401
+from .exceptions import TemplateRuntimeError # noqa: F401
+from .exceptions import UndefinedError
+from .nodes import EvalContext
+from .utils import concat
+from .utils import evalcontextfunction
+from .utils import internalcode
+from .utils import missing
+from .utils import Namespace # noqa: F401
+from .utils import object_type_repr
# these variables are exported to the template runtime
-__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
- 'TemplateRuntimeError', 'missing', 'concat', 'escape',
- 'markup_join', 'unicode_join', 'to_string', 'identity',
- 'TemplateNotFound', 'Namespace']
+exported = [
+ "LoopContext",
+ "TemplateReference",
+ "Macro",
+ "Markup",
+ "TemplateRuntimeError",
+ "missing",
+ "concat",
+ "escape",
+ "markup_join",
+ "unicode_join",
+ "to_string",
+ "identity",
+ "TemplateNotFound",
+ "Namespace",
+ "Undefined",
+]
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
-#: the identity function. Useful for certain things in the environment
-identity = lambda x: x
-_first_iteration = object()
-_last_iteration = object()
+def identity(x):
+ """Returns its argument. Useful for certain things in the
+ environment.
+ """
+ return x
def markup_join(seq):
@@ -46,8 +65,8 @@ def markup_join(seq):
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
- if hasattr(arg, '__html__'):
- return Markup(u'').join(chain(buf, iterator))
+ if hasattr(arg, "__html__"):
+ return Markup(u"").join(chain(buf, iterator))
return concat(buf)
@@ -56,9 +75,16 @@ def unicode_join(seq):
return concat(imap(text_type, seq))
-def new_context(environment, template_name, blocks, vars=None,
- shared=None, globals=None, locals=None):
- """Internal helper to for context creation."""
+def new_context(
+ environment,
+ template_name,
+ blocks,
+ vars=None,
+ shared=None,
+ globals=None,
+ locals=None,
+):
+ """Internal helper for context creation."""
if vars is None:
vars = {}
if shared:
@@ -73,8 +99,7 @@ def new_context(environment, template_name, blocks, vars=None,
for key, value in iteritems(locals):
if value is not missing:
parent[key] = value
- return environment.context_class(environment, parent, template_name,
- blocks)
+ return environment.context_class(environment, parent, template_name, blocks)
class TemplateReference(object):
@@ -88,20 +113,16 @@ class TemplateReference(object):
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self.__context.name
- )
+ return "<%s %r>" % (self.__class__.__name__, self.__context.name)
def _get_func(x):
- return getattr(x, '__func__', x)
+ return getattr(x, "__func__", x)
class ContextMeta(type):
-
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
if bases == ():
return rv
@@ -112,11 +133,15 @@ class ContextMeta(type):
# If we have a changed resolve but no changed default or missing
# resolve we invert the call logic.
- if resolve is not default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
+ if (
+ resolve is not default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
rv._legacy_resolve_mode = True
- elif resolve is default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
+ elif (
+ resolve is default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
rv._fast_resolve_mode = True
return rv
@@ -149,6 +174,7 @@ class Context(with_metaclass(ContextMeta)):
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
+
# XXX: we want to eventually make this be a deprecation warning and
# remove it.
_legacy_resolve_mode = False
@@ -179,9 +205,9 @@ class Context(with_metaclass(ContextMeta)):
index = blocks.index(current) + 1
blocks[index]
except LookupError:
- return self.environment.undefined('there is no parent block '
- 'called %r.' % name,
- name='super')
+ return self.environment.undefined(
+ "there is no parent block called %r." % name, name="super"
+ )
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
@@ -232,7 +258,7 @@ class Context(with_metaclass(ContextMeta)):
return dict(self.parent, **self.vars)
@internalcode
- def call(__self, __obj, *args, **kwargs):
+ def call(__self, __obj, *args, **kwargs): # noqa: B902
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
@@ -242,55 +268,62 @@ class Context(with_metaclass(ContextMeta)):
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
- if hasattr(__obj, '__call__'):
+ if hasattr(__obj, "__call__"): # noqa: B004
fn = __obj.__call__
- for fn_type in ('contextfunction',
- 'evalcontextfunction',
- 'environmentfunction'):
+ for fn_type in (
+ "contextfunction",
+ "evalcontextfunction",
+ "environmentfunction",
+ ):
if hasattr(fn, fn_type):
__obj = fn
break
- if isinstance(__obj, _context_function_types):
- if getattr(__obj, 'contextfunction', 0):
+ if callable(__obj):
+ if getattr(__obj, "contextfunction", False) is True:
args = (__self,) + args
- elif getattr(__obj, 'evalcontextfunction', 0):
+ elif getattr(__obj, "evalcontextfunction", False) is True:
args = (__self.eval_ctx,) + args
- elif getattr(__obj, 'environmentfunction', 0):
+ elif getattr(__obj, "environmentfunction", False) is True:
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
- return __self.environment.undefined('value was undefined because '
- 'a callable raised a '
- 'StopIteration exception')
+ return __self.environment.undefined(
+ "value was undefined because "
+ "a callable raised a "
+ "StopIteration exception"
+ )
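For context, `Context.call` is the hook that injects the active context,
eval context, or environment as the first argument of decorated callables.
A minimal sketch using the public `contextfunction` decorator from the same
Jinja release:

    from jinja2 import Environment, contextfunction

    @contextfunction
    def resolve_name(context):
        # Context.call prepends the active Context for contextfunctions
        return context.resolve("name")

    env = Environment()
    env.globals["resolve_name"] = resolve_name
    print(env.from_string("{{ resolve_name() }}").render(name="spack"))  # -> spack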
def derived(self, locals=None):
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
- context = new_context(self.environment, self.name, {},
- self.get_all(), True, None, locals)
+ context = new_context(
+ self.environment, self.name, {}, self.get_all(), True, None, locals
+ )
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
- def _all(meth):
- proxy = lambda self: getattr(self.get_all(), meth)()
+ def _all(meth): # noqa: B902
+ def proxy(self):
+ return getattr(self.get_all(), meth)()
+
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
- keys = _all('keys')
- values = _all('values')
- items = _all('items')
+ keys = _all("keys")
+ values = _all("values")
+ items = _all("items")
# not available on python 3
if PY2:
- iterkeys = _all('iterkeys')
- itervalues = _all('itervalues')
- iteritems = _all('iteritems')
+ iterkeys = _all("iterkeys")
+ itervalues = _all("itervalues")
+ iteritems = _all("iteritems")
del _all
def __contains__(self, name):
@@ -306,23 +339,14 @@ class Context(with_metaclass(ContextMeta)):
return item
def __repr__(self):
- return '<%s %s of %r>' % (
+ return "<%s %s of %r>" % (
self.__class__.__name__,
repr(self.get_all()),
- self.name
+ self.name,
)
-# register the context as mapping if possible
-try:
- from collections.abc import Mapping
- Mapping.register(Context)
-except ImportError:
- try:
- from collections import Mapping
- Mapping.register(Context)
- except ImportError:
- pass
+abc.Mapping.register(Context)
class BlockReference(object):
@@ -338,11 +362,10 @@ class BlockReference(object):
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
- return self._context.environment. \
- undefined('there is no parent block called %r.' %
- self.name, name='super')
- return BlockReference(self.name, self._context, self._stack,
- self._depth + 1)
+ return self._context.environment.undefined(
+ "there is no parent block called %r." % self.name, name="super"
+ )
+ return BlockReference(self.name, self._context, self._stack, self._depth + 1)
@internalcode
def __call__(self):
@@ -352,143 +375,212 @@ class BlockReference(object):
return rv
-class LoopContextBase(object):
- """A loop context for dynamic iteration."""
+@implements_iterator
+class LoopContext:
+ """A wrapper iterable for dynamic ``for`` loops, with information
+ about the loop and iteration.
+ """
+
+ #: Current iteration of the loop, starting at 0.
+ index0 = -1
- _before = _first_iteration
- _current = _first_iteration
- _after = _last_iteration
_length = None
+ _after = missing
+ _current = missing
+ _before = missing
+ _last_changed_value = missing
- def __init__(self, undefined, recurse=None, depth0=0):
+ def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ """
+ :param iterable: Iterable to wrap.
+ :param undefined: :class:`Undefined` class to use for next and
+ previous items.
+ :param recurse: The function to render the loop body when the
+ loop is marked recursive.
+ :param depth0: Incremented when looping recursively.
+ """
+ self._iterable = iterable
+ self._iterator = self._to_iterator(iterable)
self._undefined = undefined
self._recurse = recurse
- self.index0 = -1
+ #: How many levels deep a recursive loop currently is, starting at 0.
self.depth0 = depth0
- self._last_checked_value = missing
- def cycle(self, *args):
- """Cycles among the arguments with the current loop index."""
- if not args:
- raise TypeError('no items for cycling given')
- return args[self.index0 % len(args)]
+ @staticmethod
+ def _to_iterator(iterable):
+ return iter(iterable)
- def changed(self, *value):
- """Checks whether the value has changed since the last call."""
- if self._last_checked_value != value:
- self._last_checked_value = value
- return True
- return False
+ @property
+ def length(self):
+ """Length of the iterable.
- first = property(lambda x: x.index0 == 0)
- last = property(lambda x: x._after is _last_iteration)
- index = property(lambda x: x.index0 + 1)
- revindex = property(lambda x: x.length - x.index0)
- revindex0 = property(lambda x: x.length - x.index)
- depth = property(lambda x: x.depth0 + 1)
+ If the iterable is a generator or otherwise does not have a
+ size, it is eagerly evaluated to get a size.
+ """
+ if self._length is not None:
+ return self._length
- @property
- def previtem(self):
- if self._before is _first_iteration:
- return self._undefined('there is no previous item')
- return self._before
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = list(self._iterator)
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
- @property
- def nextitem(self):
- if self._after is _last_iteration:
- return self._undefined('there is no next item')
- return self._after
+ return self._length
def __len__(self):
return self.length
- @internalcode
- def loop(self, iterable):
- if self._recurse is None:
- raise TypeError('Tried to call non recursive loop. Maybe you '
- "forgot the 'recursive' modifier.")
- return self._recurse(iterable, self._recurse, self.depth0 + 1)
+ @property
+ def depth(self):
+ """How many levels deep a recursive loop currently is, starting at 1."""
+ return self.depth0 + 1
- # a nifty trick to enhance the error message if someone tried to call
- # the the loop without or with too many arguments.
- __call__ = loop
- del loop
+ @property
+ def index(self):
+ """Current iteration of the loop, starting at 1."""
+ return self.index0 + 1
- def __repr__(self):
- return '<%s %r/%r>' % (
- self.__class__.__name__,
- self.index,
- self.length
- )
+ @property
+ def revindex0(self):
+ """Number of iterations from the end of the loop, ending at 0.
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index
-class LoopContext(LoopContextBase):
+ @property
+ def revindex(self):
+ """Number of iterations from the end of the loop, ending at 1.
- def __init__(self, iterable, undefined, recurse=None, depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._iterator = iter(iterable)
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index0
- # try to get the length of the iterable early. This must be done
- # here because there are some broken iterators around where there
- # __len__ is the number of iterations left (i'm looking at your
- # listreverseiterator!).
- try:
- self._length = len(iterable)
- except (TypeError, AttributeError):
- self._length = None
- self._after = self._safe_next()
+ @property
+ def first(self):
+ """Whether this is the first iteration of the loop."""
+ return self.index0 == 0
+
+ def _peek_next(self):
+ """Return the next element in the iterable, or :data:`missing`
+ if the iterable is exhausted. Only peeks one item ahead, caching
+ the result in :attr:`_after` for use in subsequent checks. The
+ cache is reset when :meth:`__next__` is called.
+ """
+ if self._after is not missing:
+ return self._after
+
+ self._after = next(self._iterator, missing)
+ return self._after
@property
- def length(self):
- if self._length is None:
- # if was not possible to get the length of the iterator when
- # the loop context was created (ie: iterating over a generator)
- # we have to convert the iterable into a sequence and use the
- # length of that + the number of iterations so far.
- iterable = tuple(self._iterator)
- self._iterator = iter(iterable)
- iterations_done = self.index0 + 2
- self._length = len(iterable) + iterations_done
- return self._length
+ def last(self):
+ """Whether this is the last iteration of the loop.
- def __iter__(self):
- return LoopContextIterator(self)
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ return self._peek_next() is missing
- def _safe_next(self):
- try:
- return next(self._iterator)
- except StopIteration:
- return _last_iteration
+ @property
+ def previtem(self):
+ """The item in the previous iteration. Undefined during the
+ first iteration.
+ """
+ if self.first:
+ return self._undefined("there is no previous item")
+ return self._before
-@implements_iterator
-class LoopContextIterator(object):
- """The iterator for a loop context."""
- __slots__ = ('context',)
+ @property
+ def nextitem(self):
+ """The item in the next iteration. Undefined during the last
+ iteration.
- def __init__(self, context):
- self.context = context
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ rv = self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def cycle(self, *args):
+ """Return a value from the given args, cycling through based on
+ the current :attr:`index0`.
+
+ :param args: One or more values to cycle through.
+ """
+ if not args:
+ raise TypeError("no items for cycling given")
+
+ return args[self.index0 % len(args)]
+
+ def changed(self, *value):
+ """Return ``True`` if previously called with a different value
+ (including when called for the first time).
+
+ :param value: One or more values to compare to the last call.
+ """
+ if self._last_changed_value != value:
+ self._last_changed_value = value
+ return True
+
+ return False
def __iter__(self):
return self
def __next__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- ctx._after = ctx._safe_next()
- return ctx._current, ctx
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = next(self._iterator)
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+ @internalcode
+ def __call__(self, iterable):
+ """When iterating over nested data, render the body of the loop
+ recursively with the given inner iterable data.
+
+ The loop must have the ``recursive`` marker for this to work.
+ """
+ if self._recurse is None:
+ raise TypeError(
+ "The loop must have the 'recursive' marker to be called recursively."
+ )
+
+ return self._recurse(iterable, self._recurse, depth=self.depth)
+
+ def __repr__(self):
+ return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
class Macro(object):
"""Wraps a macro function."""
- def __init__(self, environment, func, name, arguments,
- catch_kwargs, catch_varargs, caller,
- default_autoescape=None):
+ def __init__(
+ self,
+ environment,
+ func,
+ name,
+ arguments,
+ catch_kwargs,
+ catch_varargs,
+ caller,
+ default_autoescape=None,
+ ):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
@@ -497,7 +589,7 @@ class Macro(object):
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
- self.explicit_caller = 'caller' in arguments
+ self.explicit_caller = "caller" in arguments
if default_autoescape is None:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@@ -509,9 +601,8 @@ class Macro(object):
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode it was largely
# unused for deciding on escaping. This turns out to be
- # problemtic for macros because if a macro is safe or not not so
- # much depends on the escape mode when it was defined but when it
- # was used.
+ # problematic for macros because whether a macro is safe depends not
+ # on the escape mode when it was defined, but rather when it was used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
@@ -519,7 +610,7 @@ class Macro(object):
# check here.
#
# This is considered safe because an eval context is not a valid
- # argument to callables otherwise anwyays. Worst case here is
+ # argument to callables otherwise anyway. Worst case here is
# that if no eval context is passed we fall back to the compile
# time autoescape flag.
if args and isinstance(args[0], EvalContext):
@@ -529,7 +620,7 @@ class Macro(object):
autoescape = self._default_autoescape
# try to consume the positional arguments
- arguments = list(args[:self._argument_count])
+ arguments = list(args[: self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
@@ -540,12 +631,12 @@ class Macro(object):
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
- for idx, name in enumerate(self.arguments[len(arguments):]):
+ for name in self.arguments[len(arguments) :]:
try:
value = kwargs.pop(name)
except KeyError:
value = missing
- if name == 'caller':
+ if name == "caller":
found_caller = True
arguments.append(value)
else:
@@ -555,26 +646,31 @@ class Macro(object):
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
- caller = kwargs.pop('caller', None)
+ caller = kwargs.pop("caller", None)
if caller is None:
- caller = self._environment.undefined('No caller defined',
- name='caller')
+ caller = self._environment.undefined("No caller defined", name="caller")
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
- if 'caller' in kwargs:
- raise TypeError('macro %r was invoked with two values for '
- 'the special caller argument. This is '
- 'most likely a bug.' % self.name)
- raise TypeError('macro %r takes no keyword argument %r' %
- (self.name, next(iter(kwargs))))
+ if "caller" in kwargs:
+ raise TypeError(
+ "macro %r was invoked with two values for "
+ "the special caller argument. This is "
+ "most likely a bug." % self.name
+ )
+ raise TypeError(
+ "macro %r takes no keyword argument %r"
+ % (self.name, next(iter(kwargs)))
+ )
if self.catch_varargs:
- arguments.append(args[self._argument_count:])
+ arguments.append(args[self._argument_count :])
elif len(args) > self._argument_count:
- raise TypeError('macro %r takes not more than %d argument(s)' %
- (self.name, len(self.arguments)))
+ raise TypeError(
+ "macro %r takes not more than %d argument(s)"
+ % (self.name, len(self.arguments))
+ )
return self._invoke(arguments, autoescape)
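The argument-consumption logic above maps a template-side call onto the
macro's declared signature, filling in defaults and the special `caller`
argument as needed. A small usage sketch:

    from jinja2 import Environment

    env = Environment()
    src = "{% macro add(a, b=2) %}{{ a + b }}{% endmacro %}{{ add(1) }} {{ add(1, b=9) }}"
    print(env.from_string(src).render())  # -> 3 10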
@@ -586,16 +682,16 @@ class Macro(object):
return rv
def __repr__(self):
- return '<%s %s>' % (
+ return "<%s %s>" % (
self.__class__.__name__,
- self.name is None and 'anonymous' or repr(self.name)
+ self.name is None and "anonymous" or repr(self.name),
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
- iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
+ iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
@@ -607,8 +703,13 @@ class Undefined(object):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
- __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
- '_undefined_exception')
+
+ __slots__ = (
+ "_undefined_hint",
+ "_undefined_obj",
+ "_undefined_name",
+ "_undefined_exception",
+ )
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
@@ -616,40 +717,86 @@ class Undefined(object):
self._undefined_name = name
self._undefined_exception = exc
+ @property
+ def _undefined_message(self):
+ """Build a message about the undefined value based on how it was
+ accessed.
+ """
+ if self._undefined_hint:
+ return self._undefined_hint
+
+ if self._undefined_obj is missing:
+ return "%r is undefined" % self._undefined_name
+
+ if not isinstance(self._undefined_name, string_types):
+ return "%s has no element %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
+ return "%r has no attribute %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
- """Regular callback function for undefined objects that raises an
- `jinja2.exceptions.UndefinedError` on call.
+ """Raise an :exc:`UndefinedError` when operations are performed
+ on the undefined value.
"""
- if self._undefined_hint is None:
- if self._undefined_obj is missing:
- hint = '%r is undefined' % self._undefined_name
- elif not isinstance(self._undefined_name, string_types):
- hint = '%s has no element %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = '%r has no attribute %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = self._undefined_hint
- raise self._undefined_exception(hint)
+ raise self._undefined_exception(self._undefined_message)
@internalcode
def __getattr__(self, name):
- if name[:2] == '__':
+ if name[:2] == "__":
raise AttributeError(name)
return self._fail_with_undefined_error()
- __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
- __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
- __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
- __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
- __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
- __rsub__ = _fail_with_undefined_error
+ __add__ = (
+ __radd__
+ ) = (
+ __mul__
+ ) = (
+ __rmul__
+ ) = (
+ __div__
+ ) = (
+ __rdiv__
+ ) = (
+ __truediv__
+ ) = (
+ __rtruediv__
+ ) = (
+ __floordiv__
+ ) = (
+ __rfloordiv__
+ ) = (
+ __mod__
+ ) = (
+ __rmod__
+ ) = (
+ __pos__
+ ) = (
+ __neg__
+ ) = (
+ __call__
+ ) = (
+ __getitem__
+ ) = (
+ __lt__
+ ) = (
+ __le__
+ ) = (
+ __gt__
+ ) = (
+ __ge__
+ ) = (
+ __int__
+ ) = (
+ __float__
+ ) = (
+ __complex__
+ ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
@@ -661,7 +808,7 @@ class Undefined(object):
return id(type(self))
def __str__(self):
- return u''
+ return u""
def __len__(self):
return 0
@@ -672,10 +819,11 @@ class Undefined(object):
def __nonzero__(self):
return False
+
__bool__ = __nonzero__
def __repr__(self):
- return 'Undefined'
+ return "Undefined"
def make_logging_undefined(logger=None, base=None):
@@ -700,6 +848,7 @@ def make_logging_undefined(logger=None, base=None):
"""
if logger is None:
import logging
+
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
@@ -708,26 +857,27 @@ def make_logging_undefined(logger=None, base=None):
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
- hint = '%s is undefined' % undef._undefined_name
+ hint = "%s is undefined" % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
- hint = '%s has no element %s' % (
+ hint = "%s has no element %s" % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name)
+ undef._undefined_name,
+ )
else:
- hint = '%s has no attribute %s' % (
+ hint = "%s has no attribute %s" % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name)
+ undef._undefined_name,
+ )
else:
hint = undef._undefined_hint
- logger.warning('Template variable warning: %s', hint)
+ logger.warning("Template variable warning: %s", hint)
class LoggingUndefined(base):
-
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
- logger.error('Template variable error: %s', str(e))
+ logger.error("Template variable error: %s", str(e))
raise e
def __str__(self):
@@ -741,6 +891,7 @@ def make_logging_undefined(logger=None, base=None):
return rv
if PY2:
+
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
@@ -750,7 +901,9 @@ def make_logging_undefined(logger=None, base=None):
rv = base.__unicode__(self)
_log_message(self)
return rv
+
else:
+
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
@@ -759,6 +912,36 @@ def make_logging_undefined(logger=None, base=None):
return LoggingUndefined
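A short usage sketch for the factory above (the logger name is
illustrative):

    import logging

    from jinja2 import Environment, Undefined, make_logging_undefined

    logger = logging.getLogger("templates")
    env = Environment(undefined=make_logging_undefined(logger=logger, base=Undefined))
    # renders the usual empty string, but also logs
    # "Template variable warning: missing is undefined"
    print(env.from_string("{{ missing }}").render())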
+# No @implements_to_string decorator here because __str__
+# is not overwritten from Undefined in this class.
+# This would cause a recursion error in Python 2.
+class ChainableUndefined(Undefined):
+ """An undefined that is chainable, where both ``__getattr__`` and
+ ``__getitem__`` return itself rather than raising an
+ :exc:`UndefinedError`.
+
+ >>> foo = ChainableUndefined(name='foo')
+ >>> str(foo.bar['baz'])
+ ''
+ >>> foo.bar['baz'] + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+
+ .. versionadded:: 2.11.0
+ """
+
+ __slots__ = ()
+
+ def __html__(self):
+ return self.__str__()
+
+ def __getattr__(self, _):
+ return self
+
+ __getitem__ = __getattr__
+
+
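A usage sketch for the new class, mirroring its doctest:

    from jinja2 import Environment
    from jinja2.runtime import ChainableUndefined

    env = Environment(undefined=ChainableUndefined)
    # attribute and item access chain silently instead of raising
    print(env.from_string("{{ user.profile.name }}").render())  # -> '' (empty string)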
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
@@ -773,17 +956,18 @@ class DebugUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
+
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
- return u'{{ %s }}' % self._undefined_name
- return '{{ no such element: %s[%r] }}' % (
+ return u"{{ %s }}" % self._undefined_name
+ return "{{ no such element: %s[%r] }}" % (
object_type_repr(self._undefined_obj),
- self._undefined_name
+ self._undefined_name,
)
- return u'{{ undefined value printed: %s }}' % self._undefined_hint
+ return u"{{ undefined value printed: %s }}" % self._undefined_hint
@implements_to_string
@@ -806,12 +990,22 @@ class StrictUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
+
__slots__ = ()
- __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
- __ne__ = __bool__ = __hash__ = \
- Undefined._fail_with_undefined_error
+ __iter__ = (
+ __str__
+ ) = (
+ __len__
+ ) = (
+ __nonzero__
+ ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
-del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
+del (
+ Undefined.__slots__,
+ ChainableUndefined.__slots__,
+ DebugUndefined.__slots__,
+ StrictUndefined.__slots__,
+)
diff --git a/lib/spack/external/jinja2/sandbox.py b/lib/spack/external/jinja2/sandbox.py
index b9e5ec495a..cfd7993aee 100644
--- a/lib/spack/external/jinja2/sandbox.py
+++ b/lib/spack/external/jinja2/sandbox.py
@@ -1,76 +1,66 @@
# -*- coding: utf-8 -*-
+"""A sandbox layer that ensures unsafe operations cannot be performed.
+Useful when the template itself comes from an untrusted source.
"""
- jinja2.sandbox
- ~~~~~~~~~~~~~~
-
- Adds a sandbox layer to Jinja as it was the default behavior in the old
- Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
- default behavior is easier to use.
-
- The behavior can be changed by subclassing the environment.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import types
import operator
-import sys
-from jinja2.environment import Environment
-from jinja2.exceptions import SecurityError
-from jinja2._compat import string_types, PY2
-from jinja2.utils import Markup
-
-from markupsafe import EscapeFormatter
+import types
+import warnings
+from collections import deque
from string import Formatter
-if sys.version_info >= (3, 3):
- from collections.abc import Mapping
-else:
- from collections import Mapping
+from markupsafe import EscapeFormatter
+from markupsafe import Markup
+from ._compat import abc
+from ._compat import PY2
+from ._compat import range_type
+from ._compat import string_types
+from .environment import Environment
+from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
- UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
- 'func_defaults', 'func_globals'])
+ UNSAFE_FUNCTION_ATTRIBUTES = {
+ "func_closure",
+ "func_code",
+ "func_dict",
+ "func_defaults",
+ "func_globals",
+ }
else:
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
-
#: unsafe method attributes. function attributes are unsafe for methods too
-UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
-#: unsafe generator attirbutes.
-UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
-UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
+UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
-UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
-
-import warnings
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
# make sure we don't warn in python 2.6 about stuff we don't care about
-warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
- module='jinja2.sandbox')
-
-from collections import deque
+warnings.filterwarnings(
+ "ignore", "the sets module", DeprecationWarning, module=__name__
+)
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
-
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
+
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
@@ -79,39 +69,60 @@ except ImportError:
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
+
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
-if sys.version_info >= (3, 3):
- from collections.abc import MutableSet, MutableMapping, MutableSequence
-else:
- from collections import MutableSet, MutableMapping, MutableSequence
-_mutable_set_types += (MutableSet,)
-_mutable_mapping_types += (MutableMapping,)
-_mutable_sequence_types += (MutableSequence,)
-
+_mutable_set_types += (abc.MutableSet,)
+_mutable_mapping_types += (abc.MutableMapping,)
+_mutable_sequence_types += (abc.MutableSequence,)
_mutable_spec = (
- (_mutable_set_types, frozenset([
- 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
- 'symmetric_difference_update', 'update'
- ])),
- (_mutable_mapping_types, frozenset([
- 'clear', 'pop', 'popitem', 'setdefault', 'update'
- ])),
- (_mutable_sequence_types, frozenset([
- 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
- ])),
- (deque, frozenset([
- 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
- 'popleft', 'remove', 'rotate'
- ]))
+ (
+ _mutable_set_types,
+ frozenset(
+ [
+ "add",
+ "clear",
+ "difference_update",
+ "discard",
+ "pop",
+ "remove",
+ "symmetric_difference_update",
+ "update",
+ ]
+ ),
+ ),
+ (
+ _mutable_mapping_types,
+ frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
+ ),
+ (
+ _mutable_sequence_types,
+ frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
+ ),
+ (
+ deque,
+ frozenset(
+ [
+ "append",
+ "appendleft",
+ "clear",
+ "extend",
+ "extendleft",
+ "pop",
+ "popleft",
+ "remove",
+ "rotate",
+ ]
+ ),
+ ),
)
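
As a sketch (not part of the diff) of what this table feeds, modifies_known_mutable, defined below, reports whether an attribute would mutate a known container type:

from jinja2.sandbox import modifies_known_mutable

assert modifies_known_mutable({}, "pop") is True    # dict.pop mutates
assert modifies_known_mutable({}, "get") is False   # dict.get is read-only
assert modifies_known_mutable([], "sort") is True   # list.sort mutates
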
-class _MagicFormatMapping(Mapping):
+class _MagicFormatMapping(abc.Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
@@ -125,7 +136,7 @@ class _MagicFormatMapping(Mapping):
self._last_index = 0
def __getitem__(self, key):
- if key == '':
+ if key == "":
idx = self._last_index
self._last_index += 1
try:
@@ -143,9 +154,9 @@ class _MagicFormatMapping(Mapping):
def inspect_format_method(callable):
- if not isinstance(callable, (types.MethodType,
- types.BuiltinMethodType)) or \
- callable.__name__ != 'format':
+ if not isinstance(
+ callable, (types.MethodType, types.BuiltinMethodType)
+ ) or callable.__name__ not in ("format", "format_map"):
return None
obj = callable.__self__
if isinstance(obj, string_types):
@@ -156,10 +167,14 @@ def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
- rng = range(*args)
+ rng = range_type(*args)
+
if len(rng) > MAX_RANGE:
- raise OverflowError('range too big, maximum size for range is %d' %
- MAX_RANGE)
+ raise OverflowError(
+ "Range too big. The sandbox blocks ranges larger than"
+ " MAX_RANGE (%d)." % MAX_RANGE
+ )
+
return rng
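
A sketch (not part of the diff) of the bound this enforces at render time; the oversized range is illustrative:

from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment()  # installs safe_range as the template "range"
print(env.from_string("{{ range(3) | list }}").render())  # -> [0, 1, 2]
try:
    env.from_string("{{ range(10 ** 9) | list }}").render()
except OverflowError as err:
    print(err)  # ranges longer than MAX_RANGE are rejected
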
@@ -192,24 +207,25 @@ def is_internal_attribute(obj, attr):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
- if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
- attr in UNSAFE_METHOD_ATTRIBUTES:
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
- if attr == 'mro':
+ if attr == "mro":
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
- elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
+ elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
- elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
+ elif hasattr(types, "AsyncGeneratorType") and isinstance(
+ obj, types.AsyncGeneratorType
+ ):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
- return attr.startswith('__')
+ return attr.startswith("__")
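
For reference (not part of the diff), a few checks that follow from the logic above:

from jinja2.sandbox import is_internal_attribute

assert is_internal_attribute(str, "mro") is True      # "mro" on types is internal
assert is_internal_attribute(str, "upper") is False   # ordinary methods are fine
assert is_internal_attribute(str, "__mro__") is True  # dunder names always are
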
def modifies_known_mutable(obj, attr):
@@ -250,28 +266,26 @@ class SandboxedEnvironment(Environment):
raised. However also other exceptions may occur during the rendering so
the caller has to ensure that all exceptions are caught.
"""
+
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
- '+': operator.add,
- '-': operator.sub,
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod
+ "+": operator.add,
+ "-": operator.sub,
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
- default_unop_table = {
- '+': operator.pos,
- '-': operator.neg
- }
+ default_unop_table = {"+": operator.pos, "-": operator.neg}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
@@ -307,7 +321,7 @@ class SandboxedEnvironment(Environment):
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
- method returns `True`, :meth:`call_unop` is excuted for this unary
+ method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
@@ -321,10 +335,9 @@ class SandboxedEnvironment(Environment):
"""
return False
-
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
- self.globals['range'] = safe_range
+ self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
@@ -335,7 +348,7 @@ class SandboxedEnvironment(Environment):
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
- return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+ return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
@@ -343,8 +356,9 @@ class SandboxedEnvironment(Environment):
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
- return not (getattr(obj, 'unsafe_callable', False) or
- getattr(obj, 'alters_data', False))
+ return not (
+ getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
+ )
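
A sketch (not part of the diff) of how the module's `unsafe` decorator interacts with this check; the Payroll class is hypothetical:

from jinja2.sandbox import SandboxedEnvironment, unsafe

class Payroll(object):
    @unsafe
    def delete_everything(self):
        pass

env = SandboxedEnvironment()
# The decorator sets unsafe_callable, so the sandbox refuses to call it.
assert env.is_safe_callable(Payroll().delete_everything) is False
assert env.is_safe_callable(len) is True
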
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
@@ -404,13 +418,15 @@ class SandboxedEnvironment(Environment):
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
- return self.undefined('access to attribute %r of %r '
- 'object is unsafe.' % (
- attribute,
- obj.__class__.__name__
- ), name=attribute, obj=obj, exc=SecurityError)
-
- def format_string(self, s, args, kwargs):
+ return self.undefined(
+ "access to attribute %r of %r "
+ "object is unsafe." % (attribute, obj.__class__.__name__),
+ name=attribute,
+ obj=obj,
+ exc=SecurityError,
+ )
+
+ def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
@@ -418,20 +434,31 @@ class SandboxedEnvironment(Environment):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
+
+ if format_func is not None and format_func.__name__ == "format_map":
+ if len(args) != 1 or kwargs:
+ raise TypeError(
+ "format_map() takes exactly one argument %d given"
+ % (len(args) + (kwargs is not None))
+ )
+
+ kwargs = args[0]
+ args = None
+
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
- def call(__self, __context, __obj, *args, **kwargs):
+ def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
- return __self.format_string(fmt, args, kwargs)
+ return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
- raise SecurityError('%r is not safely callable' % (__obj,))
+ raise SecurityError("%r is not safely callable" % (__obj,))
return __context.call(__obj, *args, **kwargs)
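
For illustration (not part of the diff), what the format/format_map routing above means in a sandboxed template; both templates are hypothetical:

from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment()
# str.format_map is now routed through the sandboxed formatter too.
t = env.from_string('{{ "Hello {name}!".format_map({"name": "World"}) }}')
print(t.render())  # -> Hello World!
# Unsafe attribute lookups inside format fields resolve to undefined and
# render as '' rather than leaking internals.
print(env.from_string('{{ "a{0.__class__}b".format(42) }}').render())  # -> ab
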
@@ -447,16 +474,16 @@ class ImmutableSandboxedEnvironment(SandboxedEnvironment):
return not modifies_known_mutable(obj, attr)
-# This really is not a public API apparenlty.
+# This really is not a public API apparently.
try:
from _string import formatter_field_name_split
except ImportError:
+
def formatter_field_name_split(field_name):
return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
-
def __init__(self, env):
self._env = env
@@ -470,14 +497,14 @@ class SandboxedFormatterMixin(object):
obj = self._env.getitem(obj, i)
return obj, first
-class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
-class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)
diff --git a/lib/spack/external/jinja2/tests.py b/lib/spack/external/jinja2/tests.py
index d5d6b5b33f..fabd4ce51b 100644
--- a/lib/spack/external/jinja2/tests.py
+++ b/lib/spack/external/jinja2/tests.py
@@ -1,29 +1,17 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.tests
- ~~~~~~~~~~~~
-
- Jinja test functions. Used with the "is" operator.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
+"""Built-in template tests used with the ``is`` operator."""
+import decimal
import operator
import re
-import sys
-from jinja2.runtime import Undefined
-from jinja2._compat import text_type, string_types, integer_types
-import decimal
-if sys.version_info >= (3, 3):
- from collections.abc import Mapping
-else:
- from collections import Mapping
+from ._compat import abc
+from ._compat import integer_types
+from ._compat import string_types
+from ._compat import text_type
+from .runtime import Undefined
-number_re = re.compile(r'^-?\d+(\.\d+)?$')
+number_re = re.compile(r"^-?\d+(\.\d+)?$")
regex_type = type(number_re)
-
-
test_callable = callable
@@ -69,6 +57,48 @@ def test_none(value):
return value is None
+def test_boolean(value):
+ """Return true if the object is a boolean value.
+
+ .. versionadded:: 2.11
+ """
+ return value is True or value is False
+
+
+def test_false(value):
+ """Return true if the object is False.
+
+ .. versionadded:: 2.11
+ """
+ return value is False
+
+
+def test_true(value):
+ """Return true if the object is True.
+
+ .. versionadded:: 2.11
+ """
+ return value is True
+
+
+# NOTE: The existing 'number' test matches booleans and floats
+def test_integer(value):
+ """Return true if the object is an integer.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, integer_types) and value is not True and value is not False
+
+
+# NOTE: The existing 'number' test matches booleans and integers
+def test_float(value):
+ """Return true if the object is a float.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, float)
+
+
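
A sketch (not part of the diff) of the five new tests from a template's point of view:

from jinja2 import Environment

env = Environment()
out = env.from_string(
    "{{ true is boolean }} {{ 1 is integer }} {{ 1.5 is float }}"
    " {{ true is true }} {{ 1 is true }}"
).render()
print(out)  # -> True True True True False (1 is not the singleton True)
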
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
@@ -89,7 +119,7 @@ def test_mapping(value):
.. versionadded:: 2.6
"""
- return isinstance(value, Mapping)
+ return isinstance(value, abc.Mapping)
def test_number(value):
@@ -104,7 +134,7 @@ def test_sequence(value):
try:
len(value)
value.__getitem__
- except:
+ except Exception:
return False
return True
@@ -133,7 +163,7 @@ def test_iterable(value):
def test_escaped(value):
"""Check if the value is escaped."""
- return hasattr(value, '__html__')
+ return hasattr(value, "__html__")
def test_in(value, seq):
@@ -145,36 +175,41 @@ def test_in(value, seq):
TESTS = {
- 'odd': test_odd,
- 'even': test_even,
- 'divisibleby': test_divisibleby,
- 'defined': test_defined,
- 'undefined': test_undefined,
- 'none': test_none,
- 'lower': test_lower,
- 'upper': test_upper,
- 'string': test_string,
- 'mapping': test_mapping,
- 'number': test_number,
- 'sequence': test_sequence,
- 'iterable': test_iterable,
- 'callable': test_callable,
- 'sameas': test_sameas,
- 'escaped': test_escaped,
- 'in': test_in,
- '==': operator.eq,
- 'eq': operator.eq,
- 'equalto': operator.eq,
- '!=': operator.ne,
- 'ne': operator.ne,
- '>': operator.gt,
- 'gt': operator.gt,
- 'greaterthan': operator.gt,
- 'ge': operator.ge,
- '>=': operator.ge,
- '<': operator.lt,
- 'lt': operator.lt,
- 'lessthan': operator.lt,
- '<=': operator.le,
- 'le': operator.le,
+ "odd": test_odd,
+ "even": test_even,
+ "divisibleby": test_divisibleby,
+ "defined": test_defined,
+ "undefined": test_undefined,
+ "none": test_none,
+ "boolean": test_boolean,
+ "false": test_false,
+ "true": test_true,
+ "integer": test_integer,
+ "float": test_float,
+ "lower": test_lower,
+ "upper": test_upper,
+ "string": test_string,
+ "mapping": test_mapping,
+ "number": test_number,
+ "sequence": test_sequence,
+ "iterable": test_iterable,
+ "callable": test_callable,
+ "sameas": test_sameas,
+ "escaped": test_escaped,
+ "in": test_in,
+ "==": operator.eq,
+ "eq": operator.eq,
+ "equalto": operator.eq,
+ "!=": operator.ne,
+ "ne": operator.ne,
+ ">": operator.gt,
+ "gt": operator.gt,
+ "greaterthan": operator.gt,
+ "ge": operator.ge,
+ ">=": operator.ge,
+ "<": operator.lt,
+ "lt": operator.lt,
+ "lessthan": operator.lt,
+ "<=": operator.le,
+ "le": operator.le,
}
diff --git a/lib/spack/external/jinja2/utils.py b/lib/spack/external/jinja2/utils.py
index cff4e783a8..6afca81055 100644
--- a/lib/spack/external/jinja2/utils.py
+++ b/lib/spack/external/jinja2/utils.py
@@ -1,44 +1,32 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.utils
- ~~~~~~~~~~~~
-
- Utility functions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
import json
-import errno
+import os
+import re
+import warnings
from collections import deque
+from random import choice
+from random import randrange
+from string import ascii_letters as _letters
+from string import digits as _digits
from threading import Lock
-from jinja2._compat import text_type, string_types, implements_iterator, \
- url_quote
+from markupsafe import escape
+from markupsafe import Markup
-_word_split_re = re.compile(r'(\s+)')
-_punctuation_re = re.compile(
- '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
- '|'.join(map(re.escape, ('(', '<', '&lt;'))),
- '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
- )
-)
-_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
-_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
-_entity_re = re.compile(r'&([^;]+);')
-_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
-_digits = '0123456789'
+from ._compat import abc
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import url_quote
# special singleton representing missing values for the runtime
-missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
# internal code
internal_code = set()
-concat = u''.join
+concat = u"".join
-_slash_escape = '\\/' not in json.dumps('/')
+_slash_escape = "\\/" not in json.dumps("/")
def contextfunction(f):
@@ -98,24 +86,26 @@ def is_undefined(obj):
return default
return var
"""
- from jinja2.runtime import Undefined
+ from .runtime import Undefined
+
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
- for event in iterable:
+ for _ in iterable:
pass
def clear_caches():
- """Jinja2 keeps internal caches for environments and lexers. These are
- used so that Jinja2 doesn't have to recreate environments and lexers all
+ """Jinja keeps internal caches for environments and lexers. These are
+ used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
- from jinja2.environment import _spontaneous_environments
- from jinja2.lexer import _lexer_cache
+ from .environment import _spontaneous_environments
+ from .lexer import _lexer_cache
+
_spontaneous_environments.clear()
_lexer_cache.clear()
@@ -132,12 +122,10 @@ def import_string(import_name, silent=False):
:return: imported object
"""
try:
- if ':' in import_name:
- module, obj = import_name.split(':', 1)
- elif '.' in import_name:
- items = import_name.split('.')
- module = '.'.join(items[:-1])
- obj = items[-1]
+ if ":" in import_name:
+ module, obj = import_name.split(":", 1)
+ elif "." in import_name:
+ module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
@@ -146,15 +134,14 @@ def import_string(import_name, silent=False):
raise
-def open_if_exists(filename, mode='rb'):
+def open_if_exists(filename, mode="rb"):
"""Returns a file descriptor for the filename if that file exists,
- otherwise `None`.
+ otherwise ``None``.
"""
- try:
- return open(filename, mode)
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
- raise
+ if not os.path.isfile(filename):
+ return None
+
+ return open(filename, mode)
def object_type_repr(obj):
@@ -163,15 +150,19 @@ def object_type_repr(obj):
example for `None` and `Ellipsis`).
"""
if obj is None:
- return 'None'
+ return "None"
elif obj is Ellipsis:
- return 'Ellipsis'
+ return "Ellipsis"
+
+ cls = type(obj)
+
# __builtin__ in 2.x, builtins in 3.x
- if obj.__class__.__module__ in ('__builtin__', 'builtins'):
- name = obj.__class__.__name__
+ if cls.__module__ in ("__builtin__", "builtins"):
+ name = cls.__name__
else:
- name = obj.__class__.__module__ + '.' + obj.__class__.__name__
- return '%s object' % name
+ name = cls.__module__ + "." + cls.__name__
+
+ return "%s object" % name
def pformat(obj, verbose=False):
@@ -180,9 +171,11 @@ def pformat(obj, verbose=False):
"""
try:
from pretty import pretty
+
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
+
return pformat(obj)
@@ -200,45 +193,77 @@ def urlize(text, trim_url_limit=None, rel=None, target=None):
If target is not None, a target attribute will be added to the link.
"""
- trim_url = lambda x, limit=trim_url_limit: limit is not None \
- and (x[:limit] + (len(x) >=limit and '...'
- or '')) or x
- words = _word_split_re.split(text_type(escape(text)))
- rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
- target_attr = target and ' target="%s"' % escape(target) or ''
+ trim_url = (
+ lambda x, limit=trim_url_limit: limit is not None
+ and (x[:limit] + (len(x) >= limit and "..." or ""))
+ or x
+ )
+ words = re.split(r"(\s+)", text_type(escape(text)))
+ rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
+ target_attr = target and ' target="%s"' % escape(target) or ""
for i, word in enumerate(words):
- match = _punctuation_re.match(word)
+ head, middle, tail = "", word, ""
+ match = re.match(r"^([(<]|&lt;)+", middle)
+
if match:
- lead, middle, trail = match.groups()
- if middle.startswith('www.') or (
- '@' not in middle and
- not middle.startswith('http://') and
- not middle.startswith('https://') and
- len(middle) > 0 and
- middle[0] in _letters + _digits and (
- middle.endswith('.org') or
- middle.endswith('.net') or
- middle.endswith('.com')
- )):
- middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if middle.startswith('http://') or \
- middle.startswith('https://'):
- middle = '<a href="%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if '@' in middle and not middle.startswith('www.') and \
- not ':' in middle and _simple_email_re.match(middle):
- middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
- if lead + middle + trail != word:
- words[i] = lead + middle + trail
- return u''.join(words)
+ head = match.group()
+ middle = middle[match.end() :]
+
+ # Unlike head, which is anchored to the start of the string, we
+ # need to check that the string ends with any of the characters
+ # before trying to match all of them, to avoid backtracking.
+ if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
+ match = re.search(r"([)>.,\n]|&gt;)+$", middle)
+
+ if match:
+ tail = match.group()
+ middle = middle[: match.start()]
+
+ if middle.startswith("www.") or (
+ "@" not in middle
+ and not middle.startswith("http://")
+ and not middle.startswith("https://")
+ and len(middle) > 0
+ and middle[0] in _letters + _digits
+ and (
+ middle.endswith(".org")
+ or middle.endswith(".net")
+ or middle.endswith(".com")
+ )
+ ):
+ middle = '<a href="http://%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if middle.startswith("http://") or middle.startswith("https://"):
+ middle = '<a href="%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if (
+ "@" in middle
+ and not middle.startswith("www.")
+ and ":" not in middle
+ and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle)
+ ):
+ middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+
+ words[i] = head + middle + tail
+
+ return u"".join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
- from jinja2.constants import LOREM_IPSUM_WORDS
- from random import choice, randrange
+ from .constants import LOREM_IPSUM_WORDS
+
words = LOREM_IPSUM_WORDS.split()
result = []
@@ -263,43 +288,53 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
- word += ','
+ word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
- word += '.'
+ word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
- p = u' '.join(p)
- if p.endswith(','):
- p = p[:-1] + '.'
- elif not p.endswith('.'):
- p += '.'
+ p = u" ".join(p)
+ if p.endswith(","):
+ p = p[:-1] + "."
+ elif not p.endswith("."):
+ p += "."
result.append(p)
if not html:
- return u'\n\n'.join(result)
- return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+ return u"\n\n".join(result)
+ return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
+
+def unicode_urlencode(obj, charset="utf-8", for_qs=False):
+ """Quote a string for use in a URL using the given charset.
-def unicode_urlencode(obj, charset='utf-8', for_qs=False):
- """URL escapes a single bytestring or unicode string with the
- given charset if applicable to URL safe quoting under all rules
- that need to be considered under all supported Python versions.
+ This function is misnamed; it is a wrapper around
+ :func:`urllib.parse.quote`.
- If non strings are provided they are converted to their unicode
- representation first.
+ :param obj: String or bytes to quote. Other types are converted to
+ string then encoded to bytes using the given charset.
+ :param charset: Encode text to bytes using this charset.
+ :param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
+
if isinstance(obj, text_type):
obj = obj.encode(charset)
- safe = not for_qs and b'/' or b''
- rv = text_type(url_quote(obj, safe))
+
+ safe = b"" if for_qs else b"/"
+ rv = url_quote(obj, safe)
+
+ if not isinstance(rv, text_type):
+ rv = rv.decode("utf-8")
+
if for_qs:
- rv = rv.replace('%20', '+')
+ rv = rv.replace("%20", "+")
+
return rv
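
Two illustrative calls (not part of the diff), following the rules documented above:

from jinja2.utils import unicode_urlencode

print(unicode_urlencode("a b/c"))               # -> a%20b/c  ("/" stays safe)
print(unicode_urlencode("a b/c", for_qs=True))  # -> a+b%2Fc  (query-string rules)
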
@@ -326,9 +361,9 @@ class LRUCache(object):
def __getstate__(self):
return {
- 'capacity': self.capacity,
- '_mapping': self._mapping,
- '_queue': self._queue
+ "capacity": self.capacity,
+ "_mapping": self._mapping,
+ "_queue": self._queue,
}
def __setstate__(self, d):
@@ -342,7 +377,7 @@ class LRUCache(object):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
- rv._queue = deque(self._queue)
+ rv._queue.extend(self._queue)
return rv
def get(self, key, default=None):
@@ -356,15 +391,11 @@ class LRUCache(object):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
- self._wlock.acquire()
try:
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
- finally:
- self._wlock.release()
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
def clear(self):
"""Clear the cache."""
@@ -384,10 +415,7 @@ class LRUCache(object):
return len(self._mapping)
def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self._mapping
- )
+ return "<%s %r>" % (self.__class__.__name__, self._mapping)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
@@ -436,7 +464,6 @@ class LRUCache(object):
try:
self._remove(key)
except ValueError:
- # __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
@@ -449,6 +476,12 @@ class LRUCache(object):
def iteritems(self):
"""Iterate over all items."""
+ warnings.warn(
+ "'iteritems()' will be removed in version 3.0. Use"
+ " 'iter(cache.items())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
return iter(self.items())
def values(self):
@@ -457,6 +490,22 @@ class LRUCache(object):
def itervalue(self):
"""Iterate over all values."""
+ warnings.warn(
+ "'itervalue()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self.values())
+
+ def itervalues(self):
+ """Iterate over all values."""
+ warnings.warn(
+ "'itervalues()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
return iter(self.values())
def keys(self):
@@ -467,12 +516,19 @@ class LRUCache(object):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
- return reversed(tuple(self._queue))
+ warnings.warn(
+ "'iterkeys()' will be removed in version 3.0. Use"
+ " 'iter(cache.keys())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self)
- __iter__ = iterkeys
+ def __iter__(self):
+ return reversed(tuple(self._queue))
def __reversed__(self):
- """Iterate over the values in the cache dict, oldest items
+ """Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
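
A small sketch (not part of the diff) of the iteration order __iter__ and __reversed__ now expose; the keys are assumed:

from jinja2.utils import LRUCache

cache = LRUCache(2)
cache["a"] = 1
cache["b"] = 2
cache["a"]          # touch "a" so it becomes most recently used
cache["c"] = 3      # evicts "b", the least recently used entry
print(list(cache))            # -> ['c', 'a']  (most recent first)
print(list(reversed(cache)))  # -> ['a', 'c']  (oldest first)
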
@@ -480,22 +536,15 @@ class LRUCache(object):
__copy__ = copy
-# register the LRU cache as mutable mapping if possible
-try:
- from collections.abc import MutableMapping
- MutableMapping.register(LRUCache)
-except ImportError:
- try:
- from collections import MutableMapping
- MutableMapping.register(LRUCache)
- except ImportError:
- pass
+abc.MutableMapping.register(LRUCache)
-def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
- disabled_extensions=(),
- default_for_string=True,
- default=False):
+def select_autoescape(
+ enabled_extensions=("html", "htm", "xml"),
+ disabled_extensions=(),
+ default_for_string=True,
+ default=False,
+):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
@@ -530,10 +579,9 @@ def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
.. versionadded:: 2.9
"""
- enabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in enabled_extensions)
- disabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in disabled_extensions)
+ enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
+ disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
+
def autoescape(template_name):
if template_name is None:
return default_for_string
@@ -543,6 +591,7 @@ def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
if template_name.endswith(disabled_patterns):
return False
return default
+
return autoescape
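
For illustration (not part of the diff), the usual way this helper is wired up; the extensions shown are the defaults:

from jinja2 import Environment, select_autoescape

env = Environment(
    autoescape=select_autoescape(enabled_extensions=("html", "htm", "xml"))
)
# *.html/*.htm/*.xml templates autoescape; string templates follow
# default_for_string (True); anything else falls back to default (False).
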
@@ -566,35 +615,63 @@ def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""
if dumper is None:
dumper = json.dumps
- rv = dumper(obj, **kwargs) \
- .replace(u'<', u'\\u003c') \
- .replace(u'>', u'\\u003e') \
- .replace(u'&', u'\\u0026') \
- .replace(u"'", u'\\u0027')
+ rv = (
+ dumper(obj, **kwargs)
+ .replace(u"<", u"\\u003c")
+ .replace(u">", u"\\u003e")
+ .replace(u"&", u"\\u0026")
+ .replace(u"'", u"\\u0027")
+ )
return Markup(rv)
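
A quick sketch (not part of the diff) of why those replacements matter; the payload is assumed:

from jinja2.utils import htmlsafe_json_dumps

# "<", ">", "&" and "'" become \uXXXX escapes, so the result can be embedded
# in a <script> block or a single-quoted HTML attribute without breaking out.
print(htmlsafe_json_dumps({"msg": "</script>&'"}))
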
-@implements_iterator
class Cycler(object):
- """A cycle helper for templates."""
+ """Cycle through values by yield them one at a time, then restarting
+ once the end is reached. Available as ``cycler`` in templates.
+
+ Similar to ``loop.cycle``, but can be used outside loops or across
+ multiple loops. For example, render a list of folders and files in a
+ list, alternating giving them "odd" and "even" classes.
+
+ .. code-block:: html+jinja
+
+ {% set row_class = cycler("odd", "even") %}
+ <ul class="browser">
+ {% for folder in folders %}
+ <li class="folder {{ row_class.next() }}">{{ folder }}
+ {% endfor %}
+ {% for file in files %}
+ <li class="file {{ row_class.next() }}">{{ file }}
+ {% endfor %}
+ </ul>
+
+ :param items: Each positional argument will be yielded in the order
+ given for each cycle.
+
+ .. versionadded:: 2.1
+ """
def __init__(self, *items):
if not items:
- raise RuntimeError('at least one item has to be provided')
+ raise RuntimeError("at least one item has to be provided")
self.items = items
- self.reset()
+ self.pos = 0
def reset(self):
- """Resets the cycle."""
+ """Resets the current item to the first item."""
self.pos = 0
@property
def current(self):
- """Returns the current item."""
+ """Return the current item. Equivalent to the item that will be
+ returned next time :meth:`next` is called.
+ """
return self.items[self.pos]
def next(self):
- """Goes one item ahead and returns it."""
+ """Return the current item, then advance :attr:`current` to the
+ next item.
+ """
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
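
The same helper from Python (not part of the diff); the items are assumed:

from jinja2.utils import Cycler

row = Cycler("odd", "even")
print([row.next() for _ in range(5)])  # -> ['odd', 'even', 'odd', 'even', 'odd']
print(row.current)                     # -> 'even' (what next() would return now)
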
@@ -605,27 +682,28 @@ class Cycler(object):
class Joiner(object):
"""A joining helper for templates."""
- def __init__(self, sep=u', '):
+ def __init__(self, sep=u", "):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
- return u''
+ return u""
return self.sep
class Namespace(object):
"""A namespace object that can hold arbitrary attributes. It may be
- initialized from a dictionary or with keyword argments."""
+ initialized from a dictionary or with keyword arguments."""
- def __init__(*args, **kwargs):
+ def __init__(*args, **kwargs): # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
- if name == '_Namespace__attrs':
+ # __class__ is needed for the awaitable check in async mode
+ if name in {"_Namespace__attrs", "__class__"}:
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
@@ -636,16 +714,24 @@ class Namespace(object):
self.__attrs[name] = value
def __repr__(self):
- return '<Namespace %r>' % self.__attrs
+ return "<Namespace %r>" % self.__attrs
# does this python version support async for in and async generators?
try:
- exec('async def _():\n async for _ in ():\n yield _')
+ exec("async def _():\n async for _ in ():\n yield _")
have_async_gen = True
except SyntaxError:
have_async_gen = False
-# Imported here because that's where it was in the past
-from markupsafe import Markup, escape, soft_unicode
+def soft_unicode(s):
+ from markupsafe import soft_unicode
+
+ warnings.warn(
+ "'jinja2.utils.soft_unicode' will be removed in version 3.0."
+ " Use 'markupsafe.soft_unicode' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return soft_unicode(s)
diff --git a/lib/spack/external/jinja2/visitor.py b/lib/spack/external/jinja2/visitor.py
index ba526dfac9..d1365bf10e 100644
--- a/lib/spack/external/jinja2/visitor.py
+++ b/lib/spack/external/jinja2/visitor.py
@@ -1,14 +1,8 @@
# -*- coding: utf-8 -*-
+"""API for traversing the AST nodes. Implemented by the compiler and
+meta introspection.
"""
- jinja2.visitor
- ~~~~~~~~~~~~~~
-
- This module implements a visitor for the nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2.nodes import Node
+from .nodes import Node
class NodeVisitor(object):
@@ -28,7 +22,7 @@ class NodeVisitor(object):
exists for this node. In that case the generic visit function is
used instead.
"""
- method = 'visit_' + node.__class__.__name__
+ method = "visit_" + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
diff --git a/lib/spack/external/jsonschema/README.rst b/lib/spack/external/jsonschema/README.rst
deleted file mode 100644
index 20c2fe6266..0000000000
--- a/lib/spack/external/jsonschema/README.rst
+++ /dev/null
@@ -1,104 +0,0 @@
-==========
-jsonschema
-==========
-
-``jsonschema`` is an implementation of `JSON Schema <http://json-schema.org>`_
-for Python (supporting 2.6+ including Python 3).
-
-.. code-block:: python
-
- >>> from jsonschema import validate
-
- >>> # A sample schema, like what we'd get from json.load()
- >>> schema = {
- ... "type" : "object",
- ... "properties" : {
- ... "price" : {"type" : "number"},
- ... "name" : {"type" : "string"},
- ... },
- ... }
-
- >>> # If no exception is raised by validate(), the instance is valid.
- >>> validate({"name" : "Eggs", "price" : 34.99}, schema)
-
- >>> validate(
- ... {"name" : "Eggs", "price" : "Invalid"}, schema
- ... ) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValidationError: 'Invalid' is not of type 'number'
-
-
-Features
---------
-
-* Full support for
- `Draft 3 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft3Validator>`_
- **and** `Draft 4 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft4Validator>`_
- of the schema.
-
-* `Lazy validation <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
- that can iteratively report *all* validation errors.
-
-* Small and extensible
-
-* `Programmatic querying <https://python-jsonschema.readthedocs.org/en/latest/errors/#module-jsonschema>`_
- of which properties or items failed validation.
-
-
-Release Notes
--------------
-
-* A simple CLI was added for validation
-* Validation errors now keep full absolute paths and absolute schema paths in
- their ``absolute_path`` and ``absolute_schema_path`` attributes. The ``path``
- and ``schema_path`` attributes are deprecated in favor of ``relative_path``
- and ``relative_schema_path``\ .
-
-*Note:* Support for Python 3.2 was dropped in this release, and installation
-now uses setuptools.
-
-
-Running the Test Suite
-----------------------
-
-``jsonschema`` uses the wonderful `Tox <http://tox.readthedocs.org>`_ for its
-test suite. (It really is wonderful, if for some reason you haven't heard of
-it, you really should use it for your projects).
-
-Assuming you have ``tox`` installed (perhaps via ``pip install tox`` or your
-package manager), just run ``tox`` in the directory of your source checkout to
-run ``jsonschema``'s test suite on all of the versions of Python ``jsonschema``
-supports. Note that you'll need to have all of those versions installed in
-order to run the tests on each of them, otherwise ``tox`` will skip (and fail)
-the tests on that version.
-
-Of course you're also free to just run the tests on a single version with your
-favorite test runner. The tests live in the ``jsonschema.tests`` package.
-
-
-Community
----------
-
-There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
-for this implementation on Google Groups.
-
-Please join, and feel free to send questions there.
-
-
-Contributing
-------------
-
-I'm Julian Berman.
-
-``jsonschema`` is on `GitHub <http://github.com/Julian/jsonschema>`_.
-
-Get in touch, via GitHub or otherwise, if you've got something to contribute,
-it'd be most welcome!
-
-You can also generally find me on Freenode (nick: ``tos9``) in various
-channels, including ``#python``.
-
-If you feel overwhelmingly grateful, you can woo me with beer money on
-`Gittip <https://www.gittip.com/Julian/>`_ or via Google Wallet with the email
-in my GitHub profile.
diff --git a/lib/spack/external/jsonschema/__init__.py b/lib/spack/external/jsonschema/__init__.py
index 6c099f1d8b..6dfdb9419a 100644
--- a/lib/spack/external/jsonschema/__init__.py
+++ b/lib/spack/external/jsonschema/__init__.py
@@ -4,23 +4,34 @@ An implementation of JSON Schema for Python
The main functionality is provided by the validator classes for each of the
supported JSON Schema versions.
-Most commonly, :func:`validate` is the quickest way to simply validate a given
+Most commonly, `validate` is the quickest way to simply validate a given
instance under a schema, and will create a validator for you.
-
"""
from jsonschema.exceptions import (
ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
)
from jsonschema._format import (
- FormatChecker, draft3_format_checker, draft4_format_checker,
+ FormatChecker,
+ draft3_format_checker,
+ draft4_format_checker,
+ draft6_format_checker,
+ draft7_format_checker,
)
+from jsonschema._types import TypeChecker
from jsonschema.validators import (
- Draft3Validator, Draft4Validator, RefResolver, validate
+ Draft3Validator,
+ Draft4Validator,
+ Draft6Validator,
+ Draft7Validator,
+ RefResolver,
+ validate,
)
-
-
-__version__ = "2.4.0"
-
-
-# flake8: noqa
+# try:
+# from importlib import metadata
+# except ImportError: # for Python<3.8
+# import importlib_metadata as metadata
+# __version__ = metadata.version("jsonschema")
+# set the version manually here, as we don't install dist-info or egg-info
+# files for vendored spack externals.
+__version__ = '3.2.0'
diff --git a/lib/spack/external/jsonschema/_format.py b/lib/spack/external/jsonschema/_format.py
index bb52d183ad..281a7cfcff 100644
--- a/lib/spack/external/jsonschema/_format.py
+++ b/lib/spack/external/jsonschema/_format.py
@@ -1,6 +1,7 @@
import datetime
import re
import socket
+import struct
from jsonschema.compat import str_types
from jsonschema.exceptions import FormatError
@@ -14,17 +15,19 @@ class FormatChecker(object):
validation. If validation is desired however, instances of this class can
be hooked into validators to enable format validation.
- :class:`FormatChecker` objects always return ``True`` when asked about
+ `FormatChecker` objects always return ``True`` when asked about
formats that they do not know how to validate.
To check a custom format using a function that takes an instance and
- returns a ``bool``, use the :meth:`FormatChecker.checks` or
- :meth:`FormatChecker.cls_checks` decorators.
+ returns a ``bool``, use the `FormatChecker.checks` or
+ `FormatChecker.cls_checks` decorators.
- :argument iterable formats: the known formats to validate. This argument
- can be used to limit which formats will be used
- during validation.
+ Arguments:
+ formats (~collections.Iterable):
+
+ The known formats to validate. This argument can be used to
+ limit which formats will be used during validation.
"""
checkers = {}
@@ -35,16 +38,27 @@ class FormatChecker(object):
else:
self.checkers = dict((k, self.checkers[k]) for k in formats)
+ def __repr__(self):
+ return "<FormatChecker checkers={}>".format(sorted(self.checkers))
+
def checks(self, format, raises=()):
"""
Register a decorated function as validating a new format.
- :argument str format: the format that the decorated function will check
- :argument Exception raises: the exception(s) raised by the decorated
- function when an invalid instance is found. The exception object
- will be accessible as the :attr:`ValidationError.cause` attribute
- of the resulting validation error.
+ Arguments:
+
+ format (str):
+
+ The format that the decorated function will check.
+
+ raises (Exception):
+ The exception(s) raised by the decorated function when an
+ invalid instance is found.
+
+ The exception object will be accessible as the
+ `jsonschema.exceptions.ValidationError.cause` attribute of the
+ resulting validation error.
"""
def _checks(func):
@@ -58,11 +72,20 @@ class FormatChecker(object):
"""
Check whether the instance conforms to the given format.
- :argument instance: the instance to check
- :type: any primitive type (str, number, bool)
- :argument str format: the format that instance should conform to
- :raises: :exc:`FormatError` if instance does not conform to format
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+
+ format (str):
+ The format that instance should conform to
+
+
+ Raises:
+
+ FormatError: if the instance does not conform to ``format``
"""
if format not in self.checkers:
@@ -83,11 +106,19 @@ class FormatChecker(object):
"""
Check whether the instance conforms to the given format.
- :argument instance: the instance to check
- :type: any primitive type (str, number, bool)
- :argument str format: the format that instance should conform to
- :rtype: bool
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+ format (str):
+
+ The format that instance should conform to
+
+ Returns:
+
+ bool: whether it conformed
"""
try:
@@ -98,25 +129,55 @@ class FormatChecker(object):
return True
-_draft_checkers = {"draft3": [], "draft4": []}
+draft3_format_checker = FormatChecker()
+draft4_format_checker = FormatChecker()
+draft6_format_checker = FormatChecker()
+draft7_format_checker = FormatChecker()
+
+_draft_checkers = dict(
+ draft3=draft3_format_checker,
+ draft4=draft4_format_checker,
+ draft6=draft6_format_checker,
+ draft7=draft7_format_checker,
+)
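
Not part of the diff: a sketch of how these per-draft checkers are typically consumed, assuming the vendored jsonschema 3.2 API:

from jsonschema import ValidationError, draft7_format_checker, validate

schema = {"type": "string", "format": "ipv4"}
validate("127.0.0.1", schema, format_checker=draft7_format_checker)  # passes
try:
    validate("not-an-ip", schema, format_checker=draft7_format_checker)
except ValidationError as err:
    print(err.message)  # 'not-an-ip' is not a 'ipv4'
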
-def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
- draft3 = draft3 or both
- draft4 = draft4 or both
+
+def _checks_drafts(
+ name=None,
+ draft3=None,
+ draft4=None,
+ draft6=None,
+ draft7=None,
+ raises=(),
+):
+ draft3 = draft3 or name
+ draft4 = draft4 or name
+ draft6 = draft6 or name
+ draft7 = draft7 or name
def wrap(func):
if draft3:
- _draft_checkers["draft3"].append(draft3)
- func = FormatChecker.cls_checks(draft3, raises)(func)
+ func = _draft_checkers["draft3"].checks(draft3, raises)(func)
if draft4:
- _draft_checkers["draft4"].append(draft4)
- func = FormatChecker.cls_checks(draft4, raises)(func)
+ func = _draft_checkers["draft4"].checks(draft4, raises)(func)
+ if draft6:
+ func = _draft_checkers["draft6"].checks(draft6, raises)(func)
+ if draft7:
+ func = _draft_checkers["draft7"].checks(draft7, raises)(func)
+
+ # Oy. This is bad global state, but relied upon for now, until
+ # deprecation. See https://github.com/Julian/jsonschema/issues/519
+ # and test_format_checkers_come_with_defaults
+ FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)(
+ func,
+ )
return func
return wrap
-@_checks_drafts("email")
+@_checks_drafts(name="idn-email")
+@_checks_drafts(name="email")
def is_email(instance):
if not isinstance(instance, str_types):
return True
@@ -125,7 +186,10 @@ def is_email(instance):
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
-@_checks_drafts(draft3="ip-address", draft4="ipv4")
+
+@_checks_drafts(
+ draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
+)
def is_ipv4(instance):
if not isinstance(instance, str_types):
return True
@@ -135,7 +199,11 @@ def is_ipv4(instance):
if hasattr(socket, "inet_pton"):
- @_checks_drafts("ipv6", raises=socket.error)
+ # FIXME: Really this only should raise struct.error, but see the sadness
+ # that is https://twistedmatrix.com/trac/ticket/9409
+ @_checks_drafts(
+ name="ipv6", raises=(socket.error, struct.error, ValueError),
+ )
def is_ipv6(instance):
if not isinstance(instance, str_types):
return True
@@ -144,7 +212,13 @@ if hasattr(socket, "inet_pton"):
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
-@_checks_drafts(draft3="host-name", draft4="hostname")
+
+@_checks_drafts(
+ draft3="host-name",
+ draft4="hostname",
+ draft6="hostname",
+ draft7="hostname",
+)
def is_host_name(instance):
if not isinstance(instance, str_types):
return True
@@ -158,46 +232,103 @@ def is_host_name(instance):
try:
- import rfc3987
+ # The built-in `idna` codec only implements RFC 3490, so we go elsewhere.
+ import idna
except ImportError:
pass
else:
- @_checks_drafts("uri", raises=ValueError)
- def is_uri(instance):
+ @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
+ def is_idn_host_name(instance):
if not isinstance(instance, str_types):
return True
- return rfc3987.parse(instance, rule="URI")
+ idna.encode(instance)
+ return True
try:
- import strict_rfc3339
+ import rfc3987
except ImportError:
try:
- import isodate
+ from rfc3986_validator import validate_rfc3986
except ImportError:
pass
else:
- @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
- def is_date(instance):
+ @_checks_drafts(name="uri")
+ def is_uri(instance):
if not isinstance(instance, str_types):
return True
- return isodate.parse_datetime(instance)
-else:
- @_checks_drafts("date-time")
- def is_date(instance):
+ return validate_rfc3986(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance):
if not isinstance(instance, str_types):
return True
- return strict_rfc3339.validate_rfc3339(instance)
+ return validate_rfc3986(instance, rule="URI_reference")
+
+else:
+ @_checks_drafts(draft7="iri", raises=ValueError)
+ def is_iri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="IRI")
+
+ @_checks_drafts(draft7="iri-reference", raises=ValueError)
+ def is_iri_reference(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="IRI_reference")
+
+ @_checks_drafts(name="uri", raises=ValueError)
+ def is_uri(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return rfc3987.parse(instance, rule="URI_reference")
+
+
+try:
+ from strict_rfc3339 import validate_rfc3339
+except ImportError:
+ try:
+ from rfc3339_validator import validate_rfc3339
+ except ImportError:
+ validate_rfc3339 = None
+
+if validate_rfc3339:
+ @_checks_drafts(name="date-time")
+ def is_datetime(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return validate_rfc3339(instance)
+
+ @_checks_drafts(draft7="time")
+ def is_time(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return is_datetime("1970-01-01T" + instance)
-@_checks_drafts("regex", raises=re.error)
+@_checks_drafts(name="regex", raises=re.error)
def is_regex(instance):
if not isinstance(instance, str_types):
return True
return re.compile(instance)
-@_checks_drafts(draft3="date", raises=ValueError)
+@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
def is_date(instance):
if not isinstance(instance, str_types):
return True
@@ -205,7 +336,7 @@ def is_date(instance):
@_checks_drafts(draft3="time", raises=ValueError)
-def is_time(instance):
+def is_draft3_time(instance):
if not isinstance(instance, str_types):
return True
return datetime.datetime.strptime(instance, "%H:%M:%S")
@@ -219,7 +350,6 @@ else:
def is_css_color_code(instance):
return webcolors.normalize_hex(instance)
-
@_checks_drafts(draft3="color", raises=(ValueError, TypeError))
def is_css21_color(instance):
if (
@@ -229,12 +359,67 @@ else:
return True
return is_css_color_code(instance)
-
def is_css3_color(instance):
if instance.lower() in webcolors.css3_names_to_hex:
return True
return is_css_color_code(instance)
-draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
-draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
+try:
+ import jsonpointer
+except ImportError:
+ pass
+else:
+ @_checks_drafts(
+ draft6="json-pointer",
+ draft7="json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_json_pointer(instance):
+ if not isinstance(instance, str_types):
+ return True
+ return jsonpointer.JsonPointer(instance)
+
+ # TODO: I don't want to maintain this, so it
+ # needs to go either into jsonpointer (pending
+ # https://github.com/stefankoegl/python-json-pointer/issues/34) or
+ # into a new external library.
+ @_checks_drafts(
+ draft7="relative-json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_relative_json_pointer(instance):
+ # Definition taken from:
+ # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
+ if not isinstance(instance, str_types):
+ return True
+ non_negative_integer, rest = [], ""
+ for i, character in enumerate(instance):
+ if character.isdigit():
+ non_negative_integer.append(character)
+ continue
+
+ if not non_negative_integer:
+ return False
+
+ rest = instance[i:]
+ break
+ return (rest == "#") or jsonpointer.JsonPointer(rest)
+
+
+try:
+ import uritemplate.exceptions
+except ImportError:
+ pass
+else:
+ @_checks_drafts(
+ draft6="uri-template",
+ draft7="uri-template",
+ raises=uritemplate.exceptions.InvalidTemplate,
+ )
+ def is_uri_template(
+ instance,
+ template_validator=uritemplate.Validator().force_balanced_braces(),
+ ):
+ template = uritemplate.URITemplate(instance)
+ return template_validator.validate(template)
diff --git a/lib/spack/external/jsonschema/_legacy_validators.py b/lib/spack/external/jsonschema/_legacy_validators.py
new file mode 100644
index 0000000000..264ff7d713
--- /dev/null
+++ b/lib/spack/external/jsonschema/_legacy_validators.py
@@ -0,0 +1,141 @@
+from jsonschema import _utils
+from jsonschema.compat import iteritems
+from jsonschema.exceptions import ValidationError
+
+
+def dependencies_draft3(validator, dependencies, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in iteritems(dependencies):
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "object"):
+ for error in validator.descend(
+ instance, dependency, schema_path=property,
+ ):
+ yield error
+ elif validator.is_type(dependency, "string"):
+ if dependency not in instance:
+ yield ValidationError(
+ "%r is a dependency of %r" % (dependency, property)
+ )
+ else:
+ for each in dependency:
+ if each not in instance:
+ message = "%r is a dependency of %r"
+ yield ValidationError(message % (each, property))
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+ for disallowed in _utils.ensure_list(disallow):
+ if validator.is_valid(instance, {"type": [disallowed]}):
+ yield ValidationError(
+ "%r is disallowed for %r" % (disallowed, instance)
+ )
+
+
+def extends_draft3(validator, extends, instance, schema):
+ if validator.is_type(extends, "object"):
+ for error in validator.descend(instance, extends):
+ yield error
+ return
+ for index, subschema in enumerate(extends):
+ for error in validator.descend(instance, subschema, schema_path=index):
+ yield error
+
+
+def items_draft3_draft4(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "object"):
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
+ else:
+ for (index, item), subschema in zip(enumerate(instance), items):
+ for error in validator.descend(
+ item, subschema, path=index, schema_path=index,
+ ):
+ yield error
+
+
+def minimum_draft3_draft4(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMinimum", False):
+ failed = instance <= minimum
+ cmp = "less than or equal to"
+ else:
+ failed = instance < minimum
+ cmp = "less than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the minimum of %r" % (instance, cmp, minimum)
+ )
+
+
+def maximum_draft3_draft4(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMaximum", False):
+ failed = instance >= maximum
+ cmp = "greater than or equal to"
+ else:
+ failed = instance > maximum
+ cmp = "greater than"
+
+ if failed:
+ yield ValidationError(
+ "%r is %s the maximum of %r" % (instance, cmp, maximum)
+ )
+
+
+def properties_draft3(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in iteritems(properties):
+ if property in instance:
+ for error in validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ ):
+ yield error
+ elif subschema.get("required", False):
+ error = ValidationError("%r is a required property" % property)
+ error._set(
+ validator="required",
+ validator_value=subschema["required"],
+ instance=instance,
+ schema=schema,
+ )
+ error.path.appendleft(property)
+ error.schema_path.extend([property, "required"])
+ yield error
+
+
+def type_draft3(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ all_errors = []
+ for index, type in enumerate(types):
+ if validator.is_type(type, "object"):
+ errors = list(validator.descend(instance, type, schema_path=index))
+ if not errors:
+ return
+ all_errors.extend(errors)
+ else:
+ if validator.is_type(instance, type):
+ return
+ else:
+ yield ValidationError(
+ _utils.types_msg(instance, types), context=all_errors,
+ )
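These functions preserve draft-3-only keyword semantics. A small sketch of one difference they keep alive -- draft 3 allows a bare string as a ``dependencies`` value -- using the public validator (schema and instances are illustrative):

    from jsonschema import Draft3Validator

    validator = Draft3Validator({"dependencies": {"bar": "foo"}})

    print(validator.is_valid({"bar": 1, "foo": 2}))  # True
    print(validator.is_valid({"bar": 1}))            # False: "foo" is a dependency of "bar"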
diff --git a/lib/spack/external/jsonschema/_types.py b/lib/spack/external/jsonschema/_types.py
new file mode 100644
index 0000000000..a71a4e34bd
--- /dev/null
+++ b/lib/spack/external/jsonschema/_types.py
@@ -0,0 +1,188 @@
+import numbers
+
+from pyrsistent import pmap
+import attr
+
+from jsonschema.compat import int_types, str_types
+from jsonschema.exceptions import UndefinedTypeCheck
+
+
+def is_array(checker, instance):
+ return isinstance(instance, list)
+
+
+def is_bool(checker, instance):
+ return isinstance(instance, bool)
+
+
+def is_integer(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, int_types)
+
+
+def is_null(checker, instance):
+ return instance is None
+
+
+def is_number(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as numbers
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, numbers.Number)
+
+
+def is_object(checker, instance):
+ return isinstance(instance, dict)
+
+
+def is_string(checker, instance):
+ return isinstance(instance, str_types)
+
+
+def is_any(checker, instance):
+ return True
+
+
+@attr.s(frozen=True)
+class TypeChecker(object):
+ """
+ A ``type`` property checker.
+
+ A `TypeChecker` performs type checking for an `IValidator`. Type
+ checks to perform are updated using `TypeChecker.redefine` or
+ `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
+ Each of these returns a new `TypeChecker` object.
+
+ Arguments:
+
+ type_checkers (dict):
+
+ The initial mapping of types to their checking functions.
+ """
+ _type_checkers = attr.ib(default=pmap(), converter=pmap)
+
+ def is_type(self, instance, type):
+ """
+ Check if the instance is of the appropriate type.
+
+ Arguments:
+
+ instance (object):
+
+ The instance to check
+
+ type (str):
+
+ The name of the type that is expected.
+
+ Returns:
+
+ bool: Whether it conformed.
+
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+ if type is unknown to this object.
+ """
+ try:
+ fn = self._type_checkers[type]
+ except KeyError:
+ raise UndefinedTypeCheck(type)
+
+ return fn(self, instance)
+
+ def redefine(self, type, fn):
+ """
+ Produce a new checker with the given type redefined.
+
+ Arguments:
+
+ type (str):
+
+ The name of the type to check.
+
+ fn (collections.Callable):
+
+ A function taking exactly two parameters - the type
+ checker calling the function and the instance to check.
+ The function should return true if instance is of this
+ type and false otherwise.
+
+ Returns:
+
+ A new `TypeChecker` instance.
+ """
+ return self.redefine_many({type: fn})
+
+ def redefine_many(self, definitions=()):
+ """
+ Produce a new checker with the given types redefined.
+
+ Arguments:
+
+ definitions (dict):
+
+ A dictionary mapping types to their checking functions.
+
+ Returns:
+
+ A new `TypeChecker` instance.
+ """
+ return attr.evolve(
+ self, type_checkers=self._type_checkers.update(definitions),
+ )
+
+ def remove(self, *types):
+ """
+ Produce a new checker with the given types forgotten.
+
+ Arguments:
+
+ types (~collections.Iterable):
+
+ the names of the types to remove.
+
+ Returns:
+
+ A new `TypeChecker` instance.
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+
+ if any given type is unknown to this object.
+ """
+
+ checkers = self._type_checkers
+ for each in types:
+ try:
+ checkers = checkers.remove(each)
+ except KeyError:
+ raise UndefinedTypeCheck(each)
+ return attr.evolve(self, type_checkers=checkers)
+
+
+draft3_type_checker = TypeChecker(
+ {
+ u"any": is_any,
+ u"array": is_array,
+ u"boolean": is_bool,
+ u"integer": is_integer,
+ u"object": is_object,
+ u"null": is_null,
+ u"number": is_number,
+ u"string": is_string,
+ },
+)
+draft4_type_checker = draft3_type_checker.remove(u"any")
+draft6_type_checker = draft4_type_checker.redefine(
+ u"integer",
+ lambda checker, instance: (
+ is_integer(checker, instance) or
+ isinstance(instance, float) and instance.is_integer()
+ ),
+)
+draft7_type_checker = draft6_type_checker
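Since a TypeChecker is immutable, ``redefine``/``remove`` hand back fresh checkers that can be plugged into a derived validator class. A sketch, assuming the public ``validators.extend`` hook, that teaches a draft-7 validator to accept ``decimal.Decimal`` as a JSON number:

    from decimal import Decimal

    from jsonschema import Draft7Validator, validators

    def is_number_or_decimal(checker, instance):
        # Delegate to the stock draft-7 check, then widen it.
        return (
            Draft7Validator.TYPE_CHECKER.is_type(instance, "number")
            or isinstance(instance, Decimal)
        )

    DecimalValidator = validators.extend(
        Draft7Validator,
        type_checker=Draft7Validator.TYPE_CHECKER.redefine(
            "number", is_number_or_decimal,
        ),
    )

    print(DecimalValidator({"type": "number"}).is_valid(Decimal("1.5")))  # True
    print(Draft7Validator({"type": "number"}).is_valid(Decimal("1.5")))   # False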
diff --git a/lib/spack/external/jsonschema/_utils.py b/lib/spack/external/jsonschema/_utils.py
index 2262f3305d..ceb880198d 100644
--- a/lib/spack/external/jsonschema/_utils.py
+++ b/lib/spack/external/jsonschema/_utils.py
@@ -3,13 +3,12 @@ import json
import pkgutil
import re
-from jsonschema.compat import str_types, MutableMapping, urlsplit
+from jsonschema.compat import MutableMapping, str_types, urlsplit
class URIDict(MutableMapping):
"""
Dictionary which uses normalized URIs as keys.
-
"""
def normalize(self, uri):
@@ -41,7 +40,6 @@ class URIDict(MutableMapping):
class Unset(object):
"""
An as-of-yet unset attribute or unprovided default parameter.
-
"""
def __repr__(self):
@@ -51,17 +49,15 @@ class Unset(object):
def load_schema(name):
"""
Load a schema from ./schemas/``name``.json and return it.
-
"""
- data = pkgutil.get_data(__package__, "schemas/{0}.json".format(name))
+ data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
return json.loads(data.decode("utf-8"))
def indent(string, times=1):
"""
- A dumb version of :func:`textwrap.indent` from Python 3.3.
-
+ A dumb version of `textwrap.indent` from Python 3.3.
"""
return "\n".join(" " * (4 * times) + line for line in string.splitlines())
@@ -73,8 +69,11 @@ def format_as_index(indices):
For example, [1, 2, "foo"] -> [1][2]["foo"]
- :type indices: sequence
+ Arguments:
+ indices (sequence):
+
+ The indices to format.
"""
if not indices:
@@ -90,7 +89,6 @@ def find_additional_properties(instance, schema):
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
-
"""
properties = schema.get("properties", {})
@@ -105,7 +103,6 @@ def find_additional_properties(instance, schema):
def extras_msg(extras):
"""
Create an error message for extra items or properties.
-
"""
if len(extras) == 1:
@@ -123,7 +120,6 @@ def types_msg(instance, types):
be considered to be a description of that object and used as its type.
Otherwise the message is simply the reprs of the given ``types``.
-
"""
reprs = []
@@ -143,7 +139,6 @@ def flatten(suitable_for_isinstance):
* an arbitrary nested tree of tuples
Return a flattened tuple of the given argument.
-
"""
types = set()
@@ -163,7 +158,6 @@ def ensure_list(thing):
Wrap ``thing`` in a list if it's a single str.
Otherwise, return it unchanged.
-
"""
if isinstance(thing, str_types):
@@ -171,10 +165,16 @@ def ensure_list(thing):
return thing
+def equal(one, two):
+ """
+ Check if two things are equal, without letting booleans and ints compare equal.
+ """
+ return unbool(one) == unbool(two)
+
+
def unbool(element, true=object(), false=object()):
"""
A hack to make True and 1 and False and 0 unique for ``uniq``.
-
"""
if element is True:
@@ -191,7 +191,6 @@ def uniq(container):
Successively tries first to rely that the elements are hashable, then
falls back on them being sortable, and finally falls back on brute
force.
-
"""
try:
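The new ``equal`` helper exists because ``True == 1`` and ``False == 0`` in Python, which would otherwise let booleans satisfy numeric ``enum``/``const`` members. A standalone sketch of the ``unbool`` trick:

    _TRUE, _FALSE = object(), object()

    def unbool(element):
        # Map booleans to unique sentinels so they stop comparing
        # equal to 1 and 0.
        if element is True:
            return _TRUE
        if element is False:
            return _FALSE
        return element

    def equal(one, two):
        return unbool(one) == unbool(two)

    print(True == 1)       # True  (plain Python equality)
    print(equal(True, 1))  # False (schema-level equality)
    print(equal(1, 1.0))   # True  (numeric equality is preserved)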
diff --git a/lib/spack/external/jsonschema/_validators.py b/lib/spack/external/jsonschema/_validators.py
index c6e801ccb2..179fec09a9 100644
--- a/lib/spack/external/jsonschema/_validators.py
+++ b/lib/spack/external/jsonschema/_validators.py
@@ -1,13 +1,18 @@
import re
-from jsonschema import _utils
+from jsonschema._utils import (
+ ensure_list,
+ equal,
+ extras_msg,
+ find_additional_properties,
+ types_msg,
+ unbool,
+ uniq,
+)
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
-FLOAT_TOLERANCE = 10 ** -15
-
-
def patternProperties(validator, patternProperties, instance, schema):
if not validator.is_type(instance, "object"):
return
@@ -21,35 +26,60 @@ def patternProperties(validator, patternProperties, instance, schema):
yield error
+def propertyNames(validator, propertyNames, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property in instance:
+ for error in validator.descend(
+ instance=property,
+ schema=propertyNames,
+ ):
+ yield error
+
+
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
- extras = set(_utils.find_additional_properties(instance, schema))
+ extras = set(find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
for error in validator.descend(instance[extra], aP, path=extra):
yield error
elif not aP and extras:
- error = "Additional properties are not allowed (%s %s unexpected)"
- yield ValidationError(error % _utils.extras_msg(extras))
+ if "patternProperties" in schema:
+ patterns = sorted(schema["patternProperties"])
+ if len(extras) == 1:
+ verb = "does"
+ else:
+ verb = "do"
+ error = "%s %s not match any of the regexes: %s" % (
+ ", ".join(map(repr, sorted(extras))),
+ verb,
+ ", ".join(map(repr, patterns)),
+ )
+ yield ValidationError(error)
+ else:
+ error = "Additional properties are not allowed (%s %s unexpected)"
+ yield ValidationError(error % extras_msg(extras))
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
- if validator.is_type(items, "object"):
- for index, item in enumerate(instance):
- for error in validator.descend(item, items, path=index):
- yield error
- else:
+ if validator.is_type(items, "array"):
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
+ else:
+ for index, item in enumerate(instance):
+ for error in validator.descend(item, items, path=index):
+ yield error
def additionalItems(validator, aI, instance, schema):
@@ -68,41 +98,66 @@ def additionalItems(validator, aI, instance, schema):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error %
- _utils.extras_msg(instance[len(schema.get("items", [])):])
+ extras_msg(instance[len(schema.get("items", [])):])
)
-def minimum(validator, minimum, instance, schema):
+def const(validator, const, instance, schema):
+ if not equal(instance, const):
+ yield ValidationError("%r was expected" % (const,))
+
+
+def contains(validator, contains, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if not any(validator.is_valid(element, contains) for element in instance):
+ yield ValidationError(
+ "None of %r are valid under the given schema" % (instance,)
+ )
+
+
+def exclusiveMinimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
- if schema.get("exclusiveMinimum", False):
- failed = float(instance) <= minimum
- cmp = "less than or equal to"
- else:
- failed = float(instance) < minimum
- cmp = "less than"
+ if instance <= minimum:
+ yield ValidationError(
+ "%r is less than or equal to the minimum of %r" % (
+ instance, minimum,
+ ),
+ )
- if failed:
+
+def exclusiveMaximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance >= maximum:
yield ValidationError(
- "%r is %s the minimum of %r" % (instance, cmp, minimum)
+ "%r is greater than or equal to the maximum of %r" % (
+ instance, maximum,
+ ),
)
-def maximum(validator, maximum, instance, schema):
+def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
- if schema.get("exclusiveMaximum", False):
- failed = instance >= maximum
- cmp = "greater than or equal to"
- else:
- failed = instance > maximum
- cmp = "greater than"
+ if instance < minimum:
+ yield ValidationError(
+ "%r is less than the minimum of %r" % (instance, minimum)
+ )
- if failed:
+
+def maximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance > maximum:
yield ValidationError(
- "%r is %s the maximum of %r" % (instance, cmp, maximum)
+ "%r is greater than the maximum of %r" % (instance, maximum)
)
@@ -111,8 +166,8 @@ def multipleOf(validator, dB, instance, schema):
return
if isinstance(dB, float):
- mod = instance % dB
- failed = (mod > FLOAT_TOLERANCE) and (dB - mod) > FLOAT_TOLERANCE
+ quotient = instance / dB
+ failed = int(quotient) != quotient
else:
failed = instance % dB
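The quotient test above replaces the old ``FLOAT_TOLERANCE`` heuristic: a value is a multiple of ``dB`` exactly when ``instance / dB`` is integral. A quick sketch of the new rule in isolation (values are illustrative):

    def is_multiple(instance, dB):
        # Mirrors the new float branch: an integral quotient
        # means an exact multiple.
        quotient = instance / dB
        return int(quotient) == quotient

    print(is_multiple(10, 2.5))  # True:  10 / 2.5 == 4.0
    print(is_multiple(10, 3.0))  # False: 10 / 3.0 == 3.333...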
@@ -134,9 +189,9 @@ def uniqueItems(validator, uI, instance, schema):
if (
uI and
validator.is_type(instance, "array") and
- not _utils.uniq(instance)
+ not uniq(instance)
):
- yield ValidationError("%r has non-unique elements" % instance)
+ yield ValidationError("%r has non-unique elements" % (instance,))
def pattern(validator, patrn, instance, schema):
@@ -173,104 +228,52 @@ def dependencies(validator, dependencies, instance, schema):
if property not in instance:
continue
- if validator.is_type(dependency, "object"):
+ if validator.is_type(dependency, "array"):
+ for each in dependency:
+ if each not in instance:
+ message = "%r is a dependency of %r"
+ yield ValidationError(message % (each, property))
+ else:
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
- else:
- dependencies = _utils.ensure_list(dependency)
- for dependency in dependencies:
- if dependency not in instance:
- yield ValidationError(
- "%r is a dependency of %r" % (dependency, property)
- )
def enum(validator, enums, instance, schema):
- if instance not in enums:
+ if instance == 0 or instance == 1:
+ unbooled = unbool(instance)
+ if all(unbooled != unbool(each) for each in enums):
+ yield ValidationError("%r is not one of %r" % (instance, enums))
+ elif instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
- with validator.resolver.resolving(ref) as resolved:
- for error in validator.descend(instance, resolved):
- yield error
-
-
-def type_draft3(validator, types, instance, schema):
- types = _utils.ensure_list(types)
-
- all_errors = []
- for index, type in enumerate(types):
- if type == "any":
- return
- if validator.is_type(type, "object"):
- errors = list(validator.descend(instance, type, schema_path=index))
- if not errors:
- return
- all_errors.extend(errors)
- else:
- if validator.is_type(instance, type):
- return
+ resolve = getattr(validator.resolver, "resolve", None)
+ if resolve is None:
+ with validator.resolver.resolving(ref) as resolved:
+ for error in validator.descend(instance, resolved):
+ yield error
else:
- yield ValidationError(
- _utils.types_msg(instance, types), context=all_errors,
- )
-
-
-def properties_draft3(validator, properties, instance, schema):
- if not validator.is_type(instance, "object"):
- return
+ scope, resolved = validator.resolver.resolve(ref)
+ validator.resolver.push_scope(scope)
- for property, subschema in iteritems(properties):
- if property in instance:
- for error in validator.descend(
- instance[property],
- subschema,
- path=property,
- schema_path=property,
- ):
+ try:
+ for error in validator.descend(instance, resolved):
yield error
- elif subschema.get("required", False):
- error = ValidationError("%r is a required property" % property)
- error._set(
- validator="required",
- validator_value=subschema["required"],
- instance=instance,
- schema=schema,
- )
- error.path.appendleft(property)
- error.schema_path.extend([property, "required"])
- yield error
-
-
-def disallow_draft3(validator, disallow, instance, schema):
- for disallowed in _utils.ensure_list(disallow):
- if validator.is_valid(instance, {"type" : [disallowed]}):
- yield ValidationError(
- "%r is disallowed for %r" % (disallowed, instance)
- )
+ finally:
+ validator.resolver.pop_scope()
-def extends_draft3(validator, extends, instance, schema):
- if validator.is_type(extends, "object"):
- for error in validator.descend(instance, extends):
- yield error
- return
- for index, subschema in enumerate(extends):
- for error in validator.descend(instance, subschema, schema_path=index):
- yield error
-
-
-def type_draft4(validator, types, instance, schema):
- types = _utils.ensure_list(types)
+def type(validator, types, instance, schema):
+ types = ensure_list(types)
if not any(validator.is_type(instance, type) for type in types):
- yield ValidationError(_utils.types_msg(instance, types))
+ yield ValidationError(types_msg(instance, types))
-def properties_draft4(validator, properties, instance, schema):
+def properties(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
@@ -285,7 +288,7 @@ def properties_draft4(validator, properties, instance, schema):
yield error
-def required_draft4(validator, required, instance, schema):
+def required(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in required:
@@ -293,33 +296,31 @@ def required_draft4(validator, required, instance, schema):
yield ValidationError("%r is a required property" % property)
-def minProperties_draft4(validator, mP, instance, schema):
+def minProperties(validator, mP, instance, schema):
if validator.is_type(instance, "object") and len(instance) < mP:
yield ValidationError(
"%r does not have enough properties" % (instance,)
)
-def maxProperties_draft4(validator, mP, instance, schema):
+def maxProperties(validator, mP, instance, schema):
if not validator.is_type(instance, "object"):
return
if validator.is_type(instance, "object") and len(instance) > mP:
yield ValidationError("%r has too many properties" % (instance,))
-def allOf_draft4(validator, allOf, instance, schema):
+def allOf(validator, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
-def oneOf_draft4(validator, oneOf, instance, schema):
- subschemas = enumerate(oneOf)
+def anyOf(validator, anyOf, instance, schema):
all_errors = []
- for index, subschema in subschemas:
+ for index, subschema in enumerate(anyOf):
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
- first_valid = subschema
break
all_errors.extend(errs)
else:
@@ -328,20 +329,14 @@ def oneOf_draft4(validator, oneOf, instance, schema):
context=all_errors,
)
- more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
- if more_valid:
- more_valid.append(first_valid)
- reprs = ", ".join(repr(schema) for schema in more_valid)
- yield ValidationError(
- "%r is valid under each of %s" % (instance, reprs)
- )
-
-def anyOf_draft4(validator, anyOf, instance, schema):
+def oneOf(validator, oneOf, instance, schema):
+ subschemas = enumerate(oneOf)
all_errors = []
- for index, subschema in enumerate(anyOf):
+ for index, subschema in subschemas:
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
+ first_valid = subschema
break
all_errors.extend(errs)
else:
@@ -350,9 +345,29 @@ def anyOf_draft4(validator, anyOf, instance, schema):
context=all_errors,
)
+ more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(
+ "%r is valid under each of %s" % (instance, reprs)
+ )
+
-def not_draft4(validator, not_schema, instance, schema):
+def not_(validator, not_schema, instance, schema):
if validator.is_valid(instance, not_schema):
yield ValidationError(
"%r is not allowed for %r" % (not_schema, instance)
)
+
+
+def if_(validator, if_schema, instance, schema):
+ if validator.is_valid(instance, if_schema):
+ if u"then" in schema:
+ then = schema[u"then"]
+ for error in validator.descend(instance, then, schema_path="then"):
+ yield error
+ elif u"else" in schema:
+ else_ = schema[u"else"]
+ for error in validator.descend(instance, else_, schema_path="else"):
+ yield error
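``if_`` above implements draft 7's conditional keywords: ``then`` applies when the ``if`` subschema matches, ``else`` when it does not. A minimal sketch with the public validator (the schema is illustrative):

    from jsonschema import Draft7Validator

    schema = {
        "if": {"properties": {"kind": {"const": "int"}}},
        "then": {"properties": {"value": {"type": "integer"}}},
        "else": {"properties": {"value": {"type": "string"}}},
    }
    validator = Draft7Validator(schema)

    print(validator.is_valid({"kind": "int", "value": 3}))    # True
    print(validator.is_valid({"kind": "int", "value": "x"}))  # False: "then" applies
    print(validator.is_valid({"kind": "str", "value": "x"}))  # True:  "else" applies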
diff --git a/lib/spack/external/jsonschema/cli.py b/lib/spack/external/jsonschema/cli.py
index 0126564f46..ab3335b27c 100644
--- a/lib/spack/external/jsonschema/cli.py
+++ b/lib/spack/external/jsonschema/cli.py
@@ -1,8 +1,12 @@
+"""
+The ``jsonschema`` command line.
+"""
from __future__ import absolute_import
import argparse
import json
import sys
+from jsonschema import __version__
from jsonschema._reflect import namedAny
from jsonschema.validators import validator_for
@@ -26,26 +30,37 @@ parser.add_argument(
action="append",
dest="instances",
type=_json_file,
- help="a path to a JSON instance to validate "
- "(may be specified multiple times)",
+ help=(
+ "a path to a JSON instance (i.e. filename.json) "
+ "to validate (may be specified multiple times)"
+ ),
)
parser.add_argument(
"-F", "--error-format",
default="{error.instance}: {error.message}\n",
- help="the format to use for each error output message, specified in "
- "a form suitable for passing to str.format, which will be called "
- "with 'error' for each error",
+ help=(
+ "the format to use for each error output message, specified in "
+ "a form suitable for passing to str.format, which will be called "
+ "with 'error' for each error"
+ ),
)
parser.add_argument(
"-V", "--validator",
type=_namedAnyWithDefault,
- help="the fully qualified object name of a validator to use, or, for "
- "validators that are registered with jsonschema, simply the name "
- "of the class.",
+ help=(
+ "the fully qualified object name of a validator to use, or, for "
+ "validators that are registered with jsonschema, simply the name "
+ "of the class."
+ ),
+)
+parser.add_argument(
+ "--version",
+ action="version",
+ version=__version__,
)
parser.add_argument(
"schema",
- help="the JSON Schema to validate with",
+ help="the JSON Schema to validate with (i.e. schema.json)",
type=_json_file,
)
@@ -64,6 +79,9 @@ def main(args=sys.argv[1:]):
def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
error_format = arguments["error_format"]
validator = arguments["validator"](schema=arguments["schema"])
+
+ validator.check_schema(arguments["schema"])
+
errored = False
for instance in arguments["instances"] or ():
for error in validator.iter_errors(instance):
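``run`` takes a pre-parsed ``arguments`` mapping, so it can also be driven programmatically, mirroring the shape ``main`` builds from ``parse_args``. A sketch (schema and instances are illustrative):

    import sys

    from jsonschema import Draft4Validator
    from jsonschema.cli import run

    exit_code = run(
        {
            "validator": Draft4Validator,
            "schema": {"type": "integer"},
            "instances": [12, "not an integer"],
            "error_format": "{error.instance}: {error.message}\n",
        },
        stdout=sys.stdout,
        stderr=sys.stderr,
    )
    print(exit_code)  # 1: the second instance failed validation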
diff --git a/lib/spack/external/jsonschema/compat.py b/lib/spack/external/jsonschema/compat.py
index 6ca49ab6be..47e0980455 100644
--- a/lib/spack/external/jsonschema/compat.py
+++ b/lib/spack/external/jsonschema/compat.py
@@ -1,52 +1,54 @@
-from __future__ import unicode_literals
-import sys
+"""
+Python 2/3 compatibility helpers.
+
+Note: This module is *not* public API.
+"""
+import contextlib
import operator
+import sys
+
try:
- from collections import MutableMapping, Sequence # noqa
-except ImportError:
from collections.abc import MutableMapping, Sequence # noqa
+except ImportError:
+ from collections import MutableMapping, Sequence # noqa
PY3 = sys.version_info[0] >= 3
if PY3:
zip = zip
- from io import StringIO
+ from functools import lru_cache
+ from io import StringIO as NativeIO
from urllib.parse import (
- unquote, urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit
+ unquote, urljoin, urlunsplit, SplitResult, urlsplit
)
- from urllib.request import urlopen
+ from urllib.request import pathname2url, urlopen
str_types = str,
int_types = int,
iteritems = operator.methodcaller("items")
else:
from itertools import izip as zip # noqa
- from StringIO import StringIO
- from urlparse import (
- urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit # noqa
- )
- from urllib import unquote # noqa
- from urllib2 import urlopen # noqa
+ from io import BytesIO as NativeIO
+ from urlparse import urljoin, urlunsplit, SplitResult, urlsplit
+ from urllib import pathname2url, unquote # noqa
+ import urllib2 # noqa
+ def urlopen(*args, **kwargs):
+ return contextlib.closing(urllib2.urlopen(*args, **kwargs))
+
str_types = basestring
int_types = int, long
iteritems = operator.methodcaller("iteritems")
-
-# On python < 3.3 fragments are not handled properly with unknown schemes
-def urlsplit(url):
- scheme, netloc, path, query, fragment = _urlsplit(url)
- if "#" in path:
- path, fragment = path.split("#", 1)
- return SplitResult(scheme, netloc, path, query, fragment)
+ from functools32 import lru_cache
def urldefrag(url):
if "#" in url:
s, n, p, q, frag = urlsplit(url)
- defrag = urlunsplit((s, n, p, q, ''))
+ defrag = urlunsplit((s, n, p, q, ""))
else:
defrag = url
- frag = ''
+ frag = ""
return defrag, frag
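``urldefrag`` is what the ref resolver uses to split a document URI from its fragment; note this module is not public API. A tiny sketch (URL illustrative):

    from jsonschema.compat import urldefrag

    print(urldefrag("http://example.com/schema.json#/definitions/foo"))
    # ('http://example.com/schema.json', '/definitions/foo')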
diff --git a/lib/spack/external/jsonschema/exceptions.py b/lib/spack/external/jsonschema/exceptions.py
index 478e59c531..691dcffe6c 100644
--- a/lib/spack/external/jsonschema/exceptions.py
+++ b/lib/spack/external/jsonschema/exceptions.py
@@ -1,8 +1,13 @@
+"""
+Validation errors, and some surrounding helpers.
+"""
from collections import defaultdict, deque
import itertools
import pprint
import textwrap
+import attr
+
from jsonschema import _utils
from jsonschema.compat import PY3, iteritems
@@ -27,6 +32,18 @@ class _Error(Exception):
schema_path=(),
parent=None,
):
+ super(_Error, self).__init__(
+ message,
+ validator,
+ path,
+ cause,
+ context,
+ validator_value,
+ instance,
+ schema,
+ schema_path,
+ parent,
+ )
self.message = message
self.path = self.relative_path = deque(path)
self.schema_path = self.relative_schema_path = deque(schema_path)
@@ -44,9 +61,6 @@ class _Error(Exception):
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.message)
- def __str__(self):
- return unicode(self).encode("utf-8")
-
def __unicode__(self):
essential_for_verbose = (
self.validator, self.validator_value, self.instance, self.schema,
@@ -58,22 +72,27 @@ class _Error(Exception):
pinstance = pprint.pformat(self.instance, width=72)
return self.message + textwrap.dedent("""
- Failed validating %r in schema%s:
+ Failed validating %r in %s%s:
%s
- On instance%s:
+ On %s%s:
%s
""".rstrip()
) % (
self.validator,
+ self._word_for_schema_in_error_message,
_utils.format_as_index(list(self.relative_schema_path)[:-1]),
_utils.indent(pschema),
+ self._word_for_instance_in_error_message,
_utils.format_as_index(self.relative_path),
_utils.indent(pinstance),
)
if PY3:
__str__ = __unicode__
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
@classmethod
def create_from(cls, other):
@@ -86,7 +105,7 @@ class _Error(Exception):
return self.relative_path
path = deque(self.relative_path)
- path.extendleft(parent.absolute_path)
+ path.extendleft(reversed(parent.absolute_path))
return path
@property
@@ -96,7 +115,7 @@ class _Error(Exception):
return self.relative_schema_path
path = deque(self.relative_schema_path)
- path.extendleft(parent.absolute_schema_path)
+ path.extendleft(reversed(parent.absolute_schema_path))
return path
def _set(self, **kwargs):
@@ -113,26 +132,63 @@ class _Error(Exception):
class ValidationError(_Error):
- pass
+ """
+ An instance was invalid under a provided schema.
+ """
+
+ _word_for_schema_in_error_message = "schema"
+ _word_for_instance_in_error_message = "instance"
class SchemaError(_Error):
- pass
+ """
+ A schema was invalid under its corresponding metaschema.
+ """
+
+ _word_for_schema_in_error_message = "metaschema"
+ _word_for_instance_in_error_message = "schema"
+@attr.s(hash=True)
class RefResolutionError(Exception):
- pass
+ """
+ A ref could not be resolved.
+ """
+
+ _cause = attr.ib()
+
+ def __str__(self):
+ return str(self._cause)
+
+
+class UndefinedTypeCheck(Exception):
+ """
+ A type checker was asked to check a type it did not have registered.
+ """
+
+ def __init__(self, type):
+ self.type = type
+
+ def __unicode__(self):
+ return "Type %r is unknown to this type checker" % self.type
+
+ if PY3:
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
class UnknownType(Exception):
+ """
+ A validator was asked to validate an instance against an unknown type.
+ """
+
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
self.schema = schema
- def __str__(self):
- return unicode(self).encode("utf-8")
-
def __unicode__(self):
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
@@ -147,29 +203,34 @@ class UnknownType(Exception):
if PY3:
__str__ = __unicode__
-
+ else:
+ def __str__(self):
+ return unicode(self).encode("utf-8")
class FormatError(Exception):
+ """
+ Validating a format failed.
+ """
+
def __init__(self, message, cause=None):
super(FormatError, self).__init__(message, cause)
self.message = message
self.cause = self.__cause__ = cause
- def __str__(self):
- return self.message.encode("utf-8")
-
def __unicode__(self):
return self.message
if PY3:
__str__ = __unicode__
+ else:
+ def __str__(self):
+ return self.message.encode("utf-8")
class ErrorTree(object):
"""
ErrorTrees make it easier to check which validations failed.
-
"""
_instance = _unset
@@ -184,12 +245,11 @@ class ErrorTree(object):
container = container[element]
container.errors[error.validator] = error
- self._instance = error.instance
+ container._instance = error.instance
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
-
"""
return index in self._contents
@@ -201,8 +261,7 @@ class ErrorTree(object):
If the index is not in the instance that this tree corresponds to and
is not known by this tree, whatever error would be raised by
``instance.__getitem__`` will be propagated (usually this is some
- subclass of :class:`LookupError`.
-
+ subclass of `exceptions.LookupError`).
"""
if self._instance is not _unset and index not in self:
@@ -210,22 +269,22 @@ class ErrorTree(object):
return self._contents[index]
def __setitem__(self, index, value):
+ """
+ Add an error to the tree at the given ``index``.
+ """
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
-
"""
return iter(self._contents)
def __len__(self):
"""
- Same as :attr:`total_errors`.
-
+ Return the `total_errors`.
"""
-
return self.total_errors
def __repr__(self):
@@ -235,7 +294,6 @@ class ErrorTree(object):
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
-
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
@@ -243,6 +301,21 @@ class ErrorTree(object):
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
+ """
+ Create a key function that can be used to sort errors by relevance.
+
+ Arguments:
+ weak (set):
+ a collection of validator names to consider to be "weak".
+ If there are two errors at the same level of the instance
+ and one is in the set of weak validator names, the other
+ error will take priority. By default, :validator:`anyOf` and
+ :validator:`oneOf` are considered weak validators and will
+ be superseded by other same-level validation errors.
+
+ strong (set):
+ a collection of validator names to consider to be "strong".
+ """
def relevance(error):
validator = error.validator
return -len(error.path), validator not in weak, validator in strong
@@ -253,6 +326,43 @@ relevance = by_relevance()
def best_match(errors, key=relevance):
+ """
+ Try to find an error that appears to be the best match among given errors.
+
+ In general, errors that are higher up in the instance (i.e. for which
+ `ValidationError.path` is shorter) are considered better matches,
+ since they indicate "more" is wrong with the instance.
+
+ If the resulting match is either :validator:`oneOf` or :validator:`anyOf`,
+ the *opposite* assumption is made -- i.e. the deepest error is picked,
+ since these validators only need to match once, and any other errors may
+ not be relevant.
+
+ Arguments:
+ errors (collections.Iterable):
+
+ the errors to select from. Do not provide a mixture of
+ errors from different validation attempts (i.e. from
+ different instances or schemas), since it won't produce
+ sensible output.
+
+ key (collections.Callable):
+
+ the key to use when sorting errors. See `relevance` and
+ transitively `by_relevance` for more details (the default is
+ to sort with the defaults of that function). Changing the
+ default is only useful if you want to change the function
+ that rates errors but still want the error context descent
+ done by this function.
+
+ Returns:
+ the best matching error, or ``None`` if the iterable was empty
+
+ .. note::
+
+ This function is a heuristic. Its return value may change for a given
+ set of inputs from version to version if better heuristics are added.
+ """
errors = iter(errors)
best = next(errors, None)
if best is None:
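A sketch of ``best_match`` on a nested failure, adapted from the test suite removed below: the shallower ``minProperties`` error outranks the deeper type error:

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import best_match

    validator = Draft4Validator(
        {
            "properties": {
                "foo": {
                    "minProperties": 2,
                    "properties": {"bar": {"type": "object"}},
                },
            },
        }
    )
    error = best_match(validator.iter_errors({"foo": {"bar": []}}))
    print(error.validator)  # minProperties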
diff --git a/lib/spack/external/jsonschema/schemas/draft3.json b/lib/spack/external/jsonschema/schemas/draft3.json
index 5bcefe30d5..f8a09c563b 100644
--- a/lib/spack/external/jsonschema/schemas/draft3.json
+++ b/lib/spack/external/jsonschema/schemas/draft3.json
@@ -80,9 +80,7 @@
"type": "number"
},
"enum": {
- "minItems": 1,
- "type": "array",
- "uniqueItems": true
+ "type": "array"
},
"exclusiveMaximum": {
"default": false,
diff --git a/lib/spack/external/jsonschema/schemas/draft4.json b/lib/spack/external/jsonschema/schemas/draft4.json
index fead5cefab..9b666cff88 100644
--- a/lib/spack/external/jsonschema/schemas/draft4.json
+++ b/lib/spack/external/jsonschema/schemas/draft4.json
@@ -111,9 +111,7 @@
"type": "string"
},
"enum": {
- "minItems": 1,
- "type": "array",
- "uniqueItems": true
+ "type": "array"
},
"exclusiveMaximum": {
"default": false,
@@ -123,6 +121,9 @@
"default": false,
"type": "boolean"
},
+ "format": {
+ "type": "string"
+ },
"id": {
"format": "uri",
"type": "string"
diff --git a/lib/spack/external/jsonschema/schemas/draft6.json b/lib/spack/external/jsonschema/schemas/draft6.json
new file mode 100644
index 0000000000..a0d2bf7896
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft6.json
@@ -0,0 +1,153 @@
+{
+ "$schema": "http://json-schema.org/draft-06/schema#",
+ "$id": "http://json-schema.org/draft-06/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "examples": {
+ "type": "array",
+ "items": {}
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": {},
+ "enum": {
+ "type": "array"
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": {}
+}
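This metaschema backs ``Draft6Validator.check_schema``. A quick sketch; note that draft 6 made ``exclusiveMinimum`` a number, so a draft-4-style boolean is now a schema error:

    from jsonschema import Draft6Validator, SchemaError

    Draft6Validator.check_schema({"type": "string"})  # passes silently

    try:
        Draft6Validator.check_schema({"exclusiveMinimum": True})
    except SchemaError as err:
        print(err.message)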
diff --git a/lib/spack/external/jsonschema/schemas/draft7.json b/lib/spack/external/jsonschema/schemas/draft7.json
new file mode 100644
index 0000000000..746cde9690
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft7.json
@@ -0,0 +1,166 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://json-schema.org/draft-07/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": true
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "if": {"$ref": "#"},
+ "then": {"$ref": "#"},
+ "else": {"$ref": "#"},
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": true
+}
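The ``"type": ["object", "boolean"]`` at the top and the ``true`` defaults reflect that any schema position may be a bare boolean in drafts 6 and 7: ``true`` accepts everything, ``false`` rejects everything. A sketch:

    from jsonschema import Draft7Validator

    print(Draft7Validator(True).is_valid({"anything": "goes"}))   # True
    print(Draft7Validator(False).is_valid({"anything": "goes"}))  # False

    # A false subschema forbids a property outright.
    schema = {"properties": {"forbidden": False}}
    print(Draft7Validator(schema).is_valid({"forbidden": 1}))  # False
    print(Draft7Validator(schema).is_valid({"allowed": 1}))    # True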
diff --git a/lib/spack/external/jsonschema/tests/__init__.py b/lib/spack/external/jsonschema/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/spack/external/jsonschema/tests/__init__.py
+++ /dev/null
diff --git a/lib/spack/external/jsonschema/tests/compat.py b/lib/spack/external/jsonschema/tests/compat.py
deleted file mode 100644
index b37483f5dd..0000000000
--- a/lib/spack/external/jsonschema/tests/compat.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import sys
-
-
-if sys.version_info[:2] < (2, 7): # pragma: no cover
- import unittest2 as unittest
-else:
- import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-
-# flake8: noqa
diff --git a/lib/spack/external/jsonschema/tests/test_cli.py b/lib/spack/external/jsonschema/tests/test_cli.py
deleted file mode 100644
index f625ca989d..0000000000
--- a/lib/spack/external/jsonschema/tests/test_cli.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from jsonschema import Draft4Validator, ValidationError, cli
-from jsonschema.compat import StringIO
-from jsonschema.tests.compat import mock, unittest
-
-
-def fake_validator(*errors):
- errors = list(reversed(errors))
-
- class FakeValidator(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def iter_errors(self, instance):
- if errors:
- return errors.pop()
- return []
- return FakeValidator
-
-
-class TestParser(unittest.TestCase):
- FakeValidator = fake_validator()
-
- def setUp(self):
- mock_open = mock.mock_open()
- patch_open = mock.patch.object(cli, "open", mock_open, create=True)
- patch_open.start()
- self.addCleanup(patch_open.stop)
-
- mock_json_load = mock.Mock()
- mock_json_load.return_value = {}
- patch_json_load = mock.patch("json.load")
- patch_json_load.start()
- self.addCleanup(patch_json_load.stop)
-
- def test_find_validator_by_fully_qualified_object_name(self):
- arguments = cli.parse_args(
- [
- "--validator",
- "jsonschema.tests.test_cli.TestParser.FakeValidator",
- "--instance", "foo.json",
- "schema.json",
- ]
- )
- self.assertIs(arguments["validator"], self.FakeValidator)
-
- def test_find_validator_in_jsonschema(self):
- arguments = cli.parse_args(
- [
- "--validator", "Draft4Validator",
- "--instance", "foo.json",
- "schema.json",
- ]
- )
- self.assertIs(arguments["validator"], Draft4Validator)
-
-
-class TestCLI(unittest.TestCase):
- def test_successful_validation(self):
- stdout, stderr = StringIO(), StringIO()
- exit_code = cli.run(
- {
- "validator": fake_validator(),
- "schema": {},
- "instances": [1],
- "error_format": "{error.message}",
- },
- stdout=stdout,
- stderr=stderr,
- )
- self.assertFalse(stdout.getvalue())
- self.assertFalse(stderr.getvalue())
- self.assertEqual(exit_code, 0)
-
- def test_unsuccessful_validation(self):
- error = ValidationError("I am an error!", instance=1)
- stdout, stderr = StringIO(), StringIO()
- exit_code = cli.run(
- {
- "validator": fake_validator([error]),
- "schema": {},
- "instances": [1],
- "error_format": "{error.instance} - {error.message}",
- },
- stdout=stdout,
- stderr=stderr,
- )
- self.assertFalse(stdout.getvalue())
- self.assertEqual(stderr.getvalue(), "1 - I am an error!")
- self.assertEqual(exit_code, 1)
-
- def test_unsuccessful_validation_multiple_instances(self):
- first_errors = [
- ValidationError("9", instance=1),
- ValidationError("8", instance=1),
- ]
- second_errors = [ValidationError("7", instance=2)]
- stdout, stderr = StringIO(), StringIO()
- exit_code = cli.run(
- {
- "validator": fake_validator(first_errors, second_errors),
- "schema": {},
- "instances": [1, 2],
- "error_format": "{error.instance} - {error.message}\t",
- },
- stdout=stdout,
- stderr=stderr,
- )
- self.assertFalse(stdout.getvalue())
- self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
- self.assertEqual(exit_code, 1)
diff --git a/lib/spack/external/jsonschema/tests/test_exceptions.py b/lib/spack/external/jsonschema/tests/test_exceptions.py
deleted file mode 100644
index 9e5793c628..0000000000
--- a/lib/spack/external/jsonschema/tests/test_exceptions.py
+++ /dev/null
@@ -1,382 +0,0 @@
-import textwrap
-
-from jsonschema import Draft4Validator, exceptions
-from jsonschema.compat import PY3
-from jsonschema.tests.compat import mock, unittest
-
-
-class TestBestMatch(unittest.TestCase):
- def best_match(self, errors):
- errors = list(errors)
- best = exceptions.best_match(errors)
- reversed_best = exceptions.best_match(reversed(errors))
- self.assertEqual(
- best,
- reversed_best,
- msg="Didn't return a consistent best match!\n"
- "Got: {0}\n\nThen: {1}".format(best, reversed_best),
- )
- return best
-
- def test_shallower_errors_are_better_matches(self):
- validator = Draft4Validator(
- {
- "properties" : {
- "foo" : {
- "minProperties" : 2,
- "properties" : {"bar" : {"type" : "object"}},
- }
- }
- }
- )
- best = self.best_match(validator.iter_errors({"foo" : {"bar" : []}}))
- self.assertEqual(best.validator, "minProperties")
-
- def test_oneOf_and_anyOf_are_weak_matches(self):
- """
- A property you *must* match is probably better than one you have to
- match a part of.
-
- """
-
- validator = Draft4Validator(
- {
- "minProperties" : 2,
- "anyOf" : [{"type" : "string"}, {"type" : "number"}],
- "oneOf" : [{"type" : "string"}, {"type" : "number"}],
- }
- )
- best = self.best_match(validator.iter_errors({}))
- self.assertEqual(best.validator, "minProperties")
-
- def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
- """
- If the most relevant error is an anyOf, then we traverse its context
- and select the otherwise *least* relevant error, since in this case
- that means the most specific, deep, error inside the instance.
-
- I.e. since only one of the schemas must match, we look for the most
- relevant one.
-
- """
-
- validator = Draft4Validator(
- {
- "properties" : {
- "foo" : {
- "anyOf" : [
- {"type" : "string"},
- {"properties" : {"bar" : {"type" : "array"}}},
- ],
- },
- },
- },
- )
- best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
- self.assertEqual(best.validator_value, "array")
-
- def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
- """
- If the most relevant error is an oneOf, then we traverse its context
- and select the otherwise *least* relevant error, since in this case
- that means the most specific, deep, error inside the instance.
-
- I.e. since only one of the schemas must match, we look for the most
- relevant one.
-
- """
-
- validator = Draft4Validator(
- {
- "properties" : {
- "foo" : {
- "oneOf" : [
- {"type" : "string"},
- {"properties" : {"bar" : {"type" : "array"}}},
- ],
- },
- },
- },
- )
- best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
- self.assertEqual(best.validator_value, "array")
-
- def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
- """
- Now, if the error is allOf, we traverse but select the *most* relevant
- error from the context, because all schemas here must match anyways.
-
- """
-
- validator = Draft4Validator(
- {
- "properties" : {
- "foo" : {
- "allOf" : [
- {"type" : "string"},
- {"properties" : {"bar" : {"type" : "array"}}},
- ],
- },
- },
- },
- )
- best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
- self.assertEqual(best.validator_value, "string")
-
- def test_nested_context_for_oneOf(self):
- validator = Draft4Validator(
- {
- "properties" : {
- "foo" : {
- "oneOf" : [
- {"type" : "string"},
- {
- "oneOf" : [
- {"type" : "string"},
- {
- "properties" : {
- "bar" : {"type" : "array"}
- },
- },
- ],
- },
- ],
- },
- },
- },
- )
- best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
- self.assertEqual(best.validator_value, "array")
-
- def test_one_error(self):
- validator = Draft4Validator({"minProperties" : 2})
- error, = validator.iter_errors({})
- self.assertEqual(
- exceptions.best_match(validator.iter_errors({})).validator,
- "minProperties",
- )
-
- def test_no_errors(self):
- validator = Draft4Validator({})
- self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
-
-
-class TestByRelevance(unittest.TestCase):
- def test_short_paths_are_better_matches(self):
- shallow = exceptions.ValidationError("Oh no!", path=["baz"])
- deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
- match = max([shallow, deep], key=exceptions.relevance)
- self.assertIs(match, shallow)
-
- match = max([deep, shallow], key=exceptions.relevance)
- self.assertIs(match, shallow)
-
- def test_global_errors_are_even_better_matches(self):
- shallow = exceptions.ValidationError("Oh no!", path=[])
- deep = exceptions.ValidationError("Oh yes!", path=["foo"])
-
- errors = sorted([shallow, deep], key=exceptions.relevance)
- self.assertEqual(
- [list(error.path) for error in errors],
- [["foo"], []],
- )
-
- errors = sorted([deep, shallow], key=exceptions.relevance)
- self.assertEqual(
- [list(error.path) for error in errors],
- [["foo"], []],
- )
-
- def test_weak_validators_are_lower_priority(self):
- weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
- normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
-
- best_match = exceptions.by_relevance(weak="a")
-
- match = max([weak, normal], key=best_match)
- self.assertIs(match, normal)
-
- match = max([normal, weak], key=best_match)
- self.assertIs(match, normal)
-
- def test_strong_validators_are_higher_priority(self):
- weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
- normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
- strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
-
- best_match = exceptions.by_relevance(weak="a", strong="c")
-
- match = max([weak, normal, strong], key=best_match)
- self.assertIs(match, strong)
-
- match = max([strong, normal, weak], key=best_match)
- self.assertIs(match, strong)
-
-
-class TestErrorTree(unittest.TestCase):
- def test_it_knows_how_many_total_errors_it_contains(self):
- errors = [mock.MagicMock() for _ in range(8)]
- tree = exceptions.ErrorTree(errors)
- self.assertEqual(tree.total_errors, 8)
-
- def test_it_contains_an_item_if_the_item_had_an_error(self):
- errors = [exceptions.ValidationError("a message", path=["bar"])]
- tree = exceptions.ErrorTree(errors)
- self.assertIn("bar", tree)
-
- def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
- errors = [exceptions.ValidationError("a message", path=["bar"])]
- tree = exceptions.ErrorTree(errors)
- self.assertNotIn("foo", tree)
-
- def test_validators_that_failed_appear_in_errors_dict(self):
- error = exceptions.ValidationError("a message", validator="foo")
- tree = exceptions.ErrorTree([error])
- self.assertEqual(tree.errors, {"foo" : error})
-
- def test_it_creates_a_child_tree_for_each_nested_path(self):
- errors = [
- exceptions.ValidationError("a bar message", path=["bar"]),
- exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
- ]
- tree = exceptions.ErrorTree(errors)
- self.assertIn(0, tree["bar"])
- self.assertNotIn(1, tree["bar"])
-
- def test_children_have_their_errors_dicts_built(self):
- e1, e2 = (
- exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
- exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
- )
- tree = exceptions.ErrorTree([e1, e2])
- self.assertEqual(tree["bar"][0].errors, {"foo" : e1, "quux" : e2})
-
- def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
- error = exceptions.ValidationError("123", validator="foo", instance=[])
- tree = exceptions.ErrorTree([error])
-
- with self.assertRaises(IndexError):
- tree[0]
-
- def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
- """
- If a validator is dumb (like :validator:`required` in draft 3) and
- refers to a path that isn't in the instance, the tree still properly
- returns a subtree for that path.
-
- """
-
- error = exceptions.ValidationError(
- "a message", validator="foo", instance={}, path=["foo"],
- )
- tree = exceptions.ErrorTree([error])
- self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
-
-
-class TestErrorReprStr(unittest.TestCase):
- def make_error(self, **kwargs):
- defaults = dict(
- message=u"hello",
- validator=u"type",
- validator_value=u"string",
- instance=5,
- schema={u"type": u"string"},
- )
- defaults.update(kwargs)
- return exceptions.ValidationError(**defaults)
-
- def assertShows(self, expected, **kwargs):
- if PY3:
- expected = expected.replace("u'", "'")
- expected = textwrap.dedent(expected).rstrip("\n")
-
- error = self.make_error(**kwargs)
- message_line, _, rest = str(error).partition("\n")
- self.assertEqual(message_line, error.message)
- self.assertEqual(rest, expected)
-
- def test_repr(self):
- self.assertEqual(
- repr(exceptions.ValidationError(message="Hello!")),
- "<ValidationError: %r>" % "Hello!",
- )
-
- def test_unset_error(self):
- error = exceptions.ValidationError("message")
- self.assertEqual(str(error), "message")
-
- kwargs = {
- "validator": "type",
- "validator_value": "string",
- "instance": 5,
- "schema": {"type": "string"}
- }
- # Just the message should show if any of the attributes are unset
- for attr in kwargs:
- k = dict(kwargs)
- del k[attr]
- error = exceptions.ValidationError("message", **k)
- self.assertEqual(str(error), "message")
-
- def test_empty_paths(self):
- self.assertShows(
- """
- Failed validating u'type' in schema:
- {u'type': u'string'}
-
- On instance:
- 5
- """,
- path=[],
- schema_path=[],
- )
-
- def test_one_item_paths(self):
- self.assertShows(
- """
- Failed validating u'type' in schema:
- {u'type': u'string'}
-
- On instance[0]:
- 5
- """,
- path=[0],
- schema_path=["items"],
- )
-
- def test_multiple_item_paths(self):
- self.assertShows(
- """
- Failed validating u'type' in schema[u'items'][0]:
- {u'type': u'string'}
-
- On instance[0][u'a']:
- 5
- """,
- path=[0, u"a"],
- schema_path=[u"items", 0, 1],
- )
-
- def test_uses_pprint(self):
- with mock.patch("pprint.pformat") as pformat:
- str(self.make_error())
- self.assertEqual(pformat.call_count, 2) # schema + instance
-
- def test_str_works_with_instances_having_overriden_eq_operator(self):
- """
- Check for https://github.com/Julian/jsonschema/issues/164 which
- rendered exceptions unusable when a `ValidationError` involved
- instances with an `__eq__` method that returned truthy values.
-
- """
-
- instance = mock.MagicMock()
- error = exceptions.ValidationError(
- "a message",
- validator="foo",
- instance=instance,
- validator_value="some",
- schema="schema",
- )
- str(error)
- self.assertFalse(instance.__eq__.called)
diff --git a/lib/spack/external/jsonschema/tests/test_format.py b/lib/spack/external/jsonschema/tests/test_format.py
deleted file mode 100644
index 8392ca1de3..0000000000
--- a/lib/spack/external/jsonschema/tests/test_format.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-Tests for the parts of jsonschema related to the :validator:`format` property.
-
-"""
-
-from jsonschema.tests.compat import mock, unittest
-
-from jsonschema import FormatError, ValidationError, FormatChecker
-from jsonschema.validators import Draft4Validator
-
-
-class TestFormatChecker(unittest.TestCase):
- def setUp(self):
- self.fn = mock.Mock()
-
- def test_it_can_validate_no_formats(self):
- checker = FormatChecker(formats=())
- self.assertFalse(checker.checkers)
-
- def test_it_raises_a_key_error_for_unknown_formats(self):
- with self.assertRaises(KeyError):
- FormatChecker(formats=["o noes"])
-
- def test_it_can_register_cls_checkers(self):
- with mock.patch.dict(FormatChecker.checkers, clear=True):
- FormatChecker.cls_checks("new")(self.fn)
- self.assertEqual(FormatChecker.checkers, {"new" : (self.fn, ())})
-
- def test_it_can_register_checkers(self):
- checker = FormatChecker()
- checker.checks("new")(self.fn)
- self.assertEqual(
- checker.checkers,
- dict(FormatChecker.checkers, new=(self.fn, ()))
- )
-
- def test_it_catches_registered_errors(self):
- checker = FormatChecker()
- cause = self.fn.side_effect = ValueError()
-
- checker.checks("foo", raises=ValueError)(self.fn)
-
- with self.assertRaises(FormatError) as cm:
- checker.check("bar", "foo")
-
- self.assertIs(cm.exception.cause, cause)
- self.assertIs(cm.exception.__cause__, cause)
-
- # Unregistered errors should not be caught
- self.fn.side_effect = AttributeError
- with self.assertRaises(AttributeError):
- checker.check("bar", "foo")
-
- def test_format_error_causes_become_validation_error_causes(self):
- checker = FormatChecker()
- checker.checks("foo", raises=ValueError)(self.fn)
- cause = self.fn.side_effect = ValueError()
- validator = Draft4Validator({"format" : "foo"}, format_checker=checker)
-
- with self.assertRaises(ValidationError) as cm:
- validator.validate("bar")
-
- self.assertIs(cm.exception.__cause__, cause)
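The removed format tests exercised checker registration and cause propagation. A hedged sketch of that pattern; the "even" format name and its check function are made up for illustration:

    from jsonschema import Draft4Validator, FormatChecker, ValidationError

    checker = FormatChecker()

    @checker.checks("even", raises=ValueError)
    def is_even(value):
        if value % 2:
            raise ValueError("%r is odd" % (value,))
        return True

    validator = Draft4Validator({"format": "even"}, format_checker=checker)
    validator.validate(2)      # passes
    try:
        validator.validate(3)
    except ValidationError as e:
        print(e.cause)         # the original ValueError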
diff --git a/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py b/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
deleted file mode 100644
index 75c6857bc0..0000000000
--- a/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
+++ /dev/null
@@ -1,290 +0,0 @@
-"""
-Test runner for the JSON Schema official test suite
-
-Tests comprehensive correctness of each draft's validator.
-
-See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
-
-"""
-
-from contextlib import closing
-from decimal import Decimal
-import glob
-import json
-import io
-import itertools
-import os
-import re
-import subprocess
-import sys
-
-try:
- from sys import pypy_version_info
-except ImportError:
- pypy_version_info = None
-
-from jsonschema import (
- FormatError, SchemaError, ValidationError, Draft3Validator,
- Draft4Validator, FormatChecker, draft3_format_checker,
- draft4_format_checker, validate,
-)
-from jsonschema.compat import PY3
-from jsonschema.tests.compat import mock, unittest
-import jsonschema
-
-
-REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
-SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))
-
-if not os.path.isdir(SUITE):
- raise ValueError(
- "Can't find the JSON-Schema-Test-Suite directory. Set the "
- "'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
- "alongside a checkout of the suite."
- )
-
-TESTS_DIR = os.path.join(SUITE, "tests")
-JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")
-
-remotes_stdout = subprocess.Popen(
- ["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
-).stdout
-
-with closing(remotes_stdout):
- if PY3:
- remotes_stdout = io.TextIOWrapper(remotes_stdout)
- REMOTES = json.load(remotes_stdout)
-
-
-def make_case(schema, data, valid, name):
- if valid:
- def test_case(self):
- kwargs = getattr(self, "validator_kwargs", {})
- validate(data, schema, cls=self.validator_class, **kwargs)
- else:
- def test_case(self):
- kwargs = getattr(self, "validator_kwargs", {})
- with self.assertRaises(ValidationError):
- validate(data, schema, cls=self.validator_class, **kwargs)
-
- if not PY3:
- name = name.encode("utf-8")
- test_case.__name__ = name
-
- return test_case
-
-
-def maybe_skip(skip, test_case, case, test):
- if skip is not None:
- reason = skip(case, test)
- if reason is not None:
- test_case = unittest.skip(reason)(test_case)
- return test_case
-
-
-def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
- if ignore_glob:
- ignore_glob = os.path.join(basedir, ignore_glob)
-
- def add_test_methods(test_class):
- ignored = set(glob.iglob(ignore_glob))
-
- for filename in glob.iglob(os.path.join(basedir, tests_glob)):
- if filename in ignored:
- continue
-
- validating, _ = os.path.splitext(os.path.basename(filename))
- id = itertools.count(1)
-
- with open(filename) as test_file:
- for case in json.load(test_file):
- for test in case["tests"]:
- name = "test_%s_%s_%s" % (
- validating,
- next(id),
- re.sub(r"[\W ]+", "_", test["description"]),
- )
- assert not hasattr(test_class, name), name
-
- test_case = make_case(
- data=test["data"],
- schema=case["schema"],
- valid=test["valid"],
- name=name,
- )
- test_case = maybe_skip(skip, test_case, case, test)
- setattr(test_class, name, test_case)
-
- return test_class
- return add_test_methods
-
-
-class TypesMixin(object):
- @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
- def test_string_a_bytestring_is_a_string(self):
- self.validator_class({"type" : "string"}).validate(b"foo")
-
-
-class DecimalMixin(object):
- def test_it_can_validate_with_decimals(self):
- schema = {"type" : "number"}
- validator = self.validator_class(
- schema, types={"number" : (int, float, Decimal)}
- )
-
- for valid in [1, 1.1, Decimal(1) / Decimal(8)]:
- validator.validate(valid)
-
- for invalid in ["foo", {}, [], True, None]:
- with self.assertRaises(ValidationError):
- validator.validate(invalid)
-
-
-def missing_format(checker):
- def missing_format(case, test):
- format = case["schema"].get("format")
- if format not in checker.checkers:
- return "Format checker {0!r} not found.".format(format)
- elif (
- format == "date-time" and
- pypy_version_info is not None and
- pypy_version_info[:2] <= (1, 9)
- ):
- # datetime.datetime is overzealous about typechecking in <=1.9
- return "datetime.datetime is broken on this version of PyPy."
- return missing_format
-
-
-class FormatMixin(object):
- def test_it_returns_true_for_formats_it_does_not_know_about(self):
- validator = self.validator_class(
- {"format" : "carrot"}, format_checker=FormatChecker(),
- )
- validator.validate("bugs")
-
- def test_it_does_not_validate_formats_by_default(self):
- validator = self.validator_class({})
- self.assertIsNone(validator.format_checker)
-
- def test_it_validates_formats_if_a_checker_is_provided(self):
- checker = mock.Mock(spec=FormatChecker)
- validator = self.validator_class(
- {"format" : "foo"}, format_checker=checker,
- )
-
- validator.validate("bar")
-
- checker.check.assert_called_once_with("bar", "foo")
-
- cause = ValueError()
- checker.check.side_effect = FormatError('aoeu', cause=cause)
-
- with self.assertRaises(ValidationError) as cm:
- validator.validate("bar")
- # Make sure original cause is attached
- self.assertIs(cm.exception.cause, cause)
-
- def test_it_validates_formats_of_any_type(self):
- checker = mock.Mock(spec=FormatChecker)
- validator = self.validator_class(
- {"format" : "foo"}, format_checker=checker,
- )
-
- validator.validate([1, 2, 3])
-
- checker.check.assert_called_once_with([1, 2, 3], "foo")
-
- cause = ValueError()
- checker.check.side_effect = FormatError('aoeu', cause=cause)
-
- with self.assertRaises(ValidationError) as cm:
- validator.validate([1, 2, 3])
- # Make sure original cause is attached
- self.assertIs(cm.exception.cause, cause)
-
-
-if sys.maxunicode == 2 ** 16 - 1: # This is a narrow build.
- def narrow_unicode_build(case, test):
- if "supplementary Unicode" in test["description"]:
- return "Not running surrogate Unicode case, this Python is narrow."
-else:
- def narrow_unicode_build(case, test): # This isn't, skip nothing.
- return
-
-
-@load_json_cases(
- "draft3/*.json",
- skip=narrow_unicode_build,
- ignore_glob="draft3/refRemote.json",
-)
-@load_json_cases(
- "draft3/optional/format.json", skip=missing_format(draft3_format_checker)
-)
-@load_json_cases("draft3/optional/bignum.json")
-@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
-class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
- validator_class = Draft3Validator
- validator_kwargs = {"format_checker" : draft3_format_checker}
-
- def test_any_type_is_valid_for_type_any(self):
- validator = self.validator_class({"type" : "any"})
- validator.validate(mock.Mock())
-
- # TODO: we're in need of more meta schema tests
- def test_invalid_properties(self):
- with self.assertRaises(SchemaError):
- validate({}, {"properties": {"test": True}},
- cls=self.validator_class)
-
- def test_minItems_invalid_string(self):
- with self.assertRaises(SchemaError):
- # needs to be an integer
- validate([1], {"minItems" : "1"}, cls=self.validator_class)
-
-
-@load_json_cases(
- "draft4/*.json",
- skip=narrow_unicode_build,
- ignore_glob="draft4/refRemote.json",
-)
-@load_json_cases(
- "draft4/optional/format.json", skip=missing_format(draft4_format_checker)
-)
-@load_json_cases("draft4/optional/bignum.json")
-@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
-class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
- validator_class = Draft4Validator
- validator_kwargs = {"format_checker" : draft4_format_checker}
-
- # TODO: we're in need of more meta schema tests
- def test_invalid_properties(self):
- with self.assertRaises(SchemaError):
- validate({}, {"properties": {"test": True}},
- cls=self.validator_class)
-
- def test_minItems_invalid_string(self):
- with self.assertRaises(SchemaError):
- # needs to be an integer
- validate([1], {"minItems" : "1"}, cls=self.validator_class)
-
-
-class RemoteRefResolutionMixin(object):
- def setUp(self):
- patch = mock.patch("jsonschema.validators.requests")
- requests = patch.start()
- requests.get.side_effect = self.resolve
- self.addCleanup(patch.stop)
-
- def resolve(self, reference):
- _, _, reference = reference.partition("http://localhost:1234/")
- return mock.Mock(**{"json.return_value" : REMOTES.get(reference)})
-
-
-@load_json_cases("draft3/refRemote.json")
-class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
- validator_class = Draft3Validator
-
-
-@load_json_cases("draft4/refRemote.json")
-class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
- validator_class = Draft4Validator
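The suite runner deleted above generated one unittest method per JSON case through the @load_json_cases class decorators. The underlying technique, sketched generically (the inline cases stand in for the JSON files the real runner read):

    import unittest

    CASES = [("one", 1, 1), ("two", 2, 2)]

    def make_case(expected, actual):
        def test_case(self):
            self.assertEqual(expected, actual)
        return test_case

    class TestGenerated(unittest.TestCase):
        pass

    for name, expected, actual in CASES:
        test = make_case(expected, actual)
        test.__name__ = "test_%s" % name
        setattr(TestGenerated, test.__name__, test)
    del test  # keep test discovery from collecting the loop variable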
diff --git a/lib/spack/external/jsonschema/tests/test_validators.py b/lib/spack/external/jsonschema/tests/test_validators.py
deleted file mode 100644
index f8692388ea..0000000000
--- a/lib/spack/external/jsonschema/tests/test_validators.py
+++ /dev/null
@@ -1,786 +0,0 @@
-from collections import deque
-from contextlib import contextmanager
-import json
-
-from jsonschema import FormatChecker, ValidationError
-from jsonschema.tests.compat import mock, unittest
-from jsonschema.validators import (
- RefResolutionError, UnknownType, Draft3Validator,
- Draft4Validator, RefResolver, create, extend, validator_for, validate,
-)
-
-
-class TestCreateAndExtend(unittest.TestCase):
- def setUp(self):
- self.meta_schema = {u"properties" : {u"smelly" : {}}}
- self.smelly = mock.MagicMock()
- self.validators = {u"smelly" : self.smelly}
- self.types = {u"dict" : dict}
- self.Validator = create(
- meta_schema=self.meta_schema,
- validators=self.validators,
- default_types=self.types,
- )
-
- self.validator_value = 12
- self.schema = {u"smelly" : self.validator_value}
- self.validator = self.Validator(self.schema)
-
- def test_attrs(self):
- self.assertEqual(self.Validator.VALIDATORS, self.validators)
- self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
- self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
-
- def test_init(self):
- self.assertEqual(self.validator.schema, self.schema)
-
- def test_iter_errors(self):
- instance = "hello"
-
- self.smelly.return_value = []
- self.assertEqual(list(self.validator.iter_errors(instance)), [])
-
- error = mock.Mock()
- self.smelly.return_value = [error]
- self.assertEqual(list(self.validator.iter_errors(instance)), [error])
-
- self.smelly.assert_called_with(
- self.validator, self.validator_value, instance, self.schema,
- )
-
- def test_if_a_version_is_provided_it_is_registered(self):
- with mock.patch("jsonschema.validators.validates") as validates:
- validates.side_effect = lambda version : lambda cls : cls
- Validator = create(meta_schema={u"id" : ""}, version="my version")
- validates.assert_called_once_with("my version")
- self.assertEqual(Validator.__name__, "MyVersionValidator")
-
- def test_if_a_version_is_not_provided_it_is_not_registered(self):
- with mock.patch("jsonschema.validators.validates") as validates:
- create(meta_schema={u"id" : "id"})
- self.assertFalse(validates.called)
-
- def test_extend(self):
- validators = dict(self.Validator.VALIDATORS)
- new = mock.Mock()
-
- Extended = extend(self.Validator, validators={u"a new one" : new})
-
- validators.update([(u"a new one", new)])
- self.assertEqual(Extended.VALIDATORS, validators)
- self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
-
- self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
- self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
-
-
-class TestIterErrors(unittest.TestCase):
- def setUp(self):
- self.validator = Draft3Validator({})
-
- def test_iter_errors(self):
- instance = [1, 2]
- schema = {
- u"disallow" : u"array",
- u"enum" : [["a", "b", "c"], ["d", "e", "f"]],
- u"minItems" : 3
- }
-
- got = (e.message for e in self.validator.iter_errors(instance, schema))
- expected = [
- "%r is disallowed for [1, 2]" % (schema["disallow"],),
- "[1, 2] is too short",
- "[1, 2] is not one of %r" % (schema["enum"],),
- ]
- self.assertEqual(sorted(got), sorted(expected))
-
- def test_iter_errors_multiple_failures_one_validator(self):
- instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
- schema = {
- u"properties" : {
- "foo" : {u"type" : "string"},
- "bar" : {u"minItems" : 2},
- "baz" : {u"maximum" : 10, u"enum" : [2, 4, 6, 8]},
- }
- }
-
- errors = list(self.validator.iter_errors(instance, schema))
- self.assertEqual(len(errors), 4)
-
-
-class TestValidationErrorMessages(unittest.TestCase):
- def message_for(self, instance, schema, *args, **kwargs):
- kwargs.setdefault("cls", Draft3Validator)
- with self.assertRaises(ValidationError) as e:
- validate(instance, schema, *args, **kwargs)
- return e.exception.message
-
- def test_single_type_failure(self):
- message = self.message_for(instance=1, schema={u"type" : u"string"})
- self.assertEqual(message, "1 is not of type %r" % u"string")
-
- def test_single_type_list_failure(self):
- message = self.message_for(instance=1, schema={u"type" : [u"string"]})
- self.assertEqual(message, "1 is not of type %r" % u"string")
-
- def test_multiple_type_failure(self):
- types = u"string", u"object"
- message = self.message_for(instance=1, schema={u"type" : list(types)})
- self.assertEqual(message, "1 is not of type %r, %r" % types)
-
- def test_object_without_title_type_failure(self):
- type = {u"type" : [{u"minimum" : 3}]}
- message = self.message_for(instance=1, schema={u"type" : [type]})
- self.assertEqual(message, "1 is not of type %r" % (type,))
-
- def test_object_with_name_type_failure(self):
- name = "Foo"
- schema = {u"type" : [{u"name" : name, u"minimum" : 3}]}
- message = self.message_for(instance=1, schema=schema)
- self.assertEqual(message, "1 is not of type %r" % (name,))
-
- def test_minimum(self):
- message = self.message_for(instance=1, schema={"minimum" : 2})
- self.assertEqual(message, "1 is less than the minimum of 2")
-
- def test_maximum(self):
- message = self.message_for(instance=1, schema={"maximum" : 0})
- self.assertEqual(message, "1 is greater than the maximum of 0")
-
- def test_dependencies_failure_has_single_element_not_list(self):
- depend, on = "bar", "foo"
- schema = {u"dependencies" : {depend : on}}
- message = self.message_for({"bar" : 2}, schema)
- self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
-
- def test_additionalItems_single_failure(self):
- message = self.message_for(
- [2], {u"items" : [], u"additionalItems" : False},
- )
- self.assertIn("(2 was unexpected)", message)
-
- def test_additionalItems_multiple_failures(self):
- message = self.message_for(
- [1, 2, 3], {u"items" : [], u"additionalItems" : False}
- )
- self.assertIn("(1, 2, 3 were unexpected)", message)
-
- def test_additionalProperties_single_failure(self):
- additional = "foo"
- schema = {u"additionalProperties" : False}
- message = self.message_for({additional : 2}, schema)
- self.assertIn("(%r was unexpected)" % (additional,), message)
-
- def test_additionalProperties_multiple_failures(self):
- schema = {u"additionalProperties" : False}
- message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
-
- self.assertIn(repr("foo"), message)
- self.assertIn(repr("bar"), message)
- self.assertIn("were unexpected)", message)
-
- def test_invalid_format_default_message(self):
- checker = FormatChecker(formats=())
- check_fn = mock.Mock(return_value=False)
- checker.checks(u"thing")(check_fn)
-
- schema = {u"format" : u"thing"}
- message = self.message_for("bla", schema, format_checker=checker)
-
- self.assertIn(repr("bla"), message)
- self.assertIn(repr("thing"), message)
- self.assertIn("is not a", message)
-
-
-class TestValidationErrorDetails(unittest.TestCase):
- # TODO: These really need unit tests for each individual validator, rather
- # than just these higher level tests.
- def test_anyOf(self):
- instance = 5
- schema = {
- "anyOf": [
- {"minimum": 20},
- {"type": "string"}
- ]
- }
-
- validator = Draft4Validator(schema)
- errors = list(validator.iter_errors(instance))
- self.assertEqual(len(errors), 1)
- e = errors[0]
-
- self.assertEqual(e.validator, "anyOf")
- self.assertEqual(e.validator_value, schema["anyOf"])
- self.assertEqual(e.instance, instance)
- self.assertEqual(e.schema, schema)
- self.assertIsNone(e.parent)
-
- self.assertEqual(e.path, deque([]))
- self.assertEqual(e.relative_path, deque([]))
- self.assertEqual(e.absolute_path, deque([]))
-
- self.assertEqual(e.schema_path, deque(["anyOf"]))
- self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
- self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
-
- self.assertEqual(len(e.context), 2)
-
- e1, e2 = sorted_errors(e.context)
-
- self.assertEqual(e1.validator, "minimum")
- self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
- self.assertEqual(e1.instance, instance)
- self.assertEqual(e1.schema, schema["anyOf"][0])
- self.assertIs(e1.parent, e)
-
- self.assertEqual(e1.path, deque([]))
- self.assertEqual(e1.absolute_path, deque([]))
- self.assertEqual(e1.relative_path, deque([]))
-
- self.assertEqual(e1.schema_path, deque([0, "minimum"]))
- self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
- self.assertEqual(
- e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
- )
-
- self.assertFalse(e1.context)
-
- self.assertEqual(e2.validator, "type")
- self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
- self.assertEqual(e2.instance, instance)
- self.assertEqual(e2.schema, schema["anyOf"][1])
- self.assertIs(e2.parent, e)
-
- self.assertEqual(e2.path, deque([]))
- self.assertEqual(e2.relative_path, deque([]))
- self.assertEqual(e2.absolute_path, deque([]))
-
- self.assertEqual(e2.schema_path, deque([1, "type"]))
- self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
- self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
-
- self.assertEqual(len(e2.context), 0)
-
- def test_type(self):
- instance = {"foo": 1}
- schema = {
- "type": [
- {"type": "integer"},
- {
- "type": "object",
- "properties": {
- "foo": {"enum": [2]}
- }
- }
- ]
- }
-
- validator = Draft3Validator(schema)
- errors = list(validator.iter_errors(instance))
- self.assertEqual(len(errors), 1)
- e = errors[0]
-
- self.assertEqual(e.validator, "type")
- self.assertEqual(e.validator_value, schema["type"])
- self.assertEqual(e.instance, instance)
- self.assertEqual(e.schema, schema)
- self.assertIsNone(e.parent)
-
- self.assertEqual(e.path, deque([]))
- self.assertEqual(e.relative_path, deque([]))
- self.assertEqual(e.absolute_path, deque([]))
-
- self.assertEqual(e.schema_path, deque(["type"]))
- self.assertEqual(e.relative_schema_path, deque(["type"]))
- self.assertEqual(e.absolute_schema_path, deque(["type"]))
-
- self.assertEqual(len(e.context), 2)
-
- e1, e2 = sorted_errors(e.context)
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e1.validator_value, schema["type"][0]["type"])
- self.assertEqual(e1.instance, instance)
- self.assertEqual(e1.schema, schema["type"][0])
- self.assertIs(e1.parent, e)
-
- self.assertEqual(e1.path, deque([]))
- self.assertEqual(e1.relative_path, deque([]))
- self.assertEqual(e1.absolute_path, deque([]))
-
- self.assertEqual(e1.schema_path, deque([0, "type"]))
- self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
- self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
-
- self.assertFalse(e1.context)
-
- self.assertEqual(e2.validator, "enum")
- self.assertEqual(e2.validator_value, [2])
- self.assertEqual(e2.instance, 1)
- self.assertEqual(e2.schema, {u"enum" : [2]})
- self.assertIs(e2.parent, e)
-
- self.assertEqual(e2.path, deque(["foo"]))
- self.assertEqual(e2.relative_path, deque(["foo"]))
- self.assertEqual(e2.absolute_path, deque(["foo"]))
-
- self.assertEqual(
- e2.schema_path, deque([1, "properties", "foo", "enum"]),
- )
- self.assertEqual(
- e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
- )
- self.assertEqual(
- e2.absolute_schema_path,
- deque(["type", 1, "properties", "foo", "enum"]),
- )
-
- self.assertFalse(e2.context)
-
- def test_single_nesting(self):
- instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
- schema = {
- "properties" : {
- "foo" : {"type" : "string"},
- "bar" : {"minItems" : 2},
- "baz" : {"maximum" : 10, "enum" : [2, 4, 6, 8]},
- }
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2, e3, e4 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque(["bar"]))
- self.assertEqual(e2.path, deque(["baz"]))
- self.assertEqual(e3.path, deque(["baz"]))
- self.assertEqual(e4.path, deque(["foo"]))
-
- self.assertEqual(e1.relative_path, deque(["bar"]))
- self.assertEqual(e2.relative_path, deque(["baz"]))
- self.assertEqual(e3.relative_path, deque(["baz"]))
- self.assertEqual(e4.relative_path, deque(["foo"]))
-
- self.assertEqual(e1.absolute_path, deque(["bar"]))
- self.assertEqual(e2.absolute_path, deque(["baz"]))
- self.assertEqual(e3.absolute_path, deque(["baz"]))
- self.assertEqual(e4.absolute_path, deque(["foo"]))
-
- self.assertEqual(e1.validator, "minItems")
- self.assertEqual(e2.validator, "enum")
- self.assertEqual(e3.validator, "maximum")
- self.assertEqual(e4.validator, "type")
-
- def test_multiple_nesting(self):
- instance = [1, {"foo" : 2, "bar" : {"baz" : [1]}}, "quux"]
- schema = {
- "type" : "string",
- "items" : {
- "type" : ["string", "object"],
- "properties" : {
- "foo" : {"enum" : [1, 3]},
- "bar" : {
- "type" : "array",
- "properties" : {
- "bar" : {"required" : True},
- "baz" : {"minItems" : 2},
- }
- }
- }
- }
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque([]))
- self.assertEqual(e2.path, deque([0]))
- self.assertEqual(e3.path, deque([1, "bar"]))
- self.assertEqual(e4.path, deque([1, "bar", "bar"]))
- self.assertEqual(e5.path, deque([1, "bar", "baz"]))
- self.assertEqual(e6.path, deque([1, "foo"]))
-
- self.assertEqual(e1.schema_path, deque(["type"]))
- self.assertEqual(e2.schema_path, deque(["items", "type"]))
- self.assertEqual(
- list(e3.schema_path), ["items", "properties", "bar", "type"],
- )
- self.assertEqual(
- list(e4.schema_path),
- ["items", "properties", "bar", "properties", "bar", "required"],
- )
- self.assertEqual(
- list(e5.schema_path),
- ["items", "properties", "bar", "properties", "baz", "minItems"]
- )
- self.assertEqual(
- list(e6.schema_path), ["items", "properties", "foo", "enum"],
- )
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e2.validator, "type")
- self.assertEqual(e3.validator, "type")
- self.assertEqual(e4.validator, "required")
- self.assertEqual(e5.validator, "minItems")
- self.assertEqual(e6.validator, "enum")
-
- def test_additionalProperties(self):
- instance = {"bar": "bar", "foo": 2}
- schema = {
- "additionalProperties" : {"type": "integer", "minimum": 5}
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque(["bar"]))
- self.assertEqual(e2.path, deque(["foo"]))
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e2.validator, "minimum")
-
- def test_patternProperties(self):
- instance = {"bar": 1, "foo": 2}
- schema = {
- "patternProperties" : {
- "bar": {"type": "string"},
- "foo": {"minimum": 5}
- }
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque(["bar"]))
- self.assertEqual(e2.path, deque(["foo"]))
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e2.validator, "minimum")
-
- def test_additionalItems(self):
- instance = ["foo", 1]
- schema = {
- "items": [],
- "additionalItems" : {"type": "integer", "minimum": 5}
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque([0]))
- self.assertEqual(e2.path, deque([1]))
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e2.validator, "minimum")
-
- def test_additionalItems_with_items(self):
- instance = ["foo", "bar", 1]
- schema = {
- "items": [{}],
- "additionalItems" : {"type": "integer", "minimum": 5}
- }
-
- validator = Draft3Validator(schema)
- errors = validator.iter_errors(instance)
- e1, e2 = sorted_errors(errors)
-
- self.assertEqual(e1.path, deque([1]))
- self.assertEqual(e2.path, deque([2]))
-
- self.assertEqual(e1.validator, "type")
- self.assertEqual(e2.validator, "minimum")
-
-
-class ValidatorTestMixin(object):
- def setUp(self):
- self.instance = mock.Mock()
- self.schema = {}
- self.resolver = mock.Mock()
- self.validator = self.validator_class(self.schema)
-
- def test_valid_instances_are_valid(self):
- errors = iter([])
-
- with mock.patch.object(
- self.validator, "iter_errors", return_value=errors,
- ):
- self.assertTrue(
- self.validator.is_valid(self.instance, self.schema)
- )
-
- def test_invalid_instances_are_not_valid(self):
- errors = iter([mock.Mock()])
-
- with mock.patch.object(
- self.validator, "iter_errors", return_value=errors,
- ):
- self.assertFalse(
- self.validator.is_valid(self.instance, self.schema)
- )
-
- def test_non_existent_properties_are_ignored(self):
- instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
- validate(instance=instance, schema={my_property : my_value})
-
- def test_it_creates_a_ref_resolver_if_not_provided(self):
- self.assertIsInstance(self.validator.resolver, RefResolver)
-
- def test_it_delegates_to_a_ref_resolver(self):
- resolver = RefResolver("", {})
- schema = {"$ref" : mock.Mock()}
-
- @contextmanager
- def resolving():
- yield {"type": "integer"}
-
- with mock.patch.object(resolver, "resolving") as resolve:
- resolve.return_value = resolving()
- with self.assertRaises(ValidationError):
- self.validator_class(schema, resolver=resolver).validate(None)
-
- resolve.assert_called_once_with(schema["$ref"])
-
- def test_is_type_is_true_for_valid_type(self):
- self.assertTrue(self.validator.is_type("foo", "string"))
-
- def test_is_type_is_false_for_invalid_type(self):
- self.assertFalse(self.validator.is_type("foo", "array"))
-
- def test_is_type_evades_bool_inheriting_from_int(self):
- self.assertFalse(self.validator.is_type(True, "integer"))
- self.assertFalse(self.validator.is_type(True, "number"))
-
- def test_is_type_raises_exception_for_unknown_type(self):
- with self.assertRaises(UnknownType):
- self.validator.is_type("foo", object())
-
-
-class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
- validator_class = Draft3Validator
-
- def test_is_type_is_true_for_any_type(self):
- self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
-
- def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
- self.assertTrue(self.validator.is_type(True, "boolean"))
- self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
-
- def test_non_string_custom_types(self):
- schema = {'type': [None]}
- cls = self.validator_class(schema, types={None: type(None)})
- cls.validate(None, schema)
-
-
-class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
- validator_class = Draft4Validator
-
-
-class TestBuiltinFormats(unittest.TestCase):
- """
- The built-in (specification-defined) formats do not raise type errors.
-
- If an instance or value is not a string, it should be ignored.
-
- """
-
-
-for format in FormatChecker.checkers:
- def test(self, format=format):
- v = Draft4Validator({"format": format}, format_checker=FormatChecker())
- v.validate(123)
-
- name = "test_{0}_ignores_non_strings".format(format)
- test.__name__ = name
- setattr(TestBuiltinFormats, name, test)
- del test # Ugh py.test. Stop discovering top level tests.
-
-
-class TestValidatorFor(unittest.TestCase):
- def test_draft_3(self):
- schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
- self.assertIs(validator_for(schema), Draft3Validator)
-
- schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
- self.assertIs(validator_for(schema), Draft3Validator)
-
- def test_draft_4(self):
- schema = {"$schema" : "http://json-schema.org/draft-04/schema"}
- self.assertIs(validator_for(schema), Draft4Validator)
-
- schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
- self.assertIs(validator_for(schema), Draft4Validator)
-
- def test_custom_validator(self):
- Validator = create(meta_schema={"id" : "meta schema id"}, version="12")
- schema = {"$schema" : "meta schema id"}
- self.assertIs(validator_for(schema), Validator)
-
- def test_validator_for_jsonschema_default(self):
- self.assertIs(validator_for({}), Draft4Validator)
-
- def test_validator_for_custom_default(self):
- self.assertIs(validator_for({}, default=None), None)
-
-
-class TestValidate(unittest.TestCase):
- def test_draft3_validator_is_chosen(self):
- schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
- with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
- validate({}, schema)
- chk_schema.assert_called_once_with(schema)
- # Make sure it works without the empty fragment
- schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
- with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
- validate({}, schema)
- chk_schema.assert_called_once_with(schema)
-
- def test_draft4_validator_is_chosen(self):
- schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
- with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
- validate({}, schema)
- chk_schema.assert_called_once_with(schema)
-
- def test_draft4_validator_is_the_default(self):
- with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
- validate({}, {})
- chk_schema.assert_called_once_with({})
-
-
-class TestRefResolver(unittest.TestCase):
-
- base_uri = ""
- stored_uri = "foo://stored"
- stored_schema = {"stored" : "schema"}
-
- def setUp(self):
- self.referrer = {}
- self.store = {self.stored_uri : self.stored_schema}
- self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
-
- def test_it_does_not_retrieve_schema_urls_from_the_network(self):
- ref = Draft3Validator.META_SCHEMA["id"]
- with mock.patch.object(self.resolver, "resolve_remote") as remote:
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
- self.assertFalse(remote.called)
-
- def test_it_resolves_local_refs(self):
- ref = "#/properties/foo"
- self.referrer["properties"] = {"foo" : object()}
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, self.referrer["properties"]["foo"])
-
- def test_it_resolves_local_refs_with_id(self):
- schema = {"id": "foo://bar/schema#", "a": {"foo": "bar"}}
- resolver = RefResolver.from_schema(schema)
- with resolver.resolving("#/a") as resolved:
- self.assertEqual(resolved, schema["a"])
- with resolver.resolving("foo://bar/schema#/a") as resolved:
- self.assertEqual(resolved, schema["a"])
-
- def test_it_retrieves_stored_refs(self):
- with self.resolver.resolving(self.stored_uri) as resolved:
- self.assertIs(resolved, self.stored_schema)
-
- self.resolver.store["cached_ref"] = {"foo" : 12}
- with self.resolver.resolving("cached_ref#/foo") as resolved:
- self.assertEqual(resolved, 12)
-
- def test_it_retrieves_unstored_refs_via_requests(self):
- ref = "http://bar#baz"
- schema = {"baz" : 12}
-
- with mock.patch("jsonschema.validators.requests") as requests:
- requests.get.return_value.json.return_value = schema
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, 12)
- requests.get.assert_called_once_with("http://bar")
-
- def test_it_retrieves_unstored_refs_via_urlopen(self):
- ref = "http://bar#baz"
- schema = {"baz" : 12}
-
- with mock.patch("jsonschema.validators.requests", None):
- with mock.patch("jsonschema.validators.urlopen") as urlopen:
- urlopen.return_value.read.return_value = (
- json.dumps(schema).encode("utf8"))
- with self.resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, 12)
- urlopen.assert_called_once_with("http://bar")
-
- def test_it_can_construct_a_base_uri_from_a_schema(self):
- schema = {"id" : "foo"}
- resolver = RefResolver.from_schema(schema)
- self.assertEqual(resolver.base_uri, "foo")
- with resolver.resolving("") as resolved:
- self.assertEqual(resolved, schema)
- with resolver.resolving("#") as resolved:
- self.assertEqual(resolved, schema)
- with resolver.resolving("foo") as resolved:
- self.assertEqual(resolved, schema)
- with resolver.resolving("foo#") as resolved:
- self.assertEqual(resolved, schema)
-
- def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
- schema = {}
- resolver = RefResolver.from_schema(schema)
- self.assertEqual(resolver.base_uri, "")
- with resolver.resolving("") as resolved:
- self.assertEqual(resolved, schema)
- with resolver.resolving("#") as resolved:
- self.assertEqual(resolved, schema)
-
- def test_custom_uri_scheme_handlers(self):
- schema = {"foo": "bar"}
- ref = "foo://bar"
- foo_handler = mock.Mock(return_value=schema)
- resolver = RefResolver("", {}, handlers={"foo": foo_handler})
- with resolver.resolving(ref) as resolved:
- self.assertEqual(resolved, schema)
- foo_handler.assert_called_once_with(ref)
-
- def test_cache_remote_on(self):
- ref = "foo://bar"
- foo_handler = mock.Mock()
- resolver = RefResolver(
- "", {}, cache_remote=True, handlers={"foo" : foo_handler},
- )
- with resolver.resolving(ref):
- pass
- with resolver.resolving(ref):
- pass
- foo_handler.assert_called_once_with(ref)
-
- def test_cache_remote_off(self):
- ref = "foo://bar"
- foo_handler = mock.Mock()
- resolver = RefResolver(
- "", {}, cache_remote=False, handlers={"foo" : foo_handler},
- )
- with resolver.resolving(ref):
- pass
- with resolver.resolving(ref):
- pass
- self.assertEqual(foo_handler.call_count, 2)
-
- def test_if_you_give_it_junk_you_get_a_resolution_error(self):
- ref = "foo://bar"
- foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
- resolver = RefResolver("", {}, handlers={"foo" : foo_handler})
- with self.assertRaises(RefResolutionError) as err:
- with resolver.resolving(ref):
- pass
- self.assertEqual(str(err.exception), "Oh no! What's this?")
-
-
-def sorted_errors(errors):
- def key(error):
- return (
- [str(e) for e in error.path],
- [str(e) for e in error.schema_path]
- )
- return sorted(errors, key=key)
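These deleted tests covered create/extend and custom validator callables; that public extension pattern survives the upgrade and is sketched below. The "isEven" keyword and its callable are illustrative; callables receive (validator, value, instance, schema) and yield ValidationErrors:

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import ValidationError
    from jsonschema.validators import extend

    def is_even(validator, value, instance, schema):
        if value and isinstance(instance, int) and instance % 2:
            yield ValidationError("%r is not even" % (instance,))

    EvenValidator = extend(Draft4Validator, validators={"isEven": is_even})
    print(EvenValidator({"isEven": True}).is_valid(2))  # True
    print(EvenValidator({"isEven": True}).is_valid(3))  # False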
diff --git a/lib/spack/external/jsonschema/validators.py b/lib/spack/external/jsonschema/validators.py
index 30c3515398..1dc420c70d 100644
--- a/lib/spack/external/jsonschema/validators.py
+++ b/lib/spack/external/jsonschema/validators.py
@@ -1,26 +1,107 @@
+"""
+Creation and extension of validators, with implementations for existing drafts.
+"""
from __future__ import division
+from warnings import warn
import contextlib
import json
import numbers
-requests = None
+from six import add_metaclass
-from jsonschema import _utils, _validators
+from jsonschema import (
+ _legacy_validators,
+ _types,
+ _utils,
+ _validators,
+ exceptions,
+)
from jsonschema.compat import (
- Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
- str_types, int_types, iteritems,
+ Sequence,
+ int_types,
+ iteritems,
+ lru_cache,
+ str_types,
+ unquote,
+ urldefrag,
+ urljoin,
+ urlopen,
+ urlsplit,
)
-from jsonschema.exceptions import ErrorTree # Backwards compatibility # noqa
-from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
+# Sigh. https://gitlab.com/pycqa/flake8/issues/280
+# https://github.com/pyga/ebb-lint/issues/7
+# Imported for backwards compatibility.
+from jsonschema.exceptions import ErrorTree
+ErrorTree
+
+
+class _DontDoThat(Exception):
+ """
+    Raised when a Validator with a non-default type checker is misused.
+
+    Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
+    exist precisely for the cases that DEFAULT_TYPES cannot represent.
+ """
+
+ def __str__(self):
+ return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
-_unset = _utils.Unset()
validators = {}
meta_schemas = _utils.URIDict()
+def _generate_legacy_type_checks(types=()):
+ """
+ Generate newer-style type checks out of JSON-type-name-to-type mappings.
+
+ Arguments:
+
+ types (dict):
+
+ A mapping of type names to their Python types
+
+ Returns:
+
+ A dictionary of definitions to pass to `TypeChecker`
+ """
+ types = dict(types)
+
+ def gen_type_check(pytypes):
+ pytypes = _utils.flatten(pytypes)
+
+ def type_check(checker, instance):
+ if isinstance(instance, bool):
+ if bool not in pytypes:
+ return False
+ return isinstance(instance, pytypes)
+
+ return type_check
+
+ definitions = {}
+ for typename, pytypes in iteritems(types):
+ definitions[typename] = gen_type_check(pytypes)
+
+ return definitions
+
+
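A small illustration of the generated checks' bool special-case (bool subclasses int, so True would otherwise pass an "integer" check); _generate_legacy_type_checks is the private helper defined just above, imported here only for demonstration:

    from jsonschema.validators import _generate_legacy_type_checks

    checks = _generate_legacy_type_checks({u"integer": int, u"boolean": bool})
    print(checks[u"integer"](None, 5))     # True (the checker argument is unused)
    print(checks[u"integer"](None, True))  # False: bool is excluded
    print(checks[u"boolean"](None, True))  # True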
+_DEPRECATED_DEFAULT_TYPES = {
+ u"array": list,
+ u"boolean": bool,
+ u"integer": int_types,
+ u"null": type(None),
+ u"number": numbers.Number,
+ u"object": dict,
+ u"string": str_types,
+}
+_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
+ type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
+)
+
+
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
@@ -28,40 +109,180 @@ def validates(version):
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
- :argument str version: an identifier to use as the version's name
- :returns: a class decorator to decorate the validator with the version
+ Arguments:
+
+ version (str):
+
+ An identifier to use as the version's name
+
+ Returns:
+
+ collections.Callable:
+ a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
- if u"id" in cls.META_SCHEMA:
- meta_schemas[cls.META_SCHEMA[u"id"]] = cls
+ meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
+ if meta_schema_id:
+ meta_schemas[meta_schema_id] = cls
return cls
return _validates
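validates is usually reached indirectly through create(version=...), whose registration then drives validator_for. A sketch against the post-upgrade module; the meta schema id is a made-up value:

    from jsonschema import validators

    CustomValidator = validators.create(
        meta_schema={u"$id": "tag:example.com,2019:custom-meta"},
        validators=validators.Draft7Validator.VALIDATORS,
        version="custom",
    )
    schema = {u"$schema": "tag:example.com,2019:custom-meta"}
    assert validators.validator_for(schema) is CustomValidator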
-def create(meta_schema, validators=(), version=None, default_types=None): # noqa
- if default_types is None:
- default_types = {
- u"array" : list, u"boolean" : bool, u"integer" : int_types,
- u"null" : type(None), u"number" : numbers.Number, u"object" : dict,
- u"string" : str_types,
- }
+def _DEFAULT_TYPES(self):
+ if self._CREATED_WITH_DEFAULT_TYPES is None:
+ raise _DontDoThat()
+
+ warn(
+ (
+ "The DEFAULT_TYPES attribute is deprecated. "
+ "See the type checker attached to this validator instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self._DEFAULT_TYPES
+
+
+class _DefaultTypesDeprecatingMetaClass(type):
+ DEFAULT_TYPES = property(_DEFAULT_TYPES)
+
+
+def _id_of(schema):
+ if schema is True or schema is False:
+ return u""
+ return schema.get(u"$id", u"")
+
+
+def create(
+ meta_schema,
+ validators=(),
+ version=None,
+ default_types=None,
+ type_checker=None,
+ id_of=_id_of,
+):
+ """
+ Create a new validator class.
+
+ Arguments:
+
+ meta_schema (collections.Mapping):
+
+ the meta schema for the new validator class
+
+ validators (collections.Mapping):
+
+ a mapping from names to callables, where each callable will
+ validate the schema property with the given name.
+
+ Each callable should take 4 arguments:
+
+ 1. a validator instance,
+ 2. the value of the property being validated within the
+ instance
+ 3. the instance
+ 4. the schema
+
+ version (str):
+
+ an identifier for the version that this validator class will
+ validate. If provided, the returned validator class will
+ have its ``__name__`` set to include the version, and also
+ will have `jsonschema.validators.validates` automatically
+ called for the given version.
+
+    type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :validator:`type` validator.
+
+ If unprovided, a `jsonschema.TypeChecker` will be created
+ with a set of default types typical of JSON Schema drafts.
+
+ default_types (collections.Mapping):
+
+ .. deprecated:: 3.0.0
+
+ Please use the type_checker argument instead.
+
+ If set, it provides mappings of JSON types to Python types
+ that will be converted to functions and redefined in this
+ object's `jsonschema.TypeChecker`.
+
+ id_of (collections.Callable):
+
+        A function that, given a schema, returns its ID.
+
+ Returns:
+
+ a new `jsonschema.IValidator` class
+ """
+
+ if default_types is not None:
+ if type_checker is not None:
+ raise TypeError(
+ "Do not specify default_types when providing a type checker.",
+ )
+ _created_with_default_types = True
+ warn(
+ (
+ "The default_types argument is deprecated. "
+ "Use the type_checker argument instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ type_checker = _types.TypeChecker(
+ type_checkers=_generate_legacy_type_checks(default_types),
+ )
+ else:
+ default_types = _DEPRECATED_DEFAULT_TYPES
+ if type_checker is None:
+ _created_with_default_types = False
+ type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
+ elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
+ _created_with_default_types = False
+ else:
+ _created_with_default_types = None
+
+ @add_metaclass(_DefaultTypesDeprecatingMetaClass)
class Validator(object):
+
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
- DEFAULT_TYPES = dict(default_types)
+ TYPE_CHECKER = type_checker
+ ID_OF = staticmethod(id_of)
+
+ DEFAULT_TYPES = property(_DEFAULT_TYPES)
+ _DEFAULT_TYPES = dict(default_types)
+ _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
def __init__(
- self, schema, types=(), resolver=None, format_checker=None,
+ self,
+ schema,
+ types=(),
+ resolver=None,
+ format_checker=None,
):
- self._types = dict(self.DEFAULT_TYPES)
- self._types.update(types)
+ if types:
+ warn(
+ (
+ "The types argument is deprecated. Provide "
+ "a type_checker to jsonschema.validators.extend "
+ "instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
+ _generate_legacy_type_checks(types),
+ )
if resolver is None:
- resolver = RefResolver.from_schema(schema)
+ resolver = RefResolver.from_schema(schema, id_of=id_of)
self.resolver = resolver
self.format_checker = format_checker
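The deprecated types argument above is rewritten into type-checker redefinitions; the supported replacement is to redefine the checker and extend the class. A sketch, with Point as a made-up application type:

    from jsonschema import Draft4Validator
    from jsonschema.validators import extend

    class Point(object):
        pass

    def is_object_or_point(checker, instance):
        return (
            Draft4Validator.TYPE_CHECKER.is_type(instance, "object")
            or isinstance(instance, Point)
        )

    PointValidator = extend(
        Draft4Validator,
        type_checker=Draft4Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_point,
        ),
    )
    PointValidator({"type": "object"}).validate(Point())  # passes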
@@ -70,13 +291,28 @@ def create(meta_schema, validators=(), version=None, default_types=None): # noq
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
- raise SchemaError.create_from(error)
+ raise exceptions.SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
- with self.resolver.in_scope(_schema.get(u"id", u"")):
+ if _schema is True:
+ return
+ elif _schema is False:
+ yield exceptions.ValidationError(
+ "False schema does not allow %r" % (instance,),
+ validator=None,
+ validator_value=None,
+ instance=instance,
+ schema=_schema,
+ )
+ return
+
+ scope = id_of(_schema)
+ if scope:
+ self.resolver.push_scope(scope)
+ try:
ref = _schema.get(u"$ref")
if ref is not None:
validators = [(u"$ref", ref)]
@@ -100,6 +336,9 @@ def create(meta_schema, validators=(), version=None, default_types=None): # noq
if k != u"$ref":
error.schema_path.appendleft(k)
yield error
+ finally:
+ if scope:
+ self.resolver.pop_scope()
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
@@ -114,19 +353,10 @@ def create(meta_schema, validators=(), version=None, default_types=None): # noq
raise error
def is_type(self, instance, type):
- if type not in self._types:
- raise UnknownType(type, instance, self.schema)
- pytypes = self._types[type]
-
- # bool inherits from int, so ensure bools aren't reported as ints
- if isinstance(instance, bool):
- pytypes = _utils.flatten(pytypes)
- is_number = any(
- issubclass(pytype, numbers.Number) for pytype in pytypes
- )
- if is_number and bool not in pytypes:
- return False
- return isinstance(instance, pytypes)
+ try:
+ return self.TYPE_CHECKER.is_type(instance, type)
+ except exceptions.UndefinedTypeCheck:
+ raise exceptions.UnknownType(type, instance, self.schema)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
@@ -139,104 +369,290 @@ def create(meta_schema, validators=(), version=None, default_types=None): # noq
return Validator
-def extend(validator, validators, version=None):
+def extend(validator, validators=(), version=None, type_checker=None):
+ """
+ Create a new validator class by extending an existing one.
+
+ Arguments:
+
+ validator (jsonschema.IValidator):
+
+ an existing validator class
+
+ validators (collections.Mapping):
+
+ a mapping of new validator callables to extend with, whose
+ structure is as in `create`.
+
+ .. note::
+
+ Any validator callables with the same name as an
+ existing one will (silently) replace the old validator
+ callable entirely, effectively overriding any validation
+ done in the "parent" validator class.
+
+ If you wish to instead extend the behavior of a parent's
+ validator callable, delegate and call it directly in
+ the new validator function by retrieving it using
+ ``OldValidator.VALIDATORS["validator_name"]``.
+
+ version (str):
+
+ a version for the new validator class
+
+ type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :validator:`type` validator.
+
+ If unprovided, the type checker of the extended
+        `jsonschema.IValidator` will be carried along.
+
+ Returns:
+
+ a new `jsonschema.IValidator` class extending the one provided
+
+ .. note:: Meta Schemas
+
+ The new validator class will have its parent's meta schema.
+
+ If you wish to change or extend the meta schema in the new
+ validator class, modify ``META_SCHEMA`` directly on the returned
+ class. Note that no implicit copying is done, so a copy should
+ likely be made before modifying it, in order to not affect the
+ old validator.
+ """
+
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
+
+ if type_checker is None:
+ type_checker = validator.TYPE_CHECKER
+ elif validator._CREATED_WITH_DEFAULT_TYPES:
+ raise TypeError(
+ "Cannot extend a validator created with default_types "
+ "with a type_checker. Update the validator to use a "
+ "type_checker when created."
+ )
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
- default_types=validator.DEFAULT_TYPES,
+ type_checker=type_checker,
+ id_of=validator.ID_OF,
)
Draft3Validator = create(
meta_schema=_utils.load_schema("draft3"),
validators={
- u"$ref" : _validators.ref,
- u"additionalItems" : _validators.additionalItems,
- u"additionalProperties" : _validators.additionalProperties,
- u"dependencies" : _validators.dependencies,
- u"disallow" : _validators.disallow_draft3,
- u"divisibleBy" : _validators.multipleOf,
- u"enum" : _validators.enum,
- u"extends" : _validators.extends_draft3,
- u"format" : _validators.format,
- u"items" : _validators.items,
- u"maxItems" : _validators.maxItems,
- u"maxLength" : _validators.maxLength,
- u"maximum" : _validators.maximum,
- u"minItems" : _validators.minItems,
- u"minLength" : _validators.minLength,
- u"minimum" : _validators.minimum,
- u"multipleOf" : _validators.multipleOf,
- u"pattern" : _validators.pattern,
- u"patternProperties" : _validators.patternProperties,
- u"properties" : _validators.properties_draft3,
- u"type" : _validators.type_draft3,
- u"uniqueItems" : _validators.uniqueItems,
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"dependencies": _legacy_validators.dependencies_draft3,
+ u"disallow": _legacy_validators.disallow_draft3,
+ u"divisibleBy": _validators.multipleOf,
+ u"enum": _validators.enum,
+ u"extends": _legacy_validators.extends_draft3,
+ u"format": _validators.format,
+ u"items": _legacy_validators.items_draft3_draft4,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maximum": _legacy_validators.maximum_draft3_draft4,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minimum": _legacy_validators.minimum_draft3_draft4,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _legacy_validators.properties_draft3,
+ u"type": _legacy_validators.type_draft3,
+ u"uniqueItems": _validators.uniqueItems,
},
+ type_checker=_types.draft3_type_checker,
version="draft3",
+ id_of=lambda schema: schema.get(u"id", ""),
)
Draft4Validator = create(
meta_schema=_utils.load_schema("draft4"),
validators={
- u"$ref" : _validators.ref,
- u"additionalItems" : _validators.additionalItems,
- u"additionalProperties" : _validators.additionalProperties,
- u"allOf" : _validators.allOf_draft4,
- u"anyOf" : _validators.anyOf_draft4,
- u"dependencies" : _validators.dependencies,
- u"enum" : _validators.enum,
- u"format" : _validators.format,
- u"items" : _validators.items,
- u"maxItems" : _validators.maxItems,
- u"maxLength" : _validators.maxLength,
- u"maxProperties" : _validators.maxProperties_draft4,
- u"maximum" : _validators.maximum,
- u"minItems" : _validators.minItems,
- u"minLength" : _validators.minLength,
- u"minProperties" : _validators.minProperties_draft4,
- u"minimum" : _validators.minimum,
- u"multipleOf" : _validators.multipleOf,
- u"not" : _validators.not_draft4,
- u"oneOf" : _validators.oneOf_draft4,
- u"pattern" : _validators.pattern,
- u"patternProperties" : _validators.patternProperties,
- u"properties" : _validators.properties_draft4,
- u"required" : _validators.required_draft4,
- u"type" : _validators.type_draft4,
- u"uniqueItems" : _validators.uniqueItems,
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"format": _validators.format,
+ u"items": _legacy_validators.items_draft3_draft4,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _legacy_validators.maximum_draft3_draft4,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _legacy_validators.minimum_draft3_draft4,
+ u"multipleOf": _validators.multipleOf,
+ u"not": _validators.not_,
+ u"oneOf": _validators.oneOf,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
},
+ type_checker=_types.draft4_type_checker,
version="draft4",
+ id_of=lambda schema: schema.get(u"id", ""),
+)
+
+Draft6Validator = create(
+ meta_schema=_utils.load_schema("draft6"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"const": _validators.const,
+ u"contains": _validators.contains,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"exclusiveMaximum": _validators.exclusiveMaximum,
+ u"exclusiveMinimum": _validators.exclusiveMinimum,
+ u"format": _validators.format,
+ u"items": _validators.items,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _validators.maximum,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _validators.minimum,
+ u"multipleOf": _validators.multipleOf,
+ u"not": _validators.not_,
+ u"oneOf": _validators.oneOf,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"propertyNames": _validators.propertyNames,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft6_type_checker,
+ version="draft6",
+)
+
+Draft7Validator = create(
+ meta_schema=_utils.load_schema("draft7"),
+ validators={
+ u"$ref": _validators.ref,
+ u"additionalItems": _validators.additionalItems,
+ u"additionalProperties": _validators.additionalProperties,
+ u"allOf": _validators.allOf,
+ u"anyOf": _validators.anyOf,
+ u"const": _validators.const,
+ u"contains": _validators.contains,
+ u"dependencies": _validators.dependencies,
+ u"enum": _validators.enum,
+ u"exclusiveMaximum": _validators.exclusiveMaximum,
+ u"exclusiveMinimum": _validators.exclusiveMinimum,
+ u"format": _validators.format,
+ u"if": _validators.if_,
+ u"items": _validators.items,
+ u"maxItems": _validators.maxItems,
+ u"maxLength": _validators.maxLength,
+ u"maxProperties": _validators.maxProperties,
+ u"maximum": _validators.maximum,
+ u"minItems": _validators.minItems,
+ u"minLength": _validators.minLength,
+ u"minProperties": _validators.minProperties,
+ u"minimum": _validators.minimum,
+ u"multipleOf": _validators.multipleOf,
+ u"oneOf": _validators.oneOf,
+ u"not": _validators.not_,
+ u"pattern": _validators.pattern,
+ u"patternProperties": _validators.patternProperties,
+ u"properties": _validators.properties,
+ u"propertyNames": _validators.propertyNames,
+ u"required": _validators.required,
+ u"type": _validators.type,
+ u"uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft7_type_checker,
+ version="draft7",
)
+_LATEST_VERSION = Draft7Validator
+
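The boolean-schema branch added to iter_errors above reflects draft 6's True/False schemas; the observable behavior, sketched:

    from jsonschema import Draft6Validator

    assert Draft6Validator(True).is_valid({"anything": "goes"})
    assert not Draft6Validator(False).is_valid({"anything": "goes"})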
class RefResolver(object):
"""
Resolve JSON References.
- :argument str base_uri: URI of the referring document
- :argument referrer: the actual referring document
- :argument dict store: a mapping from URIs to documents to cache
- :argument bool cache_remote: whether remote refs should be cached after
- first resolution
- :argument dict handlers: a mapping from URI schemes to functions that
- should be used to retrieve them
+ Arguments:
+
+ base_uri (str):
+
+ The URI of the referring document
+
+ referrer:
+
+ The actual referring document
+
+ store (dict):
+
+ A mapping from URIs to documents to cache
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
+
+ handlers (dict):
+
+ A mapping from URI schemes to functions that should be used
+ to retrieve them
+
+        urljoin_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of joining
+ the resolution scope to subscopes.
+
+ remote_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of
+ resolved remote URLs.
+
+ Attributes:
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
"""
def __init__(
- self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
+ self,
+ base_uri,
+ referrer,
+ store=(),
+ cache_remote=True,
+ handlers=(),
+ urljoin_cache=None,
+ remote_cache=None,
):
- self.base_uri = base_uri
- self.resolution_scope = base_uri
- # This attribute is not used, it is for backwards compatibility
+ if urljoin_cache is None:
+ urljoin_cache = lru_cache(1024)(urljoin)
+ if remote_cache is None:
+ remote_cache = lru_cache(1024)(self.resolve_from_url)
+
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
+ self._scopes_stack = [base_uri]
self.store = _utils.URIDict(
(id, validator.META_SCHEMA)
for id, validator in iteritems(meta_schemas)
@@ -244,64 +660,139 @@ class RefResolver(object):
self.store.update(store)
self.store[base_uri] = referrer
+ self._urljoin_cache = urljoin_cache
+ self._remote_cache = remote_cache
+
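# A hedged construction sketch: the handlers mapping lets a custom
# scheme bypass network retrieval entirely (the "local" scheme and the
# documents dict here are hypothetical):
#
#     >>> documents = {"local://defs": {"type": "object"}}
#     >>> resolver = RefResolver(
#     ...     base_uri="local://defs",
#     ...     referrer=documents["local://defs"],
#     ...     handlers={"local": documents.__getitem__},
#     ... )
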
@classmethod
- def from_schema(cls, schema, *args, **kwargs):
+ def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
- :argument schema schema: the referring schema
- :rtype: :class:`RefResolver`
+ Arguments:
+
+ schema:
+
+ the referring schema
+
+ Returns:
+
+ `RefResolver`
+ """
+
+ return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
+
+ def push_scope(self, scope):
+ """
+ Enter a given sub-scope.
+
+ Treats further dereferences as being performed underneath the
+ given scope.
+ """
+ self._scopes_stack.append(
+ self._urljoin_cache(self.resolution_scope, scope),
+ )
+
+ def pop_scope(self):
"""
+ Exit the most recently entered scope.
- return cls(schema.get(u"id", u""), schema, *args, **kwargs)
+ Treats further dereferences as being performed underneath the
+ original scope.
+
+ Don't call this method more times than `push_scope` has been
+ called.
+ """
+ try:
+ self._scopes_stack.pop()
+ except IndexError:
+ raise exceptions.RefResolutionError(
+ "Failed to pop the scope from an empty stack. "
+ "`pop_scope()` should only be called once for every "
+ "`push_scope()`"
+ )
+
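# A quick sketch of the scope stack defined above (URIs illustrative):
#
#     >>> resolver = RefResolver(base_uri="http://example.com/", referrer={})
#     >>> resolver.push_scope("schemas/a.json")
#     >>> resolver.resolution_scope
#     'http://example.com/schemas/a.json'
#     >>> resolver.pop_scope()
#     >>> resolver.resolution_scope
#     'http://example.com/'
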
+ @property
+ def resolution_scope(self):
+ """
+ Retrieve the current resolution scope.
+ """
+ return self._scopes_stack[-1]
+
+ @property
+ def base_uri(self):
+ """
+ Retrieve the current base URI, not including any fragment.
+ """
+ uri, _ = urldefrag(self.resolution_scope)
+ return uri
@contextlib.contextmanager
def in_scope(self, scope):
- old_scope = self.resolution_scope
- self.resolution_scope = urljoin(old_scope, scope)
+ """
+ Temporarily enter the given scope for the duration of the context.
+ """
+ self.push_scope(scope)
try:
yield
finally:
- self.resolution_scope = old_scope
+ self.pop_scope()
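# The context-manager form pairs the two calls automatically, so a
# scope cannot be leaked on error (continuing the sketch above):
#
#     >>> with resolver.in_scope("schemas/b.json"):
#     ...     print(resolver.resolution_scope)
#     http://example.com/schemas/b.json
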
@contextlib.contextmanager
def resolving(self, ref):
"""
- Context manager which resolves a JSON ``ref`` and enters the
- resolution scope of this ref.
+ Resolve the given ``ref`` and enter its resolution scope.
+
+ Exits the scope on exit of this context manager.
+
+ Arguments:
- :argument str ref: reference to resolve
+ ref (str):
+ The reference to resolve
"""
- full_uri = urljoin(self.resolution_scope, ref)
- uri, fragment = urldefrag(full_uri)
- if not uri:
- uri = self.base_uri
+ url, resolved = self.resolve(ref)
+ self.push_scope(url)
+ try:
+ yield resolved
+ finally:
+ self.pop_scope()
- if uri in self.store:
- document = self.store[uri]
- else:
+ def resolve(self, ref):
+ """
+ Resolve the given reference.
+ """
+ url = self._urljoin_cache(self.resolution_scope, ref)
+ return url, self._remote_cache(url)
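# Resolving a local JSON pointer against the referrer document, as a
# short sketch (the schema is illustrative):
#
#     >>> schema = {"definitions": {"foo": {"type": "string"}}}
#     >>> resolver = RefResolver.from_schema(schema)
#     >>> url, resolved = resolver.resolve("#/definitions/foo")
#     >>> resolved
#     {'type': 'string'}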
+
+ def resolve_from_url(self, url):
+ """
+ Resolve the given remote URL.
+ """
+ url, fragment = urldefrag(url)
+ try:
+ document = self.store[url]
+ except KeyError:
try:
- document = self.resolve_remote(uri)
+ document = self.resolve_remote(url)
except Exception as exc:
- raise RefResolutionError(exc)
+ raise exceptions.RefResolutionError(exc)
- old_base_uri, self.base_uri = self.base_uri, uri
- try:
- with self.in_scope(uri):
- yield self.resolve_fragment(document, fragment)
- finally:
- self.base_uri = old_base_uri
+ return self.resolve_fragment(document, fragment)
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
- :argument document: the referrant document
- :argument str fragment: a URI fragment to resolve within it
+ Arguments:
+
+ document:
+ The referent document
+
+ fragment (str):
+
+ A URI fragment to resolve within it
"""
fragment = fragment.lstrip(u"/")
@@ -319,7 +810,7 @@ class RefResolver(object):
try:
document = document[part]
except (TypeError, LookupError):
- raise RefResolutionError(
+ raise exceptions.RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
@@ -329,8 +820,9 @@ class RefResolver(object):
"""
Resolve a remote ``uri``.
- Does not check the store first, but stores the retrieved document in
- the store if :attr:`RefResolver.cache_remote` is True.
+ If called directly, does not check the store first, but after
+ retrieving the document at the specified URI it will be saved in
+ the store if :attr:`cache_remote` is True.
.. note::
@@ -341,85 +833,138 @@ class RefResolver(object):
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
- :argument str uri: the URI to resolve
- :returns: the retrieved document
+ Arguments:
+
+ uri (str):
- .. _requests: http://pypi.python.org/pypi/requests/
+ The URI to resolve
+ Returns:
+
+ The retrieved document
+
+ .. _requests: https://pypi.org/project/requests/
"""
+ try:
+ import requests
+ except ImportError:
+ requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
- elif (
- scheme in [u"http", u"https"] and
- requests and
- getattr(requests.Response, "json", None) is not None
- ):
+ elif scheme in [u"http", u"https"] and requests:
# Requests has support for detecting the correct encoding of
# json over http
- if callable(requests.Response.json):
- result = requests.get(uri).json()
- else:
- result = requests.get(uri).json
+ result = requests.get(uri).json()
else:
# Otherwise, pass off to urllib and assume utf-8
- result = json.loads(urlopen(uri).read().decode("utf-8"))
+ with urlopen(uri) as url:
+ result = json.loads(url.read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
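# A hedged sketch of sidestepping remote retrieval entirely with a
# scheme handler, so neither requests nor urlopen is reached (the
# file path and scheme use here are hypothetical):
#
#     >>> import json
#     >>> def file_handler(uri):
#     ...     with open(uri[len("file://"):]) as f:
#     ...         return json.load(f)
#     >>> resolver = RefResolver("", {}, handlers={"file": file_handler})
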
-def validator_for(schema, default=_unset):
- if default is _unset:
- default = Draft4Validator
- return meta_schemas.get(schema.get(u"$schema", u""), default)
-
-
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
- >>> validate([2, 3, 4], {"maxItems" : 2})
+ >>> validate([2, 3, 4], {"maxItems": 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
- :func:`validate` will first verify that the provided schema is itself
- valid, since not doing so can lead to less obvious error messages and fail
- in less obvious or consistent ways. If you know you have a valid schema
- already or don't care, you might prefer using the
- :meth:`~IValidator.validate` method directly on a specific validator
- (e.g. :meth:`Draft4Validator.validate`).
+ :func:`validate` will first verify that the provided schema is
+ itself valid, since not doing so can lead to less obvious error
+ messages and to failures that are harder to diagnose.
+
+ If you know you have a valid schema already, especially if you
+ intend to validate multiple instances with the same schema, you
+ likely would prefer using the `IValidator.validate` method directly
+ on a specific validator (e.g. ``Draft7Validator.validate``).
+
+
+ Arguments:
+ instance:
- :argument instance: the instance to validate
- :argument schema: the schema to validate with
- :argument cls: an :class:`IValidator` class that will be used to validate
- the instance.
+ The instance to validate
- If the ``cls`` argument is not provided, two things will happen in
- accordance with the specification. First, if the schema has a
- :validator:`$schema` property containing a known meta-schema [#]_ then the
- proper validator will be used. The specification recommends that all
- schemas contain :validator:`$schema` properties for this reason. If no
- :validator:`$schema` property is found, the default validator class is
- :class:`Draft4Validator`.
+ schema:
- Any other provided positional and keyword arguments will be passed on when
- instantiating the ``cls``.
+ The schema to validate with
- :raises:
- :exc:`ValidationError` if the instance is invalid
+ cls (IValidator):
- :exc:`SchemaError` if the schema itself is invalid
+ The class that will be used to validate the instance.
+
+ If the ``cls`` argument is not provided, two things will happen
+ in accordance with the specification. First, if the schema has a
+ :validator:`$schema` property containing a known meta-schema [#]_
+ then the proper validator will be used. The specification recommends
+ that all schemas contain :validator:`$schema` properties for this
+ reason. If no :validator:`$schema` property is found, the default
+ validator class is the latest released draft.
+
+ Any other provided positional and keyword arguments will be passed
+ on when instantiating the ``cls``.
+
+ Raises:
+
+ `jsonschema.exceptions.ValidationError` if the instance
+ is invalid
+
+ `jsonschema.exceptions.SchemaError` if the schema itself
+ is invalid
.. rubric:: Footnotes
- .. [#] known by a validator registered with :func:`validates`
+ .. [#] known by a validator registered with
+ `jsonschema.validators.validates`
"""
if cls is None:
cls = validator_for(schema)
+
cls.check_schema(schema)
- cls(schema, *args, **kwargs).validate(instance)
+ validator = cls(schema, *args, **kwargs)
+ error = exceptions.best_match(validator.iter_errors(instance))
+ if error is not None:
+ raise error
+
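# The canonical round trip for the function above (standard jsonschema
# usage; the schema is illustrative):
#
#     >>> validate(instance=3, schema={"type": "integer"})
#     >>> validate(instance="3", schema={"type": "integer"})
#     Traceback (most recent call last):
#         ...
#     ValidationError: '3' is not of type 'integer'
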
+
+def validator_for(schema, default=_LATEST_VERSION):
+ """
+ Retrieve the validator class appropriate for validating the given schema.
+
+ Uses the :validator:`$schema` property that should be present in the
+ given schema to look up the appropriate validator class.
+
+ Arguments:
+
+ schema (collections.Mapping or bool):
+
+ the schema to look at
+
+ default:
+
+ the default to return if the appropriate validator class
+ cannot be determined.
+
+ If unprovided, the default is to return the latest supported
+ draft.
+ """
+ if schema is True or schema is False or u"$schema" not in schema:
+ return default
+ if schema[u"$schema"] not in meta_schemas:
+ warn(
+ (
+ "The metaschema specified by $schema was not found. "
+ "Using the latest draft to validate, but this will raise "
+ "an error in the future."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
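# A short sketch of the dispatch above: an empty schema has no $schema,
# so the latest draft wins, while a registered meta-schema URI selects
# its validator class (URI shown is the draft-04 meta-schema id):
#
#     >>> validator_for({}) is Draft7Validator
#     True
#     >>> cls = validator_for(
#     ...     {"$schema": "http://json-schema.org/draft-04/schema#"})
#     >>> cls is Draft4Validator
#     True
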
diff --git a/lib/spack/external/markupsafe/AUTHORS b/lib/spack/external/markupsafe/AUTHORS
deleted file mode 100644
index f7e2942ecc..0000000000
--- a/lib/spack/external/markupsafe/AUTHORS
+++ /dev/null
@@ -1,13 +0,0 @@
-MarkupSafe is written and maintained by Armin Ronacher and
-various contributors:
-
-Development Lead
-````````````````
-
-- Armin Ronacher <armin.ronacher@active-4.com>
-
-Patches and Suggestions
-```````````````````````
-
-- Georg Brandl
-- Mickaël Guérin
diff --git a/lib/spack/external/markupsafe/LICENSE b/lib/spack/external/markupsafe/LICENSE
deleted file mode 100644
index 5d2693890d..0000000000
--- a/lib/spack/external/markupsafe/LICENSE
+++ /dev/null
@@ -1,33 +0,0 @@
-Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS
-for more details.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms of the software as well
-as documentation, with or without modification, are permitted provided
-that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-* The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
-NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
-OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
diff --git a/lib/spack/external/markupsafe/LICENSE.rst b/lib/spack/external/markupsafe/LICENSE.rst
new file mode 100644
index 0000000000..9d227a0cc4
--- /dev/null
+++ b/lib/spack/external/markupsafe/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/spack/external/markupsafe/README.rst b/lib/spack/external/markupsafe/README.rst
index 360a0877a4..3548b8d1f7 100644
--- a/lib/spack/external/markupsafe/README.rst
+++ b/lib/spack/external/markupsafe/README.rst
@@ -1,113 +1,69 @@
MarkupSafe
==========
-Implements a unicode subclass that supports HTML strings:
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
-.. code-block:: python
- >>> from markupsafe import Markup, escape
- >>> escape("<script>alert(document.cookie);</script>")
- Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
- >>> tmpl = Markup("<em>%s</em>")
- >>> tmpl % "Peter > Lustig"
- Markup(u'<em>Peter &gt; Lustig</em>')
-
-If you want to make an object unicode that is not yet unicode
-but don't want to lose the taint information, you can use the
-``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
-is a different name for the same function).
-
-.. code-block:: python
-
- >>> from markupsafe import soft_unicode
- >>> soft_unicode(42)
- u'42'
- >>> soft_unicode(Markup('foo'))
- Markup(u'foo')
-
-HTML Representations
---------------------
-
-Objects can customize their HTML markup equivalent by overriding
-the ``__html__`` function:
+Installing
+----------
-.. code-block:: python
+Install and update using `pip`_:
- >>> class Foo(object):
- ... def __html__(self):
- ... return '<strong>Nice</strong>'
- ...
- >>> escape(Foo())
- Markup(u'<strong>Nice</strong>')
- >>> Markup(Foo())
- Markup(u'<strong>Nice</strong>')
+.. code-block:: text
-Silent Escapes
---------------
+ pip install -U MarkupSafe
-Since MarkupSafe 0.10 there is now also a separate escape function
-called ``escape_silent`` that returns an empty string for ``None`` for
-consistency with other systems that return empty strings for ``None``
-when escaping (for instance Pylons' webhelpers).
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
-If you also want to use this for the escape method of the Markup
-object, you can create your own subclass that does that:
-.. code-block:: python
+Examples
+--------
- from markupsafe import Markup, escape_silent as escape
+.. code-block:: pycon
- class SilentMarkup(Markup):
- __slots__ = ()
-
- @classmethod
- def escape(cls, s):
- return cls(escape(s))
-
-New-Style String Formatting
----------------------------
-
-Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
-3.x are now fully supported. Previously the escape behavior of those
-functions was spotty at best. The new implementations operates under the
-following algorithm:
-
-1. if an object has an ``__html_format__`` method it is called as
- replacement for ``__format__`` with the format specifier. It either
- has to return a string or markup object.
-2. if an object has an ``__html__`` method it is called.
-3. otherwise the default format system of Python kicks in and the result
- is HTML escaped.
-
-Here is how you can implement your own formatting:
+ >>> from markupsafe import Markup, escape
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape('<script>alert(document.cookie);</script>')
+ Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup('<strong>Hello</strong>')
+ Markup('<strong>Hello</strong>')
+ >>> escape(Markup('<strong>Hello</strong>'))
+ Markup('<strong>Hello</strong>')
+ >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello <em>%s</em>")
+ >>> template % '"World"'
+ Markup('Hello <em>&#34;World&#34;</em>')
-.. code-block:: python
- class User(object):
+Donate
+------
- def __init__(self, id, username):
- self.id = id
- self.username = username
+The Pallets organization develops and supports MarkupSafe and other
+libraries that use it. In order to grow the community of contributors
+and users, and allow the maintainers to devote more time to the
+projects, `please donate today`_.
- def __html_format__(self, format_spec):
- if format_spec == 'link':
- return Markup('<a href="/user/{0}">{1}</a>').format(
- self.id,
- self.__html__(),
- )
- elif format_spec:
- raise ValueError('Invalid format spec')
- return self.__html__()
+.. _please donate today: https://palletsprojects.com/donate
- def __html__(self):
- return Markup('<span class=user>{0}</span>').format(self.username)
-And to format that user:
+Links
+-----
-.. code-block:: python
+* Website: https://palletsprojects.com/p/markupsafe/
+* Documentation: https://markupsafe.palletsprojects.com/
+* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
+* Releases: https://pypi.org/project/MarkupSafe/
+* Code: https://github.com/pallets/markupsafe
+* Issue tracker: https://github.com/pallets/markupsafe/issues
+* Test status:
- >>> user = User(1, 'foo')
- >>> Markup('<p>User: {0:link}').format(user)
- Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
+ * Linux, Mac: https://travis-ci.org/pallets/markupsafe
+ * Windows: https://ci.appveyor.com/project/pallets/markupsafe
-Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher.
+* Test coverage: https://codecov.io/gh/pallets/markupsafe
diff --git a/lib/spack/external/markupsafe/__init__.py b/lib/spack/external/markupsafe/__init__.py
index 506326f450..da05ed328a 100644
--- a/lib/spack/external/markupsafe/__init__.py
+++ b/lib/spack/external/markupsafe/__init__.py
@@ -1,80 +1,74 @@
# -*- coding: utf-8 -*-
"""
- markupsafe
- ~~~~~~~~~~
+markupsafe
+~~~~~~~~~~
- Implements a Markup string.
+Implements an escape function and a Markup string to replace HTML
+special characters with safe representations.
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
"""
import re
import string
-import sys
-from markupsafe._compat import text_type, string_types, int_types, \
- unichr, iteritems, PY2
-if sys.version_info >= (3, 3):
- from collections.abc import Mapping
-else:
- from collections import Mapping
+from ._compat import int_types
+from ._compat import iteritems
+from ._compat import Mapping
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import unichr
-__version__ = "1.0"
+__version__ = "1.1.1"
-__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
+__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
-
-_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
-_entity_re = re.compile(r'&([^& ;]+);')
+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+_entity_re = re.compile(r"&([^& ;]+);")
class Markup(text_type):
- r"""Marks a string as being safe for inclusion in HTML/XML output without
- needing to be escaped. This implements the `__html__` interface a couple
- of frameworks and web applications use. :class:`Markup` is a direct
- subclass of `unicode` and provides all the methods of `unicode` just that
- it escapes arguments passed and always returns `Markup`.
-
- The `escape` function returns markup objects so that double escaping can't
- happen.
-
- The constructor of the :class:`Markup` class can be used for three
- different things: When passed an unicode object it's assumed to be safe,
- when passed an object with an HTML representation (has an `__html__`
- method) that representation is used, otherwise the object passed is
- converted into a unicode string and then assumed to be safe:
-
- >>> Markup("Hello <em>World</em>!")
- Markup(u'Hello <em>World</em>!')
- >>> class Foo(object):
- ... def __html__(self):
- ... return '<a href="#">foo</a>'
+ """A string that is ready to be safely inserted into an HTML or XML
+ document, either because it was escaped or because it was marked
+ safe.
+
+ Passing an object to the constructor converts it to text and wraps
+ it to mark it safe without escaping. To escape the text, use the
+ :meth:`escape` class method instead.
+
+ >>> Markup('Hello, <em>World</em>!')
+ Markup('Hello, <em>World</em>!')
+ >>> Markup(42)
+ Markup('42')
+ >>> Markup.escape('Hello, <em>World</em>!')
+ Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
+
+ This implements the ``__html__()`` interface that some frameworks
+ use. Passing an object that implements ``__html__()`` will wrap the
+ output of that method, marking it safe.
+
+ >>> class Foo:
+ ... def __html__(self):
+ ... return '<a href="/foo">foo</a>'
...
>>> Markup(Foo())
- Markup(u'<a href="#">foo</a>')
-
- If you want object passed being always treated as unsafe you can use the
- :meth:`escape` classmethod to create a :class:`Markup` object:
+ Markup('<a href="/foo">foo</a>')
- >>> Markup.escape("Hello <em>World</em>!")
- Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
+ This is a subclass of the text type (``str`` in Python 3,
+ ``unicode`` in Python 2). It has the same methods as that type, but
+ all methods escape their arguments and return a ``Markup`` instance.
- Operations on a markup string are markup aware which means that all
- arguments are passed through the :func:`escape` function:
-
- >>> em = Markup("<em>%s</em>")
- >>> em % "foo & bar"
- Markup(u'<em>foo &amp; bar</em>')
- >>> strong = Markup("<strong>%(text)s</strong>")
- >>> strong % {'text': '<blink>hacker here</blink>'}
- Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
- >>> Markup("<em>Hello</em> ") + "<foo>"
- Markup(u'<em>Hello</em> &lt;foo&gt;')
+ >>> Markup('<em>%s</em>') % 'foo & bar'
+ Markup('<em>foo &amp; bar</em>')
+ >>> Markup('<em>Hello</em> ') + '<foo>'
+ Markup('<em>Hello</em> &lt;foo&gt;')
"""
+
__slots__ = ()
- def __new__(cls, base=u'', encoding=None, errors='strict'):
- if hasattr(base, '__html__'):
+ def __new__(cls, base=u"", encoding=None, errors="strict"):
+ if hasattr(base, "__html__"):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
@@ -84,12 +78,12 @@ class Markup(text_type):
return self
def __add__(self, other):
- if isinstance(other, string_types) or hasattr(other, '__html__'):
+ if isinstance(other, string_types) or hasattr(other, "__html__"):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
- if hasattr(other, '__html__') or isinstance(other, string_types):
+ if hasattr(other, "__html__") or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
@@ -97,6 +91,7 @@ class Markup(text_type):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
+
__rmul__ = __mul__
def __mod__(self, arg):
@@ -107,115 +102,124 @@ class Markup(text_type):
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
- return '%s(%s)' % (
- self.__class__.__name__,
- text_type.__repr__(self)
- )
+ return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
+
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
+
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
+
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
- return list(map(self.__class__, text_type.splitlines(
- self, *args, **kwargs)))
+ return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
+
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
- r"""Unescape markup again into an text_type string. This also resolves
- known HTML4 and XHTML entities:
+ """Convert escaped markup back into a text string. This replaces
+ HTML entities with the characters they represent.
- >>> Markup("Main &raquo; <em>About</em>").unescape()
- u'Main \xbb <em>About</em>'
+ >>> Markup('Main &raquo; <em>About</em>').unescape()
+ 'Main » <em>About</em>'
"""
- from markupsafe._constants import HTML_ENTITIES
+ from ._constants import HTML_ENTITIES
+
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
- if name[:2] in ('#x', '#X'):
+ if name[:2] in ("#x", "#X"):
return unichr(int(name[2:], 16))
- elif name.startswith('#'):
+ elif name.startswith("#"):
return unichr(int(name[1:]))
except ValueError:
pass
# Don't modify unexpected input.
return m.group()
+
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
- r"""Unescape markup into an text_type string and strip all tags. This
- also resolves known HTML4 and XHTML entities. Whitespace is
- normalized to one:
+ """:meth:`unescape` the markup, remove tags, and normalize
+ whitespace to single spaces.
- >>> Markup("Main &raquo; <em>About</em>").striptags()
- u'Main \xbb About'
+ >>> Markup('Main &raquo;\t<em>About</em>').striptags()
+ 'Main » About'
"""
- stripped = u' '.join(_striptags_re.sub('', self).split())
+ stripped = u" ".join(_striptags_re.sub("", self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
- """Escape the string. Works like :func:`escape` with the difference
- that for subclasses of :class:`Markup` this function would return the
- correct subclass.
+ """Escape a string. Calls :func:`escape` and ensures that for
+ subclasses the correct type is returned.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
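# A small sketch of the subclass guarantee described above (the Bold
# subclass is hypothetical):
#
#     >>> class Bold(Markup):
#     ...     pass
#     >>> type(Bold.escape("<b>")) is Bold
#     True
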
- def make_simple_escaping_wrapper(name):
+ def make_simple_escaping_wrapper(name): # noqa: B902
orig = getattr(text_type, name)
+
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
_escape_argspec(kwargs, iteritems(kwargs), self.escape)
return self.__class__(orig(self, *args, **kwargs))
+
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
- for method in '__getitem__', 'capitalize', \
- 'title', 'lower', 'upper', 'replace', 'ljust', \
- 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
- 'translate', 'expandtabs', 'swapcase', 'zfill':
+ for method in (
+ "__getitem__",
+ "capitalize",
+ "title",
+ "lower",
+ "upper",
+ "replace",
+ "ljust",
+ "rjust",
+ "lstrip",
+ "rstrip",
+ "center",
+ "strip",
+ "translate",
+ "expandtabs",
+ "swapcase",
+ "zfill",
+ ):
locals()[method] = make_simple_escaping_wrapper(method)
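# The wrappers generated above make ordinary string methods
# markup-aware, escaping their arguments (values illustrative):
#
#     >>> Markup("<em>hello</em>").replace("hello", "<world>")
#     Markup('<em>&lt;world&gt;</em>')
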
- # new in python 2.5
- if hasattr(text_type, 'partition'):
- def partition(self, sep):
- return tuple(map(self.__class__,
- text_type.partition(self, self.escape(sep))))
- def rpartition(self, sep):
- return tuple(map(self.__class__,
- text_type.rpartition(self, self.escape(sep))))
-
- # new in python 2.6
- if hasattr(text_type, 'format'):
- def format(*args, **kwargs):
- self, args = args[0], args[1:]
- formatter = EscapeFormatter(self.escape)
- kwargs = _MagicFormatMapping(args, kwargs)
- return self.__class__(formatter.vformat(self, args, kwargs))
-
- def __html_format__(self, format_spec):
- if format_spec:
- raise ValueError('Unsupported format specification '
- 'for Markup.')
- return self
+ def partition(self, sep):
+ return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
+
+ def rpartition(self, sep):
+ return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
+
+ def format(self, *args, **kwargs):
+ formatter = EscapeFormatter(self.escape)
+ kwargs = _MagicFormatMapping(args, kwargs)
+ return self.__class__(formatter.vformat(self, args, kwargs))
+
+ def __html_format__(self, format_spec):
+ if format_spec:
+ raise ValueError("Unsupported format specification " "for Markup.")
+ return self
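# Sketch of the format hook above: str.format is routed through
# EscapeFormatter, so plain arguments are escaped (values illustrative):
#
#     >>> Markup("Hello {name}").format(name="<World>")
#     Markup('Hello &lt;World&gt;')
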
# not in python 3
- if hasattr(text_type, '__getslice__'):
- __getslice__ = make_simple_escaping_wrapper('__getslice__')
+ if hasattr(text_type, "__getslice__"):
+ __getslice__ = make_simple_escaping_wrapper("__getslice__")
del method, make_simple_escaping_wrapper
@@ -234,7 +238,7 @@ class _MagicFormatMapping(Mapping):
self._last_index = 0
def __getitem__(self, key):
- if key == '':
+ if key == "":
idx = self._last_index
self._last_index += 1
try:
@@ -251,35 +255,37 @@ class _MagicFormatMapping(Mapping):
return len(self._kwargs)
-if hasattr(text_type, 'format'):
- class EscapeFormatter(string.Formatter):
+if hasattr(text_type, "format"):
+ class EscapeFormatter(string.Formatter):
def __init__(self, escape):
self.escape = escape
def format_field(self, value, format_spec):
- if hasattr(value, '__html_format__'):
+ if hasattr(value, "__html_format__"):
rv = value.__html_format__(format_spec)
- elif hasattr(value, '__html__'):
+ elif hasattr(value, "__html__"):
if format_spec:
- raise ValueError('No format specification allowed '
- 'when formatting an object with '
- 'its __html__ method.')
+ raise ValueError(
+ "Format specifier {0} given, but {1} does not"
+ " define __html_format__. A class that defines"
+ " __html__ must define __html_format__ to work"
+ " with format specifiers.".format(format_spec, type(value))
+ )
rv = value.__html__()
else:
# We need to make sure the format spec is unicode here as
# otherwise the wrong callback methods are invoked. For
# instance a byte string there would invoke __str__ and
# not __unicode__.
- rv = string.Formatter.format_field(
- self, value, text_type(format_spec))
+ rv = string.Formatter.format_field(self, value, text_type(format_spec))
return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
- if hasattr(value, '__html__') or isinstance(value, string_types):
+ if hasattr(value, "__html__") or isinstance(value, string_types):
obj[key] = escape(value)
return obj
@@ -291,20 +297,31 @@ class _MarkupEscapeHelper(object):
self.obj = obj
self.escape = escape
- __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
- __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
- __repr__ = lambda s: str(s.escape(repr(s.obj)))
- __int__ = lambda s: int(s.obj)
- __float__ = lambda s: float(s.obj)
+ def __getitem__(self, item):
+ return _MarkupEscapeHelper(self.obj[item], self.escape)
+
+ def __str__(self):
+ return text_type(self.escape(self.obj))
+
+ __unicode__ = __str__
+
+ def __repr__(self):
+ return str(self.escape(repr(self.obj)))
+
+ def __int__(self):
+ return int(self.obj)
+
+ def __float__(self):
+ return float(self.obj)
# we have to import it down here as the speedups and native
# modules import the markup type which is defined above.
try:
- from markupsafe._speedups import escape, escape_silent, soft_unicode
+ from ._speedups import escape, escape_silent, soft_unicode
except ImportError:
- from markupsafe._native import escape, escape_silent, soft_unicode
+ from ._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
- __all__.append('soft_str')
+ __all__.append("soft_str")
diff --git a/lib/spack/external/markupsafe/_compat.py b/lib/spack/external/markupsafe/_compat.py
index 62e5632ad8..bc05090f9e 100644
--- a/lib/spack/external/markupsafe/_compat.py
+++ b/lib/spack/external/markupsafe/_compat.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
"""
- markupsafe._compat
- ~~~~~~~~~~~~~~~~~~
+markupsafe._compat
+~~~~~~~~~~~~~~~~~~
- Compatibility module for different Python versions.
-
- :copyright: (c) 2013 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
"""
import sys
@@ -17,10 +15,19 @@ if not PY2:
string_types = (str,)
unichr = chr
int_types = (int,)
- iteritems = lambda x: iter(x.items())
+
+ def iteritems(x):
+ return iter(x.items())
+
+ from collections.abc import Mapping
+
else:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
int_types = (int, long)
- iteritems = lambda x: x.iteritems()
+
+ def iteritems(x):
+ return x.iteritems()
+
+ from collections import Mapping
diff --git a/lib/spack/external/markupsafe/_constants.py b/lib/spack/external/markupsafe/_constants.py
index 919bf03c50..7c57c2d294 100644
--- a/lib/spack/external/markupsafe/_constants.py
+++ b/lib/spack/external/markupsafe/_constants.py
@@ -1,267 +1,264 @@
# -*- coding: utf-8 -*-
"""
- markupsafe._constants
- ~~~~~~~~~~~~~~~~~~~~~
+markupsafe._constants
+~~~~~~~~~~~~~~~~~~~~~
- Highlevel implementation of the Markup string.
-
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
"""
-
HTML_ENTITIES = {
- 'AElig': 198,
- 'Aacute': 193,
- 'Acirc': 194,
- 'Agrave': 192,
- 'Alpha': 913,
- 'Aring': 197,
- 'Atilde': 195,
- 'Auml': 196,
- 'Beta': 914,
- 'Ccedil': 199,
- 'Chi': 935,
- 'Dagger': 8225,
- 'Delta': 916,
- 'ETH': 208,
- 'Eacute': 201,
- 'Ecirc': 202,
- 'Egrave': 200,
- 'Epsilon': 917,
- 'Eta': 919,
- 'Euml': 203,
- 'Gamma': 915,
- 'Iacute': 205,
- 'Icirc': 206,
- 'Igrave': 204,
- 'Iota': 921,
- 'Iuml': 207,
- 'Kappa': 922,
- 'Lambda': 923,
- 'Mu': 924,
- 'Ntilde': 209,
- 'Nu': 925,
- 'OElig': 338,
- 'Oacute': 211,
- 'Ocirc': 212,
- 'Ograve': 210,
- 'Omega': 937,
- 'Omicron': 927,
- 'Oslash': 216,
- 'Otilde': 213,
- 'Ouml': 214,
- 'Phi': 934,
- 'Pi': 928,
- 'Prime': 8243,
- 'Psi': 936,
- 'Rho': 929,
- 'Scaron': 352,
- 'Sigma': 931,
- 'THORN': 222,
- 'Tau': 932,
- 'Theta': 920,
- 'Uacute': 218,
- 'Ucirc': 219,
- 'Ugrave': 217,
- 'Upsilon': 933,
- 'Uuml': 220,
- 'Xi': 926,
- 'Yacute': 221,
- 'Yuml': 376,
- 'Zeta': 918,
- 'aacute': 225,
- 'acirc': 226,
- 'acute': 180,
- 'aelig': 230,
- 'agrave': 224,
- 'alefsym': 8501,
- 'alpha': 945,
- 'amp': 38,
- 'and': 8743,
- 'ang': 8736,
- 'apos': 39,
- 'aring': 229,
- 'asymp': 8776,
- 'atilde': 227,
- 'auml': 228,
- 'bdquo': 8222,
- 'beta': 946,
- 'brvbar': 166,
- 'bull': 8226,
- 'cap': 8745,
- 'ccedil': 231,
- 'cedil': 184,
- 'cent': 162,
- 'chi': 967,
- 'circ': 710,
- 'clubs': 9827,
- 'cong': 8773,
- 'copy': 169,
- 'crarr': 8629,
- 'cup': 8746,
- 'curren': 164,
- 'dArr': 8659,
- 'dagger': 8224,
- 'darr': 8595,
- 'deg': 176,
- 'delta': 948,
- 'diams': 9830,
- 'divide': 247,
- 'eacute': 233,
- 'ecirc': 234,
- 'egrave': 232,
- 'empty': 8709,
- 'emsp': 8195,
- 'ensp': 8194,
- 'epsilon': 949,
- 'equiv': 8801,
- 'eta': 951,
- 'eth': 240,
- 'euml': 235,
- 'euro': 8364,
- 'exist': 8707,
- 'fnof': 402,
- 'forall': 8704,
- 'frac12': 189,
- 'frac14': 188,
- 'frac34': 190,
- 'frasl': 8260,
- 'gamma': 947,
- 'ge': 8805,
- 'gt': 62,
- 'hArr': 8660,
- 'harr': 8596,
- 'hearts': 9829,
- 'hellip': 8230,
- 'iacute': 237,
- 'icirc': 238,
- 'iexcl': 161,
- 'igrave': 236,
- 'image': 8465,
- 'infin': 8734,
- 'int': 8747,
- 'iota': 953,
- 'iquest': 191,
- 'isin': 8712,
- 'iuml': 239,
- 'kappa': 954,
- 'lArr': 8656,
- 'lambda': 955,
- 'lang': 9001,
- 'laquo': 171,
- 'larr': 8592,
- 'lceil': 8968,
- 'ldquo': 8220,
- 'le': 8804,
- 'lfloor': 8970,
- 'lowast': 8727,
- 'loz': 9674,
- 'lrm': 8206,
- 'lsaquo': 8249,
- 'lsquo': 8216,
- 'lt': 60,
- 'macr': 175,
- 'mdash': 8212,
- 'micro': 181,
- 'middot': 183,
- 'minus': 8722,
- 'mu': 956,
- 'nabla': 8711,
- 'nbsp': 160,
- 'ndash': 8211,
- 'ne': 8800,
- 'ni': 8715,
- 'not': 172,
- 'notin': 8713,
- 'nsub': 8836,
- 'ntilde': 241,
- 'nu': 957,
- 'oacute': 243,
- 'ocirc': 244,
- 'oelig': 339,
- 'ograve': 242,
- 'oline': 8254,
- 'omega': 969,
- 'omicron': 959,
- 'oplus': 8853,
- 'or': 8744,
- 'ordf': 170,
- 'ordm': 186,
- 'oslash': 248,
- 'otilde': 245,
- 'otimes': 8855,
- 'ouml': 246,
- 'para': 182,
- 'part': 8706,
- 'permil': 8240,
- 'perp': 8869,
- 'phi': 966,
- 'pi': 960,
- 'piv': 982,
- 'plusmn': 177,
- 'pound': 163,
- 'prime': 8242,
- 'prod': 8719,
- 'prop': 8733,
- 'psi': 968,
- 'quot': 34,
- 'rArr': 8658,
- 'radic': 8730,
- 'rang': 9002,
- 'raquo': 187,
- 'rarr': 8594,
- 'rceil': 8969,
- 'rdquo': 8221,
- 'real': 8476,
- 'reg': 174,
- 'rfloor': 8971,
- 'rho': 961,
- 'rlm': 8207,
- 'rsaquo': 8250,
- 'rsquo': 8217,
- 'sbquo': 8218,
- 'scaron': 353,
- 'sdot': 8901,
- 'sect': 167,
- 'shy': 173,
- 'sigma': 963,
- 'sigmaf': 962,
- 'sim': 8764,
- 'spades': 9824,
- 'sub': 8834,
- 'sube': 8838,
- 'sum': 8721,
- 'sup': 8835,
- 'sup1': 185,
- 'sup2': 178,
- 'sup3': 179,
- 'supe': 8839,
- 'szlig': 223,
- 'tau': 964,
- 'there4': 8756,
- 'theta': 952,
- 'thetasym': 977,
- 'thinsp': 8201,
- 'thorn': 254,
- 'tilde': 732,
- 'times': 215,
- 'trade': 8482,
- 'uArr': 8657,
- 'uacute': 250,
- 'uarr': 8593,
- 'ucirc': 251,
- 'ugrave': 249,
- 'uml': 168,
- 'upsih': 978,
- 'upsilon': 965,
- 'uuml': 252,
- 'weierp': 8472,
- 'xi': 958,
- 'yacute': 253,
- 'yen': 165,
- 'yuml': 255,
- 'zeta': 950,
- 'zwj': 8205,
- 'zwnj': 8204
+ "AElig": 198,
+ "Aacute": 193,
+ "Acirc": 194,
+ "Agrave": 192,
+ "Alpha": 913,
+ "Aring": 197,
+ "Atilde": 195,
+ "Auml": 196,
+ "Beta": 914,
+ "Ccedil": 199,
+ "Chi": 935,
+ "Dagger": 8225,
+ "Delta": 916,
+ "ETH": 208,
+ "Eacute": 201,
+ "Ecirc": 202,
+ "Egrave": 200,
+ "Epsilon": 917,
+ "Eta": 919,
+ "Euml": 203,
+ "Gamma": 915,
+ "Iacute": 205,
+ "Icirc": 206,
+ "Igrave": 204,
+ "Iota": 921,
+ "Iuml": 207,
+ "Kappa": 922,
+ "Lambda": 923,
+ "Mu": 924,
+ "Ntilde": 209,
+ "Nu": 925,
+ "OElig": 338,
+ "Oacute": 211,
+ "Ocirc": 212,
+ "Ograve": 210,
+ "Omega": 937,
+ "Omicron": 927,
+ "Oslash": 216,
+ "Otilde": 213,
+ "Ouml": 214,
+ "Phi": 934,
+ "Pi": 928,
+ "Prime": 8243,
+ "Psi": 936,
+ "Rho": 929,
+ "Scaron": 352,
+ "Sigma": 931,
+ "THORN": 222,
+ "Tau": 932,
+ "Theta": 920,
+ "Uacute": 218,
+ "Ucirc": 219,
+ "Ugrave": 217,
+ "Upsilon": 933,
+ "Uuml": 220,
+ "Xi": 926,
+ "Yacute": 221,
+ "Yuml": 376,
+ "Zeta": 918,
+ "aacute": 225,
+ "acirc": 226,
+ "acute": 180,
+ "aelig": 230,
+ "agrave": 224,
+ "alefsym": 8501,
+ "alpha": 945,
+ "amp": 38,
+ "and": 8743,
+ "ang": 8736,
+ "apos": 39,
+ "aring": 229,
+ "asymp": 8776,
+ "atilde": 227,
+ "auml": 228,
+ "bdquo": 8222,
+ "beta": 946,
+ "brvbar": 166,
+ "bull": 8226,
+ "cap": 8745,
+ "ccedil": 231,
+ "cedil": 184,
+ "cent": 162,
+ "chi": 967,
+ "circ": 710,
+ "clubs": 9827,
+ "cong": 8773,
+ "copy": 169,
+ "crarr": 8629,
+ "cup": 8746,
+ "curren": 164,
+ "dArr": 8659,
+ "dagger": 8224,
+ "darr": 8595,
+ "deg": 176,
+ "delta": 948,
+ "diams": 9830,
+ "divide": 247,
+ "eacute": 233,
+ "ecirc": 234,
+ "egrave": 232,
+ "empty": 8709,
+ "emsp": 8195,
+ "ensp": 8194,
+ "epsilon": 949,
+ "equiv": 8801,
+ "eta": 951,
+ "eth": 240,
+ "euml": 235,
+ "euro": 8364,
+ "exist": 8707,
+ "fnof": 402,
+ "forall": 8704,
+ "frac12": 189,
+ "frac14": 188,
+ "frac34": 190,
+ "frasl": 8260,
+ "gamma": 947,
+ "ge": 8805,
+ "gt": 62,
+ "hArr": 8660,
+ "harr": 8596,
+ "hearts": 9829,
+ "hellip": 8230,
+ "iacute": 237,
+ "icirc": 238,
+ "iexcl": 161,
+ "igrave": 236,
+ "image": 8465,
+ "infin": 8734,
+ "int": 8747,
+ "iota": 953,
+ "iquest": 191,
+ "isin": 8712,
+ "iuml": 239,
+ "kappa": 954,
+ "lArr": 8656,
+ "lambda": 955,
+ "lang": 9001,
+ "laquo": 171,
+ "larr": 8592,
+ "lceil": 8968,
+ "ldquo": 8220,
+ "le": 8804,
+ "lfloor": 8970,
+ "lowast": 8727,
+ "loz": 9674,
+ "lrm": 8206,
+ "lsaquo": 8249,
+ "lsquo": 8216,
+ "lt": 60,
+ "macr": 175,
+ "mdash": 8212,
+ "micro": 181,
+ "middot": 183,
+ "minus": 8722,
+ "mu": 956,
+ "nabla": 8711,
+ "nbsp": 160,
+ "ndash": 8211,
+ "ne": 8800,
+ "ni": 8715,
+ "not": 172,
+ "notin": 8713,
+ "nsub": 8836,
+ "ntilde": 241,
+ "nu": 957,
+ "oacute": 243,
+ "ocirc": 244,
+ "oelig": 339,
+ "ograve": 242,
+ "oline": 8254,
+ "omega": 969,
+ "omicron": 959,
+ "oplus": 8853,
+ "or": 8744,
+ "ordf": 170,
+ "ordm": 186,
+ "oslash": 248,
+ "otilde": 245,
+ "otimes": 8855,
+ "ouml": 246,
+ "para": 182,
+ "part": 8706,
+ "permil": 8240,
+ "perp": 8869,
+ "phi": 966,
+ "pi": 960,
+ "piv": 982,
+ "plusmn": 177,
+ "pound": 163,
+ "prime": 8242,
+ "prod": 8719,
+ "prop": 8733,
+ "psi": 968,
+ "quot": 34,
+ "rArr": 8658,
+ "radic": 8730,
+ "rang": 9002,
+ "raquo": 187,
+ "rarr": 8594,
+ "rceil": 8969,
+ "rdquo": 8221,
+ "real": 8476,
+ "reg": 174,
+ "rfloor": 8971,
+ "rho": 961,
+ "rlm": 8207,
+ "rsaquo": 8250,
+ "rsquo": 8217,
+ "sbquo": 8218,
+ "scaron": 353,
+ "sdot": 8901,
+ "sect": 167,
+ "shy": 173,
+ "sigma": 963,
+ "sigmaf": 962,
+ "sim": 8764,
+ "spades": 9824,
+ "sub": 8834,
+ "sube": 8838,
+ "sum": 8721,
+ "sup": 8835,
+ "sup1": 185,
+ "sup2": 178,
+ "sup3": 179,
+ "supe": 8839,
+ "szlig": 223,
+ "tau": 964,
+ "there4": 8756,
+ "theta": 952,
+ "thetasym": 977,
+ "thinsp": 8201,
+ "thorn": 254,
+ "tilde": 732,
+ "times": 215,
+ "trade": 8482,
+ "uArr": 8657,
+ "uacute": 250,
+ "uarr": 8593,
+ "ucirc": 251,
+ "ugrave": 249,
+ "uml": 168,
+ "upsih": 978,
+ "upsilon": 965,
+ "uuml": 252,
+ "weierp": 8472,
+ "xi": 958,
+ "yacute": 253,
+ "yen": 165,
+ "yuml": 255,
+ "zeta": 950,
+ "zwj": 8205,
+ "zwnj": 8204,
}
diff --git a/lib/spack/external/markupsafe/_native.py b/lib/spack/external/markupsafe/_native.py
index 5e83f10a11..cd08752cd8 100644
--- a/lib/spack/external/markupsafe/_native.py
+++ b/lib/spack/external/markupsafe/_native.py
@@ -1,36 +1,49 @@
# -*- coding: utf-8 -*-
"""
- markupsafe._native
- ~~~~~~~~~~~~~~~~~~
+markupsafe._native
+~~~~~~~~~~~~~~~~~~
- Native Python implementation the C module is not compiled.
+Native Python implementation used when the C module is not compiled.
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
"""
-from markupsafe import Markup
-from markupsafe._compat import text_type
+from . import Markup
+from ._compat import text_type
def escape(s):
- """Convert the characters &, <, >, ' and " in string s to HTML-safe
- sequences. Use this if you need to display text that might contain
- such characters in HTML. Marks return value as markup string.
+ """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
+ the string with HTML-safe sequences. Use this if you need to display
+ text that might contain such characters in HTML.
+
+ If the object has an ``__html__`` method, it is called and the
+ return value is assumed to already be safe for HTML.
+
+ :param s: An object to be converted to a string and escaped.
+ :return: A :class:`Markup` string with the escaped text.
"""
- if hasattr(s, '__html__'):
- return s.__html__()
- return Markup(text_type(s)
- .replace('&', '&amp;')
- .replace('>', '&gt;')
- .replace('<', '&lt;')
- .replace("'", '&#39;')
- .replace('"', '&#34;')
+ if hasattr(s, "__html__"):
+ return Markup(s.__html__())
+ return Markup(
+ text_type(s)
+ .replace("&", "&amp;")
+ .replace(">", "&gt;")
+ .replace("<", "&lt;")
+ .replace("'", "&#39;")
+ .replace('"', "&#34;")
)
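# A doctest-style sketch of the pure-Python escape above, including the
# __html__ shortcut (the Safe class is hypothetical):
#
#     >>> escape("<em>&</em>")
#     Markup('&lt;em&gt;&amp;&lt;/em&gt;')
#     >>> class Safe(object):
#     ...     def __html__(self):
#     ...         return "<b>ok</b>"
#     >>> escape(Safe())
#     Markup('<b>ok</b>')
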
def escape_silent(s):
- """Like :func:`escape` but converts `None` into an empty
- markup string.
+ """Like :func:`escape` but treats ``None`` as the empty string.
+ Useful with optional values, as otherwise you get the string
+ ``'None'`` when the value is ``None``.
+
+ >>> escape(None)
+ Markup('None')
+ >>> escape_silent(None)
+ Markup('')
"""
if s is None:
return Markup()
@@ -38,8 +51,18 @@ def escape_silent(s):
def soft_unicode(s):
- """Make a string unicode if it isn't already. That way a markup
- string is not converted back to unicode.
+ """Convert an object to a string if it isn't already. This preserves
+ a :class:`Markup` string rather than converting it back to a basic
+ string, so it will still be marked as safe and won't be escaped
+ again.
+
+ >>> value = escape('<User 1>')
+ >>> value
+ Markup('&lt;User 1&gt;')
+ >>> escape(str(value))
+ Markup('&amp;lt;User 1&amp;gt;')
+ >>> escape(soft_unicode(value))
+ Markup('&lt;User 1&gt;')
"""
if not isinstance(s, text_type):
s = text_type(s)
diff --git a/lib/spack/external/ordereddict_backport.py b/lib/spack/external/ordereddict_backport.py
deleted file mode 100644
index 3c7f012e9e..0000000000
--- a/lib/spack/external/ordereddict_backport.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-"""This file dispatches to the correct implementation of OrderedDict."""
-
-# TODO: this file, along with py26/ordereddict.py, can be removed when
-# TODO: support for python 2.6 will be dropped
-
-# Removing this import will make python 2.6
-# fail on import of ordereddict
-from __future__ import absolute_import
-
-import sys
-
-if sys.version_info[:2] == (2, 6):
- import ordereddict
- OrderedDict = ordereddict.OrderedDict
-else:
- import collections
- OrderedDict = collections.OrderedDict
diff --git a/lib/spack/external/py2/functools32/LICENSE b/lib/spack/external/py2/functools32/LICENSE
new file mode 100644
index 0000000000..43388e7e13
--- /dev/null
+++ b/lib/spack/external/py2/functools32/LICENSE
@@ -0,0 +1,289 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com). In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property. Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.2 2.1.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2.1 2.2 2002 PSF yes
+ 2.2.2 2.2.1 2002 PSF yes
+ 2.2.3 2.2.2 2003 PSF yes
+ 2.3 2.2.2 2002-2003 PSF yes
+ 2.3.1 2.3 2002-2003 PSF yes
+ 2.3.2 2.3.1 2002-2003 PSF yes
+ 2.3.3 2.3.2 2002-2003 PSF yes
+ 2.3.4 2.3.3 2004 PSF yes
+ 2.3.5 2.3.4 2005 PSF yes
+ 2.4 2.3 2004 PSF yes
+ 2.4.1 2.4 2005 PSF yes
+ 2.4.2 2.4.1 2005 PSF yes
+ 2.4.3 2.4.2 2006 PSF yes
+ 2.4.4 2.4.3 2006 PSF yes
+ 2.5 2.4 2006 PSF yes
+ 2.5.1 2.5 2007 PSF yes
+ 2.5.2 2.5.1 2008 PSF yes
+ 2.5.3 2.5.2 2008 PSF yes
+ 2.6 2.5 2008 PSF yes
+ 2.6.1 2.6 2008 PSF yes
+ 2.6.2 2.6.1 2009 PSF yes
+ 2.6.3 2.6.2 2009 PSF yes
+ 2.6.4 2.6.3 2009 PSF yes
+ 2.6.5 2.6.4 2010 PSF yes
+ 3.0 2.6 2008 PSF yes
+ 3.0.1 3.0 2009 PSF yes
+ 3.1 3.0.1 2009 PSF yes
+ 3.1.1 3.1 2009 PSF yes
+ 3.1.2 3.1.1 2010 PSF yes
+ 3.1.3 3.1.2 2010 PSF yes
+ 3.1.4 3.1.3 2011 PSF yes
+ 3.2 3.1 2011 PSF yes
+ 3.2.1 3.2 2011 PSF yes
+ 3.2.2 3.2.1 2011 PSF yes
+ 3.2.3 3.2.2 2012 PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012 Python Software Foundation; All Rights Reserved" are retained in Python
+alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/lib/spack/external/py2/functools32/__init__.py b/lib/spack/external/py2/functools32/__init__.py
new file mode 100644
index 0000000000..837f7fb651
--- /dev/null
+++ b/lib/spack/external/py2/functools32/__init__.py
@@ -0,0 +1 @@
+from .functools32 import *
diff --git a/lib/spack/external/py2/functools32/_dummy_thread32.py b/lib/spack/external/py2/functools32/_dummy_thread32.py
new file mode 100644
index 0000000000..8503b0e3dd
--- /dev/null
+++ b/lib/spack/external/py2/functools32/_dummy_thread32.py
@@ -0,0 +1,158 @@
+"""Drop-in replacement for the thread module.
+
+Meant to be used as a brain-dead substitute so that threaded code does
+not need to be rewritten when the thread module is not present.
+
+Suggested usage is::
+
+ try:
+ try:
+ import _thread # Python >= 3
+    except ImportError:
+ import thread as _thread # Python < 3
+ except ImportError:
+ import _dummy_thread as _thread
+
+"""
+# Exports only things specified by thread documentation;
+# skipping obsolete synonyms allocate(), start_new(), exit_thread().
+__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
+ 'interrupt_main', 'LockType']
+
+# A dummy value
+TIMEOUT_MAX = 2**31
+
+# NOTE: this module can be imported early in the extension building process,
+# and so top level imports of other modules should be avoided. Instead, all
+# imports are done when needed on a function-by-function basis. Since threads
+# are disabled, the import lock should not be an issue anyway (??).
+
+class error(Exception):
+ """Dummy implementation of _thread.error."""
+
+ def __init__(self, *args):
+ self.args = args
+
+def start_new_thread(function, args, kwargs={}):
+ """Dummy implementation of _thread.start_new_thread().
+
+ Compatibility is maintained by making sure that ``args`` is a
+ tuple and ``kwargs`` is a dictionary. If an exception is raised
+ and it is SystemExit (which can be done by _thread.exit()) it is
+ caught and nothing is done; all other exceptions are printed out
+ by using traceback.print_exc().
+
+ If the executed function calls interrupt_main the KeyboardInterrupt will be
+ raised when the function returns.
+
+ """
+ if type(args) != type(tuple()):
+ raise TypeError("2nd arg must be a tuple")
+ if type(kwargs) != type(dict()):
+ raise TypeError("3rd arg must be a dict")
+ global _main
+ _main = False
+ try:
+ function(*args, **kwargs)
+ except SystemExit:
+ pass
+ except:
+ import traceback
+ traceback.print_exc()
+ _main = True
+ global _interrupt
+ if _interrupt:
+ _interrupt = False
+ raise KeyboardInterrupt
+
+def exit():
+ """Dummy implementation of _thread.exit()."""
+ raise SystemExit
+
+def get_ident():
+ """Dummy implementation of _thread.get_ident().
+
+    Since this module should only be used when the _thread module is not
+ available, it is safe to assume that the current process is the
+ only thread. Thus a constant can be safely returned.
+ """
+ return -1
+
+def allocate_lock():
+ """Dummy implementation of _thread.allocate_lock()."""
+ return LockType()
+
+def stack_size(size=None):
+ """Dummy implementation of _thread.stack_size()."""
+ if size is not None:
+ raise error("setting thread stack size not supported")
+ return 0
+
+class LockType(object):
+ """Class implementing dummy implementation of _thread.LockType.
+
+ Compatibility is maintained by maintaining self.locked_status
+ which is a boolean that stores the state of the lock. Pickling of
+ the lock, though, should not be done since if the _thread module is
+ then used with an unpickled ``lock()`` from here problems could
+ occur from this class not having atomic methods.
+
+ """
+
+ def __init__(self):
+ self.locked_status = False
+
+ def acquire(self, waitflag=None, timeout=-1):
+ """Dummy implementation of acquire().
+
+ For blocking calls, self.locked_status is automatically set to
+ True and returned appropriately based on value of
+ ``waitflag``. If it is non-blocking, then the value is
+ actually checked and not set if it is already acquired. This
+ is all done so that threading.Condition's assert statements
+ aren't triggered and throw a little fit.
+
+ """
+ if waitflag is None or waitflag:
+ self.locked_status = True
+ return True
+ else:
+ if not self.locked_status:
+ self.locked_status = True
+ return True
+ else:
+ if timeout > 0:
+ import time
+ time.sleep(timeout)
+ return False
+
+ __enter__ = acquire
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ def release(self):
+ """Release the dummy lock."""
+ # XXX Perhaps shouldn't actually bother to test? Could lead
+ # to problems for complex, threaded code.
+ if not self.locked_status:
+ raise error
+ self.locked_status = False
+ return True
+
+ def locked(self):
+ return self.locked_status
+
+# Used to signal that interrupt_main was called in a "thread"
+_interrupt = False
+# True when not executing in a "thread"
+_main = True
+
+def interrupt_main():
+ """Set _interrupt flag to True to have start_new_thread raise
+ KeyboardInterrupt upon exiting."""
+ if _main:
+ raise KeyboardInterrupt
+ else:
+ global _interrupt
+ _interrupt = True
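
As a usage sketch for the shim above (not part of the patch; the import paths are illustrative and assume the module is importable directly): the dummy LockType supports the context-manager protocol, so lock-guarded code runs unchanged in a single-threaded interpreter::

    try:
        from thread import allocate_lock as Lock    # real locks (Python 2 with threads)
    except ImportError:
        from _dummy_thread32 import allocate_lock as Lock  # single-threaded fallback

    lock = Lock()
    with lock:                  # __enter__ is acquire(); always succeeds here
        pass                    # "critical section" -- a no-op without real threads
    assert not lock.locked()    # __exit__ called release()
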
diff --git a/lib/spack/external/py2/functools32/functools32.py b/lib/spack/external/py2/functools32/functools32.py
new file mode 100644
index 0000000000..c44551fac0
--- /dev/null
+++ b/lib/spack/external/py2/functools32/functools32.py
@@ -0,0 +1,423 @@
+"""functools.py - Tools for working with functions and callable objects
+"""
+# Python module wrapper for _functools C module
+# to allow utilities written in Python to be added
+# to the functools module.
+# Written by Nick Coghlan <ncoghlan at gmail.com>
+# and Raymond Hettinger <python at rcn.com>
+# Copyright (C) 2006-2010 Python Software Foundation.
+# See C source code for _functools credits/copyright
+
+__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
+ 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
+
+from _functools import partial, reduce
+from collections import MutableMapping, namedtuple
+from .reprlib32 import recursive_repr as _recursive_repr
+from weakref import proxy as _proxy
+import sys as _sys
+try:
+ from thread import allocate_lock as Lock
+except ImportError:
+ from ._dummy_thread32 import allocate_lock as Lock
+
+################################################################################
+### OrderedDict
+################################################################################
+
+class _Link(object):
+ __slots__ = 'prev', 'next', 'key', '__weakref__'
+
+class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as regular dictionaries.
+
+ # The internal self.__map dict maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
+ # The prev links are weakref proxies (to prevent circular references).
+ # Individual links are kept alive by the hard reference in self.__map.
+ # Those hard references disappear when a key is deleted from an OrderedDict.
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. The signature is the same as
+ regular dictionaries, but keyword arguments are not recommended because
+ their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__hardroot = _Link()
+ self.__root = root = _proxy(self.__hardroot)
+ root.prev = root.next = root
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value,
+ dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link at the end of the linked list,
+ # and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ self.__map[key] = link = Link()
+ root = self.__root
+ last = root.prev
+ link.prev, link.next, link.key = last, root, key
+ last.next = link
+ root.prev = proxy(link)
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which gets
+ # removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link = self.__map.pop(key)
+ link_prev = link.prev
+ link_next = link.next
+ link_prev.next = link_next
+ link_next.prev = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ # Traverse the linked list in order.
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ yield curr.key
+ curr = curr.next
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ # Traverse the linked list in reverse order.
+ root = self.__root
+ curr = root.prev
+ while curr is not root:
+ yield curr.key
+ curr = curr.prev
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ root = self.__root
+ root.prev = root.next = root
+ self.__map.clear()
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root.prev
+ link_prev = link.prev
+ link_prev.next = root
+ root.prev = link_prev
+ else:
+ link = root.next
+ link_next = link.next
+ root.next = link_next
+ link_next.prev = root
+ key = link.key
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ def move_to_end(self, key, last=True):
+ '''Move an existing element to the end (or beginning if last==False).
+
+ Raises KeyError if the element does not exist.
+ When last=True, acts like a fast version of self[key]=self.pop(key).
+
+ '''
+ link = self.__map[key]
+ link_prev = link.prev
+ link_next = link.next
+ link_prev.next = link_next
+ link_next.prev = link_prev
+ root = self.__root
+ if last:
+ last = root.prev
+ link.prev = last
+ link.next = root
+ last.next = root.prev = link
+ else:
+ first = root.next
+ link.prev = root
+ link.next = first
+ root.next = first.prev = link
+
+ def __sizeof__(self):
+ sizeof = _sys.getsizeof
+ n = len(self) + 1 # number of links including root
+ size = sizeof(self.__dict__) # instance dictionary
+ size += sizeof(self.__map) * 2 # internal dict and inherited dict
+ size += sizeof(self.__hardroot) * n # link objects
+ size += sizeof(self.__root) * n # proxy objects
+ return size
+
+ update = __update = MutableMapping.update
+ keys = MutableMapping.keys
+ values = MutableMapping.values
+ items = MutableMapping.items
+ __ne__ = MutableMapping.__ne__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
+ value. If key is not found, d is returned if given, otherwise KeyError
+ is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ @_recursive_repr()
+ def __repr__(self):
+ 'od.__repr__() <==> repr(od)'
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, list(self.items()))
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
+ If not specified, the value defaults to None.
+
+ '''
+ self = cls()
+ for key in iterable:
+ self[key] = value
+ return self
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and \
+ all(p==q for p, q in zip(self.items(), other.items()))
+ return dict.__eq__(self, other)
+
+# update_wrapper() and wraps() are tools to help write
+# wrapper functions that can handle naive introspection
+
+WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
+WRAPPER_UPDATES = ('__dict__',)
+def update_wrapper(wrapper,
+ wrapped,
+ assigned = WRAPPER_ASSIGNMENTS,
+ updated = WRAPPER_UPDATES):
+ """Update a wrapper function to look like the wrapped function
+
+ wrapper is the function to be updated
+ wrapped is the original function
+ assigned is a tuple naming the attributes assigned directly
+ from the wrapped function to the wrapper function (defaults to
+ functools.WRAPPER_ASSIGNMENTS)
+ updated is a tuple naming the attributes of the wrapper that
+ are updated with the corresponding attribute from the wrapped
+ function (defaults to functools.WRAPPER_UPDATES)
+ """
+ wrapper.__wrapped__ = wrapped
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ # Return the wrapper so this can be used as a decorator via partial()
+ return wrapper
+
+def wraps(wrapped,
+ assigned = WRAPPER_ASSIGNMENTS,
+ updated = WRAPPER_UPDATES):
+ """Decorator factory to apply update_wrapper() to a wrapper function
+
+ Returns a decorator that invokes update_wrapper() with the decorated
+ function as the wrapper argument and the arguments to wraps() as the
+ remaining arguments. Default arguments are as for update_wrapper().
+ This is a convenience function to simplify applying partial() to
+ update_wrapper().
+ """
+ return partial(update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+
+def total_ordering(cls):
+ """Class decorator that fills in missing ordering methods"""
+ convert = {
+ '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
+ ('__le__', lambda self, other: self < other or self == other),
+ ('__ge__', lambda self, other: not self < other)],
+ '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
+ ('__lt__', lambda self, other: self <= other and not self == other),
+ ('__gt__', lambda self, other: not self <= other)],
+ '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
+ ('__ge__', lambda self, other: self > other or self == other),
+ ('__le__', lambda self, other: not self > other)],
+ '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
+ ('__gt__', lambda self, other: self >= other and not self == other),
+ ('__lt__', lambda self, other: not self >= other)]
+ }
+ roots = set(dir(cls)) & set(convert)
+ if not roots:
+ raise ValueError('must define at least one ordering operation: < > <= >=')
+ root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
+ for opname, opfunc in convert[root]:
+ if opname not in roots:
+ opfunc.__name__ = opname
+ opfunc.__doc__ = getattr(int, opname).__doc__
+ setattr(cls, opname, opfunc)
+ return cls
+
+def cmp_to_key(mycmp):
+ """Convert a cmp= function into a key= function"""
+ class K(object):
+ __slots__ = ['obj']
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) < 0
+ def __gt__(self, other):
+ return mycmp(self.obj, other.obj) > 0
+ def __eq__(self, other):
+ return mycmp(self.obj, other.obj) == 0
+ def __le__(self, other):
+ return mycmp(self.obj, other.obj) <= 0
+ def __ge__(self, other):
+ return mycmp(self.obj, other.obj) >= 0
+ def __ne__(self, other):
+ return mycmp(self.obj, other.obj) != 0
+ __hash__ = None
+ return K
+
+_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
+
+def lru_cache(maxsize=100):
+ """Least-recently-used cache decorator.
+
+ If *maxsize* is set to None, the LRU features are disabled and the cache
+ can grow without bound.
+
+ Arguments to the cached function must be hashable.
+
+ View the cache statistics named tuple (hits, misses, maxsize, currsize) with
+ f.cache_info(). Clear the cache and statistics with f.cache_clear().
+ Access the underlying function with f.__wrapped__.
+
+ See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
+
+ """
+ # Users should only access the lru_cache through its public API:
+ # cache_info, cache_clear, and f.__wrapped__
+ # The internals of the lru_cache are encapsulated for thread safety and
+ # to allow the implementation to change (including a possible C version).
+
+ def decorating_function(user_function,
+ tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
+
+ hits, misses = [0], [0]
+ kwd_mark = (object(),) # separates positional and keyword args
+ lock = Lock() # needed because OrderedDict isn't threadsafe
+
+ if maxsize is None:
+ cache = dict() # simple cache without ordering or size limit
+
+ @wraps(user_function)
+ def wrapper(*args, **kwds):
+ key = args
+ if kwds:
+ key += kwd_mark + tuple(sorted(kwds.items()))
+ try:
+ result = cache[key]
+ hits[0] += 1
+ return result
+ except KeyError:
+ pass
+ result = user_function(*args, **kwds)
+ cache[key] = result
+ misses[0] += 1
+ return result
+ else:
+ cache = OrderedDict() # ordered least recent to most recent
+ cache_popitem = cache.popitem
+ cache_renew = cache.move_to_end
+
+ @wraps(user_function)
+ def wrapper(*args, **kwds):
+ key = args
+ if kwds:
+ key += kwd_mark + tuple(sorted(kwds.items()))
+ with lock:
+ try:
+ result = cache[key]
+ cache_renew(key) # record recent use of this key
+ hits[0] += 1
+ return result
+ except KeyError:
+ pass
+ result = user_function(*args, **kwds)
+ with lock:
+ cache[key] = result # record recent use of this key
+ misses[0] += 1
+ if len(cache) > maxsize:
+ cache_popitem(0) # purge least recently used cache entry
+ return result
+
+ def cache_info():
+ """Report cache statistics"""
+ with lock:
+ return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
+
+ def cache_clear():
+ """Clear the cache and cache statistics"""
+ with lock:
+ cache.clear()
+ hits[0] = misses[0] = 0
+
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return wrapper
+
+ return decorating_function
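
A brief usage sketch of the backport above, assuming Python 2 with the vendored directory on sys.path (the import path is illustrative)::

    from functools32 import lru_cache, cmp_to_key

    @lru_cache(maxsize=2)
    def square(x):
        return x * x

    square(2); square(2); square(3)
    print(square.cache_info())   # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
    square.cache_clear()         # resets both the cache and its statistics

    # cmp_to_key wraps an old-style three-way cmp function for key= APIs
    by_length = cmp_to_key(lambda a, b: len(a) - len(b))
    print(sorted(['ccc', 'a', 'bb'], key=by_length))   # prints ['a', 'bb', 'ccc']
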
diff --git a/lib/spack/external/py2/functools32/reprlib32.py b/lib/spack/external/py2/functools32/reprlib32.py
new file mode 100644
index 0000000000..af919758ca
--- /dev/null
+++ b/lib/spack/external/py2/functools32/reprlib32.py
@@ -0,0 +1,157 @@
+"""Redo the builtin repr() (representation) but with limits on most sizes."""
+
+__all__ = ["Repr", "repr", "recursive_repr"]
+
+import __builtin__ as builtins
+from itertools import islice
+try:
+ from thread import get_ident
+except ImportError:
+ from _dummy_thread32 import get_ident
+
+def recursive_repr(fillvalue='...'):
+ 'Decorator to make a repr function return fillvalue for a recursive call'
+
+ def decorating_function(user_function):
+ repr_running = set()
+
+ def wrapper(self):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ result = user_function(self)
+ finally:
+ repr_running.discard(key)
+ return result
+
+ # Can't use functools.wraps() here because of bootstrap issues
+ wrapper.__module__ = getattr(user_function, '__module__')
+ wrapper.__doc__ = getattr(user_function, '__doc__')
+ wrapper.__name__ = getattr(user_function, '__name__')
+ wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+ return wrapper
+
+ return decorating_function
+
+class Repr:
+
+ def __init__(self):
+ self.maxlevel = 6
+ self.maxtuple = 6
+ self.maxlist = 6
+ self.maxarray = 5
+ self.maxdict = 4
+ self.maxset = 6
+ self.maxfrozenset = 6
+ self.maxdeque = 6
+ self.maxstring = 30
+ self.maxlong = 40
+ self.maxother = 30
+
+ def repr(self, x):
+ return self.repr1(x, self.maxlevel)
+
+ def repr1(self, x, level):
+ typename = type(x).__name__
+ if ' ' in typename:
+ parts = typename.split()
+ typename = '_'.join(parts)
+ if hasattr(self, 'repr_' + typename):
+ return getattr(self, 'repr_' + typename)(x, level)
+ else:
+ return self.repr_instance(x, level)
+
+ def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
+ n = len(x)
+ if level <= 0 and n:
+ s = '...'
+ else:
+ newlevel = level - 1
+ repr1 = self.repr1
+ pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
+ if n > maxiter: pieces.append('...')
+ s = ', '.join(pieces)
+ if n == 1 and trail: right = trail + right
+ return '%s%s%s' % (left, s, right)
+
+ def repr_tuple(self, x, level):
+ return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
+
+ def repr_list(self, x, level):
+ return self._repr_iterable(x, level, '[', ']', self.maxlist)
+
+ def repr_array(self, x, level):
+ header = "array('%s', [" % x.typecode
+ return self._repr_iterable(x, level, header, '])', self.maxarray)
+
+ def repr_set(self, x, level):
+ x = _possibly_sorted(x)
+ return self._repr_iterable(x, level, 'set([', '])', self.maxset)
+
+ def repr_frozenset(self, x, level):
+ x = _possibly_sorted(x)
+ return self._repr_iterable(x, level, 'frozenset([', '])',
+ self.maxfrozenset)
+
+ def repr_deque(self, x, level):
+ return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
+
+ def repr_dict(self, x, level):
+ n = len(x)
+ if n == 0: return '{}'
+ if level <= 0: return '{...}'
+ newlevel = level - 1
+ repr1 = self.repr1
+ pieces = []
+ for key in islice(_possibly_sorted(x), self.maxdict):
+ keyrepr = repr1(key, newlevel)
+ valrepr = repr1(x[key], newlevel)
+ pieces.append('%s: %s' % (keyrepr, valrepr))
+ if n > self.maxdict: pieces.append('...')
+ s = ', '.join(pieces)
+ return '{%s}' % (s,)
+
+ def repr_str(self, x, level):
+ s = builtins.repr(x[:self.maxstring])
+ if len(s) > self.maxstring:
+ i = max(0, (self.maxstring-3)//2)
+ j = max(0, self.maxstring-3-i)
+ s = builtins.repr(x[:i] + x[len(x)-j:])
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_int(self, x, level):
+ s = builtins.repr(x) # XXX Hope this isn't too slow...
+ if len(s) > self.maxlong:
+ i = max(0, (self.maxlong-3)//2)
+ j = max(0, self.maxlong-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+ def repr_instance(self, x, level):
+ try:
+ s = builtins.repr(x)
+ # Bugs in x.__repr__() can cause arbitrary
+ # exceptions -- then make up something
+ except Exception:
+ return '<%s instance at %x>' % (x.__class__.__name__, id(x))
+ if len(s) > self.maxother:
+ i = max(0, (self.maxother-3)//2)
+ j = max(0, self.maxother-3-i)
+ s = s[:i] + '...' + s[len(s)-j:]
+ return s
+
+
+def _possibly_sorted(x):
+ # Since not all sequences of items can be sorted and comparison
+ # functions may raise arbitrary exceptions, return an unsorted
+ # sequence in that case.
+ try:
+ return sorted(x)
+ except Exception:
+ return list(x)
+
+aRepr = Repr()
+repr = aRepr.repr
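
A usage sketch for the module above (again assuming Python 2 and an illustrative import path): Repr truncates long containers, and recursive_repr() guards self-referential __repr__ implementations::

    from functools32.reprlib32 import Repr, recursive_repr

    short = Repr()
    short.maxlist = 3
    print(short.repr(list(range(100))))    # prints [0, 1, 2, ...]

    class Node(object):
        def __init__(self):
            self.child = self              # deliberately self-referential

        @recursive_repr()
        def __repr__(self):
            return 'Node(%r)' % (self.child,)

    print(repr(Node()))                    # prints Node(...), not infinite recursion
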
diff --git a/lib/spack/external/py26/ordereddict.py b/lib/spack/external/py26/ordereddict.py
deleted file mode 100644
index 7242b5060d..0000000000
--- a/lib/spack/external/py26/ordereddict.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-class OrderedDict(dict, DictMixin):
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__end
- except AttributeError:
- self.clear()
- self.update(*args, **kwds)
-
- def clear(self):
- self.__end = end = []
- end += [None, end, end] # sentinel node for doubly linked list
- self.__map = {} # key --> [key, prev, next]
- dict.clear(self)
-
- def __setitem__(self, key, value):
- if key not in self:
- end = self.__end
- curr = end[1]
- curr[2] = end[1] = self.__map[key] = [key, curr, end]
- dict.__setitem__(self, key, value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- key, prev, next = self.__map.pop(key)
- prev[2] = next
- next[1] = prev
-
- def __iter__(self):
- end = self.__end
- curr = end[2]
- while curr is not end:
- yield curr[0]
- curr = curr[2]
-
- def __reversed__(self):
- end = self.__end
- curr = end[1]
- while curr is not end:
- yield curr[0]
- curr = curr[1]
-
- def popitem(self, last=True):
- if not self:
- raise KeyError('dictionary is empty')
- if last:
- key = reversed(self).next()
- else:
- key = iter(self).next()
- value = self.pop(key)
- return key, value
-
- def __reduce__(self):
- items = [[k, self[k]] for k in self]
- tmp = self.__map, self.__end
- del self.__map, self.__end
- inst_dict = vars(self).copy()
- self.__map, self.__end = tmp
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def keys(self):
- return list(self)
-
- setdefault = DictMixin.setdefault
- update = DictMixin.update
- pop = DictMixin.pop
- values = DictMixin.values
- items = DictMixin.items
- iterkeys = DictMixin.iterkeys
- itervalues = DictMixin.itervalues
- iteritems = DictMixin.iteritems
-
- def __repr__(self):
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
-
- def copy(self):
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- if isinstance(other, OrderedDict):
- if len(self) != len(other):
- return False
- for p, q in zip(self.items(), other.items()):
- if p != q:
- return False
- return True
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
diff --git a/lib/spack/external/pyrsistent/LICENSE b/lib/spack/external/pyrsistent/LICENSE
new file mode 100644
index 0000000000..6609e4c05a
--- /dev/null
+++ b/lib/spack/external/pyrsistent/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2019 Tobias Gustafsson
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/lib/spack/external/pyrsistent/__init__.py b/lib/spack/external/pyrsistent/__init__.py
new file mode 100644
index 0000000000..6e610c1ddb
--- /dev/null
+++ b/lib/spack/external/pyrsistent/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+
+from pyrsistent._pmap import pmap
+
+
+__all__ = ('pmap',)
diff --git a/lib/spack/external/pyrsistent/_compat.py b/lib/spack/external/pyrsistent/_compat.py
new file mode 100644
index 0000000000..e728586afe
--- /dev/null
+++ b/lib/spack/external/pyrsistent/_compat.py
@@ -0,0 +1,31 @@
+from six import string_types
+
+
+# enum compat
+try:
+ from enum import Enum
+except ImportError:
+ class Enum(object): pass
+ # no objects will be instances of this class
+
+# collections compat
+try:
+ from collections.abc import (
+ Container,
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ Set,
+ Sized,
+ )
+except ImportError:
+ from collections import (
+ Container,
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ Set,
+ Sized,
+ )
diff --git a/lib/spack/external/pyrsistent/_pmap.py b/lib/spack/external/pyrsistent/_pmap.py
new file mode 100644
index 0000000000..e8a0ec53f8
--- /dev/null
+++ b/lib/spack/external/pyrsistent/_pmap.py
@@ -0,0 +1,460 @@
+from ._compat import Mapping, Hashable
+from itertools import chain
+import six
+from pyrsistent._pvector import pvector
+from pyrsistent._transformations import transform
+
+
+class PMap(object):
+ """
+    Persistent map/dict. Tries to follow the same naming conventions as the built-in dict where feasible.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
+ create an instance.
+
+    Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more
+    closely resemble the Python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
+    hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of
+    the containing vector's size, the map is reallocated to a vector of double the size. This is done to avoid
+ excessive hash collisions.
+
+    This structure corresponds most closely to the built-in dict type and is intended as a replacement. Where the
+    semantics are (more or less) the same, the same function names have been used, but for some cases that is not
+    possible, for example assignment and deletion of values.
+
+ PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
+ element access.
+
+    Random access and insert are log32(n) where n is the size of the map.
+
+ The following are examples of some common operations on persistent maps
+
+ >>> m1 = m(a=1, b=3)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.remove('a')
+ >>> m1
+ pmap({'b': 3, 'a': 1})
+ >>> m2
+ pmap({'c': 3, 'b': 3, 'a': 1})
+ >>> m3
+ pmap({'c': 3, 'b': 3})
+ >>> m3['c']
+ 3
+ >>> m3.c
+ 3
+ """
+ __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')
+
+ def __new__(cls, size, buckets):
+ self = super(PMap, cls).__new__(cls)
+ self._size = size
+ self._buckets = buckets
+ return self
+
+ @staticmethod
+ def _get_bucket(buckets, key):
+ index = hash(key) % len(buckets)
+ bucket = buckets[index]
+ return index, bucket
+
+ @staticmethod
+ def _getitem(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ return v
+
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets, key)
+
+ @staticmethod
+ def _contains(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+ if bucket:
+ for k, _ in bucket:
+ if k == key:
+ return True
+
+ return False
+
+ return False
+
+ def __contains__(self, key):
+ return self._contains(self._buckets, key)
+
+ get = Mapping.get
+
+ def __iter__(self):
+ return self.iterkeys()
+
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError:
+ raise AttributeError(
+ "{0} has no attribute '{1}'".format(type(self).__name__, key)
+ )
+
+ def iterkeys(self):
+ for k, _ in self.iteritems():
+ yield k
+
+    # These are more efficient implementations compared to the original
+    # methods, which iterate over the keys and then call the accessor
+    # functions to look up the value for each corresponding key
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+
+ def iteritems(self):
+ for bucket in self._buckets:
+ if bucket:
+ for k, v in bucket:
+ yield k, v
+
+ def values(self):
+ return pvector(self.itervalues())
+
+ def keys(self):
+ return pvector(self.iterkeys())
+
+ def items(self):
+ return pvector(self.iteritems())
+
+ def __len__(self):
+ return self._size
+
+ def __repr__(self):
+ return 'pmap({0})'.format(str(dict(self)))
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ if len(self) != len(other):
+ return False
+ if isinstance(other, PMap):
+ if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
+ and self._cached_hash != other._cached_hash):
+ return False
+ if self._buckets == other._buckets:
+ return True
+ return dict(self.iteritems()) == dict(other.iteritems())
+ elif isinstance(other, dict):
+ return dict(self.iteritems()) == other
+ return dict(self.iteritems()) == dict(six.iteritems(other))
+
+ __ne__ = Mapping.__ne__
+
+ def __lt__(self, other):
+ raise TypeError('PMaps are not orderable')
+
+ __le__ = __lt__
+ __gt__ = __lt__
+ __ge__ = __lt__
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __hash__(self):
+ if not hasattr(self, '_cached_hash'):
+ self._cached_hash = hash(frozenset(self.iteritems()))
+ return self._cached_hash
+
+ def set(self, key, val):
+ """
+ Return a new PMap with key and val inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('a', 3)
+        >>> m3 = m1.set('c', 4)
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+ >>> m2
+ pmap({'b': 2, 'a': 3})
+ >>> m3
+ pmap({'c': 4, 'b': 2, 'a': 1})
+ """
+ return self.evolver().set(key, val).persistent()
+
+ def remove(self, key):
+ """
+ Return a new PMap without the element specified by key. Raises KeyError if the element
+ is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.remove('a')
+ pmap({'b': 2})
+ """
+ return self.evolver().remove(key).persistent()
+
+ def discard(self, key):
+ """
+ Return a new PMap without the element specified by key. Returns reference to itself
+ if element is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.discard('a')
+ pmap({'b': 2})
+ >>> m1 is m1.discard('c')
+ True
+ """
+ try:
+ return self.remove(key)
+ except KeyError:
+ return self
+
+ def update(self, *maps):
+ """
+ Return a new PMap with the items in Mappings inserted. If the same key is present in multiple
+ maps the rightmost (last) value is inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'c': 3, 'b': 2, 'a': 17, 'd': 35})
+ """
+ return self.update_with(lambda l, r: r, *maps)
+
+ def update_with(self, update_fn, *maps):
+ """
+        Return a new PMap with the items in the given Mappings inserted. If the same key is present in multiple
+        maps the values will be merged using update_fn going from left to right.
+
+ >>> from operator import add
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update_with(add, m(a=2))
+ pmap({'b': 2, 'a': 3})
+
+ The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.
+
+ >>> m1 = m(a=1)
+ >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
+ pmap({'a': 1})
+ """
+ evolver = self.evolver()
+ for map in maps:
+ for key, value in map.items():
+ evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)
+
+ return evolver.persistent()
+
+ def __add__(self, other):
+ return self.update(other)
+
+ def __reduce__(self):
+ # Pickling support
+ return pmap, (dict(self),)
+
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts. One match expression that specifies which elements to transform
+ and one transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed the original data structure is kept
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+ return transform(self, transformations)
+
+ def copy(self):
+ return self
+
+ class _Evolver(object):
+ __slots__ = ('_buckets_evolver', '_size', '_original_pmap')
+
+ def __init__(self, original_pmap):
+ self._original_pmap = original_pmap
+ self._buckets_evolver = original_pmap._buckets.evolver()
+ self._size = original_pmap._size
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets_evolver, key)
+
+ def __setitem__(self, key, val):
+ self.set(key, val)
+
+ def set(self, key, val):
+ if len(self._buckets_evolver) < 0.67 * self._size:
+ self._reallocate(2 * len(self._buckets_evolver))
+
+ kv = (key, val)
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ if v is not val:
+ new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
+ self._buckets_evolver[index] = new_bucket
+
+ return self
+
+ new_bucket = [kv]
+ new_bucket.extend(bucket)
+ self._buckets_evolver[index] = new_bucket
+ self._size += 1
+ else:
+ self._buckets_evolver[index] = [kv]
+ self._size += 1
+
+ return self
+
+ def _reallocate(self, new_size):
+ new_list = new_size * [None]
+ buckets = self._buckets_evolver.persistent()
+ for k, v in chain.from_iterable(x for x in buckets if x):
+ index = hash(k) % new_size
+ if new_list[index]:
+ new_list[index].append((k, v))
+ else:
+ new_list[index] = [(k, v)]
+
+ # A reallocation should always result in a dirty buckets evolver to avoid
+ # possible loss of elements when doing the reallocation.
+ self._buckets_evolver = pvector().evolver()
+ self._buckets_evolver.extend(new_list)
+
+ def is_dirty(self):
+ return self._buckets_evolver.is_dirty()
+
+ def persistent(self):
+ if self.is_dirty():
+ self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())
+
+ return self._original_pmap
+
+ def __len__(self):
+ return self._size
+
+ def __contains__(self, key):
+ return PMap._contains(self._buckets_evolver, key)
+
+ def __delitem__(self, key):
+ self.remove(key)
+
+ def remove(self, key):
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+
+ if bucket:
+ new_bucket = [(k, v) for (k, v) in bucket if k != key]
+ if len(bucket) > len(new_bucket):
+ self._buckets_evolver[index] = new_bucket if new_bucket else None
+ self._size -= 1
+ return self
+
+ raise KeyError('{0}'.format(key))
+
+ def evolver(self):
+ """
+ Create a new evolver for this pmap. For a discussion on evolvers in general see the
+ documentation for the pvector evolver.
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> m1 = m(a=1, b=2)
+ >>> e = m1.evolver()
+ >>> e['c'] = 3
+ >>> len(e)
+ 3
+ >>> del e['a']
+
+ The underlying pmap remains the same:
+
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+
+ The changes are kept in the evolver. An updated pmap can be created using the
+ persistent() function on the evolver.
+
+ >>> m2 = e.persistent()
+ >>> m2
+ pmap({'c': 3, 'b': 2})
+
+ The new pmap will share data with the original pmap in the same way that would have
+ been done if only using operations on the pmap.
+ """
+ return self._Evolver(self)
+
+Mapping.register(PMap)
+Hashable.register(PMap)
+
+
+def _turbo_mapping(initial, pre_size):
+ if pre_size:
+ size = pre_size
+ else:
+ try:
+ size = 2 * len(initial) or 8
+ except Exception:
+ # Guess we can't figure out the length. Give up on length hinting,
+ # we can always reallocate later.
+ size = 8
+
+ buckets = size * [None]
+
+ if not isinstance(initial, Mapping):
+ # Make a dictionary of the initial data if it isn't already,
+        # that will save us some work further down since we can assume no
+ # key collisions
+ initial = dict(initial)
+
+ for k, v in six.iteritems(initial):
+ h = hash(k)
+ index = h % size
+ bucket = buckets[index]
+
+ if bucket:
+ bucket.append((k, v))
+ else:
+ buckets[index] = [(k, v)]
+
+ return PMap(len(initial), pvector().extend(buckets))
+
+
+_EMPTY_PMAP = _turbo_mapping({}, 0)
+
+
+def pmap(initial={}, pre_size=0):
+ """
+    Create a new persistent map, inserting all elements from initial into the newly created map.
+ The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
+ may have a positive performance impact in the cases where you know beforehand that a large number of elements
+ will be inserted into the map eventually since it will reduce the number of reallocations required.
+
+ >>> pmap({'a': 13, 'b': 14})
+ pmap({'b': 14, 'a': 13})
+ """
+ if not initial:
+ return _EMPTY_PMAP
+
+ return _turbo_mapping(initial, pre_size)
+
+
+def m(**kwargs):
+ """
+    Creates a new persistent map. Inserts all key/value arguments into the newly created map.
+
+ >>> m(a=13, b=14)
+ pmap({'b': 14, 'a': 13})
+ """
+ return pmap(kwargs)
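
To illustrate the persistence semantics documented above (a sketch, assuming the vendored pyrsistent package is importable): set() and remove() return new maps, while an evolver batches several updates before producing a new persistent map::

    from pyrsistent import pmap

    m1 = pmap({'a': 1})
    m2 = m1.set('b', 2)            # a new map; m1 is untouched
    assert m1 == {'a': 1} and m2 == {'a': 1, 'b': 2}

    e = m2.evolver()               # mutable view for batched updates
    e['c'] = 3
    del e['a']
    m3 = e.persistent()
    assert m3 == {'b': 2, 'c': 3}
    assert m2 == {'a': 1, 'b': 2}  # the original is still intact
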
diff --git a/lib/spack/external/pyrsistent/_pvector.py b/lib/spack/external/pyrsistent/_pvector.py
new file mode 100644
index 0000000000..82232782b7
--- /dev/null
+++ b/lib/spack/external/pyrsistent/_pvector.py
@@ -0,0 +1,713 @@
+from abc import abstractmethod, ABCMeta
+from ._compat import Sequence, Hashable
+from numbers import Integral
+import operator
+import six
+from pyrsistent._transformations import transform
+
+
+def _bitcount(val):
+ return bin(val).count("1")
+
+BRANCH_FACTOR = 32
+BIT_MASK = BRANCH_FACTOR - 1
+SHIFT = _bitcount(BIT_MASK)
+
+
+def compare_pvector(v, other, operator):
+ return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
+
+
+def _index_or_slice(index, stop):
+ if stop is None:
+ return index
+
+ return slice(index, stop)
+
+
+class PythonPVector(object):
+ """
+ Support structure for PVector that implements structural sharing for vectors using a trie.
+ """
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
+
+ def __new__(cls, count, shift, root, tail):
+ self = super(PythonPVector, cls).__new__(cls)
+ self._count = count
+ self._shift = shift
+ self._root = root
+ self._tail = tail
+
+ # Derived attribute stored for performance
+ self._tail_offset = self._count - len(self._tail)
+ return self
+
+ def __len__(self):
+ return self._count
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ # There are more conditions than the below where it would be OK to
+ # return ourselves, implement those...
+ if index.start is None and index.stop is None and index.step is None:
+ return self
+
+ # This is a bit nasty realizing the whole structure as a list before
+ # slicing it but it is the fastest way I've found to date, and it's easy :-)
+ return _EMPTY_PVECTOR.extend(self.tolist()[index])
+
+ if index < 0:
+ index += self._count
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def __add__(self, other):
+ return self.extend(other)
+
+ def __repr__(self):
+ return 'pvector({0})'.format(str(self.tolist()))
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __iter__(self):
+        # This is kind of lazy and will produce some memory overhead but it is the fastest method
+        # by far of those tried since it uses the speed of the built-in Python list directly.
+ return iter(self.tolist())
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
+
+ def __gt__(self, other):
+ return compare_pvector(self, other, operator.gt)
+
+ def __lt__(self, other):
+ return compare_pvector(self, other, operator.lt)
+
+ def __ge__(self, other):
+ return compare_pvector(self, other, operator.ge)
+
+ def __le__(self, other):
+ return compare_pvector(self, other, operator.le)
+
+ def __mul__(self, times):
+ if times <= 0 or self is _EMPTY_PVECTOR:
+ return _EMPTY_PVECTOR
+
+ if times == 1:
+ return self
+
+ return _EMPTY_PVECTOR.extend(times * self.tolist())
+
+ __rmul__ = __mul__
+
+ def _fill_list(self, node, shift, the_list):
+ if shift:
+ shift -= SHIFT
+ for n in node:
+ self._fill_list(n, shift, the_list)
+ else:
+ the_list.extend(node)
+
+ def tolist(self):
+ """
+ The fastest way to convert the vector into a python list.
+ """
+ the_list = []
+ self._fill_list(self._root, self._shift, the_list)
+ the_list.extend(self._tail)
+ return the_list
+
+ def _totuple(self):
+ """
+ Returns the content as a python tuple.
+ """
+ return tuple(self.tolist())
+
+ def __hash__(self):
+ # Taking the easy way out again...
+ return hash(self._totuple())
+
+ def transform(self, *transformations):
+ return transform(self, transformations)
+
+ def __reduce__(self):
+ # Pickling support
+ return pvector, (self.tolist(),)
+
+ def mset(self, *args):
+ if len(args) % 2:
+ raise TypeError("mset expected an even number of arguments")
+
+ evolver = self.evolver()
+ for i in range(0, len(args), 2):
+ evolver[args[i]] = args[i+1]
+
+ return evolver.persistent()
+
+ class Evolver(object):
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
+ '_extra_tail', '_cached_leafs', '_orig_pvector')
+
+ def __init__(self, v):
+ self._reset(v)
+
+ def __getitem__(self, index):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if self._count <= index < self._count + len(self._extra_tail):
+ return self._extra_tail[index - self._count]
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def _reset(self, v):
+ self._count = v._count
+ self._shift = v._shift
+ self._root = v._root
+ self._tail = v._tail
+ self._tail_offset = v._tail_offset
+ self._dirty_nodes = {}
+ self._cached_leafs = {}
+ self._extra_tail = []
+ self._orig_pvector = v
+
+ def append(self, element):
+ self._extra_tail.append(element)
+ return self
+
+ def extend(self, iterable):
+ self._extra_tail.extend(iterable)
+ return self
+
+ def set(self, index, val):
+ self[index] = val
+ return self
+
+ def __setitem__(self, index, val):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if 0 <= index < self._count:
+ node = self._cached_leafs.get(index >> SHIFT)
+ if node:
+ node[index & BIT_MASK] = val
+ elif index >= self._tail_offset:
+ if id(self._tail) not in self._dirty_nodes:
+ self._tail = list(self._tail)
+ self._dirty_nodes[id(self._tail)] = True
+ self._cached_leafs[index >> SHIFT] = self._tail
+ self._tail[index & BIT_MASK] = val
+ else:
+ self._root = self._do_set(self._shift, self._root, index, val)
+ elif self._count <= index < self._count + len(self._extra_tail):
+ self._extra_tail[index - self._count] = val
+ elif index == self._count + len(self._extra_tail):
+ self._extra_tail.append(val)
+ else:
+ raise IndexError("Index out of range: %s" % (index,))
+
+ def _do_set(self, level, node, i, val):
+ if id(node) in self._dirty_nodes:
+ ret = node
+ else:
+ ret = list(node)
+ self._dirty_nodes[id(ret)] = True
+
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ self._cached_leafs[i >> SHIFT] = ret
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
+
+ def delete(self, index):
+ del self[index]
+ return self
+
+ def __delitem__(self, key):
+ if self._orig_pvector:
+ # All structural sharing bets are off, base evolver on _extra_tail only
+ l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
+ l.extend(self._extra_tail)
+ self._reset(_EMPTY_PVECTOR)
+ self._extra_tail = l
+
+ del self._extra_tail[key]
+
+ def persistent(self):
+ result = self._orig_pvector
+ if self.is_dirty():
+ result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
+ self._reset(result)
+
+ return result
+
+ def __len__(self):
+ return self._count + len(self._extra_tail)
+
+ def is_dirty(self):
+ return bool(self._dirty_nodes or self._extra_tail)
+
+ def evolver(self):
+ return PythonPVector.Evolver(self)
+
+ def set(self, i, val):
+ # This method could be implemented by a call to mset() but doing so would cause
+ # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
+ # of PVector) so we're keeping this implementation for now.
+
+ if not isinstance(i, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
+
+ if i < 0:
+ i += self._count
+
+ if 0 <= i < self._count:
+ if i >= self._tail_offset:
+ new_tail = list(self._tail)
+ new_tail[i & BIT_MASK] = val
+ return PythonPVector(self._count, self._shift, self._root, new_tail)
+
+ return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
+
+ if i == self._count:
+ return self.append(val)
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _do_set(self, level, node, i, val):
+ ret = list(node)
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
+
+ @staticmethod
+ def _node_for(pvector_like, i):
+ if 0 <= i < pvector_like._count:
+ if i >= pvector_like._tail_offset:
+ return pvector_like._tail
+
+ node = pvector_like._root
+ for level in range(pvector_like._shift, 0, -SHIFT):
+ node = node[(i >> level) & BIT_MASK] # >>>
+
+ return node
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _create_new_root(self):
+ new_shift = self._shift
+
+ # Overflow root?
+ if (self._count >> SHIFT) > (1 << self._shift): # >>>
+ new_root = [self._root, self._new_path(self._shift, self._tail)]
+ new_shift += SHIFT
+ else:
+ new_root = self._push_tail(self._shift, self._root, self._tail)
+
+ return new_root, new_shift
+
+ def append(self, val):
+ if len(self._tail) < BRANCH_FACTOR:
+ new_tail = list(self._tail)
+ new_tail.append(val)
+ return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
+
+ # Full tail, push into tree
+ new_root, new_shift = self._create_new_root()
+ return PythonPVector(self._count + 1, new_shift, new_root, [val])
+
+ def _new_path(self, level, node):
+ if level == 0:
+ return node
+
+ return [self._new_path(level - SHIFT, node)]
+
+ def _mutating_insert_tail(self):
+ self._root, self._shift = self._create_new_root()
+ self._tail = []
+
+ def _mutating_fill_tail(self, offset, sequence):
+ max_delta_len = BRANCH_FACTOR - len(self._tail)
+ delta = sequence[offset:offset + max_delta_len]
+ self._tail.extend(delta)
+ delta_len = len(delta)
+ self._count += delta_len
+ return offset + delta_len
+
+ def _mutating_extend(self, sequence):
+ offset = 0
+ sequence_len = len(sequence)
+ while offset < sequence_len:
+ offset = self._mutating_fill_tail(offset, sequence)
+ if len(self._tail) == BRANCH_FACTOR:
+ self._mutating_insert_tail()
+
+ self._tail_offset = self._count - len(self._tail)
+
+ def extend(self, obj):
+        # Mutates the new vector directly for efficiency, but that's only an
+        # implementation detail; once it is returned it should be considered immutable
+ l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
+ if l:
+ new_vector = self.append(l[0])
+ new_vector._mutating_extend(l[1:])
+ return new_vector
+
+ return self
+
+ def _push_tail(self, level, parent, tail_node):
+ """
+ if parent is leaf, insert node,
+ else does it map to an existing child? ->
+ node_to_insert = push node one more level
+ else alloc new path
+
+ return node_to_insert placed in copy of parent
+ """
+ ret = list(parent)
+
+ if level == SHIFT:
+ ret.append(tail_node)
+ return ret
+
+ sub_index = ((self._count - 1) >> level) & BIT_MASK # >>>
+ if len(parent) > sub_index:
+ ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
+ return ret
+
+ ret.append(self._new_path(level - SHIFT, tail_node))
+ return ret
+
+ def index(self, value, *args, **kwargs):
+ return self.tolist().index(value, *args, **kwargs)
+
+ def count(self, value):
+ return self.tolist().count(value)
+
+ def delete(self, index, stop=None):
+ l = self.tolist()
+ del l[_index_or_slice(index, stop)]
+ return _EMPTY_PVECTOR.extend(l)
+
+ def remove(self, value):
+ l = self.tolist()
+ l.remove(value)
+ return _EMPTY_PVECTOR.extend(l)
+
+@six.add_metaclass(ABCMeta)
+class PVector(object):
+ """
+ Persistent vector implementation. Meant as a replacement for the cases where you would normally
+ use a Python list.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
+ create an instance.
+
+ Heavily influenced by the persistent vector available in Clojure. Initially this was more or
+ less just a port of the Java code for the Clojure vector. It has since been modified and, to
+ some extent, optimized for use in Python.
+
+ The vector is organized as a trie; any mutating method will return a new vector that contains
+ the changes. No updates are done to the original vector. Structural sharing between vectors is
+ applied where possible to save space and to avoid making complete copies.
+
+ This structure corresponds most closely to the built-in list type and is intended as a
+ replacement. Where the semantics are (more or less) the same, the same function names have
+ been used, but in some cases that is not possible, for example assignment.
+
+ The PVector implements the Sequence protocol and is Hashable.
+
+ Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector.
+
+ The following are examples of some common operations on persistent vectors:
+
+ >>> p = v(1, 2, 3)
+ >>> p2 = p.append(4)
+ >>> p3 = p2.extend([5, 6, 7])
+ >>> p
+ pvector([1, 2, 3])
+ >>> p2
+ pvector([1, 2, 3, 4])
+ >>> p3
+ pvector([1, 2, 3, 4, 5, 6, 7])
+ >>> p3[5]
+ 6
+ >>> p.set(1, 99)
+ pvector([1, 99, 3])
+ >>>
+ """
+
+ @abstractmethod
+ def __len__(self):
+ """
+ >>> len(v(1, 2, 3))
+ 3
+ """
+
+ @abstractmethod
+ def __getitem__(self, index):
+ """
+ Get value at index. Full slicing support.
+
+ >>> v1 = v(5, 6, 7, 8)
+ >>> v1[2]
+ 7
+ >>> v1[1:3]
+ pvector([6, 7])
+ """
+
+ @abstractmethod
+ def __add__(self, other):
+ """
+ >>> v1 = v(1, 2)
+ >>> v2 = v(3, 4)
+ >>> v1 + v2
+ pvector([1, 2, 3, 4])
+ """
+
+ @abstractmethod
+ def __mul__(self, times):
+ """
+ >>> v1 = v(1, 2)
+ >>> 3 * v1
+ pvector([1, 2, 1, 2, 1, 2])
+ """
+
+ @abstractmethod
+ def __hash__(self):
+ """
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v(1, 2, 3)
+ >>> hash(v1) == hash(v2)
+ True
+ """
+
+ @abstractmethod
+ def evolver(self):
+ """
+ Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
+ with "transaction-like" semantics. No part of the underlying vector is updated; it is still
+ fully immutable. Furthermore, multiple evolvers created from the same pvector do not
+ interfere with each other.
+
+ You may want to use an evolver instead of working directly with the pvector in the
+ following cases:
+
+ * Multiple updates are done to the same vector and the intermediate results are of no
+ interest. In this case an evolver may be more efficient and easier to work with.
+ * You need to pass a vector into a legacy function, or a function that you have no control
+ over, which performs in-place mutations of lists. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+ The following example illustrates a typical workflow when working with evolvers. It also
+ displays most of the API (which is kept small by design; you should not be tempted to
+ use evolvers in excess ;-)).
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> _ = e.append(6)
+ >>> _ = e.extend([7, 8, 9])
+ >>> e[8] += 1
+ >>> len(e)
+ 9
+
+ The underlying pvector remains the same:
+
+ >>> v1
+ pvector([1, 2, 3, 4, 5])
+
+ The changes are kept in the evolver. An updated pvector can be created using the
+ persistent() function on the evolver.
+
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
+
+ The new pvector will share data with the original pvector in the same way it would have
+ if only operations on the pvector had been used.
+ """
+
+ @abstractmethod
+ def mset(self, *args):
+ """
+ Return a new vector with elements in specified positions replaced by values (multi set).
+
+ Elements on even positions in the argument list are interpreted as indexes while
+ elements on odd positions are considered values.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.mset(0, 11, 2, 33)
+ pvector([11, 2, 33])
+ """
+
+ @abstractmethod
+ def set(self, i, val):
+ """
+ Return a new vector with element at position i replaced with val. The original vector remains unchanged.
+
+ Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
+ result in an IndexError.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.set(1, 4)
+ pvector([1, 4, 3])
+ >>> v1.set(3, 4)
+ pvector([1, 2, 3, 4])
+ >>> v1.set(-1, 4)
+ pvector([1, 2, 4])
+ """
+
+ @abstractmethod
+ def append(self, val):
+ """
+ Return a new vector with val appended.
+
+ >>> v1 = v(1, 2)
+ >>> v1.append(3)
+ pvector([1, 2, 3])
+ """
+
+ @abstractmethod
+ def extend(self, obj):
+ """
+ Return a new vector with all values in obj appended to it. Obj may be another
+ PVector or any other Iterable.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.extend([4, 5])
+ pvector([1, 2, 3, 4, 5])
+ """
+
+ @abstractmethod
+ def index(self, value, *args, **kwargs):
+ """
+ Return the first index of value. Additional indexes may be supplied to limit the search
+ to a sub-range of the vector.
+
+ >>> v1 = v(1, 2, 3, 4, 3)
+ >>> v1.index(3)
+ 2
+ >>> v1.index(3, 3, 5)
+ 4
+ """
+
+ @abstractmethod
+ def count(self, value):
+ """
+ Return the number of times that value appears in the vector.
+
+ >>> v1 = v(1, 4, 3, 4)
+ >>> v1.count(4)
+ 2
+ """
+
+ @abstractmethod
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts: a match expression that specifies which elements to transform,
+ and a transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed, the original data structure is kept:
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+
+ @abstractmethod
+ def delete(self, index, stop=None):
+ """
+ Delete a portion of the vector by index or range.
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> v1.delete(1)
+ pvector([1, 3, 4, 5])
+ >>> v1.delete(1, 3)
+ pvector([1, 4, 5])
+ """
+
+ @abstractmethod
+ def remove(self, value):
+ """
+ Remove the first occurrence of a value from the vector.
+
+ >>> v1 = v(1, 2, 3, 2, 1)
+ >>> v2 = v1.remove(1)
+ >>> v2
+ pvector([2, 3, 2, 1])
+ >>> v2.remove(1)
+ pvector([2, 3, 2])
+ """
+
+
+_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
+PVector.register(PythonPVector)
+Sequence.register(PVector)
+Hashable.register(PVector)
+
+def python_pvector(iterable=()):
+ """
+ Create a new persistent vector containing the elements in iterable.
+
+ >>> v1 = pvector([1, 2, 3])
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return _EMPTY_PVECTOR.extend(iterable)
+
+try:
+ # Use the C extension as underlying trie implementation if it is available
+ import os
+ if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
+ pvector = python_pvector
+ else:
+ from pvectorc import pvector
+ PVector.register(type(pvector()))
+except ImportError:
+ pvector = python_pvector
+
+
+def v(*elements):
+ """
+ Create a new persistent vector containing all parameters to this function.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return pvector(elements)
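
The bit arithmetic in set(), _do_set() and _node_for() above is easier to see with a
minimal doctest-style sketch, assuming the SHIFT = 5 and BIT_MASK = 31 constants
defined earlier in this module (BRANCH_FACTOR = 32 children per node). Each trie
level consumes five bits of the index, which is what makes random access log32(n).
For a vector large enough that its root sits at level 10:

    >>> i = 1000
    >>> (i >> 10) & 31    # child slot in the root node
    0
    >>> (i >> 5) & 31     # child slot one level down
    31
    >>> i & 31            # slot in the leaf (or tail) node
    8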
diff --git a/lib/spack/external/pyrsistent/_transformations.py b/lib/spack/external/pyrsistent/_transformations.py
new file mode 100644
index 0000000000..612098969b
--- /dev/null
+++ b/lib/spack/external/pyrsistent/_transformations.py
@@ -0,0 +1,143 @@
+import re
+import six
+try:
+ from inspect import Parameter, signature
+except ImportError:
+ signature = None
+ try:
+ from inspect import getfullargspec as getargspec
+ except ImportError:
+ from inspect import getargspec
+
+
+_EMPTY_SENTINEL = object()
+
+
+def inc(x):
+ """ Add one to the current value """
+ return x + 1
+
+
+def dec(x):
+ """ Subtract one from the current value """
+ return x - 1
+
+
+def discard(evolver, key):
+ """ Discard the element and returns a structure without the discarded elements """
+ try:
+ del evolver[key]
+ except KeyError:
+ pass
+
+
+# Matchers
+def rex(expr):
+ """ Regular expression matcher to use together with transform functions """
+ r = re.compile(expr)
+ return lambda key: isinstance(key, six.string_types) and r.match(key)
+
+
+def ny(_):
+ """ Matcher that matches any value """
+ return True
+
+
+# Support functions
+def _chunks(l, n):
+ for i in range(0, len(l), n):
+ yield l[i:i + n]
+
+
+def transform(structure, transformations):
+ r = structure
+ for path, command in _chunks(transformations, 2):
+ r = _do_to_path(r, path, command)
+ return r
+
+
+def _do_to_path(structure, path, command):
+ if not path:
+ return command(structure) if callable(command) else command
+
+ kvs = _get_keys_and_values(structure, path[0])
+ return _update_structure(structure, kvs, path[1:], command)
+
+
+def _items(structure):
+ try:
+ return structure.items()
+ except AttributeError:
+ # Support a wider range of structures by adding a transform_items() or similar?
+ return list(enumerate(structure))
+
+
+def _get(structure, key, default):
+ try:
+ if hasattr(structure, '__getitem__'):
+ return structure[key]
+
+ return getattr(structure, key)
+
+ except (IndexError, KeyError):
+ return default
+
+
+def _get_keys_and_values(structure, key_spec):
+ if callable(key_spec):
+ # Support predicates as callable objects in the path
+ arity = _get_arity(key_spec)
+ if arity == 1:
+ # Unary predicates are called with the "key" of the path
+ # - eg a key in a mapping, an index in a sequence.
+ return [(k, v) for k, v in _items(structure) if key_spec(k)]
+ elif arity == 2:
+ # Binary predicates are called with the key and the corresponding
+ # value.
+ return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
+ else:
+ # Other arities are an error.
+ raise ValueError(
+ "callable in transform path must take 1 or 2 arguments"
+ )
+
+ # Non-callables are used as-is as a key.
+ return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
+
+
+if signature is None:
+ def _get_arity(f):
+ argspec = getargspec(f)
+ return len(argspec.args) - len(argspec.defaults or ())
+else:
+ def _get_arity(f):
+ return sum(
+ 1
+ for p
+ in signature(f).parameters.values()
+ if p.default is Parameter.empty
+ and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
+ )
+
+
+def _update_structure(structure, kvs, path, command):
+ from pyrsistent._pmap import pmap
+ e = structure.evolver()
+ if not path and command is discard:
+ # Do this in reverse to avoid index problems with vectors. See #92.
+ for k, v in reversed(kvs):
+ discard(e, k)
+ else:
+ for k, v in kvs:
+ is_empty = False
+ if v is _EMPTY_SENTINEL:
+ # Allow expansion of structure but make sure to cover the case
+ # when an empty pmap is added as leaf node. See #154.
+ is_empty = True
+ v = pmap()
+
+ result = _do_to_path(v, path, command)
+ if result is not v or is_empty:
+ e[k] = result
+
+ return e.persistent()
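
A short doctest-style sketch of the path/command pairs consumed by transform(),
using the helpers defined above (this assumes, as the PVector.transform docstring
earlier in this diff does, that freeze() and these helpers are re-exported by the
pyrsistent package; items are sorted because pmap ordering is unspecified):

    >>> from pyrsistent import freeze, inc, discard, rex
    >>> m = freeze({'a': 1, 'b': 2, 'junk': 3})
    >>> sorted(m.transform(['junk'], discard).items())
    [('a', 1), ('b', 2)]
    >>> sorted(m.transform([rex('^[ab]$')], inc).items())
    [('a', 2), ('b', 3), ('junk', 3)]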
diff --git a/lib/spack/external/_pytest/LICENSE b/lib/spack/external/pytest-fallback/_pytest/LICENSE
index 629df45ac4..629df45ac4 100644
--- a/lib/spack/external/_pytest/LICENSE
+++ b/lib/spack/external/pytest-fallback/_pytest/LICENSE
diff --git a/lib/spack/external/_pytest/__init__.py b/lib/spack/external/pytest-fallback/_pytest/__init__.py
index 6e41f0504e..6e41f0504e 100644
--- a/lib/spack/external/_pytest/__init__.py
+++ b/lib/spack/external/pytest-fallback/_pytest/__init__.py
diff --git a/lib/spack/external/_pytest/_argcomplete.py b/lib/spack/external/pytest-fallback/_pytest/_argcomplete.py
index 965ec79513..965ec79513 100644
--- a/lib/spack/external/_pytest/_argcomplete.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_argcomplete.py
diff --git a/lib/spack/external/_pytest/_code/__init__.py b/lib/spack/external/pytest-fallback/_pytest/_code/__init__.py
index 815c13b42c..815c13b42c 100644
--- a/lib/spack/external/_pytest/_code/__init__.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_code/__init__.py
diff --git a/lib/spack/external/_pytest/_code/_py2traceback.py b/lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py
index 5aacf0a428..5aacf0a428 100644
--- a/lib/spack/external/_pytest/_code/_py2traceback.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_code/_py2traceback.py
diff --git a/lib/spack/external/_pytest/_code/code.py b/lib/spack/external/pytest-fallback/_pytest/_code/code.py
index f3b7eedfce..f3b7eedfce 100644
--- a/lib/spack/external/_pytest/_code/code.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_code/code.py
diff --git a/lib/spack/external/_pytest/_code/source.py b/lib/spack/external/pytest-fallback/_pytest/_code/source.py
index fc41712649..fc41712649 100644
--- a/lib/spack/external/_pytest/_code/source.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_code/source.py
diff --git a/lib/spack/external/_pytest/_pluggy.py b/lib/spack/external/pytest-fallback/_pytest/_pluggy.py
index 6cc1d3d54a..6cc1d3d54a 100644
--- a/lib/spack/external/_pytest/_pluggy.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_pluggy.py
diff --git a/lib/spack/external/_pytest/_version.py b/lib/spack/external/pytest-fallback/_pytest/_version.py
index 3edb7da9ad..3edb7da9ad 100644
--- a/lib/spack/external/_pytest/_version.py
+++ b/lib/spack/external/pytest-fallback/_pytest/_version.py
diff --git a/lib/spack/external/_pytest/assertion/__init__.py b/lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py
index b0ef667d56..b0ef667d56 100644
--- a/lib/spack/external/_pytest/assertion/__init__.py
+++ b/lib/spack/external/pytest-fallback/_pytest/assertion/__init__.py
diff --git a/lib/spack/external/_pytest/assertion/rewrite.py b/lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py
index d48b6648fb..d48b6648fb 100644
--- a/lib/spack/external/_pytest/assertion/rewrite.py
+++ b/lib/spack/external/pytest-fallback/_pytest/assertion/rewrite.py
diff --git a/lib/spack/external/_pytest/assertion/truncate.py b/lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py
index 1e13063569..1e13063569 100644
--- a/lib/spack/external/_pytest/assertion/truncate.py
+++ b/lib/spack/external/pytest-fallback/_pytest/assertion/truncate.py
diff --git a/lib/spack/external/_pytest/assertion/util.py b/lib/spack/external/pytest-fallback/_pytest/assertion/util.py
index c09eff06b0..c09eff06b0 100644
--- a/lib/spack/external/_pytest/assertion/util.py
+++ b/lib/spack/external/pytest-fallback/_pytest/assertion/util.py
diff --git a/lib/spack/external/_pytest/cacheprovider.py b/lib/spack/external/pytest-fallback/_pytest/cacheprovider.py
index c537c14472..c537c14472 100755
--- a/lib/spack/external/_pytest/cacheprovider.py
+++ b/lib/spack/external/pytest-fallback/_pytest/cacheprovider.py
diff --git a/lib/spack/external/_pytest/capture.py b/lib/spack/external/pytest-fallback/_pytest/capture.py
index cb5af6fcb3..cb5af6fcb3 100644
--- a/lib/spack/external/_pytest/capture.py
+++ b/lib/spack/external/pytest-fallback/_pytest/capture.py
diff --git a/lib/spack/external/_pytest/compat.py b/lib/spack/external/pytest-fallback/_pytest/compat.py
index 255f69ce0d..255f69ce0d 100644
--- a/lib/spack/external/_pytest/compat.py
+++ b/lib/spack/external/pytest-fallback/_pytest/compat.py
diff --git a/lib/spack/external/_pytest/config.py b/lib/spack/external/pytest-fallback/_pytest/config.py
index 513478a972..513478a972 100644
--- a/lib/spack/external/_pytest/config.py
+++ b/lib/spack/external/pytest-fallback/_pytest/config.py
diff --git a/lib/spack/external/_pytest/debugging.py b/lib/spack/external/pytest-fallback/_pytest/debugging.py
index aa9c9a3863..aa9c9a3863 100644
--- a/lib/spack/external/_pytest/debugging.py
+++ b/lib/spack/external/pytest-fallback/_pytest/debugging.py
diff --git a/lib/spack/external/_pytest/deprecated.py b/lib/spack/external/pytest-fallback/_pytest/deprecated.py
index 38e9496778..38e9496778 100644
--- a/lib/spack/external/_pytest/deprecated.py
+++ b/lib/spack/external/pytest-fallback/_pytest/deprecated.py
diff --git a/lib/spack/external/_pytest/doctest.py b/lib/spack/external/pytest-fallback/_pytest/doctest.py
index 4c05acddf7..4c05acddf7 100644
--- a/lib/spack/external/_pytest/doctest.py
+++ b/lib/spack/external/pytest-fallback/_pytest/doctest.py
diff --git a/lib/spack/external/_pytest/fixtures.py b/lib/spack/external/pytest-fallback/_pytest/fixtures.py
index 98317a4889..7ad495615e 100644
--- a/lib/spack/external/_pytest/fixtures.py
+++ b/lib/spack/external/pytest-fallback/_pytest/fixtures.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import, division, print_function
+import collections
import inspect
import sys
import warnings
@@ -21,9 +22,6 @@ from _pytest.compat import (
from _pytest.outcomes import fail, TEST_OUTCOME
-from ordereddict_backport import OrderedDict
-
-
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
@@ -165,7 +163,7 @@ def reorder_items(items):
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
- keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
+ keys = collections.OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
@@ -200,7 +198,7 @@ def slice_items(items, ignore, scoped_argkeys_cache):
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
- newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
+ newargkeys = collections.OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
if newargkeys: # found a slicing key
slicing_argkey, _ = newargkeys.popitem()
items_before = items[:i]
diff --git a/lib/spack/external/_pytest/freeze_support.py b/lib/spack/external/pytest-fallback/_pytest/freeze_support.py
index 97147a8825..97147a8825 100644
--- a/lib/spack/external/_pytest/freeze_support.py
+++ b/lib/spack/external/pytest-fallback/_pytest/freeze_support.py
diff --git a/lib/spack/external/_pytest/helpconfig.py b/lib/spack/external/pytest-fallback/_pytest/helpconfig.py
index e744637f86..e744637f86 100644
--- a/lib/spack/external/_pytest/helpconfig.py
+++ b/lib/spack/external/pytest-fallback/_pytest/helpconfig.py
diff --git a/lib/spack/external/_pytest/hookspec.py b/lib/spack/external/pytest-fallback/_pytest/hookspec.py
index e5c966e58b..e5c966e58b 100644
--- a/lib/spack/external/_pytest/hookspec.py
+++ b/lib/spack/external/pytest-fallback/_pytest/hookspec.py
diff --git a/lib/spack/external/_pytest/junitxml.py b/lib/spack/external/pytest-fallback/_pytest/junitxml.py
index 7fb40dc354..7fb40dc354 100644
--- a/lib/spack/external/_pytest/junitxml.py
+++ b/lib/spack/external/pytest-fallback/_pytest/junitxml.py
diff --git a/lib/spack/external/_pytest/main.py b/lib/spack/external/pytest-fallback/_pytest/main.py
index 98aa28eb34..98aa28eb34 100644
--- a/lib/spack/external/_pytest/main.py
+++ b/lib/spack/external/pytest-fallback/_pytest/main.py
diff --git a/lib/spack/external/_pytest/mark.py b/lib/spack/external/pytest-fallback/_pytest/mark.py
index 454722ca2c..454722ca2c 100644
--- a/lib/spack/external/_pytest/mark.py
+++ b/lib/spack/external/pytest-fallback/_pytest/mark.py
diff --git a/lib/spack/external/_pytest/monkeypatch.py b/lib/spack/external/pytest-fallback/_pytest/monkeypatch.py
index 39ac770135..39ac770135 100644
--- a/lib/spack/external/_pytest/monkeypatch.py
+++ b/lib/spack/external/pytest-fallback/_pytest/monkeypatch.py
diff --git a/lib/spack/external/_pytest/nodes.py b/lib/spack/external/pytest-fallback/_pytest/nodes.py
index ad3af2ce67..ad3af2ce67 100644
--- a/lib/spack/external/_pytest/nodes.py
+++ b/lib/spack/external/pytest-fallback/_pytest/nodes.py
diff --git a/lib/spack/external/_pytest/nose.py b/lib/spack/external/pytest-fallback/_pytest/nose.py
index d246c5603d..d246c5603d 100644
--- a/lib/spack/external/_pytest/nose.py
+++ b/lib/spack/external/pytest-fallback/_pytest/nose.py
diff --git a/lib/spack/external/_pytest/outcomes.py b/lib/spack/external/pytest-fallback/_pytest/outcomes.py
index ff5ef756d9..ff5ef756d9 100644
--- a/lib/spack/external/_pytest/outcomes.py
+++ b/lib/spack/external/pytest-fallback/_pytest/outcomes.py
diff --git a/lib/spack/external/_pytest/pastebin.py b/lib/spack/external/pytest-fallback/_pytest/pastebin.py
index 9d689819f0..9d689819f0 100644
--- a/lib/spack/external/_pytest/pastebin.py
+++ b/lib/spack/external/pytest-fallback/_pytest/pastebin.py
diff --git a/lib/spack/external/_pytest/pytester.py b/lib/spack/external/pytest-fallback/_pytest/pytester.py
index 82aa00e0d2..82aa00e0d2 100644
--- a/lib/spack/external/_pytest/pytester.py
+++ b/lib/spack/external/pytest-fallback/_pytest/pytester.py
diff --git a/lib/spack/external/_pytest/python.py b/lib/spack/external/pytest-fallback/_pytest/python.py
index 41fd2bdb7f..41fd2bdb7f 100644
--- a/lib/spack/external/_pytest/python.py
+++ b/lib/spack/external/pytest-fallback/_pytest/python.py
diff --git a/lib/spack/external/_pytest/python_api.py b/lib/spack/external/pytest-fallback/_pytest/python_api.py
index a931b4d2c7..a931b4d2c7 100644
--- a/lib/spack/external/_pytest/python_api.py
+++ b/lib/spack/external/pytest-fallback/_pytest/python_api.py
diff --git a/lib/spack/external/_pytest/recwarn.py b/lib/spack/external/pytest-fallback/_pytest/recwarn.py
index c9fa872c07..c9fa872c07 100644
--- a/lib/spack/external/_pytest/recwarn.py
+++ b/lib/spack/external/pytest-fallback/_pytest/recwarn.py
diff --git a/lib/spack/external/_pytest/resultlog.py b/lib/spack/external/pytest-fallback/_pytest/resultlog.py
index 9f9c2d1f65..9f9c2d1f65 100644
--- a/lib/spack/external/_pytest/resultlog.py
+++ b/lib/spack/external/pytest-fallback/_pytest/resultlog.py
diff --git a/lib/spack/external/_pytest/runner.py b/lib/spack/external/pytest-fallback/_pytest/runner.py
index b643fa3c91..b643fa3c91 100644
--- a/lib/spack/external/_pytest/runner.py
+++ b/lib/spack/external/pytest-fallback/_pytest/runner.py
diff --git a/lib/spack/external/_pytest/setuponly.py b/lib/spack/external/pytest-fallback/_pytest/setuponly.py
index 15e195ad5a..15e195ad5a 100644
--- a/lib/spack/external/_pytest/setuponly.py
+++ b/lib/spack/external/pytest-fallback/_pytest/setuponly.py
diff --git a/lib/spack/external/_pytest/setupplan.py b/lib/spack/external/pytest-fallback/_pytest/setupplan.py
index e11bd40698..e11bd40698 100644
--- a/lib/spack/external/_pytest/setupplan.py
+++ b/lib/spack/external/pytest-fallback/_pytest/setupplan.py
diff --git a/lib/spack/external/_pytest/skipping.py b/lib/spack/external/pytest-fallback/_pytest/skipping.py
index b92800d10b..b92800d10b 100644
--- a/lib/spack/external/_pytest/skipping.py
+++ b/lib/spack/external/pytest-fallback/_pytest/skipping.py
diff --git a/lib/spack/external/_pytest/terminal.py b/lib/spack/external/pytest-fallback/_pytest/terminal.py
index 9da94d0c91..9da94d0c91 100644
--- a/lib/spack/external/_pytest/terminal.py
+++ b/lib/spack/external/pytest-fallback/_pytest/terminal.py
diff --git a/lib/spack/external/_pytest/tmpdir.py b/lib/spack/external/pytest-fallback/_pytest/tmpdir.py
index da1b032237..da1b032237 100644
--- a/lib/spack/external/_pytest/tmpdir.py
+++ b/lib/spack/external/pytest-fallback/_pytest/tmpdir.py
diff --git a/lib/spack/external/_pytest/unittest.py b/lib/spack/external/pytest-fallback/_pytest/unittest.py
index 52c9813e8b..52c9813e8b 100644
--- a/lib/spack/external/_pytest/unittest.py
+++ b/lib/spack/external/pytest-fallback/_pytest/unittest.py
diff --git a/lib/spack/external/_pytest/vendored_packages/README.md b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md
index b5fe6febb0..b5fe6febb0 100644
--- a/lib/spack/external/_pytest/vendored_packages/README.md
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/README.md
diff --git a/lib/spack/external/_pytest/vendored_packages/__init__.py b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/lib/spack/external/_pytest/vendored_packages/__init__.py
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/__init__.py
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
index da0e7a6ed7..da0e7a6ed7 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
index a1b589e38a..a1b589e38a 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
index 121017d086..121017d086 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
index bd88517c94..bd88517c94 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
index 3003a3bf2b..3003a3bf2b 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
index 8b6dd1b5a8..8b6dd1b5a8 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
index cde22aff02..cde22aff02 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
index 11bdb5c1f5..11bdb5c1f5 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy.py b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py
index 6f26552d73..6f26552d73 100644
--- a/lib/spack/external/_pytest/vendored_packages/pluggy.py
+++ b/lib/spack/external/pytest-fallback/_pytest/vendored_packages/pluggy.py
diff --git a/lib/spack/external/_pytest/warnings.py b/lib/spack/external/pytest-fallback/_pytest/warnings.py
index 926b1f5811..926b1f5811 100644
--- a/lib/spack/external/_pytest/warnings.py
+++ b/lib/spack/external/pytest-fallback/_pytest/warnings.py
diff --git a/lib/spack/external/py/__init__.py b/lib/spack/external/pytest-fallback/py/__init__.py
index 85af650f5c..85af650f5c 100644
--- a/lib/spack/external/py/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/__init__.py
diff --git a/lib/spack/external/py/__metainfo.py b/lib/spack/external/pytest-fallback/py/__metainfo.py
index 12581eb7af..12581eb7af 100644
--- a/lib/spack/external/py/__metainfo.py
+++ b/lib/spack/external/pytest-fallback/py/__metainfo.py
diff --git a/lib/spack/external/py/_apipkg.py b/lib/spack/external/pytest-fallback/py/_apipkg.py
index a73b8f6d0b..a73b8f6d0b 100644
--- a/lib/spack/external/py/_apipkg.py
+++ b/lib/spack/external/pytest-fallback/py/_apipkg.py
diff --git a/lib/spack/external/py/_builtin.py b/lib/spack/external/pytest-fallback/py/_builtin.py
index 52ee9d79ca..52ee9d79ca 100644
--- a/lib/spack/external/py/_builtin.py
+++ b/lib/spack/external/pytest-fallback/py/_builtin.py
diff --git a/lib/spack/external/py/_code/__init__.py b/lib/spack/external/pytest-fallback/py/_code/__init__.py
index f15acf8513..f15acf8513 100644
--- a/lib/spack/external/py/_code/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/_code/__init__.py
diff --git a/lib/spack/external/py/_code/_assertionnew.py b/lib/spack/external/pytest-fallback/py/_code/_assertionnew.py
index afb1b31ff0..afb1b31ff0 100644
--- a/lib/spack/external/py/_code/_assertionnew.py
+++ b/lib/spack/external/pytest-fallback/py/_code/_assertionnew.py
diff --git a/lib/spack/external/py/_code/_assertionold.py b/lib/spack/external/pytest-fallback/py/_code/_assertionold.py
index 4e81fb3ef6..4e81fb3ef6 100644
--- a/lib/spack/external/py/_code/_assertionold.py
+++ b/lib/spack/external/pytest-fallback/py/_code/_assertionold.py
diff --git a/lib/spack/external/py/_code/_py2traceback.py b/lib/spack/external/pytest-fallback/py/_code/_py2traceback.py
index d65e27cb73..d65e27cb73 100644
--- a/lib/spack/external/py/_code/_py2traceback.py
+++ b/lib/spack/external/pytest-fallback/py/_code/_py2traceback.py
diff --git a/lib/spack/external/py/_code/assertion.py b/lib/spack/external/pytest-fallback/py/_code/assertion.py
index 4ce80c75b1..4ce80c75b1 100644
--- a/lib/spack/external/py/_code/assertion.py
+++ b/lib/spack/external/pytest-fallback/py/_code/assertion.py
diff --git a/lib/spack/external/py/_code/code.py b/lib/spack/external/pytest-fallback/py/_code/code.py
index 20fd965c97..20fd965c97 100644
--- a/lib/spack/external/py/_code/code.py
+++ b/lib/spack/external/pytest-fallback/py/_code/code.py
diff --git a/lib/spack/external/py/_code/source.py b/lib/spack/external/pytest-fallback/py/_code/source.py
index c8b668b2fb..c8b668b2fb 100644
--- a/lib/spack/external/py/_code/source.py
+++ b/lib/spack/external/pytest-fallback/py/_code/source.py
diff --git a/lib/spack/external/py/_error.py b/lib/spack/external/pytest-fallback/py/_error.py
index 8ca339beba..8ca339beba 100644
--- a/lib/spack/external/py/_error.py
+++ b/lib/spack/external/pytest-fallback/py/_error.py
diff --git a/lib/spack/external/py/_iniconfig.py b/lib/spack/external/pytest-fallback/py/_iniconfig.py
index 92b50bd853..92b50bd853 100644
--- a/lib/spack/external/py/_iniconfig.py
+++ b/lib/spack/external/pytest-fallback/py/_iniconfig.py
diff --git a/lib/spack/external/py/_io/__init__.py b/lib/spack/external/pytest-fallback/py/_io/__init__.py
index 835f01f3ab..835f01f3ab 100644
--- a/lib/spack/external/py/_io/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/_io/__init__.py
diff --git a/lib/spack/external/py/_io/capture.py b/lib/spack/external/pytest-fallback/py/_io/capture.py
index bc157ed978..bc157ed978 100644
--- a/lib/spack/external/py/_io/capture.py
+++ b/lib/spack/external/pytest-fallback/py/_io/capture.py
diff --git a/lib/spack/external/py/_io/saferepr.py b/lib/spack/external/pytest-fallback/py/_io/saferepr.py
index 8518290efd..8518290efd 100644
--- a/lib/spack/external/py/_io/saferepr.py
+++ b/lib/spack/external/pytest-fallback/py/_io/saferepr.py
diff --git a/lib/spack/external/py/_io/terminalwriter.py b/lib/spack/external/pytest-fallback/py/_io/terminalwriter.py
index 390e8ca7b9..390e8ca7b9 100644
--- a/lib/spack/external/py/_io/terminalwriter.py
+++ b/lib/spack/external/pytest-fallback/py/_io/terminalwriter.py
diff --git a/lib/spack/external/py/_log/__init__.py b/lib/spack/external/pytest-fallback/py/_log/__init__.py
index fad62e960d..fad62e960d 100644
--- a/lib/spack/external/py/_log/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/_log/__init__.py
diff --git a/lib/spack/external/py/_log/log.py b/lib/spack/external/pytest-fallback/py/_log/log.py
index ce47e8c754..ce47e8c754 100644
--- a/lib/spack/external/py/_log/log.py
+++ b/lib/spack/external/pytest-fallback/py/_log/log.py
diff --git a/lib/spack/external/py/_log/warning.py b/lib/spack/external/pytest-fallback/py/_log/warning.py
index 722e31e910..722e31e910 100644
--- a/lib/spack/external/py/_log/warning.py
+++ b/lib/spack/external/pytest-fallback/py/_log/warning.py
diff --git a/lib/spack/external/py/_path/__init__.py b/lib/spack/external/pytest-fallback/py/_path/__init__.py
index 51f3246f80..51f3246f80 100644
--- a/lib/spack/external/py/_path/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/_path/__init__.py
diff --git a/lib/spack/external/py/_path/cacheutil.py b/lib/spack/external/pytest-fallback/py/_path/cacheutil.py
index 9922504750..9922504750 100644
--- a/lib/spack/external/py/_path/cacheutil.py
+++ b/lib/spack/external/pytest-fallback/py/_path/cacheutil.py
diff --git a/lib/spack/external/py/_path/common.py b/lib/spack/external/pytest-fallback/py/_path/common.py
index 5512e51efe..5512e51efe 100644
--- a/lib/spack/external/py/_path/common.py
+++ b/lib/spack/external/pytest-fallback/py/_path/common.py
diff --git a/lib/spack/external/py/_path/local.py b/lib/spack/external/pytest-fallback/py/_path/local.py
index d2f16b993e..d2f16b993e 100644
--- a/lib/spack/external/py/_path/local.py
+++ b/lib/spack/external/pytest-fallback/py/_path/local.py
diff --git a/lib/spack/external/py/_path/svnurl.py b/lib/spack/external/pytest-fallback/py/_path/svnurl.py
index 6589a71d09..6589a71d09 100644
--- a/lib/spack/external/py/_path/svnurl.py
+++ b/lib/spack/external/pytest-fallback/py/_path/svnurl.py
diff --git a/lib/spack/external/py/_path/svnwc.py b/lib/spack/external/pytest-fallback/py/_path/svnwc.py
index 992223c04a..992223c04a 100644
--- a/lib/spack/external/py/_path/svnwc.py
+++ b/lib/spack/external/pytest-fallback/py/_path/svnwc.py
diff --git a/lib/spack/external/py/_process/__init__.py b/lib/spack/external/pytest-fallback/py/_process/__init__.py
index 86c714ad1a..86c714ad1a 100644
--- a/lib/spack/external/py/_process/__init__.py
+++ b/lib/spack/external/pytest-fallback/py/_process/__init__.py
diff --git a/lib/spack/external/py/_process/cmdexec.py b/lib/spack/external/pytest-fallback/py/_process/cmdexec.py
index f83a249402..f83a249402 100644
--- a/lib/spack/external/py/_process/cmdexec.py
+++ b/lib/spack/external/pytest-fallback/py/_process/cmdexec.py
diff --git a/lib/spack/external/py/_process/forkedfunc.py b/lib/spack/external/pytest-fallback/py/_process/forkedfunc.py
index 1c28530688..1c28530688 100644
--- a/lib/spack/external/py/_process/forkedfunc.py
+++ b/lib/spack/external/pytest-fallback/py/_process/forkedfunc.py
diff --git a/lib/spack/external/py/_process/killproc.py b/lib/spack/external/pytest-fallback/py/_process/killproc.py
index 18e8310b5f..18e8310b5f 100644
--- a/lib/spack/external/py/_process/killproc.py
+++ b/lib/spack/external/pytest-fallback/py/_process/killproc.py
diff --git a/lib/spack/external/py/_std.py b/lib/spack/external/pytest-fallback/py/_std.py
index 97a9853323..97a9853323 100644
--- a/lib/spack/external/py/_std.py
+++ b/lib/spack/external/pytest-fallback/py/_std.py
diff --git a/lib/spack/external/py/_xmlgen.py b/lib/spack/external/pytest-fallback/py/_xmlgen.py
index 1c83545884..1c83545884 100644
--- a/lib/spack/external/py/_xmlgen.py
+++ b/lib/spack/external/pytest-fallback/py/_xmlgen.py
diff --git a/lib/spack/external/py/test.py b/lib/spack/external/pytest-fallback/py/test.py
index aa5beb1789..aa5beb1789 100644
--- a/lib/spack/external/py/test.py
+++ b/lib/spack/external/pytest-fallback/py/test.py
diff --git a/lib/spack/external/pytest.py b/lib/spack/external/pytest-fallback/pytest.py
index 6e124db418..6e124db418 100644
--- a/lib/spack/external/pytest.py
+++ b/lib/spack/external/pytest-fallback/pytest.py
diff --git a/lib/spack/external/six.py b/lib/spack/external/six.py
index 6bf4fd3810..4e15675d8b 100644
--- a/lib/spack/external/six.py
+++ b/lib/spack/external/six.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2010-2017 Benjamin Peterson
+# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -29,7 +29,7 @@ import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.11.0"
+__version__ = "1.16.0"
# Useful for very coarse version differentiation.
@@ -71,6 +71,11 @@ else:
MAXSIZE = int((1 << 63) - 1)
del X
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
def _add_doc(func, doc):
"""Add documentation to a function."""
@@ -186,6 +191,11 @@ class _SixMetaPathImporter(object):
return self
return None
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
@@ -223,6 +233,12 @@ class _SixMetaPathImporter(object):
return None
get_source = get_code # same as get_code
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
_importer = _SixMetaPathImporter(__name__)
@@ -255,9 +271,11 @@ _moved_attributes = [
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
@@ -637,13 +655,16 @@ if PY3:
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
+ del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
@@ -665,6 +686,7 @@ else:
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
@@ -681,6 +703,10 @@ def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
if PY3:
exec_ = getattr(moves.builtins, "exec")
@@ -716,16 +742,7 @@ else:
""")
-if sys.version_info[:2] == (3, 2):
- exec_("""def raise_from(value, from_value):
- try:
- if from_value is None:
- raise value
- raise value from from_value
- finally:
- value = None
-""")
-elif sys.version_info[:2] > (3, 2):
+if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
@@ -805,13 +822,33 @@ if sys.version_info[:2] < (3, 3):
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
+ # This does exactly what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on the ``wrapper`` object and doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # the ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
else:
wraps = functools.wraps
@@ -824,7 +861,15 @@ def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
@@ -844,13 +889,75 @@ def add_metaclass(metaclass):
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
def python_2_unicode_compatible(klass):
"""
- A decorator that defines __unicode__ and __str__ methods under Python 2.
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
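
The ensure_* helpers added above normalize string types across Python 2 and 3.
A minimal doctest-style sketch under Python 3 (illustrative, not part of the patch):

    >>> import six
    >>> six.ensure_binary(u'abc')
    b'abc'
    >>> six.ensure_text(b'abc')
    'abc'
    >>> six.ensure_str(b'abc') == six.ensure_str(u'abc') == 'abc'
    True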
diff --git a/lib/spack/llnl/util/multiproc.py b/lib/spack/llnl/util/multiproc.py
index c73ebaed20..2a261d143e 100644
--- a/lib/spack/llnl/util/multiproc.py
+++ b/lib/spack/llnl/util/multiproc.py
@@ -16,7 +16,7 @@ __all__ = ['Barrier']
class Barrier:
"""Simple reusable semaphore barrier.
- Python 2.6 doesn't have multiprocessing barriers so we implement this.
+ Python 2 doesn't have multiprocessing barriers so we implement this.
See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
"""
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index a7a4637ba9..81c779661f 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -607,7 +607,7 @@ class log_output(object):
self._active = True
# return this log_output object so that the user can do things
- # like temporarily echo some ouptut.
+ # like temporarily echo some output.
return self
def __exit__(self, exc_type, exc_val, exc_tb):
diff --git a/lib/spack/spack/analyzers/libabigail.py b/lib/spack/spack/analyzers/libabigail.py
index 9b26f3ca6f..88802ec28c 100644
--- a/lib/spack/spack/analyzers/libabigail.py
+++ b/lib/spack/spack/analyzers/libabigail.py
@@ -2,8 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-
import os
import llnl.util.tty as tty
@@ -16,6 +14,7 @@ import spack.hooks
import spack.monitor
import spack.package
import spack.repo
+import spack.util.executable
from .analyzer_base import AnalyzerBase
@@ -40,13 +39,12 @@ class Libabigail(AnalyzerBase):
tty.debug("Preparing to use Libabigail, will install if missing.")
with spack.bootstrap.ensure_bootstrap_configuration():
-
# libabigail won't install lib/bin/share without docs
spec = spack.spec.Spec("libabigail+docs")
- spec.concretize()
-
- self.abidw = spack.bootstrap.get_executable(
- "abidw", spec=spec, install=True)
+ spack.bootstrap.ensure_executables_in_path_or_raise(
+ ["abidw"], abstract_spec=spec
+ )
+ self.abidw = spack.util.executable.which('abidw')
def run(self):
"""
diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py
index 0a23896b8f..01817a3abd 100644
--- a/lib/spack/spack/binary_distribution.py
+++ b/lib/spack/spack/binary_distribution.py
@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
+import collections
import hashlib
import json
import os
@@ -12,10 +13,10 @@ import sys
import tarfile
import tempfile
import traceback
+import warnings
from contextlib import closing
import ruamel.yaml as yaml
-from ordereddict_backport import OrderedDict
from six.moves.urllib.error import HTTPError, URLError
import llnl.util.lang
@@ -27,10 +28,13 @@ import spack.config as config
import spack.database as spack_db
import spack.fetch_strategy as fs
import spack.hash_types as ht
+import spack.hooks
import spack.hooks.sbang
import spack.mirror
import spack.platforms
import spack.relocate as relocate
+import spack.repo
+import spack.store
import spack.util.file_cache as file_cache
import spack.util.gpg
import spack.util.spack_json as sjson
@@ -975,8 +979,11 @@ def generate_key_index(key_prefix, tmpdir=None):
shutil.rmtree(tmpdir)
-def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
- allow_root=False, key=None, regenerate_index=False):
+def _build_tarball(
+ spec, outdir,
+ force=False, relative=False, unsigned=False,
+ allow_root=False, key=None, regenerate_index=False
+):
"""
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
@@ -1044,11 +1051,11 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
os.remove(temp_tarfile_path)
# create info for later relocation and create tar
- write_buildinfo_file(spec, workdir, rel)
+ write_buildinfo_file(spec, workdir, relative)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
- if rel:
+ if relative:
try:
make_package_relative(workdir, spec, allow_root)
except Exception as e:
@@ -1096,7 +1103,7 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
- buildinfo['relative_rpaths'] = rel
+ buildinfo['relative_rpaths'] = relative
spec_dict['buildinfo'] = buildinfo
with open(specfile_path, 'w') as outfile:
@@ -1148,6 +1155,64 @@ def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
return None
+def nodes_to_be_packaged(specs, include_root=True, include_dependencies=True):
+ """Return the list of nodes to be packaged, given a list of specs.
+
+ Args:
+ specs (List[spack.spec.Spec]): list of root specs to be processed
+ include_root (bool): include the root of each spec in the nodes
+ include_dependencies (bool): include the dependencies of each
+ spec in the nodes
+ """
+ if not include_root and not include_dependencies:
+ return set()
+
+ def skip_node(current_node):
+ if current_node.external or current_node.virtual:
+ return True
+ return spack.store.db.query_one(current_node) is None
+
+ expanded_set = set()
+ for current_spec in specs:
+ if not include_dependencies:
+ nodes = [current_spec]
+ else:
+ nodes = [n for n in current_spec.traverse(
+ order='post', root=include_root, deptype=('link', 'run')
+ )]
+
+ for node in nodes:
+ if not skip_node(node):
+ expanded_set.add(node)
+
+ return expanded_set
+
+
+def push(specs, push_url, specs_kwargs=None, **kwargs):
+ """Create a binary package for each of the specs passed as input and push them
+ to a given push URL.
+
+ Args:
+ specs (List[spack.spec.Spec]): installed specs to be packaged
+ push_url (str): url where to push the binary package
+ specs_kwargs (dict): dictionary with two possible boolean keys, "include_root"
+ and "include_dependencies", which determine which part of each spec is
+ packaged and pushed to the mirror
+ **kwargs: TODO
+
+ """
+ specs_kwargs = specs_kwargs or {'include_root': True, 'include_dependencies': True}
+ nodes = nodes_to_be_packaged(specs, **specs_kwargs)
+
+ # TODO: This seems to be an easy target for task
+ # TODO: distribution using a parallel pool
+ for node in nodes:
+ try:
+ _build_tarball(node, push_url, **kwargs)
+ except NoOverwriteException as e:
+ warnings.warn(str(e))
+
+
def download_tarball(spec, preferred_mirrors=None):
"""
Download binary tarball for given package into stage area, returning
@@ -1278,8 +1343,8 @@ def relocate_package(spec, allow_root):
# Spurious replacements (e.g. sbang) will cause issues with binaries
# For example, the new sbang can be longer than the old one.
# Hence 2 dictionaries are maintained here.
- prefix_to_prefix_text = OrderedDict({})
- prefix_to_prefix_bin = OrderedDict({})
+ prefix_to_prefix_text = collections.OrderedDict()
+ prefix_to_prefix_bin = collections.OrderedDict()
if old_sbang_install_path:
install_path = spack.hooks.sbang.sbang_install_path()
@@ -1486,6 +1551,66 @@ def extract_tarball(spec, filename, allow_root=False, unsigned=False,
os.remove(filename)
+def install_root_node(spec, allow_root, unsigned=False, force=False, sha256=None):
+ """Install the root node of a concrete spec from a buildcache.
+
+ Checking the sha256 sum of a node before installation is usually needed only
+ for software installed during Spack's bootstrapping (since we might not have
+ a proper signature verification mechanism available).
+
+ Args:
+ spec: spec to be installed (note that only the root node will be installed)
+ allow_root (bool): allows the root directory to be present in binaries
+ (may affect relocation)
+ unsigned (bool): if True allows installing unsigned binaries
+ force (bool): force installation if the spec is already present in the
+ local store
+ sha256 (str): optional sha256 of the binary package, to be checked
+ before installation
+ """
+ package = spack.repo.get(spec)
+ # Early termination
+ if spec.external or spec.virtual:
+ warnings.warn("Skipping external or virtual package {0}".format(spec.format()))
+ return
+ elif spec.concrete and package.installed and not force:
+ warnings.warn("Package for spec {0} already installed.".format(spec.format()))
+ return
+
+ tarball = download_tarball(spec)
+ if not tarball:
+ msg = 'download of binary cache file for spec "{0}" failed'
+ raise RuntimeError(msg.format(spec.format()))
+
+ if sha256:
+ checker = spack.util.crypto.Checker(sha256)
+ msg = 'cannot verify checksum for "{0}" [expected={1}]'
+ msg = msg.format(tarball, sha256)
+ if not checker.check(tarball):
+ raise spack.binary_distribution.NoChecksumException(msg)
+ tty.debug('Verified SHA256 checksum of the build cache')
+
+ tty.msg('Installing "{0}" from a buildcache'.format(spec.format()))
+ extract_tarball(spec, tarball, allow_root, unsigned, force)
+ spack.hooks.post_install(spec)
+ spack.store.db.add(spec, spack.store.layout)
+
+
+def install_single_spec(spec, allow_root=False, unsigned=False, force=False):
+ """Install a single concrete spec from a buildcache.
+
+ Args:
+ spec (spack.spec.Spec): spec to be installed
+ allow_root (bool): allows the root directory to be present in binaries
+ (may affect relocation)
+ unsigned (bool): if True allows installing unsigned binaries
+ force (bool): force installation if the spec is already present in the
+ local store
+ """
+ for node in spec.traverse(root=True, order='post', deptype=('link', 'run')):
+ install_root_node(node, allow_root=allow_root, unsigned=unsigned, force=force)
+
+
def try_direct_fetch(spec, full_hash_match=False, mirrors=None):
"""
Try to find the spec directly on the configured mirrors
@@ -1937,3 +2062,73 @@ def download_buildcache_entry(file_descriptions, mirror_url=None):
continue
return False
+
+
+def download_single_spec(
+ concrete_spec, destination, require_cdashid=False, mirror_url=None
+):
+ """Download the buildcache files for a single concrete spec.
+
+ Args:
+ concrete_spec: concrete spec to be downloaded
+ destination (str): path where to put the downloaded buildcache
+ require_cdashid (bool): if False the `.cdashid` file is optional
+ mirror_url (str): url of the mirror from which to download
+ """
+ tarfile_name = tarball_name(concrete_spec, '.spack')
+ tarball_dir_name = tarball_directory_name(concrete_spec)
+ tarball_path_name = os.path.join(tarball_dir_name, tarfile_name)
+ local_tarball_path = os.path.join(destination, tarball_dir_name)
+
+ files_to_fetch = [
+ {
+ 'url': [tarball_path_name],
+ 'path': local_tarball_path,
+ 'required': True,
+ }, {
+ 'url': [tarball_name(concrete_spec, '.spec.json'),
+ tarball_name(concrete_spec, '.spec.yaml')],
+ 'path': destination,
+ 'required': True,
+ }, {
+ 'url': [tarball_name(concrete_spec, '.cdashid')],
+ 'path': destination,
+ 'required': require_cdashid,
+ },
+ ]
+
+ return download_buildcache_entry(files_to_fetch, mirror_url)
+
+
+class BinaryCacheQuery(object):
+ """Callable object to query if a spec is in a binary cache"""
+ def __init__(self, all_architectures):
+ """
+ Args:
+ all_architectures (bool): if True consider all the specs for querying,
+ otherwise restrict to the current default architecture
+ """
+ self.all_architectures = all_architectures
+
+ specs = update_cache_and_get_specs()
+
+ if not self.all_architectures:
+ arch = spack.spec.Spec.default_arch()
+ specs = [s for s in specs if s.satisfies(arch)]
+
+ self.possible_specs = specs
+
+ def __call__(self, spec, **kwargs):
+ matches = []
+ if spec.startswith('/'):
+ # Matching a DAG hash
+ query_hash = spec.replace('/', '')
+ for candidate_spec in self.possible_specs:
+ if candidate_spec.dag_hash().startswith(query_hash):
+ matches.append(candidate_spec)
+ else:
+ # Matching a spec constraint
+ matches = [
+ s for s in self.possible_specs if s.satisfies(spec)
+ ]
+ return matches
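+
+
+# A hedged usage sketch for BinaryCacheQuery ('zlib' is illustrative); the
+# callable accepts either a plain spec constraint or a '/<hash>' query:
+#
+# query = BinaryCacheQuery(all_architectures=False)
+# matches = query('zlib@1.2.11')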
diff --git a/lib/spack/spack/bootstrap.py b/lib/spack/spack/bootstrap.py
index 52fadbf700..3cb649789d 100644
--- a/lib/spack/spack/bootstrap.py
+++ b/lib/spack/spack/bootstrap.py
@@ -10,14 +10,12 @@ import functools
import json
import os
import os.path
+import platform
import re
import sys
+import sysconfig
-try:
- import sysconfig # novm
-except ImportError:
- # Not supported on Python 2.6
- pass
+import six
import archspec.cpu
@@ -28,7 +26,6 @@ import spack.binary_distribution
import spack.config
import spack.detection
import spack.environment
-import spack.main
import spack.modules
import spack.paths
import spack.platforms
@@ -38,10 +35,6 @@ import spack.store
import spack.user_environment
import spack.util.executable
import spack.util.path
-from spack.util.environment import EnvironmentModifications
-
-#: "spack buildcache" command, initialized lazily
-_buildcache_cmd = None
#: Map a bootstrapper type to the corresponding class
_bootstrap_methods = {}
@@ -60,29 +53,39 @@ def _bootstrapper(type):
return _register
-def _try_import_from_store(module, abstract_spec_str):
+def _try_import_from_store(module, query_spec, query_info=None):
"""Return True if the module can be imported from an already
installed spec, False otherwise.
Args:
module: Python module to be imported
- abstract_spec_str: abstract spec that may provide the module
+ query_spec: spec that may provide the module
+ query_info (dict or None): if a dict is passed it is populated with
+ the concrete spec providing the module
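+
+ A hedged sketch of the query_info contract ('clingo' is illustrative)::
+
+ info = {}
+ if _try_import_from_store('clingo', clingo_root_spec(), query_info=info):
+ print(info['spec'])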
"""
- bincache_platform = spack.platforms.real_host()
- if str(bincache_platform) == 'cray':
- bincache_platform = spack.platforms.linux.Linux()
- with spack.platforms.use_platform(bincache_platform):
- abstract_spec_str = str(spack.spec.Spec(abstract_spec_str))
+ # If it is a string, assume it's one of the root specs provided by this module
+ if isinstance(query_spec, six.string_types):
+ bincache_platform = spack.platforms.real_host()
+ if str(bincache_platform) == 'cray':
+ bincache_platform = spack.platforms.linux.Linux()
+ with spack.platforms.use_platform(bincache_platform):
+ query_spec = str(spack.spec.Spec(query_spec))
- # We have to run as part of this python interpreter
- abstract_spec_str += ' ^' + spec_for_current_python()
+ # We have to run as part of this python interpreter
+ query_spec += ' ^' + spec_for_current_python()
- installed_specs = spack.store.db.query(abstract_spec_str, installed=True)
+ installed_specs = spack.store.db.query(query_spec, installed=True)
for candidate_spec in installed_specs:
- lib_spd = candidate_spec['python'].package.default_site_packages_dir
+ python_spec = candidate_spec['python']
+ lib_spd = python_spec.package.default_site_packages_dir
lib64_spd = lib_spd.replace('lib/', 'lib64/')
+ lib_debian_derivative = os.path.join(
+ 'lib', 'python{0}'.format(python_spec.version.up_to(1)), 'dist-packages'
+ )
+
module_paths = [
+ os.path.join(candidate_spec.prefix, lib_debian_derivative),
os.path.join(candidate_spec.prefix, lib_spd),
os.path.join(candidate_spec.prefix, lib64_spd)
]
@@ -93,9 +96,11 @@ def _try_import_from_store(module, abstract_spec_str):
if _python_import(module):
msg = ('[BOOTSTRAP MODULE {0}] The installed spec "{1}/{2}" '
'provides the "{0}" Python module').format(
- module, abstract_spec_str, candidate_spec.dag_hash()
+ module, query_spec, candidate_spec.dag_hash()
)
tty.debug(msg)
+ if query_info is not None:
+ query_info['spec'] = candidate_spec
return True
except Exception as e:
msg = ('unexpected error while trying to import module '
@@ -105,7 +110,7 @@ def _try_import_from_store(module, abstract_spec_str):
msg = "Spec {0} did not provide module {1}"
tty.warn(msg.format(candidate_spec, module))
- sys.path = sys.path[:-2]
+ sys.path = sys.path[:-3]
return False
@@ -175,7 +180,7 @@ def _fix_ext_suffix(candidate_spec):
os.symlink(abs_path, link_name)
-def _executables_in_store(executables, abstract_spec_str):
+def _executables_in_store(executables, query_spec, query_info=None):
"""Return True if at least one of the executables can be retrieved from
a spec in store, False otherwise.
@@ -185,12 +190,14 @@ def _executables_in_store(executables, abstract_spec_str):
Args:
executables: list of executables to be searched
- abstract_spec_str: abstract spec that may provide the executable
+ query_spec: spec that may provide the executable
+ query_info (dict or None): if a dict is passed it is populated with the
+ command found and the concrete spec providing it
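+
+ A hedged sketch (the 'gpg' names are illustrative); on success the
+ recorded command can be invoked directly::
+
+ info = {}
+ if _executables_in_store(['gpg2', 'gpg'], gnupg_root_spec(), query_info=info):
+ info['command']('--version')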
"""
executables_str = ', '.join(executables)
msg = "[BOOTSTRAP EXECUTABLES {0}] Try installed specs with query '{1}'"
- tty.debug(msg.format(executables_str, abstract_spec_str))
- installed_specs = spack.store.db.query(abstract_spec_str, installed=True)
+ tty.debug(msg.format(executables_str, query_spec))
+ installed_specs = spack.store.db.query(query_spec, installed=True)
if installed_specs:
for concrete_spec in installed_specs:
bin_dir = concrete_spec.prefix.bin
@@ -199,6 +206,11 @@ def _executables_in_store(executables, abstract_spec_str):
if (os.path.exists(bin_dir) and os.path.isdir(bin_dir) and
spack.util.executable.which_string(*executables, path=bin_dir)):
spack.util.environment.path_put_first('PATH', [bin_dir])
+ if query_info is not None:
+ query_info['command'] = spack.util.executable.which(
+ *executables, path=bin_dir
+ )
+ query_info['spec'] = concrete_spec
return True
return False
@@ -209,6 +221,7 @@ class _BuildcacheBootstrapper(object):
def __init__(self, conf):
self.name = conf['name']
self.url = conf['info']['url']
+ self.last_search = None
@staticmethod
def _spec_and_platform(abstract_spec_str):
@@ -242,11 +255,6 @@ class _BuildcacheBootstrapper(object):
return data
def _install_by_hash(self, pkg_hash, pkg_sha256, index, bincache_platform):
- global _buildcache_cmd
-
- if _buildcache_cmd is None:
- _buildcache_cmd = spack.main.SpackCommand('buildcache')
-
index_spec = next(x for x in index if x.dag_hash() == pkg_hash)
# Reconstruct the compiler that we need to use for bootstrapping
compiler_entry = {
@@ -266,13 +274,18 @@ class _BuildcacheBootstrapper(object):
'compilers', [{'compiler': compiler_entry}]
):
spec_str = '/' + pkg_hash
- install_args = [
- 'install',
- '--sha256', pkg_sha256,
- '--only-root',
- '-a', '-u', '-o', '-f', spec_str
- ]
- _buildcache_cmd(*install_args, fail_on_error=False)
+ query = spack.binary_distribution.BinaryCacheQuery(
+ all_architectures=True
+ )
+ matches = spack.store.find([spec_str], multiple=False, query_fn=query)
+ for match in matches:
+ spack.binary_distribution.install_root_node(
+ match,
+ allow_root=True,
+ unsigned=True,
+ force=True,
+ sha256=pkg_sha256
+ )
def _install_and_test(
self, abstract_spec, bincache_platform, bincache_data, test_fn
@@ -304,7 +317,9 @@ class _BuildcacheBootstrapper(object):
pkg_hash, pkg_sha256, index, bincache_platform
)
- if test_fn():
+ info = {}
+ if test_fn(query_spec=abstract_spec, query_info=info):
+ self.last_search = info
return True
return False
@@ -315,8 +330,8 @@ class _BuildcacheBootstrapper(object):
)
def try_import(self, module, abstract_spec_str):
- test_fn = functools.partial(_try_import_from_store, module, abstract_spec_str)
- if test_fn():
+ test_fn, info = functools.partial(_try_import_from_store, module), {}
+ if test_fn(query_spec=abstract_spec_str, query_info=info):
return True
tty.info("Bootstrapping {0} from pre-built binaries".format(module))
@@ -329,15 +344,12 @@ class _BuildcacheBootstrapper(object):
)
def try_search_path(self, executables, abstract_spec_str):
- test_fn = functools.partial(
- _executables_in_store, executables, abstract_spec_str
- )
- if test_fn():
+ test_fn, info = functools.partial(_executables_in_store, executables), {}
+ if test_fn(query_spec=abstract_spec_str, query_info=info):
+ self.last_search = info
return True
- abstract_spec, bincache_platform = self._spec_and_platform(
- abstract_spec_str
- )
+ abstract_spec, bincache_platform = self._spec_and_platform(abstract_spec_str)
tty.info("Bootstrapping {0} from pre-built binaries".format(abstract_spec.name))
data = self._read_metadata(abstract_spec.name)
return self._install_and_test(
@@ -350,10 +362,12 @@ class _SourceBootstrapper(object):
"""Install the software needed during bootstrapping from sources."""
def __init__(self, conf):
self.conf = conf
+ self.last_search = None
- @staticmethod
- def try_import(module, abstract_spec_str):
- if _try_import_from_store(module, abstract_spec_str):
+ def try_import(self, module, abstract_spec_str):
+ info = {}
+ if _try_import_from_store(module, abstract_spec_str, query_info=info):
+ self.last_search = info
return True
tty.info("Bootstrapping {0} from sources".format(module))
@@ -384,10 +398,15 @@ class _SourceBootstrapper(object):
# Install the spec that should make the module importable
concrete_spec.package.do_install(fail_fast=True)
- return _try_import_from_store(module, abstract_spec_str=abstract_spec_str)
+ if _try_import_from_store(module, query_spec=concrete_spec, query_info=info):
+ self.last_search = info
+ return True
+ return False
def try_search_path(self, executables, abstract_spec_str):
- if _executables_in_store(executables, abstract_spec_str):
+ info = {}
+ if _executables_in_store(executables, abstract_spec_str, query_info=info):
+ self.last_search = info
return True
# If we compile code from sources detecting a few build tools
@@ -399,12 +418,18 @@ class _SourceBootstrapper(object):
abstract_spec_str += ' os=fe'
concrete_spec = spack.spec.Spec(abstract_spec_str)
- concrete_spec.concretize()
+ if concrete_spec.name == 'patchelf':
+ concrete_spec._old_concretize(deprecation_warning=False)
+ else:
+ concrete_spec.concretize()
- msg = "[BOOTSTRAP GnuPG] Try installing '{0}' from sources"
+ msg = "[BOOTSTRAP] Try installing '{0}' from sources"
tty.debug(msg.format(abstract_spec_str))
concrete_spec.package.do_install()
- return _executables_in_store(executables, abstract_spec_str)
+ if _executables_in_store(executables, concrete_spec, query_info=info):
+ self.last_search = info
+ return True
+ return False
def _make_bootstrapper(conf):
@@ -527,9 +552,13 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
Raises:
RuntimeError: if the executables cannot be ensured to be in PATH
+
+ Returns:
+ Executable object
"""
- if spack.util.executable.which_string(*executables):
- return
+ cmd = spack.util.executable.which(*executables)
+ if cmd:
+ return cmd
executables_str = ', '.join(executables)
source_configs = spack.config.get('bootstrap:sources', [])
@@ -543,7 +572,17 @@ def ensure_executables_in_path_or_raise(executables, abstract_spec):
b = _make_bootstrapper(current_config)
try:
if b.try_search_path(executables, abstract_spec):
- return
+ # Additional environment variables needed
+ concrete_spec, cmd = b.last_search['spec'], b.last_search['command']
+ env_mods = spack.util.environment.EnvironmentModifications()
+ for dep in concrete_spec.traverse(
+ root=True, order='post', deptype=('link', 'run')
+ ):
+ env_mods.extend(
+ spack.user_environment.environment_modifications_for_spec(dep)
+ )
+ cmd.add_default_envmod(env_mods)
+ return cmd
except Exception as e:
msg = '[BOOTSTRAP EXECUTABLES {0}] Unexpected error "{1}"'
tty.debug(msg.format(executables_str, str(e)))
@@ -563,75 +602,6 @@ def _python_import(module):
return True
-def get_executable(exe, spec=None, install=False):
- """Find an executable named exe, either in PATH or in Spack
-
- Args:
- exe (str): needed executable name
- spec (spack.spec.Spec or str): spec to search for exe in (default exe)
- install (bool): install spec if not available
-
- When ``install`` is True, Spack will use the python used to run Spack as an
- external. The ``install`` option should only be used with packages that
- install quickly (when using external python) or are guaranteed by Spack
- organization to be in a binary mirror (clingo).
- """
- # Search the system first
- runner = spack.util.executable.which(exe)
- if runner:
- return runner
-
- # Check whether it's already installed
- spec = spack.spec.Spec(spec or exe)
- installed_specs = spack.store.db.query(spec, installed=True)
- for ispec in installed_specs:
- # filter out directories of the same name as the executable
- exe_path = [exe_p for exe_p in fs.find(ispec.prefix, exe)
- if fs.is_exe(exe_p)]
- if exe_path:
- ret = spack.util.executable.Executable(exe_path[0])
- envmod = EnvironmentModifications()
- for dep in ispec.traverse(root=True, order='post'):
- envmod.extend(
- spack.user_environment.environment_modifications_for_spec(dep)
- )
- ret.add_default_envmod(envmod)
- return ret
- else:
- tty.warn('Exe %s not found in prefix %s' % (exe, ispec.prefix))
-
- def _raise_error(executable, exe_spec):
- error_msg = 'cannot find the executable "{0}"'.format(executable)
- if exe_spec:
- error_msg += ' from spec "{0}'.format(exe_spec)
- raise RuntimeError(error_msg)
-
- # If we're not allowed to install this for ourselves, we can't find it
- if not install:
- _raise_error(exe, spec)
-
- with spack_python_interpreter():
- # We will install for ourselves, using this python if needed
- # Concretize the spec
- spec.concretize()
-
- spec.package.do_install()
- # filter out directories of the same name as the executable
- exe_path = [exe_p for exe_p in fs.find(spec.prefix, exe)
- if fs.is_exe(exe_p)]
- if exe_path:
- ret = spack.util.executable.Executable(exe_path[0])
- envmod = EnvironmentModifications()
- for dep in spec.traverse(root=True, order='post'):
- envmod.extend(
- spack.user_environment.environment_modifications_for_spec(dep)
- )
- ret.add_default_envmod(envmod)
- return ret
-
- _raise_error(exe, spec)
-
-
def _bootstrap_config_scopes():
tty.debug('[BOOTSTRAP CONFIG SCOPE] name=_builtin')
config_scopes = [
@@ -674,8 +644,30 @@ def _add_externals_if_missing():
spack.detection.update_configuration(detected_packages, scope='bootstrap')
+#: Reference counter for the bootstrapping configuration context manager
+_REF_COUNT = 0
+
+
@contextlib.contextmanager
def ensure_bootstrap_configuration():
+ # The context manager is reference counted to ensure we don't swap multiple
+ # times if there's nested use of it in the stack. One compelling use case
+ # is bootstrapping patchelf during the bootstrap of clingo.
+ global _REF_COUNT
+ already_swapped = bool(_REF_COUNT)
+ _REF_COUNT += 1
+ try:
+ if already_swapped:
+ yield
+ else:
+ with _ensure_bootstrap_configuration():
+ yield
+ finally:
+ _REF_COUNT -= 1
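+
+# A minimal re-entrancy sketch (illustrative):
+#
+# with ensure_bootstrap_configuration(): # swaps to the bootstrap config
+# with ensure_bootstrap_configuration(): # nested use: no second swap
+# ...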
+
+
+@contextlib.contextmanager
+def _ensure_bootstrap_configuration():
bootstrap_store_path = store_path()
user_configuration = _read_and_sanitize_configuration()
with spack.environment.no_active_environment():
@@ -783,6 +775,205 @@ def gnupg_root_spec():
def ensure_gpg_in_path_or_raise():
"""Ensure gpg or gpg2 are in the PATH or raise."""
- ensure_executables_in_path_or_raise(
- executables=['gpg2', 'gpg'], abstract_spec=gnupg_root_spec(),
+ return ensure_executables_in_path_or_raise(
+ executables=['gpg2', 'gpg'], abstract_spec=gnupg_root_spec()
)
+
+
+def patchelf_root_spec():
+ """Return the root spec used to bootstrap patchelf"""
+ # TODO: patchelf is restricted to v0.13 since earlier versions have
+ # TODO: bugs that we don't want to deal with, while v0.14 requires C++17,
+ # TODO: which may not be available on all platforms.
+ return _root_spec('patchelf@0.13.1:0.13.99')
+
+
+def ensure_patchelf_in_path_or_raise():
+ """Ensure patchelf is in the PATH or raise."""
+ return ensure_executables_in_path_or_raise(
+ executables=['patchelf'], abstract_spec=patchelf_root_spec()
+ )
+
+
+###
+# Development dependencies
+###
+
+
+def isort_root_spec():
+ return _root_spec('py-isort@4.3.5:')
+
+
+def ensure_isort_in_path_or_raise():
+ """Ensure that isort is in the PATH or raise."""
+ executable, root_spec = 'isort', isort_root_spec()
+ return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
+
+
+def mypy_root_spec():
+ return _root_spec('py-mypy@0.900:')
+
+
+def ensure_mypy_in_path_or_raise():
+ """Ensure that mypy is in the PATH or raise."""
+ executable, root_spec = 'mypy', mypy_root_spec()
+ return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
+
+
+def black_root_spec():
+ return _root_spec('py-black')
+
+
+def ensure_black_in_path_or_raise():
+ """Ensure that isort is in the PATH or raise."""
+ executable, root_spec = 'black', black_root_spec()
+ return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
+
+
+def flake8_root_spec():
+ return _root_spec('py-flake8')
+
+
+def ensure_flake8_in_path_or_raise():
+ """Ensure that flake8 is in the PATH or raise."""
+ executable, root_spec = 'flake8', flake8_root_spec()
+ return ensure_executables_in_path_or_raise([executable], abstract_spec=root_spec)
+
+
+def _missing(name, purpose, system_only=True):
+ """Message to be printed if an executable is not found"""
+ msg = '[{2}] MISSING "{0}": {1}'
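+ # Note: the doubled braces below are passed as .format() arguments, not as
+ # the format string, so they survive this call; they are collapsed to
+ # single braces by the later msg.format(...) in status_message(), letting
+ # the color markup reach colorize() intact.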
+ if not system_only:
+ return msg.format(name, purpose, '@*y{{B}}')
+ return msg.format(name, purpose, '@*y{{-}}')
+
+
+def _required_system_executable(exes, msg):
+ """Search for an executable is the system path only."""
+ if isinstance(exes, six.string_types):
+ exes = (exes,)
+ if spack.util.executable.which_string(*exes):
+ return True, None
+ return False, msg
+
+
+def _required_python_module(module, query_spec, msg):
+ """Check if a Python module is available in the current interpreter or
+ if it can be loaded from the bootstrap store
+ """
+ if _python_import(module) or _try_import_from_store(module, query_spec):
+ return True, None
+ return False, msg
+
+
+def _required_executable(exes, query_spec, msg):
+ """Search for an executable in the system path or in the bootstrap store."""
+ if isinstance(exes, six.string_types):
+ exes = (exes,)
+ if (spack.util.executable.which_string(*exes) or
+ _executables_in_store(exes, query_spec)):
+ return True, None
+ return False, msg
+
+
+def _core_requirements():
+ _core_system_exes = {
+ 'make': _missing('make', 'required to build software from sources'),
+ 'patch': _missing('patch', 'required to patch source code before building'),
+ 'bash': _missing('bash', 'required for Spack compiler wrapper'),
+ 'tar': _missing('tar', 'required to manage code archives'),
+ 'gzip': _missing('gzip', 'required to compress/decompress code archives'),
+ 'unzip': _missing('unzip', 'required to compress/decompress code archives'),
+ 'bzip2': _missing('bzip2', 'required to compress/decompress code archives'),
+ 'git': _missing('git', 'required to fetch/manage git repositories')
+ }
+ if platform.system().lower() == 'linux':
+ _core_system_exes['xz'] = _missing(
+ 'xz', 'required to compress/decompress code archives'
+ )
+
+ # Executables that are not bootstrapped yet
+ result = [_required_system_executable(exe, msg)
+ for exe, msg in _core_system_exes.items()]
+ # Python modules
+ result.append(_required_python_module(
+ 'clingo', clingo_root_spec(),
+ _missing('clingo', 'required to concretize specs', False)
+ ))
+ return result
+
+
+def _buildcache_requirements():
+ _buildcache_exes = {
+ 'file': _missing('file', 'required to analyze files for buildcaches'),
+ ('gpg2', 'gpg'): _missing('gpg2', 'required to sign/verify buildcaches', False)
+ }
+ if platform.system().lower() == 'darwin':
+ _buildcache_exes['otool'] = _missing('otool', 'required to relocate binaries')
+
+ # Executables that are not bootstrapped yet
+ result = [_required_system_executable(exe, msg)
+ for exe, msg in _buildcache_exes.items()]
+
+ if platform.system().lower() == 'linux':
+ result.append(_required_executable(
+ 'patchelf', patchelf_root_spec(),
+ _missing('patchelf', 'required to relocate binaries', False)
+ ))
+
+ return result
+
+
+def _optional_requirements():
+ _optional_exes = {
+ 'zstd': _missing('zstd', 'required to compress/decompress code archives'),
+ 'svn': _missing('svn', 'required to manage subversion repositories'),
+ 'hg': _missing('hg', 'required to manage mercurial repositories')
+ }
+ # Executables that are not bootstrapped yet
+ result = [_required_system_executable(exe, msg)
+ for exe, msg in _optional_exes.items()]
+ return result
+
+
+def _development_requirements():
+ return [
+ _required_executable('isort', isort_root_spec(),
+ _missing('isort', 'required for style checks', False)),
+ _required_executable('mypy', mypy_root_spec(),
+ _missing('mypy', 'required for style checks', False)),
+ _required_executable('flake8', flake8_root_spec(),
+ _missing('flake8', 'required for style checks', False)),
+ _required_executable('black', black_root_spec(),
+ _missing('black', 'required for code formatting', False))
+ ]
+
+
+def status_message(section):
+ """Return a status message to be printed to screen that refers to the
+ section passed as argument and a bool which is True if there are missing
+ dependencies.
+
+ Args:
+ section (str): either 'core' or 'buildcache' or 'optional' or 'develop'
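+
+ Returns:
+ tuple: (str, bool) with the colorized message and a flag that is True
+ when at least one dependency is missing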
+ """
+ pass_token, fail_token = '@*g{[PASS]}', '@*r{[FAIL]}'
+
+ # Each entry contains the section header and a function returning its requirements
+ spack_sections = {
+ 'core': ("{0} @*{{Core Functionalities}}", _core_requirements),
+ 'buildcache': ("{0} @*{{Binary packages}}", _buildcache_requirements),
+ 'optional': ("{0} @*{{Optional Features}}", _optional_requirements),
+ 'develop': ("{0} @*{{Development Dependencies}}", _development_requirements)
+ }
+ msg, required_software = spack_sections[section]
+
+ with ensure_bootstrap_configuration():
+ missing_software = False
+ for found, err_msg in required_software():
+ if not found:
+ missing_software = True
+ msg += "\n " + err_msg
+ msg += '\n'
+ msg = msg.format(pass_token if not missing_software else fail_token)
+ return msg, missing_software
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 83aa634276..fa1ad76274 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -185,6 +185,13 @@ def clean_environment():
env.unset('LD_PRELOAD')
env.unset('DYLD_INSERT_LIBRARIES')
+ # Avoid <packagename>_ROOT user variables overriding spack dependencies
+ # https://cmake.org/cmake/help/latest/variable/PackageName_ROOT.html
+ # Spack needs SPACK_ROOT though, so we need to exclude that
+ for varname in os.environ.keys():
+ if varname.endswith('_ROOT') and varname != 'SPACK_ROOT':
+ env.unset(varname)
+
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
diff --git a/lib/spack/spack/build_systems/autotools.py b/lib/spack/spack/build_systems/autotools.py
index e0c8cf2e03..9b2de555bc 100644
--- a/lib/spack/spack/build_systems/autotools.py
+++ b/lib/spack/spack/build_systems/autotools.py
@@ -498,6 +498,9 @@ To resolve this problem, please try the following:
for ``<spec-name> foo=x +bar``
+ Note: returns an empty list when the variant is conditional and its condition
+ is not met.
+
Returns:
list: list of strings that corresponds to the activation/deactivation
of the variant that has been processed
@@ -519,6 +522,9 @@ To resolve this problem, please try the following:
msg = '"{0}" is not a variant of "{1}"'
raise KeyError(msg.format(variant, self.name))
+ if variant not in spec.variants:
+ return []
+
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
variant_desc, _ = self.variants[variant]
diff --git a/lib/spack/spack/build_systems/cmake.py b/lib/spack/spack/build_systems/cmake.py
index bf431e139d..e7ea30c6a2 100644
--- a/lib/spack/spack/build_systems/cmake.py
+++ b/lib/spack/spack/build_systems/cmake.py
@@ -267,6 +267,10 @@ class CMakePackage(PackageBase):
"-DSWR:STRING=avx;avx2]
for ``<spec-name> cxxstd=14 +shared swr=avx,avx2``
+
+ Note: if the provided variant is conditional, and the condition is not met,
+ this function returns an empty string. CMake discards empty strings
+ provided on the command line.
"""
if variant is None:
@@ -276,6 +280,9 @@ class CMakePackage(PackageBase):
raise KeyError(
'"{0}" is not a variant of "{1}"'.format(variant, self.name))
+ if variant not in self.spec.variants:
+ return ''
+
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
# Sort multi-valued variants for reproducibility
diff --git a/lib/spack/spack/build_systems/cuda.py b/lib/spack/spack/build_systems/cuda.py
index 634cfad637..79f57b046e 100644
--- a/lib/spack/spack/build_systems/cuda.py
+++ b/lib/spack/spack/build_systems/cuda.py
@@ -35,7 +35,8 @@ class CudaPackage(PackageBase):
variant('cuda_arch',
description='CUDA architecture',
- values=spack.variant.any_combination_of(*cuda_arch_values))
+ values=spack.variant.any_combination_of(*cuda_arch_values),
+ when='+cuda')
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples
# https://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code
diff --git a/lib/spack/spack/build_systems/intel.py b/lib/spack/spack/build_systems/intel.py
index 2c6732c19a..9968bc2ec6 100644
--- a/lib/spack/spack/build_systems/intel.py
+++ b/lib/spack/spack/build_systems/intel.py
@@ -690,6 +690,12 @@ class IntelPackage(PackageBase):
'--print-file-name', 'libgomp.%s' % dso_suffix, output=str)
omp_libs = LibraryList(omp_lib_path.strip())
+ elif '%clang' in self.spec:
+ clang = Executable(self.compiler.cc)
+ omp_lib_path = clang(
+ '--print-file-name', 'libomp.%s' % dso_suffix, output=str)
+ omp_libs = LibraryList(omp_lib_path.strip())
+
if len(omp_libs) < 1:
raise_lib_error('Cannot locate OpenMP libraries:', omp_libnames)
@@ -772,7 +778,7 @@ class IntelPackage(PackageBase):
if self.spec.satisfies('threads=openmp'):
if '%intel' in self.spec:
mkl_threading = 'libmkl_intel_thread'
- elif '%gcc' in self.spec:
+ elif '%gcc' in self.spec or '%clang' in self.spec:
mkl_threading = 'libmkl_gnu_thread'
threading_engine_libs = self.openmp_libs
elif self.spec.satisfies('threads=tbb'):
@@ -994,6 +1000,16 @@ class IntelPackage(PackageBase):
libnames,
root=self.component_lib_dir('mpi'),
shared=True, recursive=True) + result
+ # Intel MPI since 2019 depends on libfabric which is not in the
+ # lib directory but in a directory of its own which should be
+ # included in the rpath
+ if self.version_yearlike >= ver('2019'):
+ d = ancestor(self.component_lib_dir('mpi'))
+ if '+external-libfabric' in self.spec:
+ result += self.spec['libfabric'].libs
+ else:
+ result += find_libraries(['libfabric'],
+ os.path.join(d, 'libfabric', 'lib'))
if '^mpi' in self.spec.root and ('+mkl' in self.spec or
self.provides('scalapack')):
@@ -1091,15 +1107,6 @@ class IntelPackage(PackageBase):
# which performs dizzyingly similar but necessarily different
# actions, and (b) function code leaves a bit more breathing
# room within the suffocating corset of flake8 line length.
-
- # Intel MPI since 2019 depends on libfabric which is not in the
- # lib directory but in a directory of its own which should be
- # included in the rpath
- if self.version_yearlike >= ver('2019'):
- d = ancestor(self.component_lib_dir('mpi'))
- libfabrics_path = os.path.join(d, 'libfabric', 'lib')
- env.append_path('SPACK_COMPILER_EXTRA_RPATHS',
- libfabrics_path)
else:
raise InstallError('compilers_of_client arg required for MPI')
diff --git a/lib/spack/spack/build_systems/oneapi.py b/lib/spack/spack/build_systems/oneapi.py
index 0ff77d729b..e50e921915 100644
--- a/lib/spack/spack/build_systems/oneapi.py
+++ b/lib/spack/spack/build_systems/oneapi.py
@@ -99,7 +99,13 @@ class IntelOneApiPackage(Package):
class IntelOneApiLibraryPackage(IntelOneApiPackage):
- """Base class for Intel oneAPI library packages."""
+ """Base class for Intel oneAPI library packages.
+
+ Contains some convenient default implementations for libraries.
+ Implement the method directly in the package if something
+ different is needed.
+
+ """
@property
def headers(self):
@@ -111,3 +117,36 @@ class IntelOneApiLibraryPackage(IntelOneApiPackage):
lib_path = join_path(self.component_path, 'lib', 'intel64')
lib_path = lib_path if isdir(lib_path) else dirname(lib_path)
return find_libraries('*', root=lib_path, shared=True, recursive=True)
+
+
+class IntelOneApiStaticLibraryList(object):
+ """Provides ld_flags when static linking is needed
+
+ oneAPI puts static and dynamic libraries in the same directory, so
+ -l will default to finding the dynamic library. Use absolute
+ paths, as recommended by oneapi documentation.
+
+ Allow both static and dynamic libraries to be supplied by the
+ package.
+ """
+
+ def __init__(self, static_libs, dynamic_libs):
+ self.static_libs = static_libs
+ self.dynamic_libs = dynamic_libs
+
+ @property
+ def directories(self):
+ return self.dynamic_libs.directories
+
+ @property
+ def search_flags(self):
+ return self.dynamic_libs.search_flags
+
+ @property
+ def link_flags(self):
+ return '-Wl,--start-group {0} -Wl,--end-group {1}'.format(
+ ' '.join(self.static_libs.libraries), self.dynamic_libs.link_flags)
+
+ @property
+ def ld_flags(self):
+ return '{0} {1}'.format(self.search_flags, self.link_flags)
diff --git a/lib/spack/spack/build_systems/python.py b/lib/spack/spack/build_systems/python.py
index a308e77cb9..2d003f38e3 100644
--- a/lib/spack/spack/build_systems/python.py
+++ b/lib/spack/spack/build_systems/python.py
@@ -4,6 +4,7 @@
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
+import re
import shutil
import llnl.util.tty as tty
@@ -144,6 +145,8 @@ class PythonPackage(PackageBase):
modules.append(path.replace(root + os.sep, '', 1).replace(
'.py', '').replace('/', '.'))
+ modules = [mod for mod in modules if re.match('[a-zA-Z0-9._]+$', mod)]
+
tty.debug('Detected the following modules: {0}'.format(modules))
return modules
diff --git a/lib/spack/spack/build_systems/rocm.py b/lib/spack/spack/build_systems/rocm.py
index 977db700db..e2490aaa6e 100644
--- a/lib/spack/spack/build_systems/rocm.py
+++ b/lib/spack/spack/build_systems/rocm.py
@@ -91,7 +91,7 @@ class ROCmPackage(PackageBase):
# Possible architectures
amdgpu_targets = (
'gfx701', 'gfx801', 'gfx802', 'gfx803',
- 'gfx900', 'gfx906', 'gfx908', 'gfx1010',
+ 'gfx900', 'gfx906', 'gfx908', 'gfx90a', 'gfx1010',
'gfx1011', 'gfx1012'
)
@@ -100,7 +100,8 @@ class ROCmPackage(PackageBase):
# possible amd gpu targets for rocm builds
variant('amdgpu_target',
description='AMD GPU architecture',
- values=spack.variant.any_combination_of(*amdgpu_targets))
+ values=spack.variant.any_combination_of(*amdgpu_targets),
+ when='+rocm')
depends_on('llvm-amdgpu', when='+rocm')
depends_on('hsa-rocr-dev', when='+rocm')
diff --git a/lib/spack/spack/build_systems/sip.py b/lib/spack/spack/build_systems/sip.py
index 814aa2605b..49fdd621ee 100644
--- a/lib/spack/spack/build_systems/sip.py
+++ b/lib/spack/spack/build_systems/sip.py
@@ -5,6 +5,7 @@
import inspect
import os
+import re
import llnl.util.tty as tty
from llnl.util.filesystem import find, join_path, working_dir
@@ -81,6 +82,8 @@ class SIPPackage(PackageBase):
modules.append(path.replace(root + os.sep, '', 1).replace(
'.py', '').replace('/', '.'))
+ modules = [mod for mod in modules if re.match('[a-zA-Z0-9._]+$', mod)]
+
tty.debug('Detected the following modules: {0}'.format(modules))
return modules
diff --git a/lib/spack/spack/ci.py b/lib/spack/spack/ci.py
index 8d337196ce..f23445f1df 100644
--- a/lib/spack/spack/ci.py
+++ b/lib/spack/spack/ci.py
@@ -1271,6 +1271,7 @@ def get_concrete_specs(env, root_spec, job_name, related_builds,
def register_cdash_build(build_name, base_url, project, site, track):
url = base_url + '/api/v1/addBuild.php'
time_stamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
+ build_id = None
build_stamp = '{0}-{1}'.format(time_stamp, track)
payload = {
"project": project,
@@ -1292,17 +1293,20 @@ def register_cdash_build(build_name, base_url, project, site, track):
request = Request(url, data=enc_data, headers=headers)
- response = opener.open(request)
- response_code = response.getcode()
+ try:
+ response = opener.open(request)
+ response_code = response.getcode()
- if response_code != 200 and response_code != 201:
- msg = 'Adding build failed (response code = {0}'.format(response_code)
- tty.warn(msg)
- return (None, None)
+ if response_code != 200 and response_code != 201:
+ msg = 'Adding build failed (response code = {0})'.format(response_code)
+ tty.warn(msg)
+ return (None, None)
- response_text = response.read()
- response_json = json.loads(response_text)
- build_id = response_json['buildid']
+ response_text = response.read()
+ response_json = json.loads(response_text)
+ build_id = response_json['buildid']
+ except Exception as e:
+ print("Registering build in CDash failed: {0}".format(e))
return (build_id, build_stamp)
@@ -1412,15 +1416,26 @@ def read_cdashid_from_mirror(spec, mirror_url):
return int(contents)
-def push_mirror_contents(env, spec, specfile_path, mirror_url, sign_binaries):
+def _push_mirror_contents(env, specfile_path, sign_binaries, mirror_url):
+ """Unchecked version of the public API, for easier mocking"""
+ unsigned = not sign_binaries
+ tty.debug('Creating buildcache ({0})'.format(
+ 'unsigned' if unsigned else 'signed'))
+ hashes = env.all_hashes() if env else None
+ matches = spack.store.specfile_matches(specfile_path, hashes=hashes)
+ push_url = spack.mirror.push_url_from_mirror_url(mirror_url)
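+ # Push only the root of each matched spec; dependencies are assumed to be
+ # pushed by their own CI jobs.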
+ spec_kwargs = {'include_root': True, 'include_dependencies': False}
+ kwargs = {
+ 'force': True,
+ 'allow_root': True,
+ 'unsigned': unsigned
+ }
+ bindist.push(matches, push_url, spec_kwargs, **kwargs)
+
+
+def push_mirror_contents(env, specfile_path, mirror_url, sign_binaries):
try:
- unsigned = not sign_binaries
- tty.debug('Creating buildcache ({0})'.format(
- 'unsigned' if unsigned else 'signed'))
- spack.cmd.buildcache._createtarball(
- env, spec_file=specfile_path, add_deps=False,
- output_location=mirror_url, force=True, allow_root=True,
- unsigned=unsigned)
+ _push_mirror_contents(env, specfile_path, sign_binaries, mirror_url)
except Exception as inst:
# If the mirror we're pushing to is on S3 and there's some
# permissions problem, for example, we can't just target
diff --git a/lib/spack/spack/cmd/analyze.py b/lib/spack/spack/cmd/analyze.py
index f584674ae2..6048c47972 100644
--- a/lib/spack/spack/cmd/analyze.py
+++ b/lib/spack/spack/cmd/analyze.py
@@ -110,7 +110,6 @@ def analyze(parser, args, **kwargs):
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
- disable_auth=args.monitor_disable_auth,
)
# Run the analysis
diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py
index ae3e1b7639..7446650403 100644
--- a/lib/spack/spack/cmd/bootstrap.py
+++ b/lib/spack/spack/cmd/bootstrap.py
@@ -10,6 +10,8 @@ import shutil
import llnl.util.tty
import llnl.util.tty.color
+import spack
+import spack.bootstrap
import spack.cmd.common.arguments
import spack.config
import spack.main
@@ -32,6 +34,16 @@ def _add_scope_option(parser):
def setup_parser(subparser):
sp = subparser.add_subparsers(dest='subcommand')
+ status = sp.add_parser('status', help='get the status of Spack')
+ status.add_argument(
+ '--optional', action='store_true', default=False,
+ help='show the status of rarely used optional dependencies'
+ )
+ status.add_argument(
+ '--dev', action='store_true', default=False,
+ help='show the status of dependencies needed to develop Spack'
+ )
+
enable = sp.add_parser('enable', help='enable bootstrapping')
_add_scope_option(enable)
@@ -207,8 +219,39 @@ def _untrust(args):
llnl.util.tty.msg(msg.format(args.name))
+def _status(args):
+ sections = ['core', 'buildcache']
+ if args.optional:
+ sections.append('optional')
+ if args.dev:
+ sections.append('develop')
+
+ header = "@*b{{Spack v{0} - {1}}}".format(
+ spack.spack_version, spack.bootstrap.spec_for_current_python()
+ )
+ print(llnl.util.tty.color.colorize(header))
+ print()
+ # Use the context manager here to avoid swapping between user and
+ # bootstrap config many times
+ missing = False
+ with spack.bootstrap.ensure_bootstrap_configuration():
+ for current_section in sections:
+ status_msg, fail = spack.bootstrap.status_message(section=current_section)
+ missing = missing or fail
+ if status_msg:
+ print(llnl.util.tty.color.colorize(status_msg))
+ print()
+ legend = ('Spack will take care of bootstrapping any missing dependency marked'
+ ' as [@*y{B}]. Dependencies marked as [@*y{-}] are instead required'
+ ' to be found on the system.')
+ if missing:
+ print(llnl.util.tty.color.colorize(legend))
+ print()
+
+
def bootstrap(parser, args):
callbacks = {
+ 'status': _status,
'enable': _enable_or_disable,
'disable': _enable_or_disable,
'reset': _reset,
diff --git a/lib/spack/spack/cmd/buildcache.py b/lib/spack/spack/cmd/buildcache.py
index 1fda884e9a..d6f5d63ea6 100644
--- a/lib/spack/spack/cmd/buildcache.py
+++ b/lib/spack/spack/cmd/buildcache.py
@@ -2,11 +2,11 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-import argparse
import os
import shutil
import sys
import tempfile
+import warnings
import llnl.util.tty as tty
@@ -40,7 +40,7 @@ def setup_parser(subparser):
setup_parser.parser = subparser
subparsers = subparser.add_subparsers(help='buildcache sub-commands')
- create = subparsers.add_parser('create', help=createtarball.__doc__)
+ create = subparsers.add_parser('create', help=create_fn.__doc__)
create.add_argument('-r', '--rel', action='store_true',
help="make all rpaths relative" +
" before creating tarballs.")
@@ -86,9 +86,9 @@ def setup_parser(subparser):
' decide to build a cache for only the package'
' or only the dependencies'))
arguments.add_common_arguments(create, ['specs'])
- create.set_defaults(func=createtarball)
+ create.set_defaults(func=create_fn)
- install = subparsers.add_parser('install', help=installtarball.__doc__)
+ install = subparsers.add_parser('install', help=install_fn.__doc__)
install.add_argument('-f', '--force', action='store_true',
help="overwrite install directory if it exists.")
install.add_argument('-m', '--multiple', action='store_true',
@@ -102,16 +102,11 @@ def setup_parser(subparser):
install.add_argument('-o', '--otherarch', action='store_true',
help="install specs from other architectures" +
" instead of default platform and OS")
- # This argument is needed by the bootstrapping logic to verify checksums
- install.add_argument('--sha256', help=argparse.SUPPRESS)
- install.add_argument(
- '--only-root', action='store_true', help=argparse.SUPPRESS
- )
arguments.add_common_arguments(install, ['specs'])
- install.set_defaults(func=installtarball)
+ install.set_defaults(func=install_fn)
- listcache = subparsers.add_parser('list', help=listspecs.__doc__)
+ listcache = subparsers.add_parser('list', help=list_fn.__doc__)
arguments.add_common_arguments(listcache, ['long', 'very_long'])
listcache.add_argument('-v', '--variants',
action='store_true',
@@ -121,29 +116,25 @@ def setup_parser(subparser):
help="list specs for all available architectures" +
" instead of default platform and OS")
arguments.add_common_arguments(listcache, ['specs'])
- listcache.set_defaults(func=listspecs)
+ listcache.set_defaults(func=list_fn)
- dlkeys = subparsers.add_parser('keys', help=getkeys.__doc__)
- dlkeys.add_argument(
+ keys = subparsers.add_parser('keys', help=keys_fn.__doc__)
+ keys.add_argument(
'-i', '--install', action='store_true',
help="install Keys pulled from mirror")
- dlkeys.add_argument(
+ keys.add_argument(
'-t', '--trust', action='store_true',
help="trust all downloaded keys")
- dlkeys.add_argument('-f', '--force', action='store_true',
- help="force new download of keys")
- dlkeys.set_defaults(func=getkeys)
-
- preview_parser = subparsers.add_parser(
- 'preview',
- help='analyzes an installed spec and reports whether '
- 'executables and libraries are relocatable'
- )
- arguments.add_common_arguments(preview_parser, ['installed_specs'])
- preview_parser.set_defaults(func=preview)
+ keys.add_argument('-f', '--force', action='store_true',
+ help="force new download of keys")
+ keys.set_defaults(func=keys_fn)
+
+ preview = subparsers.add_parser('preview', help=preview_fn.__doc__)
+ arguments.add_common_arguments(preview, ['installed_specs'])
+ preview.set_defaults(func=preview_fn)
# Check if binaries need to be rebuilt on remote mirror
- check = subparsers.add_parser('check', help=check_binaries.__doc__)
+ check = subparsers.add_parser('check', help=check_fn.__doc__)
check.add_argument(
'-m', '--mirror-url', default=None,
help='Override any configured mirrors with this mirror url')
@@ -175,28 +166,28 @@ def setup_parser(subparser):
help="Default to rebuilding packages if errors are encountered " +
"during the process of checking whether rebuilding is needed")
- check.set_defaults(func=check_binaries)
+ check.set_defaults(func=check_fn)
# Download tarball and specfile
- dltarball = subparsers.add_parser('download', help=get_tarball.__doc__)
- dltarball.add_argument(
+ download = subparsers.add_parser('download', help=download_fn.__doc__)
+ download.add_argument(
'-s', '--spec', default=None,
help="Download built tarball for spec from mirror")
- dltarball.add_argument(
+ download.add_argument(
'--spec-file', default=None,
help=("Download built tarball for spec (from json or yaml file) " +
"from mirror"))
- dltarball.add_argument(
+ download.add_argument(
'-p', '--path', default=None,
help="Path to directory where tarball should be downloaded")
- dltarball.add_argument(
+ download.add_argument(
'-c', '--require-cdashid', action='store_true', default=False,
help="Require .cdashid file to be downloaded with buildcache entry")
- dltarball.set_defaults(func=get_tarball)
+ download.set_defaults(func=download_fn)
# Get buildcache name
getbuildcachename = subparsers.add_parser('get-buildcache-name',
- help=get_buildcache_name.__doc__)
+ help=get_buildcache_name_fn.__doc__)
getbuildcachename.add_argument(
'-s', '--spec', default=None,
help='Spec string for which buildcache name is desired')
@@ -204,11 +195,11 @@ def setup_parser(subparser):
'--spec-file', default=None,
help=('Path to spec json or yaml file for which buildcache name is ' +
'desired'))
- getbuildcachename.set_defaults(func=get_buildcache_name)
+ getbuildcachename.set_defaults(func=get_buildcache_name_fn)
# Given the root spec, save the yaml of the dependent spec to a file
savespecfile = subparsers.add_parser('save-specfile',
- help=save_specfiles.__doc__)
+ help=save_specfile_fn.__doc__)
savespecfile.add_argument(
'--root-spec', default=None,
help='Root spec of dependent spec')
@@ -221,10 +212,10 @@ def setup_parser(subparser):
savespecfile.add_argument(
'--specfile-dir', default=None,
help='Path to directory where spec yamls should be saved')
- savespecfile.set_defaults(func=save_specfiles)
+ savespecfile.set_defaults(func=save_specfile_fn)
# Copy buildcache from some directory to another mirror url
- copy = subparsers.add_parser('copy', help=buildcache_copy.__doc__)
+ copy = subparsers.add_parser('copy', help=copy_fn.__doc__)
copy.add_argument(
'--base-dir', default=None,
help='Path to mirror directory (root of existing buildcache)')
@@ -235,10 +226,10 @@ def setup_parser(subparser):
copy.add_argument(
'--destination-url', default=None,
help='Destination mirror url')
- copy.set_defaults(func=buildcache_copy)
+ copy.set_defaults(func=copy_fn)
# Sync buildcache entries from one mirror to another
- sync = subparsers.add_parser('sync', help=buildcache_sync.__doc__)
+ sync = subparsers.add_parser('sync', help=sync_fn.__doc__)
source = sync.add_mutually_exclusive_group(required=True)
source.add_argument('--src-directory',
metavar='DIRECTORY',
@@ -265,312 +256,110 @@ def setup_parser(subparser):
metavar='MIRROR_URL',
type=str,
help="URL of the destination mirror")
- sync.set_defaults(func=buildcache_sync)
+ sync.set_defaults(func=sync_fn)
# Update buildcache index without copying any additional packages
update_index = subparsers.add_parser(
- 'update-index', help=buildcache_update_index.__doc__)
+ 'update-index', help=update_index_fn.__doc__)
update_index.add_argument(
'-d', '--mirror-url', default=None, help='Destination mirror url')
update_index.add_argument(
'-k', '--keys', default=False, action='store_true',
help='If provided, key index will be updated as well as package index')
- update_index.set_defaults(func=buildcache_update_index)
-
-
-def find_matching_specs(pkgs, allow_multiple_matches=False, env=None):
- """Returns a list of specs matching the not necessarily
- concretized specs given from cli
+ update_index.set_defaults(func=update_index_fn)
- Args:
- pkgs (str): spec to be matched against installed packages
- allow_multiple_matches (bool): if True multiple matches are admitted
- env (spack.environment.Environment or None): active environment, or ``None``
- if there is not one
- Return:
- list: list of specs
+def _matching_specs(args):
+ """Return a list of matching specs read from either a spec file (JSON or YAML),
+ a query over the store or a query over the active environment.
"""
+ env = ev.active_environment()
hashes = env.all_hashes() if env else None
+ if args.spec_file:
+ return spack.store.specfile_matches(args.spec_file, hashes=hashes)
- # List of specs that match expressions given via command line
- specs_from_cli = []
- has_errors = False
- tty.debug('find_matching_specs: about to parse specs for {0}'.format(pkgs))
- specs = spack.cmd.parse_specs(pkgs)
- for spec in specs:
- matching = spack.store.db.query(spec, hashes=hashes)
- # For each spec provided, make sure it refers to only one package.
- # Fail and ask user to be unambiguous if it doesn't
- if not allow_multiple_matches and len(matching) > 1:
- tty.error('%s matches multiple installed packages:' % spec)
- for match in matching:
- tty.msg('"%s"' % match.format())
- has_errors = True
-
- # No installed package matches the query
- if len(matching) == 0 and spec is not any:
- tty.error('{0} does not match any installed packages.'.format(
- spec))
- has_errors = True
-
- specs_from_cli.extend(matching)
- if has_errors:
- tty.die('use one of the matching specs above')
-
- return specs_from_cli
-
-
-def match_downloaded_specs(pkgs, allow_multiple_matches=False, force=False,
- other_arch=False):
- """Returns a list of specs matching the not necessarily
- concretized specs given from cli
-
- Args:
- specs: list of specs to be matched against buildcaches on mirror
- allow_multiple_matches : if True multiple matches are admitted
-
- Return:
- list of specs
- """
- # List of specs that match expressions given via command line
- specs_from_cli = []
- has_errors = False
-
- try:
- specs = bindist.update_cache_and_get_specs()
- except bindist.FetchCacheError as e:
- tty.error(e)
-
- if not other_arch:
- arch = spack.spec.Spec.default_arch()
- specs = [s for s in specs if s.satisfies(arch)]
+ if args.specs:
+ constraints = spack.cmd.parse_specs(args.specs)
+ return spack.store.find(constraints, hashes=hashes)
- for pkg in pkgs:
- matches = []
- tty.msg("buildcache spec(s) matching %s \n" % pkg)
- for spec in sorted(specs):
- if pkg.startswith('/'):
- pkghash = pkg.replace('/', '')
- if spec.dag_hash().startswith(pkghash):
- matches.append(spec)
- else:
- if spec.satisfies(pkg):
- matches.append(spec)
- # For each pkg provided, make sure it refers to only one package.
- # Fail and ask user to be unambiguous if it doesn't
- if not allow_multiple_matches and len(matches) > 1:
- tty.error('%s matches multiple downloaded packages:' % pkg)
- for match in matches:
- tty.msg('"%s"' % match.format())
- has_errors = True
-
- # No downloaded package matches the query
- if len(matches) == 0:
- tty.error('%s does not match any downloaded packages.' % pkg)
- has_errors = True
-
- specs_from_cli.extend(matches)
- if has_errors:
- tty.die('use one of the matching specs above')
-
- return specs_from_cli
-
-
-def _createtarball(env, spec_file=None, packages=None, add_spec=True,
- add_deps=True, output_location=os.getcwd(),
- signing_key=None, force=False, make_relative=False,
- unsigned=False, allow_root=False, rebuild_index=False):
- if spec_file:
- with open(spec_file, 'r') as fd:
- specfile_contents = fd.read()
- tty.debug('createtarball read specfile contents:')
- tty.debug(specfile_contents)
- if spec_file.endswith('.json'):
- s = Spec.from_json(specfile_contents)
- else:
- s = Spec.from_yaml(specfile_contents)
- package = '/{0}'.format(s.dag_hash())
- matches = find_matching_specs(package, env=env)
-
- elif packages:
- matches = find_matching_specs(packages, env=env)
-
- elif env:
- matches = [env.specs_by_hash[h] for h in env.concretized_order]
+ if env:
+ return [env.specs_by_hash[h] for h in env.concretized_order]
- else:
- tty.die("build cache file creation requires at least one" +
- " installed package spec, an active environment," +
- " or else a path to a json or yaml file containing a spec" +
- " to install")
- specs = set()
+ tty.die("build cache file creation requires at least one" +
+ " installed package spec, an active environment," +
+ " or else a path to a json or yaml file containing a spec" +
+ " to install")
- mirror = spack.mirror.MirrorCollection().lookup(output_location)
- outdir = url_util.format(mirror.push_url)
-
- msg = 'Buildcache files will be output to %s/build_cache' % outdir
- tty.msg(msg)
- if matches:
- tty.debug('Found at least one matching spec')
+def _concrete_spec_from_args(args):
+ spec_str, specfile_path = args.spec, args.spec_file
- for match in matches:
- tty.debug('examining match {0}'.format(match.format()))
- if match.external or match.virtual:
- tty.debug('skipping external or virtual spec %s' %
- match.format())
- else:
- lookup = spack.store.db.query_one(match)
-
- if not add_spec:
- tty.debug('skipping matching root spec %s' % match.format())
- elif lookup is None:
- tty.debug('skipping uninstalled matching spec %s' %
- match.format())
- else:
- tty.debug('adding matching spec %s' % match.format())
- specs.add(match)
-
- if not add_deps:
- continue
-
- tty.debug('recursing dependencies')
- for d, node in match.traverse(order='post',
- depth=True,
- deptype=('link', 'run')):
- # skip root, since it's handled above
- if d == 0:
- continue
-
- lookup = spack.store.db.query_one(node)
-
- if node.external or node.virtual:
- tty.debug('skipping external or virtual dependency %s' %
- node.format())
- elif lookup is None:
- tty.debug('skipping uninstalled depenendency %s' %
- node.format())
- else:
- tty.debug('adding dependency %s' % node.format())
- specs.add(node)
-
- tty.debug('writing tarballs to %s/build_cache' % outdir)
+ if not spec_str and not specfile_path:
+ tty.error('must provide either spec string or path to YAML or JSON specfile')
+ sys.exit(1)
- for spec in specs:
- tty.debug('creating binary cache file for package %s ' % spec.format())
+ if spec_str:
try:
- bindist.build_tarball(spec, outdir, force, make_relative,
- unsigned, allow_root, signing_key,
- rebuild_index)
- except bindist.NoOverwriteException as e:
- tty.warn(e)
+ constraints = spack.cmd.parse_specs(spec_str)
+ spec = spack.store.find(constraints)[0]
+ spec.concretize()
+ except SpecError as spec_error:
+ tty.error('Unable to concretize spec {0}'.format(spec_str))
+ tty.debug(spec_error)
+ sys.exit(1)
+ return spec
-def createtarball(args):
- """create a binary package from an existing install"""
+ return Spec.from_specfile(specfile_path)
- # restrict matching to current environment if one is active
- env = ev.active_environment()
- output_location = None
+def create_fn(args):
+ """create a binary package and push it to a mirror"""
if args.directory:
- output_location = args.directory
-
- # User meant to provide a path to a local directory.
- # Ensure that they did not accidentally pass a URL.
- scheme = url_util.parse(output_location, scheme='<missing>').scheme
- if scheme != '<missing>':
- raise ValueError(
- '"--directory" expected a local path; got a URL, instead')
-
- # User meant to provide a path to a local directory.
- # Ensure that the mirror lookup does not mistake it for a named mirror.
- output_location = 'file://' + output_location
-
- elif args.mirror_name:
- output_location = args.mirror_name
-
- # User meant to provide the name of a preconfigured mirror.
- # Ensure that the mirror lookup actually returns a named mirror.
- result = spack.mirror.MirrorCollection().lookup(output_location)
- if result.name == "<unnamed>":
- raise ValueError(
- 'no configured mirror named "{name}"'.format(
- name=output_location))
+ push_url = spack.mirror.push_url_from_directory(args.directory)
- elif args.mirror_url:
- output_location = args.mirror_url
+ if args.mirror_name:
+ push_url = spack.mirror.push_url_from_mirror_name(args.mirror_name)
- # User meant to provide a URL for an anonymous mirror.
- # Ensure that they actually provided a URL.
- scheme = url_util.parse(output_location, scheme='<missing>').scheme
- if scheme == '<missing>':
- raise ValueError(
- '"{url}" is not a valid URL'.format(url=output_location))
- add_spec = ('package' in args.things_to_install)
- add_deps = ('dependencies' in args.things_to_install)
-
- _createtarball(env, spec_file=args.spec_file, packages=args.specs,
- add_spec=add_spec, add_deps=add_deps,
- output_location=output_location, signing_key=args.key,
- force=args.force, make_relative=args.rel,
- unsigned=args.unsigned, allow_root=args.allow_root,
- rebuild_index=args.rebuild_index)
+ if args.mirror_url:
+ push_url = spack.mirror.push_url_from_mirror_url(args.mirror_url)
+ matches = _matching_specs(args)
-def installtarball(args):
+ msg = 'Pushing binary packages to {0}/build_cache'.format(push_url)
+ tty.msg(msg)
+ specs_kwargs = {
+ 'include_root': 'package' in args.things_to_install,
+ 'include_dependencies': 'dependencies' in args.things_to_install
+ }
+ kwargs = {
+ 'key': args.key,
+ 'force': args.force,
+ 'relative': args.rel,
+ 'unsigned': args.unsigned,
+ 'allow_root': args.allow_root,
+ 'regenerate_index': args.rebuild_index
+ }
+ bindist.push(matches, push_url, specs_kwargs, **kwargs)
+
+
+def install_fn(args):
"""install from a binary package"""
if not args.specs:
- tty.die("build cache file installation requires" +
- " at least one package spec argument")
- pkgs = set(args.specs)
- matches = match_downloaded_specs(pkgs, args.multiple, args.force,
- args.otherarch)
+ tty.die("a spec argument is required to install from a buildcache")
+ query = bindist.BinaryCacheQuery(all_architectures=args.otherarch)
+ matches = spack.store.find(args.specs, multiple=args.multiple, query_fn=query)
for match in matches:
- install_tarball(match, args)
+ bindist.install_single_spec(
+ match,
+ allow_root=args.allow_root,
+ unsigned=args.unsigned,
+ force=args.force
+ )
-def install_tarball(spec, args):
- s = Spec(spec)
- if s.external or s.virtual:
- tty.warn("Skipping external or virtual package %s" % spec.format())
- return
-
- # This argument is used only for bootstrapping specs without signatures,
- # since we need to check the sha256 of each tarball
- if not args.only_root:
- for d in s.dependencies(deptype=('link', 'run')):
- tty.msg("Installing buildcache for dependency spec %s" % d)
- install_tarball(d, args)
-
- package = spack.repo.get(spec)
- if s.concrete and package.installed and not args.force:
- tty.warn("Package for spec %s already installed." % spec.format())
- else:
- tarball = bindist.download_tarball(spec)
- if tarball:
- if args.sha256:
- checker = spack.util.crypto.Checker(args.sha256)
- msg = ('cannot verify checksum for "{0}"'
- ' [expected={1}]')
- msg = msg.format(tarball, args.sha256)
- if not checker.check(tarball):
- raise spack.binary_distribution.NoChecksumException(msg)
- tty.debug('Verified SHA256 checksum of the build cache')
-
- tty.msg('Installing buildcache for spec %s' % spec.format())
- bindist.extract_tarball(spec, tarball, args.allow_root,
- args.unsigned, args.force)
- spack.hooks.post_install(spec)
- spack.store.db.add(spec, spack.store.layout)
- else:
- tty.die('Download of binary cache file for spec %s failed.' %
- spec.format())
-
-
-def listspecs(args):
+def list_fn(args):
"""list binary packages available from mirrors"""
try:
specs = bindist.update_cache_and_get_specs()
@@ -593,19 +382,17 @@ def listspecs(args):
display_specs(specs, args, all_headers=True)
-def getkeys(args):
+def keys_fn(args):
"""get public keys available on mirrors"""
bindist.get_keys(args.install, args.trust, args.force)
-def preview(args):
- """Print a status tree of the selected specs that shows which nodes are
- relocatable and which might not be.
-
- Args:
- args: command line arguments
+def preview_fn(args):
+ """analyze an installed spec and reports whether executables
+ and libraries are relocatable
"""
- specs = find_matching_specs(args.specs, allow_multiple_matches=True)
+ constraints = spack.cmd.parse_specs(args.specs)
+ specs = spack.store.find(constraints, multiple=True)
# Cycle over the specs that match
for spec in specs:
@@ -614,7 +401,7 @@ def preview(args):
print(spec.tree(status_fn=spack.relocate.is_relocatable))
-def check_binaries(args):
+def check_fn(args):
"""Check specs (either a single spec from --spec, or else the full set
of release specs) against remote binary mirror(s) to see if any need
to be rebuilt. This command uses the process exit code to indicate
@@ -622,7 +409,7 @@ def check_binaries(args):
one of the indicated specs needs to be rebuilt.
"""
if args.spec or args.spec_file:
- specs = [get_concrete_spec(args)]
+ specs = [_concrete_spec_from_args(args)]
else:
env = spack.cmd.require_active_env(cmd_name='buildcache')
env.concretize()
@@ -649,34 +436,7 @@ def check_binaries(args):
configured_mirrors, specs, args.output_file, args.rebuild_on_error))
-def download_buildcache_files(concrete_spec, local_dest, require_cdashid,
- mirror_url=None):
- tarfile_name = bindist.tarball_name(concrete_spec, '.spack')
- tarball_dir_name = bindist.tarball_directory_name(concrete_spec)
- tarball_path_name = os.path.join(tarball_dir_name, tarfile_name)
- local_tarball_path = os.path.join(local_dest, tarball_dir_name)
-
- files_to_fetch = [
- {
- 'url': [tarball_path_name],
- 'path': local_tarball_path,
- 'required': True,
- }, {
- 'url': [bindist.tarball_name(concrete_spec, '.spec.json'),
- bindist.tarball_name(concrete_spec, '.spec.yaml')],
- 'path': local_dest,
- 'required': True,
- }, {
- 'url': [bindist.tarball_name(concrete_spec, '.cdashid')],
- 'path': local_dest,
- 'required': require_cdashid,
- },
- ]
-
- return bindist.download_buildcache_entry(files_to_fetch, mirror_url)
-
-
-def get_tarball(args):
+def download_fn(args):
"""Download buildcache entry from a remote mirror to local folder. This
command uses the process exit code to indicate its result, specifically,
a non-zero exit code indicates that the command failed to download at
@@ -691,54 +451,30 @@ def get_tarball(args):
tty.msg('No download path provided, exiting')
sys.exit(0)
- spec = get_concrete_spec(args)
- result = download_buildcache_files(spec, args.path, args.require_cdashid)
+ spec = _concrete_spec_from_args(args)
+ result = bindist.download_single_spec(
+ spec, args.path, require_cdashid=args.require_cdashid
+ )
if not result:
sys.exit(1)
-def get_concrete_spec(args):
- spec_str = args.spec
- spec_yaml_path = args.spec_file
-
- if not spec_str and not spec_yaml_path:
- tty.msg('Must provide either spec string or path to ' +
- 'yaml to concretize spec')
- sys.exit(1)
-
- if spec_str:
- try:
- spec = find_matching_specs(spec_str)[0]
- spec.concretize()
- except SpecError as spec_error:
- tty.error('Unable to concrectize spec {0}'.format(args.spec))
- tty.debug(spec_error)
- sys.exit(1)
-
- return spec
-
- with open(spec_yaml_path, 'r') as fd:
- return Spec.from_yaml(fd.read())
-
-
-def get_buildcache_name(args):
+def get_buildcache_name_fn(args):
"""Get name (prefix) of buildcache entries for this spec"""
- spec = get_concrete_spec(args)
+ spec = _concrete_spec_from_args(args)
buildcache_name = bindist.tarball_name(spec, '')
-
print('{0}'.format(buildcache_name))
- sys.exit(0)
-
-def save_specfiles(args):
+def save_specfile_fn(args):
"""Get full spec for dependencies, relative to root spec, and write them
to files in the specified output directory. Uses exit code to signal
success or failure. An exit code of zero means the command was likely
successful. If any errors or exceptions are encountered, or if expected
command-line arguments are not provided, then the exit code will be
- non-zero."""
+ non-zero.
+ """
if not args.root_spec and not args.root_specfile:
tty.msg('No root spec provided, exiting.')
sys.exit(1)
@@ -765,12 +501,15 @@ def save_specfiles(args):
sys.exit(0)
-def buildcache_copy(args):
+def copy_fn(args):
"""Copy a buildcache entry and all its files from one mirror, given as
'--base-dir', to some other mirror, specified as '--destination-url'.
The specific buildcache entry to be copied from one location to the
other is identified using the '--spec-file' argument."""
- # TODO: This sub-command should go away once #11117 is merged
+ # TODO: Remove after v0.18.0 release
+ msg = ('"spack buildcache copy" is deprecated and will be removed from '
+ 'Spack starting in v0.19.0')
+ warnings.warn(msg)
if not args.spec_file:
tty.msg('No spec yaml provided, exiting.')
@@ -845,7 +584,7 @@ def buildcache_copy(args):
shutil.copyfile(cdashid_src_path, cdashid_dest_path)
-def buildcache_sync(args):
+def sync_fn(args):
""" Syncs binaries (and associated metadata) from one mirror to another.
Requires an active environment in order to know which specs to sync.
@@ -979,7 +718,7 @@ def update_index(mirror_url, update_keys=False):
bindist.generate_key_index(keys_url)
-def buildcache_update_index(args):
+def update_index_fn(args):
"""Update a buildcache index."""
outdir = '.'
if args.mirror_url:
diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py
index b2a2bc5891..4796ed2500 100644
--- a/lib/spack/spack/cmd/checksum.py
+++ b/lib/spack/spack/cmd/checksum.py
@@ -25,17 +25,17 @@ level = "long"
def setup_parser(subparser):
subparser.add_argument(
- '--keep-stage', action='store_true',
+ '--keep-stage', action='store_true', default=False,
help="don't clean up staging area when command completes")
sp = subparser.add_mutually_exclusive_group()
sp.add_argument(
- '-b', '--batch', action='store_true',
+ '-b', '--batch', action='store_true', default=False,
help="don't ask which versions to checksum")
sp.add_argument(
- '-l', '--latest', action='store_true',
+ '-l', '--latest', action='store_true', default=False,
help="checksum the latest available version only")
sp.add_argument(
- '-p', '--preferred', action='store_true',
+ '-p', '--preferred', action='store_true', default=False,
help="checksum the preferred version only")
arguments.add_common_arguments(subparser, ['package'])
subparser.add_argument(
diff --git a/lib/spack/spack/cmd/ci.py b/lib/spack/spack/cmd/ci.py
index b8ed1fb27d..c0df5a2f40 100644
--- a/lib/spack/spack/cmd/ci.py
+++ b/lib/spack/spack/cmd/ci.py
@@ -397,8 +397,12 @@ def ci_rebuild(args):
tty.debug('Getting {0} buildcache from {1}'.format(
job_spec_pkg_name, matching_mirror))
tty.debug('Downloading to {0}'.format(build_cache_dir))
- buildcache.download_buildcache_files(
- job_spec, build_cache_dir, False, matching_mirror)
+ bindist.download_single_spec(
+ job_spec,
+ build_cache_dir,
+ require_cdashid=False,
+ mirror_url=matching_mirror
+ )
# Now we are done and successful
sys.exit(0)
@@ -433,17 +437,17 @@ def ci_rebuild(args):
cdash_build_name, cdash_base_url, cdash_project,
cdash_site, job_spec_buildgroup)
- if cdash_build_id is not None:
- cdash_upload_url = '{0}/submit.php?project={1}'.format(
- cdash_base_url, cdash_project_enc)
+ cdash_upload_url = '{0}/submit.php?project={1}'.format(
+ cdash_base_url, cdash_project_enc)
- install_args.extend([
- '--cdash-upload-url', cdash_upload_url,
- '--cdash-build', cdash_build_name,
- '--cdash-site', cdash_site,
- '--cdash-buildstamp', cdash_build_stamp,
- ])
+ install_args.extend([
+ '--cdash-upload-url', cdash_upload_url,
+ '--cdash-build', cdash_build_name,
+ '--cdash-site', cdash_site,
+ '--cdash-buildstamp', cdash_build_stamp,
+ ])
+ if cdash_build_id is not None:
tty.debug('CDash: Relating build with dependency builds')
spack_ci.relate_cdash_builds(
spec_map, cdash_base_url, cdash_build_id, cdash_project,
@@ -553,8 +557,8 @@ def ci_rebuild(args):
# per-PR mirror, if this is a PR pipeline
if buildcache_mirror_url:
spack_ci.push_mirror_contents(
- env, job_spec, job_spec_yaml_path, buildcache_mirror_url,
- sign_binaries)
+ env, job_spec_yaml_path, buildcache_mirror_url, sign_binaries
+ )
if cdash_build_id:
tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
@@ -568,8 +572,8 @@ def ci_rebuild(args):
# prefix is set)
if pipeline_mirror_url:
spack_ci.push_mirror_contents(
- env, job_spec, job_spec_yaml_path, pipeline_mirror_url,
- sign_binaries)
+ env, job_spec_yaml_path, pipeline_mirror_url, sign_binaries
+ )
if cdash_build_id:
tty.debug('Writing cdashid ({0}) to remote mirror: {1}'.format(
diff --git a/lib/spack/spack/cmd/common/arguments.py b/lib/spack/spack/cmd/common/arguments.py
index bea8ccea90..0aa30a15ca 100644
--- a/lib/spack/spack/cmd/common/arguments.py
+++ b/lib/spack/spack/cmd/common/arguments.py
@@ -328,3 +328,22 @@ def reuse():
'--reuse', action='store_true', default=False,
help='reuse installed dependencies'
)
+
+
+def add_s3_connection_args(subparser, add_help):
+ subparser.add_argument(
+ '--s3-access-key-id',
+ help="ID string to use to connect to this S3 mirror")
+ subparser.add_argument(
+ '--s3-access-key-secret',
+ help="Secret string to use to connect to this S3 mirror")
+ subparser.add_argument(
+ '--s3-access-token',
+ help="Access Token to use to connect to this S3 mirror")
+ subparser.add_argument(
+ '--s3-profile',
+ help="S3 profile name to use to connect to this S3 mirror",
+ default=None)
+ subparser.add_argument(
+ '--s3-endpoint-url',
+ help="Access Token to use to connect to this S3 mirror")
diff --git a/lib/spack/spack/cmd/containerize.py b/lib/spack/spack/cmd/containerize.py
index e22a5b4c4e..d3537d544c 100644
--- a/lib/spack/spack/cmd/containerize.py
+++ b/lib/spack/spack/cmd/containerize.py
@@ -50,7 +50,6 @@ def containerize(parser, args):
# If we have a monitor request, add monitor metadata to config
if args.use_monitor:
config['spack']['monitor'] = {
- "disable_auth": args.monitor_disable_auth,
"host": args.monitor_host,
"keep_going": args.monitor_keep_going,
"prefix": args.monitor_prefix,
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index bccf3bf66d..baeccc513e 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -316,6 +316,12 @@ class PythonPackageTemplate(PackageTemplate):
url = '/'.join([project, match.group(4)])
self.url_line = ' pypi = "{url}"'
+ else:
+ # Add a reminder about spack preferring PyPI URLs
+ self.url_line = '''
+ # FIXME: ensure the package is not available through PyPI. If it is,
+ # re-run `spack create --force` with the PyPI URL.
+''' + self.url_line
super(PythonPackageTemplate, self).__init__(name, url, *args, **kwargs)
diff --git a/lib/spack/spack/cmd/dev_build.py b/lib/spack/spack/cmd/dev_build.py
index 24c416e105..d2fc8df638 100644
--- a/lib/spack/spack/cmd/dev_build.py
+++ b/lib/spack/spack/cmd/dev_build.py
@@ -19,7 +19,7 @@ level = "long"
def setup_parser(subparser):
- arguments.add_common_arguments(subparser, ['jobs'])
+ arguments.add_common_arguments(subparser, ['jobs', 'reuse'])
subparser.add_argument(
'-d', '--source-path', dest='source_path', default=None,
help="path to source directory. defaults to the current directory")
@@ -86,7 +86,7 @@ def dev_build(self, args):
# Forces the build to run out of the source directory.
spec.constrain('dev_path=%s' % source_path)
- spec.concretize()
+ spec.concretize(reuse=args.reuse)
package = spack.repo.get(spec)
if package.installed:
diff --git a/lib/spack/spack/cmd/flake8.py b/lib/spack/spack/cmd/flake8.py
deleted file mode 100644
index 0579cb674a..0000000000
--- a/lib/spack/spack/cmd/flake8.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-from __future__ import print_function
-
-import llnl.util.tty as tty
-
-import spack.cmd.style
-
-description = "alias for spack style (deprecated)"
-section = spack.cmd.style.section
-level = spack.cmd.style.level
-
-
-def setup_parser(subparser):
- spack.cmd.style.setup_parser(subparser)
-
-
-def flake8(parser, args):
- tty.warn(
- "spack flake8 is deprecated", "please use `spack style` to run style checks"
- )
- return spack.cmd.style.style(parser, args)
diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index f4a4644312..2c69069ef0 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -305,7 +305,6 @@ environment variables:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
- disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
@@ -404,6 +403,10 @@ environment variables:
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
+
+ # Tell spack monitor about it
+ if args.use_monitor and abstract_specs:
+ monitor.failed_concretization(abstract_specs)
raise
# 2. Concrete specs from yaml files
@@ -467,7 +470,6 @@ environment variables:
# Update install_args with the monitor args, needed for build task
kwargs.update({
- "monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
diff --git a/lib/spack/spack/cmd/license.py b/lib/spack/spack/cmd/license.py
index 3627b35391..82cbc3b2a7 100644
--- a/lib/spack/spack/cmd/license.py
+++ b/lib/spack/spack/cmd/license.py
@@ -46,9 +46,8 @@ licensed_files = [
r'^lib/spack/docs/.*\.py$',
r'^lib/spack/docs/spack.yaml$',
- # 2 files in external
+ # 1 file in external
r'^lib/spack/external/__init__.py$',
- r'^lib/spack/external/ordereddict_backport.py$',
# shell scripts in share
r'^share/spack/.*\.sh$',
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index fa202f09f0..a9e51f019d 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -6,7 +6,6 @@
import sys
import llnl.util.tty as tty
-from llnl.util.tty.colify import colify
import spack.cmd
import spack.cmd.common.arguments as arguments
@@ -93,7 +92,7 @@ def setup_parser(subparser):
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
-
+ arguments.add_s3_connection_args(add_parser, False)
# Remove
remove_parser = sp.add_parser('remove', aliases=['rm'],
help=mirror_remove.__doc__)
@@ -117,6 +116,7 @@ def setup_parser(subparser):
'--scope', choices=scopes, metavar=scopes_metavar,
default=spack.config.default_modify_scope(),
help="configuration scope to modify")
+ arguments.add_s3_connection_args(set_url_parser, False)
# List
list_parser = sp.add_parser('list', help=mirror_list.__doc__)
@@ -129,7 +129,7 @@ def setup_parser(subparser):
def mirror_add(args):
"""Add a mirror to Spack."""
url = url_util.format(args.url)
- spack.mirror.add(args.name, url, args.scope)
+ spack.mirror.add(args.name, url, args.scope, args)
def mirror_remove(args):
@@ -140,7 +140,6 @@ def mirror_remove(args):
def mirror_set_url(args):
"""Change the URL of a mirror."""
url = url_util.format(args.url)
-
mirrors = spack.config.get('mirrors', scope=args.scope)
if not mirrors:
mirrors = syaml_dict()
@@ -149,7 +148,15 @@ def mirror_set_url(args):
tty.die("No mirror found with name %s." % args.name)
entry = mirrors[args.name]
-
+ key_values = ["s3_access_key_id", "s3_access_token", "s3_profile"]
+
+    if any(value in args for value in key_values):
+ incoming_data = {"url": url,
+ "access_pair": (args.s3_access_key_id,
+ args.s3_access_key_secret),
+ "access_token": args.s3_access_token,
+ "profile": args.s3_profile,
+ "endpoint_url": args.s3_endpoint_url}
try:
fetch_url = entry['fetch']
push_url = entry['push']
@@ -159,20 +166,28 @@ def mirror_set_url(args):
changes_made = False
if args.push:
- changes_made = changes_made or push_url != url
- push_url = url
+ if isinstance(push_url, dict):
+ changes_made = changes_made or push_url != incoming_data
+ push_url = incoming_data
+ else:
+ changes_made = changes_made or push_url != url
+ push_url = url
else:
- changes_made = (
- changes_made or fetch_url != push_url or push_url != url)
-
- fetch_url, push_url = url, url
+ if isinstance(push_url, dict):
+            changes_made = (changes_made or fetch_url != incoming_data
+                            or push_url != incoming_data)
+ fetch_url, push_url = incoming_data, incoming_data
+ else:
+ changes_made = changes_made or push_url != url
+ fetch_url, push_url = url, url
items = [
(
(n, u)
if n != args.name else (
(n, {"fetch": fetch_url, "push": push_url})
- if fetch_url != push_url else (n, fetch_url)
+ if fetch_url != push_url else (n, {"fetch": fetch_url,
+ "push": fetch_url})
)
)
for n, u in mirrors.items()
@@ -183,10 +198,10 @@ def mirror_set_url(args):
if changes_made:
tty.msg(
- "Changed%s url for mirror %s." %
+ "Changed%s url or connection information for mirror %s." %
((" (push)" if args.push else ""), args.name))
else:
- tty.msg("Url already set for mirror %s." % args.name)
+ tty.msg("No changes made to mirror %s." % args.name)
def mirror_list(args):
@@ -330,7 +345,7 @@ def mirror_create(args):
" %-4d failed to fetch." % e)
if error:
tty.error("Failed downloads:")
- colify(s.cformat("{name}{@version}") for s in error)
+ tty.colify(s.cformat("{name}{@version}") for s in error)
sys.exit(1)
diff --git a/lib/spack/spack/cmd/monitor.py b/lib/spack/spack/cmd/monitor.py
index 90371f446f..c395825ff9 100644
--- a/lib/spack/spack/cmd/monitor.py
+++ b/lib/spack/spack/cmd/monitor.py
@@ -27,7 +27,6 @@ def monitor(parser, args, **kwargs):
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
- disable_auth=args.monitor_disable_auth,
)
# Upload the directory
diff --git a/lib/spack/spack/cmd/style.py b/lib/spack/spack/cmd/style.py
index eb95904dfb..648f30bed0 100644
--- a/lib/spack/spack/cmd/style.py
+++ b/lib/spack/spack/cmd/style.py
@@ -48,10 +48,10 @@ exclude_directories = [
#: double-check the results of other tools (if, e.g., --fix was provided)
#: The list maps an executable name to a spack spec needed to install it.
tool_order = [
- ("isort", "py-isort@4.3.5:"),
- ("mypy", "py-mypy@0.900:"),
- ("black", "py-black"),
- ("flake8", "py-flake8"),
+ ("isort", spack.bootstrap.ensure_isort_in_path_or_raise),
+ ("mypy", spack.bootstrap.ensure_mypy_in_path_or_raise),
+ ("black", spack.bootstrap.ensure_black_in_path_or_raise),
+ ("flake8", spack.bootstrap.ensure_flake8_in_path_or_raise),
]
#: tools we run in spack style
@@ -387,40 +387,33 @@ def style(parser, args):
file_list = [prefix_relative(p) for p in file_list]
- returncode = 0
+ return_code = 0
with working_dir(args.root):
if not file_list:
file_list = changed_files(args.base, args.untracked, args.all)
print_style_header(file_list, args)
- # run tools in order defined in tool_order
- returncode = 0
- for tool_name, tool_spec in tool_order:
- if getattr(args, tool_name):
+ commands = {}
+ with spack.bootstrap.ensure_bootstrap_configuration():
+ for tool_name, bootstrap_fn in tool_order:
+ # Skip the tool if it was not requested
+ if not getattr(args, tool_name):
+ continue
+
+ commands[tool_name] = bootstrap_fn()
+
+ for tool_name, bootstrap_fn in tool_order:
+ # Skip the tool if it was not requested
+ if not getattr(args, tool_name):
+ continue
+
run_function, required = tools[tool_name]
print_tool_header(tool_name)
+ return_code |= run_function(commands[tool_name], file_list, args)
- try:
- # Bootstrap tools so we don't need to require install
- with spack.bootstrap.ensure_bootstrap_configuration():
- spec = spack.spec.Spec(tool_spec)
- cmd = None
- cmd = spack.bootstrap.get_executable(
- tool_name, spec=spec, install=True
- )
- if not cmd:
- color.cprint(" @y{%s not in PATH, skipped}" % tool_name)
- continue
- returncode |= run_function(cmd, file_list, args)
-
- except Exception as e:
- raise spack.error.SpackError(
- "Couldn't bootstrap %s:" % tool_name, str(e)
- )
-
- if returncode == 0:
+ if return_code == 0:
tty.msg(color.colorize("@*{spack style checks were clean}"))
else:
tty.error(color.colorize("@*{spack style found errors}"))
- return returncode
+ return return_code
diff --git a/lib/spack/spack/cmd/tutorial.py b/lib/spack/spack/cmd/tutorial.py
index 7db451fdbb..e5c9439c8c 100644
--- a/lib/spack/spack/cmd/tutorial.py
+++ b/lib/spack/spack/cmd/tutorial.py
@@ -77,7 +77,9 @@ def tutorial(parser, args):
# Note that checkout MUST be last. It changes Spack under our feet.
# If you don't put this last, you'll get import errors for the code
# that follows (exacerbated by the various lazy singletons we use)
- tty.msg("Ensuring we're on the releases/v0.16 branch")
+ tty.msg("Ensuring we're on the releases/v{0}.{1} branch".format(
+ *spack.spack_version_info[:2]
+ ))
git = which("git", required=True)
with working_dir(spack.paths.prefix):
git("checkout", tutorial_branch)
diff --git a/lib/spack/spack/cmd/unit_test.py b/lib/spack/spack/cmd/unit_test.py
index ce4e2cbdbb..0f117f2d7a 100644
--- a/lib/spack/spack/cmd/unit_test.py
+++ b/lib/spack/spack/cmd/unit_test.py
@@ -7,14 +7,19 @@ from __future__ import division, print_function
import argparse
import collections
+import os.path
import re
import sys
-import pytest
+try:
+ import pytest
+except ImportError:
+ pytest = None # type: ignore
+
from six import StringIO
+import llnl.util.filesystem
import llnl.util.tty.color as color
-from llnl.util.filesystem import working_dir
from llnl.util.tty.colify import colify
import spack.paths
@@ -67,7 +72,25 @@ def setup_parser(subparser):
def do_list(args, extra_args):
"""Print a lists of tests than what pytest offers."""
- # Run test collection and get the tree out.
+ def colorize(c, prefix):
+ if isinstance(prefix, tuple):
+ return "::".join(
+ color.colorize("@%s{%s}" % (c, p))
+ for p in prefix if p != "()"
+ )
+ return color.colorize("@%s{%s}" % (c, prefix))
+
+ # To list the files we just need to inspect the filesystem,
+ # which doesn't need to wait for pytest collection and doesn't
+ # require parsing pytest output
+ files = llnl.util.filesystem.find(
+ root=spack.paths.test_path, files='*.py', recursive=True
+ )
+ files = [
+ os.path.relpath(f, start=spack.paths.spack_root)
+ for f in files if not f.endswith(('conftest.py', '__init__.py'))
+ ]
+
old_output = sys.stdout
try:
sys.stdout = output = StringIO()
@@ -76,12 +99,13 @@ def do_list(args, extra_args):
sys.stdout = old_output
lines = output.getvalue().split('\n')
- tests = collections.defaultdict(lambda: set())
- prefix = []
+ tests = collections.defaultdict(set)
# collect tests into sections
+ node_regexp = re.compile(r"(\s*)<([^ ]*) ['\"]?([^']*)['\"]?>")
+ key_parts, name_parts = [], []
for line in lines:
- match = re.match(r"(\s*)<([^ ]*) '([^']*)'", line)
+ match = node_regexp.match(line)
if not match:
continue
indent, nodetype, name = match.groups()
@@ -90,25 +114,31 @@ def do_list(args, extra_args):
if "[" in name:
name = name[:name.index("[")]
- depth = len(indent) // 2
-
- if nodetype.endswith("Function"):
- key = tuple(prefix)
- tests[key].add(name)
- else:
- prefix = prefix[:depth]
- prefix.append(name)
-
- def colorize(c, prefix):
- if isinstance(prefix, tuple):
- return "::".join(
- color.colorize("@%s{%s}" % (c, p))
- for p in prefix if p != "()"
- )
- return color.colorize("@%s{%s}" % (c, prefix))
+ len_indent = len(indent)
+ if os.path.isabs(name):
+ name = os.path.relpath(name, start=spack.paths.spack_root)
+
+ item = (len_indent, name, nodetype)
+
+ # Reduce the parts to the scopes that are of interest
+ name_parts = [x for x in name_parts if x[0] < len_indent]
+ key_parts = [x for x in key_parts if x[0] < len_indent]
+
+ # From version 3.X to version 6.X the output format
+ # changed a lot in pytest, and probably will change
+ # in the future - so this manipulation might be fragile
+ if nodetype.lower() == 'function':
+ name_parts.append(item)
+ key_end = os.path.join(*[x[1] for x in key_parts])
+ key = next(f for f in files if f.endswith(key_end))
+ tests[key].add(tuple(x[1] for x in name_parts))
+ elif nodetype.lower() == 'class':
+ name_parts.append(item)
+ elif nodetype.lower() in ('package', 'module'):
+ key_parts.append(item)
if args.list == "list":
- files = set(prefix[0] for prefix in tests)
+ files = set(tests.keys())
color_files = [colorize("B", file) for file in sorted(files)]
colify(color_files)
@@ -144,6 +174,14 @@ def add_back_pytest_args(args, unknown_args):
def unit_test(parser, args, unknown_args):
+ global pytest
+ if pytest is None:
+ vendored_pytest_dir = os.path.join(
+ spack.paths.external_path, 'pytest-fallback'
+ )
+ sys.path.append(vendored_pytest_dir)
+ import pytest
+
if args.pytest_help:
# make the pytest.main help output more accurate
sys.argv[0] = 'spack unit-test'
@@ -161,7 +199,7 @@ def unit_test(parser, args, unknown_args):
pytest_root = spack.extensions.path_for_extension(target, *extensions)
# pytest.ini lives in the root of the spack repository.
- with working_dir(pytest_root):
+ with llnl.util.filesystem.working_dir(pytest_root):
if args.list:
do_list(args, pytest_args)
return
diff --git a/lib/spack/spack/compilers/dpcpp.py b/lib/spack/spack/compilers/dpcpp.py
new file mode 100644
index 0000000000..664b1d86c5
--- /dev/null
+++ b/lib/spack/spack/compilers/dpcpp.py
@@ -0,0 +1,29 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+import spack.compilers.oneapi
+
+
+class Dpcpp(spack.compilers.oneapi.Oneapi):
+ """This is the same as the oneAPI compiler but uses dpcpp instead of
+ icpx (for DPC++ source files). It explicitly refers to dpcpp, so that
+ CMake test files which check the compiler name (e.g. CMAKE_CXX_COMPILER)
+ detect it as dpcpp.
+
+ Ideally we could switch out icpx for dpcpp where needed in the oneAPI
+ compiler definition, but two things are needed for that: (a) a way to
+ tell the compiler that it should be using dpcpp and (b) a way to
+    customize the link_paths.
+
+ See also: https://www.intel.com/content/www/us/en/develop/documentation/oneapi-dpcpp-cpp-compiler-dev-guide-and-reference/top/compiler-setup/using-the-command-line/invoking-the-compiler.html
+ """
+ # Subclasses use possible names of C++ compiler
+ cxx_names = ['dpcpp']
+
+ # Named wrapper links within build_env_path
+ link_paths = {'cc': 'oneapi/icx',
+ 'cxx': 'oneapi/dpcpp',
+ 'f77': 'oneapi/ifx',
+ 'fc': 'oneapi/ifx'}
diff --git a/lib/spack/spack/compilers/oneapi.py b/lib/spack/spack/compilers/oneapi.py
index a28259c02c..bf5c7767e9 100644
--- a/lib/spack/spack/compilers/oneapi.py
+++ b/lib/spack/spack/compilers/oneapi.py
@@ -3,6 +3,8 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+from os.path import dirname
+
from spack.compiler import Compiler
@@ -105,3 +107,11 @@ class Oneapi(Compiler):
@property
def stdcxx_libs(self):
return ('-cxxlib', )
+
+ def setup_custom_environment(self, pkg, env):
+        # Work around a bug in the icpx driver that requires sycl-post-link on the PATH.
+ # It is located in the same directory as the driver. Error message:
+ # clang++: error: unable to execute command:
+ # Executable "sycl-post-link" doesn't exist!
+ if self.cxx:
+ env.prepend_path('PATH', dirname(self.cxx))
diff --git a/lib/spack/spack/concretize.py b/lib/spack/spack/concretize.py
index f3572cfdcf..90b4c5b669 100644
--- a/lib/spack/spack/concretize.py
+++ b/lib/spack/spack/concretize.py
@@ -16,14 +16,13 @@ TODO: make this customizable and allow users to configure
"""
from __future__ import print_function
+import functools
import os.path
import platform
import tempfile
from contextlib import contextmanager
from itertools import chain
-from functools_backport import reverse_order
-
import archspec.cpu
import llnl.util.filesystem as fs
@@ -48,6 +47,23 @@ from spack.version import Version, VersionList, VersionRange, ver
_abi = llnl.util.lang.Singleton(lambda: spack.abi.ABI())
+@functools.total_ordering
+class reverse_order(object):
+ """Helper for creating key functions.
+
+ This is a wrapper that inverts the sense of the natural
+ comparisons on the object.
+ """
+ def __init__(self, value):
+ self.value = value
+
+ def __eq__(self, other):
+ return other.value == self.value
+
+ def __lt__(self, other):
+ return other.value < self.value
+
+
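A quick illustrative check of the inversion (not part of the diff): sorting ascending on the wrapped key yields descending values, which is how "latest version first" is achieved in the sort key below.

    # reverse_order flips every comparison on the wrapped value.
    sorted([2, 3, 1], key=reverse_order)   # -> [3, 2, 1]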
class Concretizer(object):
"""You can subclass this class to override some of the default
concretization strategies, or you can override all of them.
@@ -130,11 +146,11 @@ class Concretizer(object):
# Use a sort key to order the results
return sorted(usable, key=lambda spec: (
- not spec.external, # prefer externals
- pref_key(spec), # respect prefs
- spec.name, # group by name
- reverse_order(spec.versions), # latest version
- spec # natural order
+ not spec.external, # prefer externals
+ pref_key(spec), # respect prefs
+ spec.name, # group by name
+ reverse_order(spec.versions), # latest version
+ spec # natural order
))
def choose_virtual_or_external(self, spec):
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index f3fc73e4b4..d967fef122 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -39,7 +39,6 @@ from contextlib import contextmanager
from typing import List # novm
import ruamel.yaml as yaml
-from ordereddict_backport import OrderedDict
from ruamel.yaml.error import MarkedYAMLError
from six import iteritems
@@ -401,7 +400,7 @@ class Configuration(object):
Configuration, ordered from lowest to highest precedence
"""
- self.scopes = OrderedDict()
+ self.scopes = collections.OrderedDict()
for scope in scopes:
self.push_scope(scope)
self.format_updates = collections.defaultdict(list)
diff --git a/lib/spack/spack/container/writers/__init__.py b/lib/spack/spack/container/writers/__init__.py
index 9808969bfc..abe6dbf211 100644
--- a/lib/spack/spack/container/writers/__init__.py
+++ b/lib/spack/spack/container/writers/__init__.py
@@ -183,19 +183,18 @@ class PathContext(tengine.Context):
def monitor(self):
"""Enable using spack monitor during build."""
Monitor = collections.namedtuple('Monitor', [
- 'enabled', 'host', 'disable_auth', 'prefix', 'keep_going', 'tags'
+ 'enabled', 'host', 'prefix', 'keep_going', 'tags'
])
monitor = self.config.get("monitor")
# If we don't have a monitor group, cut out early.
if not monitor:
- return Monitor(False, None, None, None, None, None)
+ return Monitor(False, None, None, None, None)
return Monitor(
enabled=True,
host=monitor.get('host'),
prefix=monitor.get('prefix'),
- disable_auth=monitor.get("disable_auth"),
keep_going=monitor.get("keep_going"),
tags=monitor.get('tags')
)
diff --git a/lib/spack/spack/detection/path.py b/lib/spack/spack/detection/path.py
index 0e652ed6c2..66998e4b0b 100644
--- a/lib/spack/spack/detection/path.py
+++ b/lib/spack/spack/detection/path.py
@@ -9,6 +9,7 @@ import collections
import os
import os.path
import re
+import warnings
import llnl.util.filesystem
import llnl.util.tty
@@ -99,9 +100,14 @@ def by_executable(packages_to_check, path_hints=None):
# for one prefix, but without additional details (e.g. about the
# naming scheme which differentiates them), the spec won't be
# usable.
- specs = _convert_to_iterable(
- pkg.determine_spec_details(prefix, exes_in_prefix)
- )
+ try:
+ specs = _convert_to_iterable(
+ pkg.determine_spec_details(prefix, exes_in_prefix)
+ )
+ except Exception as e:
+ specs = []
+ msg = 'error detecting "{0}" from prefix {1} [{2}]'
+ warnings.warn(msg.format(pkg.name, prefix, str(e)))
if not specs:
llnl.util.tty.debug(
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index f7400f4da9..c23146125a 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -466,7 +466,8 @@ def extends(spec, type=('build', 'run'), **kwargs):
return
_depends_on(pkg, spec, when=when, type=type)
- pkg.extendees[spec] = (spack.spec.Spec(spec), kwargs)
+ spec_obj = spack.spec.Spec(spec)
+ pkg.extendees[spec_obj.name] = (spec_obj, kwargs)
return _execute_extends
diff --git a/lib/spack/spack/environment/environment.py b/lib/spack/spack/environment/environment.py
index 3114f55638..67303c8a4d 100644
--- a/lib/spack/spack/environment/environment.py
+++ b/lib/spack/spack/environment/environment.py
@@ -13,7 +13,6 @@ import time
import ruamel.yaml as yaml
import six
-from ordereddict_backport import OrderedDict
import llnl.util.filesystem as fs
import llnl.util.tty as tty
@@ -287,6 +286,51 @@ def _eval_conditional(string):
return eval(string, valid_variables)
+def _is_dev_spec_and_has_changed(spec):
+ """Check if the passed spec is a dev build and whether it has changed since the
+ last installation"""
+ # First check if this is a dev build and in the process already try to get
+ # the dev_path
+ dev_path_var = spec.variants.get('dev_path', None)
+ if not dev_path_var:
+ return False
+
+ # Now we can check whether the code changed since the last installation
+ if not spec.package.installed:
+ # Not installed -> nothing to compare against
+ return False
+
+ _, record = spack.store.db.query_by_spec_hash(spec.dag_hash())
+ mtime = fs.last_modification_time_recursive(dev_path_var.value)
+ return mtime > record.installation_time
+
+
+def _spec_needs_overwrite(spec, changed_dev_specs):
+ """Check whether the current spec needs to be overwritten because either it has
+ changed itself or one of its dependencies have changed"""
+ # if it's not installed, we don't need to overwrite it
+ if not spec.package.installed:
+ return False
+
+ # If the spec itself has changed this is a trivial decision
+ if spec in changed_dev_specs:
+ return True
+
+ # if spec and all deps aren't dev builds, we don't need to overwrite it
+ if not any(spec.satisfies(c)
+ for c in ('dev_path=*', '^dev_path=*')):
+ return False
+
+ # If any dep needs overwrite, or any dep is missing and is a dev build then
+ # overwrite this package
+ if any(
+ ((not dep.package.installed) and dep.satisfies('dev_path=*')) or
+ _spec_needs_overwrite(dep, changed_dev_specs)
+ for dep in spec.traverse(root=False)
+ ):
+        return True
+
+    # No dev build involvement detected
+    return False
+
+
class ViewDescriptor(object):
def __init__(self, base_path, root, projections={}, select=[], exclude=[],
link=default_view_link, link_type='symlink'):
@@ -318,7 +362,11 @@ class ViewDescriptor(object):
# projections guaranteed to be ordered dict if true-ish
# for python2.6, may be syaml or ruamel.yaml implementation
# so we have to check for both
- types = (OrderedDict, syaml.syaml_dict, yaml.comments.CommentedMap)
+ types = (
+ collections.OrderedDict,
+ syaml.syaml_dict,
+ yaml.comments.CommentedMap
+ )
assert isinstance(self.projections, types)
ret['projections'] = self.projections
if self.select:
@@ -638,7 +686,7 @@ class Environment(object):
else:
self.raw_yaml, self.yaml = _read_yaml(f)
- self.spec_lists = OrderedDict()
+ self.spec_lists = collections.OrderedDict()
for item in config_dict(self.yaml).get('definitions', []):
entry = copy.deepcopy(item)
@@ -1389,52 +1437,19 @@ class Environment(object):
self.concretized_order.append(h)
self.specs_by_hash[h] = concrete
- def _spec_needs_overwrite(self, spec):
- # Overwrite the install if it's a dev build (non-transitive)
- # and the code has been changed since the last install
- # or one of the dependencies has been reinstalled since
- # the last install
-
- # if it's not installed, we don't need to overwrite it
- if not spec.package.installed:
- return False
-
- # if spec and all deps aren't dev builds, we don't need to overwrite it
- if not any(spec.satisfies(c)
- for c in ('dev_path=*', '^dev_path=*')):
- return False
-
- # if any dep needs overwrite, or any dep is missing and is a dev build
- # then overwrite this package
- if any(
- self._spec_needs_overwrite(dep) or
- ((not dep.package.installed) and dep.satisfies('dev_path=*'))
- for dep in spec.traverse(root=False)
- ):
- return True
-
- # if it's not a direct dev build and its dependencies haven't
- # changed, it hasn't changed.
- # We don't merely check satisfaction (spec.satisfies('dev_path=*')
- # because we need the value of the variant in the next block of code
- dev_path_var = spec.variants.get('dev_path', None)
- if not dev_path_var:
- return False
-
- # if it is a direct dev build, check whether the code changed
- # we already know it is installed
- _, record = spack.store.db.query_by_spec_hash(spec.dag_hash())
- mtime = fs.last_modification_time_recursive(dev_path_var.value)
- return mtime > record.installation_time
-
def _get_overwrite_specs(self):
- ret = []
+ # Collect all specs in the environment first before checking which ones
+ # to rebuild to avoid checking the same specs multiple times
+ specs_to_check = set()
for dag_hash in self.concretized_order:
- spec = self.specs_by_hash[dag_hash]
- ret.extend([d.dag_hash() for d in spec.traverse(root=True)
- if self._spec_needs_overwrite(d)])
+ root_spec = self.specs_by_hash[dag_hash]
+ specs_to_check.update(root_spec.traverse(root=True))
- return ret
+ changed_dev_specs = set(s for s in specs_to_check if
+ _is_dev_spec_and_has_changed(s))
+
+ return [s.dag_hash() for s in specs_to_check if
+ _spec_needs_overwrite(s, changed_dev_specs)]
def _install_log_links(self, spec):
if not spec.external:
@@ -1503,8 +1518,12 @@ class Environment(object):
else:
tty.debug('Processing {0} uninstalled specs'.format(len(specs_to_install)))
+ specs_to_overwrite = self._get_overwrite_specs()
+ tty.debug('{0} specs need to be overwritten'.format(
+ len(specs_to_overwrite)))
+
install_args['overwrite'] = install_args.get(
- 'overwrite', []) + self._get_overwrite_specs()
+ 'overwrite', []) + specs_to_overwrite
installs = []
for spec in specs_to_install:
diff --git a/lib/spack/spack/extensions.py b/lib/spack/spack/extensions.py
index 69a287fe85..0a1a056819 100644
--- a/lib/spack/spack/extensions.py
+++ b/lib/spack/spack/extensions.py
@@ -5,6 +5,7 @@
"""Service functions and classes to implement the hooks
for Spack's command extensions.
"""
+import importlib
import os
import re
import sys
@@ -98,9 +99,7 @@ def load_command_extension(command, path):
ensure_package_creation(extension)
ensure_package_creation(extension + '.cmd')
- # TODO: Upon removal of support for Python 2.6 substitute the call
- # TODO: below with importlib.import_module(module_name)
- module = llnl.util.lang.load_module_from_file(module_name, cmd_path)
+ module = importlib.import_module(module_name)
sys.modules[module_name] = module
return module
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index 888cad7bf3..432010adca 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -1437,7 +1437,13 @@ class GCSFetchStrategy(URLFetchStrategy):
basename = os.path.basename(parsed_url.path)
with working_dir(self.stage.path):
- _, headers, stream = web_util.read_from_url(self.url)
+ import spack.util.s3 as s3_util
+ s3 = s3_util.create_s3_session(self.url,
+ connection=s3_util.get_mirror_connection(parsed_url), url_type="fetch") # noqa: E501
+
+ headers = s3.get_object(Bucket=parsed_url.netloc,
+ Key=parsed_url.path.lstrip("/"))
+ stream = headers["Body"]
with open(basename, 'wb') as f:
shutil.copyfileobj(stream, f)
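A sketch of the boto3-style fetch this hunk switches to (the URL is hypothetical; create_s3_session and get_mirror_connection are assumed to behave as called above):

    import spack.util.s3 as s3_util
    import spack.util.url as url_util

    url = 's3://my-bucket/path/archive.tar.gz'
    parsed = url_util.parse(url)
    s3 = s3_util.create_s3_session(
        url, connection=s3_util.get_mirror_connection(parsed), url_type='fetch')
    # get_object returns response metadata plus a streaming 'Body'
    obj = s3.get_object(Bucket=parsed.netloc, Key=parsed.path.lstrip('/'))
    data = obj['Body'].read()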
diff --git a/lib/spack/spack/filesystem_view.py b/lib/spack/spack/filesystem_view.py
index 659ae550ae..741e38490a 100644
--- a/lib/spack/spack/filesystem_view.py
+++ b/lib/spack/spack/filesystem_view.py
@@ -3,14 +3,13 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import collections
import functools as ft
import os
import re
import shutil
import sys
-from ordereddict_backport import OrderedDict
-
from llnl.util import tty
from llnl.util.filesystem import mkdirp, remove_dead_links, remove_empty_directories
from llnl.util.lang import index_by, match_predicate
@@ -79,7 +78,7 @@ def view_copy(src, dst, view, spec=None):
orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(spack.paths.spack_root)
new_sbang = sbang.sbang_shebang_line()
- prefix_to_projection = OrderedDict({
+ prefix_to_projection = collections.OrderedDict({
spec.prefix: view.get_projection_for_spec(spec)})
for dep in spec.traverse():
diff --git a/lib/spack/spack/hooks/__init__.py b/lib/spack/spack/hooks/__init__.py
index e802fa0197..ca6a5fade9 100644
--- a/lib/spack/spack/hooks/__init__.py
+++ b/lib/spack/spack/hooks/__init__.py
@@ -90,6 +90,7 @@ on_phase_error = _HookRunner('on_phase_error')
on_install_start = _HookRunner('on_install_start')
on_install_success = _HookRunner('on_install_success')
on_install_failure = _HookRunner('on_install_failure')
+on_install_cancel = _HookRunner('on_install_cancel')
# Analyzer hooks
on_analyzer_save = _HookRunner('on_analyzer_save')
diff --git a/lib/spack/spack/hooks/monitor.py b/lib/spack/spack/hooks/monitor.py
index 5ddc1223e8..9da5012593 100644
--- a/lib/spack/spack/hooks/monitor.py
+++ b/lib/spack/spack/hooks/monitor.py
@@ -41,6 +41,17 @@ def on_install_failure(spec):
tty.verbose(result.get('message'))
+def on_install_cancel(spec):
+ """Triggered on cancel of an install
+ """
+ if not spack.monitor.cli:
+ return
+
+ tty.debug("Running on_install_cancel for %s" % spec)
+ result = spack.monitor.cli.cancel_task(spec)
+ tty.verbose(result.get('message'))
+
+
def on_phase_success(pkg, phase_name, log_file):
"""Triggered on a phase success
"""
diff --git a/lib/spack/spack/install_test.py b/lib/spack/spack/install_test.py
index 07644f5cc5..a8fe0f4024 100644
--- a/lib/spack/spack/install_test.py
+++ b/lib/spack/spack/install_test.py
@@ -287,7 +287,10 @@ class TestSuite(object):
try:
with open(filename, 'r') as f:
data = sjson.load(f)
- return TestSuite.from_dict(data)
+ test_suite = TestSuite.from_dict(data)
+ content_hash = os.path.basename(os.path.dirname(filename))
+ test_suite._hash = content_hash
+ return test_suite
except Exception as e:
tty.debug(e)
raise sjson.SpackJSONError("error parsing JSON TestSuite:", str(e))
diff --git a/lib/spack/spack/installer.py b/lib/spack/spack/installer.py
index df1d704cd7..d70f535a89 100644
--- a/lib/spack/spack/installer.py
+++ b/lib/spack/spack/installer.py
@@ -1200,6 +1200,7 @@ class PackageInstaller(object):
except spack.build_environment.StopPhase as e:
# A StopPhase exception means that do_install was asked to
# stop early from clients, and is not an error at this point
+ spack.hooks.on_install_failure(task.request.pkg.spec)
pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
tty.debug('{0}{1}'.format(pid, str(e)))
tty.debug('Package stage directory: {0}' .format(pkg.stage.source_path))
@@ -1657,7 +1658,7 @@ class PackageInstaller(object):
err = 'Failed to install {0} due to {1}: {2}'
tty.error(err.format(pkg.name, exc.__class__.__name__,
str(exc)))
- spack.hooks.on_install_failure(task.request.pkg.spec)
+ spack.hooks.on_install_cancel(task.request.pkg.spec)
raise
except (Exception, SystemExit) as exc:
@@ -1921,6 +1922,9 @@ class BuildProcessInstaller(object):
except BaseException:
combine_phase_logs(pkg.phase_log_files, pkg.log_path)
spack.hooks.on_phase_error(pkg, phase_name, log_file)
+
+ # phase error indicates install error
+ spack.hooks.on_install_failure(pkg.spec)
raise
# We assume loggers share echo True/False
diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py
index a4ec57ab5b..9b6fd5ba3c 100644
--- a/lib/spack/spack/mirror.py
+++ b/lib/spack/spack/mirror.py
@@ -11,6 +11,7 @@ the main server for a particular package is down. Or, if the computer
where spack is run is not connected to the internet, it allows spack
to download packages directly from a mirror (e.g., on an intranet).
"""
+import collections
import operator
import os
import os.path
@@ -19,7 +20,6 @@ import traceback
import ruamel.yaml.error as yaml_error
import six
-from ordereddict_backport import OrderedDict
if sys.version_info >= (3, 5):
from collections.abc import Mapping # novm
@@ -41,6 +41,10 @@ from spack.util.spack_yaml import syaml_dict
from spack.version import VersionList
+def _is_string(url):
+ return isinstance(url, six.string_types)
+
+
def _display_mirror_entry(size, name, url, type_=None):
if type_:
type_ = "".join((" (", type_, ")"))
@@ -59,7 +63,8 @@ class Mirror(object):
to them. These two URLs are usually the same.
"""
- def __init__(self, fetch_url, push_url=None, name=None):
+ def __init__(self, fetch_url, push_url=None,
+ name=None):
self._fetch_url = fetch_url
self._push_url = push_url
self._name = name
@@ -85,7 +90,9 @@ class Mirror(object):
def to_dict(self):
if self._push_url is None:
- return self._fetch_url
+ return syaml_dict([
+ ('fetch', self._fetch_url),
+ ('push', self._fetch_url)])
else:
return syaml_dict([
('fetch', self._fetch_url),
@@ -96,16 +103,16 @@ class Mirror(object):
if isinstance(d, six.string_types):
return Mirror(d, name=name)
else:
- return Mirror(d['fetch'], d['push'], name)
+ return Mirror(d['fetch'], d['push'], name=name)
def display(self, max_len=0):
if self._push_url is None:
- _display_mirror_entry(max_len, self._name, self._fetch_url)
+ _display_mirror_entry(max_len, self._name, self.fetch_url)
else:
_display_mirror_entry(
- max_len, self._name, self._fetch_url, "fetch")
+ max_len, self._name, self.fetch_url, "fetch")
_display_mirror_entry(
- max_len, self._name, self._push_url, "push")
+ max_len, self._name, self.push_url, "push")
def __str__(self):
name = self._name
@@ -137,24 +144,83 @@ class Mirror(object):
def name(self):
return self._name or "<unnamed>"
+ def get_profile(self, url_type):
+ if isinstance(self._fetch_url, dict):
+ if url_type == "push":
+ return self._push_url.get('profile', None)
+ return self._fetch_url.get('profile', None)
+ else:
+ return None
+
+ def set_profile(self, url_type, profile):
+ if url_type == "push":
+ self._push_url["profile"] = profile
+ else:
+ self._fetch_url["profile"] = profile
+
+ def get_access_pair(self, url_type):
+ if isinstance(self._fetch_url, dict):
+ if url_type == "push":
+ return self._push_url.get('access_pair', None)
+ return self._fetch_url.get('access_pair', None)
+ else:
+ return None
+
+ def set_access_pair(self, url_type, connection_tuple):
+ if url_type == "push":
+ self._push_url["access_pair"] = connection_tuple
+ else:
+ self._fetch_url["access_pair"] = connection_tuple
+
+ def get_endpoint_url(self, url_type):
+ if isinstance(self._fetch_url, dict):
+ if url_type == "push":
+ return self._push_url.get('endpoint_url', None)
+ return self._fetch_url.get('endpoint_url', None)
+ else:
+ return None
+
+ def set_endpoint_url(self, url_type, url):
+ if url_type == "push":
+ self._push_url["endpoint_url"] = url
+ else:
+ self._fetch_url["endpoint_url"] = url
+
+ def get_access_token(self, url_type):
+ if isinstance(self._fetch_url, dict):
+ if url_type == "push":
+ return self._push_url.get('access_token', None)
+ return self._fetch_url.get('access_token', None)
+ else:
+ return None
+
+ def set_access_token(self, url_type, connection_token):
+ if url_type == "push":
+ self._push_url["access_token"] = connection_token
+ else:
+ self._fetch_url["access_token"] = connection_token
+
@property
def fetch_url(self):
- return self._fetch_url
+ return self._fetch_url if _is_string(self._fetch_url) \
+ else self._fetch_url["url"]
@fetch_url.setter
def fetch_url(self, url):
- self._fetch_url = url
+ self._fetch_url["url"] = url
self._normalize()
@property
def push_url(self):
if self._push_url is None:
- return self._fetch_url
- return self._push_url
+ return self._fetch_url if _is_string(self._fetch_url) \
+ else self._fetch_url["url"]
+ return self._push_url if _is_string(self._push_url) \
+ else self._push_url["url"]
@push_url.setter
def push_url(self, url):
- self._push_url = url
+ self._push_url["url"] = url
self._normalize()
def _normalize(self):
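For reference, the dict-style mirror entry these accessors read and write (shape inferred from the getters above and from mirror.add below, whose 'fetch'/'push' dicts become _fetch_url/_push_url; all values hypothetical):

    mirror_entry = {
        'fetch': {
            'url': 's3://my-bucket/mirror',
            'access_pair': ('AKIAEXAMPLE', 'SECRETEXAMPLE'),
            'access_token': None,
            'profile': None,
            'endpoint_url': 'https://s3.example.com',
        },
        'push': {
            'url': 's3://my-bucket/mirror',
            'access_pair': ('AKIAEXAMPLE', 'SECRETEXAMPLE'),
            'access_token': None,
            'profile': None,
            'endpoint_url': 'https://s3.example.com',
        },
    }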
@@ -166,7 +232,7 @@ class MirrorCollection(Mapping):
"""A mapping of mirror names to mirrors."""
def __init__(self, mirrors=None, scope=None):
- self._mirrors = OrderedDict(
+ self._mirrors = collections.OrderedDict(
(name, Mirror.from_dict(mirror, name))
for name, mirror in (
mirrors.items() if mirrors is not None else
@@ -453,7 +519,7 @@ def create(path, specs, skip_unstable_versions=False):
return mirror_stats.stats()
-def add(name, url, scope):
+def add(name, url, scope, args={}):
"""Add a named mirror in the given scope"""
mirrors = spack.config.get('mirrors', scope=scope)
if not mirrors:
@@ -463,7 +529,18 @@ def add(name, url, scope):
tty.die("Mirror with name %s already exists." % name)
items = [(n, u) for n, u in mirrors.items()]
- items.insert(0, (name, url))
+ mirror_data = url
+ key_values = ["s3_access_key_id", "s3_access_token", "s3_profile"]
+ # On creation, assume connection data is set for both
+    if any(value in args for value in key_values):
+ url_dict = {"url": url,
+ "access_pair": (args.s3_access_key_id, args.s3_access_key_secret),
+ "access_token": args.s3_access_token,
+ "profile": args.s3_profile,
+ "endpoint_url": args.s3_endpoint_url}
+ mirror_data = {"fetch": url_dict, "push": url_dict}
+
+ items.insert(0, (name, mirror_data))
mirrors = syaml_dict(items)
spack.config.set('mirrors', mirrors, scope=scope)
@@ -569,6 +646,35 @@ def _add_single_spec(spec, mirror, mirror_stats):
mirror_stats.error()
+def push_url_from_directory(output_directory):
+ """Given a directory in the local filesystem, return the URL on
+ which to push binary packages.
+ """
+ scheme = url_util.parse(output_directory, scheme='<missing>').scheme
+ if scheme != '<missing>':
+ raise ValueError('expected a local path, but got a URL instead')
+ mirror_url = 'file://' + output_directory
+ mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
+ return url_util.format(mirror.push_url)
+
+
+def push_url_from_mirror_name(mirror_name):
+ """Given a mirror name, return the URL on which to push binary packages."""
+ mirror = spack.mirror.MirrorCollection().lookup(mirror_name)
+ if mirror.name == "<unnamed>":
+ raise ValueError('no mirror named "{0}"'.format(mirror_name))
+ return url_util.format(mirror.push_url)
+
+
+def push_url_from_mirror_url(mirror_url):
+ """Given a mirror URL, return the URL on which to push binary packages."""
+ scheme = url_util.parse(mirror_url, scheme='<missing>').scheme
+ if scheme == '<missing>':
+ raise ValueError('"{0}" is not a valid URL'.format(mirror_url))
+ mirror = spack.mirror.MirrorCollection().lookup(mirror_url)
+ return url_util.format(mirror.push_url)
+
+
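Taken together, these helpers normalize the three ways a push destination can be given (sketch; names and URLs hypothetical):

    import spack.mirror

    spack.mirror.push_url_from_directory('/tmp/mirror')    # roughly file:///tmp/mirror
    spack.mirror.push_url_from_mirror_name('my-mirror')    # configured mirror's push URL
    spack.mirror.push_url_from_mirror_url('s3://my-bucket/mirror')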
class MirrorError(spack.error.SpackError):
"""Superclass of all mirror-creation related errors."""
diff --git a/lib/spack/spack/monitor.py b/lib/spack/spack/monitor.py
index c0df4ea680..dd967d793d 100644
--- a/lib/spack/spack/monitor.py
+++ b/lib/spack/spack/monitor.py
@@ -38,8 +38,7 @@ import spack.util.spack_yaml as syaml
cli = None
-def get_client(host, prefix="ms1", disable_auth=False, allow_fail=False, tags=None,
- save_local=False):
+def get_client(host, prefix="ms1", allow_fail=False, tags=None, save_local=False):
"""
Get a monitor client for a particular host and prefix.
@@ -57,8 +56,8 @@ def get_client(host, prefix="ms1", disable_auth=False, allow_fail=False, tags=No
cli = SpackMonitorClient(host=host, prefix=prefix, allow_fail=allow_fail,
tags=tags, save_local=save_local)
- # If we don't disable auth, environment credentials are required
- if not disable_auth and not save_local:
+ # Auth is always required unless we are saving locally
+ if not save_local:
cli.require_auth()
# We will exit early if the monitoring service is not running, but
@@ -93,9 +92,6 @@ def get_monitor_group(subparser):
'--monitor-save-local', action='store_true', dest='monitor_save_local',
default=False, help="save monitor results to .spack instead of server.")
monitor_group.add_argument(
- '--monitor-no-auth', action='store_true', dest='monitor_disable_auth',
- default=False, help="the monitoring server does not require auth.")
- monitor_group.add_argument(
'--monitor-tags', dest='monitor_tags', default=None,
help="One or more (comma separated) tags for a build.")
monitor_group.add_argument(
@@ -122,13 +118,16 @@ class SpackMonitorClient:
def __init__(self, host=None, prefix="ms1", allow_fail=False, tags=None,
save_local=False):
+        # Allow overriding the reported Spack version via the environment
+ sv = spack.main.get_version()
+ self.spack_version = os.environ.get("SPACKMON_SPACK_VERSION") or sv
+
self.host = host or "http://127.0.0.1"
self.baseurl = "%s/%s" % (self.host, prefix.strip("/"))
self.token = os.environ.get("SPACKMON_TOKEN")
self.username = os.environ.get("SPACKMON_USER")
self.headers = {}
self.allow_fail = allow_fail
- self.spack_version = spack.main.get_version()
self.capture_build_environment()
self.tags = tags
self.save_local = save_local
@@ -204,6 +203,14 @@ class SpackMonitorClient:
"""
from spack.util.environment import get_host_environment_metadata
self.build_environment = get_host_environment_metadata()
+ keys = list(self.build_environment.keys())
+
+        # Allow customizing any of these values via the environment
+ for key in keys:
+ envar_name = "SPACKMON_%s" % key.upper()
+ envar = os.environ.get(envar_name)
+ if envar:
+ self.build_environment[key] = envar
def require_auth(self):
"""
@@ -417,6 +424,37 @@ class SpackMonitorClient:
return configs
+ def failed_concretization(self, specs):
+ """
+ Given a list of abstract specs, tell spack monitor concretization failed.
+ """
+ configs = {}
+
+ # There should only be one spec generally (what cases would have >1?)
+ for spec in specs:
+
+            # update the spec's hashes to indicate that it cannot be built
+ meta = spec.to_dict()['spec']
+ nodes = []
+ for node in meta.get("nodes", []):
+ for hashtype in ["build_hash", "full_hash"]:
+ node[hashtype] = "FAILED_CONCRETIZATION"
+ nodes.append(node)
+ meta['nodes'] = nodes
+
+ # We can't concretize / hash
+ as_dict = {"spec": meta,
+ "spack_version": self.spack_version}
+
+ if self.save_local:
+ filename = "spec-%s-%s-config.json" % (spec.name, spec.version)
+ self.save(as_dict, filename)
+ else:
+ response = self.do_request("specs/new/", data=sjson.dump(as_dict))
+ configs[spec.package.name] = response.get('data', {})
+
+ return configs
+
def new_build(self, spec):
"""
Create a new build.
@@ -507,6 +545,11 @@ class SpackMonitorClient:
"""
return self.update_build(spec, status="FAILED")
+ def cancel_task(self, spec):
+ """Given a spec, mark it as cancelled.
+ """
+ return self.update_build(spec, status="CANCELLED")
+
def send_analyze_metadata(self, pkg, metadata):
"""
Send spack analyzer metadata to the spack monitor server.
diff --git a/lib/spack/spack/operating_systems/linux_distro.py b/lib/spack/spack/operating_systems/linux_distro.py
index 758e8bea47..8bb2aa7749 100644
--- a/lib/spack/spack/operating_systems/linux_distro.py
+++ b/lib/spack/spack/operating_systems/linux_distro.py
@@ -18,10 +18,8 @@ class LinuxDistro(OperatingSystem):
def __init__(self):
try:
# This will throw an error if imported on a non-Linux platform.
- from external.distro import linux_distribution
- distname, version, _ = linux_distribution(
- full_distribution_name=False)
- distname, version = str(distname), str(version)
+ import external.distro
+ distname, version = external.distro.id(), external.distro.version()
except ImportError:
distname, version = 'unknown', ''
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 8e1e81c124..77a881c442 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -29,7 +29,6 @@ import types
from typing import Any, Callable, Dict, List, Optional # novm
import six
-from ordereddict_backport import OrderedDict
import llnl.util.filesystem as fsys
import llnl.util.tty as tty
@@ -902,7 +901,7 @@ class PackageBase(six.with_metaclass(PackageMeta, PackageViewMixin, object)):
explicitly defined ``url`` argument. So, this list may be empty
if a package only defines ``url`` at the top level.
"""
- version_urls = OrderedDict()
+ version_urls = collections.OrderedDict()
for v, args in sorted(self.versions.items()):
if 'url' in args:
version_urls[v] = args['url']
diff --git a/lib/spack/spack/pkgkit.py b/lib/spack/spack/pkgkit.py
index fc056f14ad..19c5fac500 100644
--- a/lib/spack/spack/pkgkit.py
+++ b/lib/spack/spack/pkgkit.py
@@ -29,7 +29,11 @@ from spack.build_systems.makefile import MakefilePackage
from spack.build_systems.maven import MavenPackage
from spack.build_systems.meson import MesonPackage
from spack.build_systems.octave import OctavePackage
-from spack.build_systems.oneapi import IntelOneApiLibraryPackage, IntelOneApiPackage
+from spack.build_systems.oneapi import (
+ IntelOneApiLibraryPackage,
+ IntelOneApiPackage,
+ IntelOneApiStaticLibraryList,
+)
from spack.build_systems.perl import PerlPackage
from spack.build_systems.python import PythonPackage
from spack.build_systems.qmake import QMakePackage
diff --git a/lib/spack/spack/relocate.py b/lib/spack/spack/relocate.py
index 14bc0e1953..9cf01d7c9c 100644
--- a/lib/spack/spack/relocate.py
+++ b/lib/spack/spack/relocate.py
@@ -2,19 +2,19 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import collections
import multiprocessing.pool
import os
import re
import shutil
-from collections import defaultdict
import macholib.mach_o
import macholib.MachO
-from ordereddict_backport import OrderedDict
import llnl.util.lang
import llnl.util.tty as tty
+import spack.bootstrap
import spack.platforms
import spack.repo
import spack.spec
@@ -76,32 +76,16 @@ class BinaryTextReplaceError(spack.error.SpackError):
def _patchelf():
- """Return the full path to the patchelf binary, if available, else None.
-
- Search first the current PATH for patchelf. If not found, try to look
- if the default patchelf spec is installed and if not install it.
-
- Return None on Darwin or if patchelf cannot be found.
- """
- # Check if patchelf is already in the PATH
- patchelf = executable.which('patchelf')
- if patchelf is not None:
- return patchelf.path
-
- # Check if patchelf spec is installed
- spec = spack.spec.Spec('patchelf')
- spec._old_concretize(deprecation_warning=False)
- exe_path = os.path.join(spec.prefix.bin, "patchelf")
- if spec.package.installed and os.path.exists(exe_path):
- return exe_path
-
- # Skip darwin
+ """Return the full path to the patchelf binary, if available, else None."""
if is_macos:
return None
- # Install the spec and return its path
- spec.package.do_install()
- return exe_path if os.path.exists(exe_path) else None
+ patchelf = executable.which('patchelf')
+ if patchelf is None:
+ with spack.bootstrap.ensure_bootstrap_configuration():
+ patchelf = spack.bootstrap.ensure_patchelf_in_path_or_raise()
+
+ return patchelf.path
def _elf_rpaths_for(path):
@@ -807,7 +791,7 @@ def relocate_text(files, prefixes, concurrency=32):
# orig_sbang = '#!/bin/bash {0}/bin/sbang'.format(orig_spack)
# new_sbang = '#!/bin/bash {0}/bin/sbang'.format(new_spack)
- compiled_prefixes = OrderedDict({})
+ compiled_prefixes = collections.OrderedDict({})
for orig_prefix, new_prefix in prefixes.items():
if orig_prefix != new_prefix:
@@ -845,7 +829,7 @@ def relocate_text_bin(binaries, prefixes, concurrency=32):
Raises:
BinaryTextReplaceError: when the new path is longer than the old path
"""
- byte_prefixes = OrderedDict({})
+ byte_prefixes = collections.OrderedDict({})
for orig_prefix, new_prefix in prefixes.items():
if orig_prefix != new_prefix:
@@ -1032,7 +1016,7 @@ def fixup_macos_rpath(root, filename):
# Convert rpaths list to (name -> number of occurrences)
add_rpaths = set()
del_rpaths = set()
- rpaths = defaultdict(int)
+ rpaths = collections.defaultdict(int)
for rpath in rpath_list:
rpaths[rpath] += 1
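After this change, callers of _patchelf no longer trigger an ad-hoc concretize-and-install; a sketch of the resulting contract:

import spack.relocate

patchelf_path = spack.relocate._patchelf()
# None on macOS; otherwise the path of the patchelf already on PATH, or of
# the one pulled in by spack.bootstrap.ensure_patchelf_in_path_or_raise()
# inside the bootstrap configuration context.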
diff --git a/lib/spack/spack/reporters/cdash.py b/lib/spack/spack/reporters/cdash.py
index 19721fc676..73c75a1e18 100644
--- a/lib/spack/spack/reporters/cdash.py
+++ b/lib/spack/spack/reporters/cdash.py
@@ -2,9 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-
import codecs
+import collections
import hashlib
import os.path
import platform
@@ -13,7 +12,6 @@ import socket
import time
import xml.sax.saxutils
-from ordereddict_backport import OrderedDict
from six import iteritems, text_type
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import HTTPHandler, Request, build_opener
@@ -96,7 +94,7 @@ class CDash(Reporter):
buildstamp_format = "%Y%m%d-%H%M-{0}".format(args.cdash_track)
self.buildstamp = time.strftime(buildstamp_format,
time.localtime(self.endtime))
- self.buildIds = OrderedDict()
+ self.buildIds = collections.OrderedDict()
self.revision = ''
git = which('git')
with working_dir(spack.paths.spack_root):
diff --git a/lib/spack/spack/s3_handler.py b/lib/spack/spack/s3_handler.py
index 8f9322716a..3841287946 100644
--- a/lib/spack/spack/s3_handler.py
+++ b/lib/spack/spack/s3_handler.py
@@ -41,7 +41,8 @@ class WrapStream(BufferedReader):
def _s3_open(url):
parsed = url_util.parse(url)
- s3 = s3_util.create_s3_session(parsed)
+ s3 = s3_util.create_s3_session(parsed,
+ connection=s3_util.get_mirror_connection(parsed)) # noqa: E501
bucket = parsed.netloc
key = parsed.path
diff --git a/lib/spack/spack/schema/bootstrap.py b/lib/spack/spack/schema/bootstrap.py
index bd3c6630fb..e77001e854 100644
--- a/lib/spack/spack/schema/bootstrap.py
+++ b/lib/spack/spack/schema/bootstrap.py
@@ -39,7 +39,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack bootstrap configuration file schema',
'type': 'object',
'additionalProperties': False,
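The $schema bump (repeated for every schema module below) pins these documents to JSON Schema draft-07 rather than the unversioned meta-schema URI. An illustrative check, assuming a jsonschema version with draft-07 support; 'root' stands in for a real property:

import jsonschema

schema = {
    '$schema': 'http://json-schema.org/draft-07/schema#',
    'type': 'object',
    'additionalProperties': False,
    'properties': {'root': {'type': 'string'}},
}
# jsonschema selects the validator class from the $schema URI, so an
# explicit draft-07 URI fixes the dialect instead of leaving it to the
# library's default.
jsonschema.validate({'root': '~/.spack/bootstrap'}, schema)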
diff --git a/lib/spack/spack/schema/buildcache_spec.py b/lib/spack/spack/schema/buildcache_spec.py
index 563235c311..a72c70a59c 100644
--- a/lib/spack/spack/schema/buildcache_spec.py
+++ b/lib/spack/spack/schema/buildcache_spec.py
@@ -11,7 +11,7 @@
import spack.schema.spec
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack buildcache specfile schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/cdash.py b/lib/spack/spack/schema/cdash.py
index 41c0146b1d..b236159629 100644
--- a/lib/spack/spack/schema/cdash.py
+++ b/lib/spack/spack/schema/cdash.py
@@ -28,7 +28,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack cdash configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/compilers.py b/lib/spack/spack/schema/compilers.py
index 91ace05f4e..df32036491 100644
--- a/lib/spack/spack/schema/compilers.py
+++ b/lib/spack/spack/schema/compilers.py
@@ -84,7 +84,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack compiler configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/config.py b/lib/spack/spack/schema/config.py
index 0cf533eb18..801c6d5ea5 100644
--- a/lib/spack/spack/schema/config.py
+++ b/lib/spack/spack/schema/config.py
@@ -110,7 +110,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack core configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/database_index.py b/lib/spack/spack/schema/database_index.py
index 105f6ad6a3..7a6143d555 100644
--- a/lib/spack/spack/schema/database_index.py
+++ b/lib/spack/spack/schema/database_index.py
@@ -14,7 +14,7 @@ import spack.schema.spec
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack spec schema',
'type': 'object',
'required': ['database'],
diff --git a/lib/spack/spack/schema/env.py b/lib/spack/spack/schema/env.py
index de6a5d9568..5a9bfe9aa1 100644
--- a/lib/spack/spack/schema/env.py
+++ b/lib/spack/spack/schema/env.py
@@ -52,7 +52,7 @@ spec_list_schema = {
projections_scheme = spack.schema.projections.properties['projections']
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack environment file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/gitlab_ci.py b/lib/spack/spack/schema/gitlab_ci.py
index d6d8f564a3..4e5abe397b 100644
--- a/lib/spack/spack/schema/gitlab_ci.py
+++ b/lib/spack/spack/schema/gitlab_ci.py
@@ -153,7 +153,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack gitlab-ci configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/merged.py b/lib/spack/spack/schema/merged.py
index 11db4f78df..bbc288cb7a 100644
--- a/lib/spack/spack/schema/merged.py
+++ b/lib/spack/spack/schema/merged.py
@@ -40,7 +40,7 @@ properties = union_dicts(
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack merged configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/mirrors.py b/lib/spack/spack/schema/mirrors.py
index 6dec5aac97..38cb126d6c 100644
--- a/lib/spack/spack/schema/mirrors.py
+++ b/lib/spack/spack/schema/mirrors.py
@@ -24,8 +24,8 @@ properties = {
'type': 'object',
'required': ['fetch', 'push'],
'properties': {
- 'fetch': {'type': 'string'},
- 'push': {'type': 'string'}
+ 'fetch': {'type': ['string', 'object']},
+ 'push': {'type': ['string', 'object']}
}
}
]
@@ -37,7 +37,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack mirror configuration file schema',
'type': 'object',
'additionalProperties': False,
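With the relaxed types, each mirror direction accepts either a plain URL string or an object carrying connection details. A sketch of both shapes as Python literals (key names inferred from the get_access_pair/get_access_token/get_profile/get_endpoint_url accessors used in stage.py above; consult the full schema for the authoritative set):

# Simple form: one URL string per direction
mirror_simple = {'fetch': 's3://spack-public', 'push': 's3://spack-public'}

# Extended form: an object with credentials for the fetch direction
mirror_extended = {
    'fetch': {
        'url': 's3://spack-public',
        'access_token': 'aaaaaazzzzz',
        'endpoint_url': 'http://localhost/',
    },
    'push': 's3://spack-public',
}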
diff --git a/lib/spack/spack/schema/modules.py b/lib/spack/spack/schema/modules.py
index 90755f5588..4b30ae582f 100644
--- a/lib/spack/spack/schema/modules.py
+++ b/lib/spack/spack/schema/modules.py
@@ -219,7 +219,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack module file configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/packages.py b/lib/spack/spack/schema/packages.py
index 86aabc6649..55ed1bbf3b 100644
--- a/lib/spack/spack/schema/packages.py
+++ b/lib/spack/spack/schema/packages.py
@@ -154,7 +154,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack package configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/projections.py b/lib/spack/spack/schema/projections.py
index cab512fe3b..7623a5be6d 100644
--- a/lib/spack/spack/schema/projections.py
+++ b/lib/spack/spack/schema/projections.py
@@ -25,7 +25,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack view projection configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/repos.py b/lib/spack/spack/schema/repos.py
index 44cf688a48..d16c1b07bd 100644
--- a/lib/spack/spack/schema/repos.py
+++ b/lib/spack/spack/schema/repos.py
@@ -22,7 +22,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack repository configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/spec.py b/lib/spack/spack/schema/spec.py
index 3dd84553f7..5d2618bd80 100644
--- a/lib/spack/spack/schema/spec.py
+++ b/lib/spack/spack/schema/spec.py
@@ -192,7 +192,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack spec schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/schema/upstreams.py b/lib/spack/spack/schema/upstreams.py
index 7d721332eb..5075059d58 100644
--- a/lib/spack/spack/schema/upstreams.py
+++ b/lib/spack/spack/schema/upstreams.py
@@ -31,7 +31,7 @@ properties = {
#: Full schema with metadata
schema = {
- '$schema': 'http://json-schema.org/schema#',
+ '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack core configuration file schema',
'type': 'object',
'additionalProperties': False,
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index eaf7544e83..8523dd6838 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -1864,6 +1864,15 @@ class Spec(object):
return sjson.dump(self.to_dict(hash), stream)
@staticmethod
+ def from_specfile(path):
+ """Construct a spec from aJSON or YAML spec file path"""
+ with open(path, 'r') as fd:
+ file_content = fd.read()
+ if path.endswith('.json'):
+ return Spec.from_json(file_content)
+ return Spec.from_yaml(file_content)
+
+ @staticmethod
def from_node_dict(node):
spec = Spec()
if 'name' in node.keys():
@@ -3921,7 +3930,7 @@ class Spec(object):
elif 'version' in parts:
col = '@'
- # Finally, write the ouptut
+ # Finally, write the output
write(sig + morph(spec, str(current)), col)
attribute = ''
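from_specfile dispatches on the file extension, parsing .json files with from_json and anything else with from_yaml; a minimal usage sketch (paths are hypothetical):

import spack.spec

json_spec = spack.spec.Spec.from_specfile('./zlib.spec.json')  # read as JSON
yaml_spec = spack.spec.Spec.from_specfile('./zlib.spec.yaml')  # fallback: YAML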
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index bffd06ab73..ceff320d6e 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -437,11 +437,20 @@ class Stage(object):
# Join URLs of mirror roots with mirror paths. Because
# urljoin() strips everything past the final '/' in the
# root, we add a '/' if it is not present.
- mirror_urls = []
+ mirror_urls = {}
for mirror in spack.mirror.MirrorCollection().values():
for rel_path in self.mirror_paths:
- mirror_urls.append(
- url_util.join(mirror.fetch_url, rel_path))
+ mirror_url = url_util.join(mirror.fetch_url, rel_path)
+ mirror_urls[mirror_url] = {}
+ if mirror.get_access_pair("fetch") or \
+ mirror.get_access_token("fetch") or \
+ mirror.get_profile("fetch"):
+ mirror_urls[mirror_url] = {
+ "access_token": mirror.get_access_token("fetch"),
+ "access_pair": mirror.get_access_pair("fetch"),
+ "access_profile": mirror.get_profile("fetch"),
+ "endpoint_url": mirror.get_endpoint_url("fetch")
+ }
# If this archive is normally fetched from a tarball URL,
# then use the same digest. `spack mirror` ensures that
@@ -460,10 +469,11 @@ class Stage(object):
# Add URL strategies for all the mirrors with the digest
# Insert fetchers in the order that the URLs are provided.
- for url in reversed(mirror_urls):
+ for url in reversed(list(mirror_urls.keys())):
fetchers.insert(
0, fs.from_url_scheme(
- url, digest, expand=expand, extension=extension))
+ url, digest, expand=expand, extension=extension,
+ connection=mirror_urls[url]))
if self.default_fetcher.cachable:
for rel_path in reversed(list(self.mirror_paths)):
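After this hunk, mirror_urls maps each candidate URL to the connection settings later handed to fs.from_url_scheme (an empty dict when the mirror defines no credentials); roughly:

# Illustrative shape only; values come from the mirror accessors above
mirror_urls = {
    's3://spack-public/path/to/archive.tar.gz': {
        'access_token': None,
        'access_pair': ('foo', 'bar'),
        'access_profile': None,
        'endpoint_url': 'http://localhost/',
    },
    'https://mirror.example.com/path/to/archive.tar.gz': {},
}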
diff --git a/lib/spack/spack/store.py b/lib/spack/spack/store.py
index b0a418c7e3..1b372b848c 100644
--- a/lib/spack/spack/store.py
+++ b/lib/spack/spack/store.py
@@ -29,6 +29,7 @@ import llnl.util.tty as tty
import spack.config
import spack.database
import spack.directory_layout
+import spack.error
import spack.paths
import spack.util.path
@@ -284,6 +285,75 @@ def _construct_upstream_dbs_from_install_roots(
return accumulated_upstream_dbs
+def find(constraints, multiple=False, query_fn=None, **kwargs):
+ """Return a list of specs matching the constraints passed as inputs.
+
+ At least one spec per constraint must match; otherwise the function
+ will error with an appropriate message.
+
+ By default, this function queries the current store, but a custom query
+ function can be passed to hit any other source of concretized specs
+ (e.g. a binary cache).
+
+ The query function must accept a spec as its first argument.
+
+ Args:
+ constraints (List[spack.spec.Spec]): specs to be matched against
+ installed packages
+ multiple (bool): if True, multiple matches per constraint are admitted
+ query_fn (Callable): query function to get matching specs. By default,
+ ``spack.store.db.query``
+ **kwargs: keyword arguments forwarded to the query function
+
+ Return:
+ List of matching specs
+ """
+ # Normalize input to list of specs
+ if isinstance(constraints, six.string_types):
+ constraints = [spack.spec.Spec(constraints)]
+
+ matching_specs, errors = [], []
+ query_fn = query_fn or spack.store.db.query
+ for spec in constraints:
+ current_matches = query_fn(spec, **kwargs)
+
+ # For each spec provided, make sure it refers to only one package.
+ if not multiple and len(current_matches) > 1:
+ msg_fmt = '"{0}" matches multiple packages: [{1}]'
+ errors.append(
+ msg_fmt.format(spec, ', '.join([m.format() for m in current_matches]))
+ )
+
+ # No installed package matches the query
+ if len(current_matches) == 0 and spec is not any:
+ msg_fmt = '"{0}" does not match any installed packages'
+ errors.append(msg_fmt.format(spec))
+
+ matching_specs.extend(current_matches)
+
+ if errors:
+ raise MatchError(
+ message="errors occurred when looking for specs in the store",
+ long_message='\n'.join(errors)
+ )
+
+ return matching_specs
+
+
+def specfile_matches(filename, **kwargs):
+ """Same as find but reads the query from a spec file.
+
+ Args:
+ filename (str): YAML or JSON file from which to read the query.
+ **kwargs: keyword arguments forwarded to "find"
+
+ Return:
+ List of matches
+ """
+ query = [spack.spec.Spec.from_specfile(filename)]
+ return spack.store.find(query, **kwargs)
+
+
@contextlib.contextmanager
def use_store(store_or_path):
"""Use the store passed as argument within the context manager.
@@ -314,3 +384,7 @@ def use_store(store_or_path):
store = original_store
db, layout = original_store.db, original_store.layout
root, unpadded_root = original_store.root, original_store.unpadded_root
+
+
+class MatchError(spack.error.SpackError):
+ """Error occurring when trying to match specs in store against a constraint"""
diff --git a/lib/spack/spack/test/bootstrap.py b/lib/spack/spack/test/bootstrap.py
index 99c1a61fd3..9ae4c85c6a 100644
--- a/lib/spack/spack/test/bootstrap.py
+++ b/lib/spack/spack/test/bootstrap.py
@@ -140,3 +140,30 @@ spack:
with spack.bootstrap.ensure_bootstrap_configuration():
pass
assert str(spack.store.root) == '/tmp/store'
+
+
+def test_nested_use_of_context_manager(mutable_config):
+ """Test nested use of the context manager"""
+ user_config = spack.config.config
+ with spack.bootstrap.ensure_bootstrap_configuration():
+ assert spack.config.config != user_config
+ with spack.bootstrap.ensure_bootstrap_configuration():
+ assert spack.config.config != user_config
+ assert spack.config.config == user_config
+
+
+@pytest.mark.parametrize('expected_missing', [False, True])
+def test_status_function_find_files(
+ mutable_config, mock_executable, tmpdir, monkeypatch, expected_missing
+):
+ if not expected_missing:
+ mock_executable('foo', 'echo Hello World!')
+
+ monkeypatch.setattr(
+ spack.bootstrap, '_optional_requirements',
+ lambda: [spack.bootstrap._required_system_executable('foo', 'NOT FOUND')]
+ )
+ monkeypatch.setenv('PATH', str(tmpdir.join('bin')))
+
+ _, missing = spack.bootstrap.status_message('optional')
+ assert missing is expected_missing
diff --git a/lib/spack/spack/test/build_distribution.py b/lib/spack/spack/test/build_distribution.py
index 1514b3e903..042e7fe9f9 100644
--- a/lib/spack/spack/test/build_distribution.py
+++ b/lib/spack/spack/test/build_distribution.py
@@ -22,13 +22,12 @@ def test_build_tarball_overwrite(
install(str(spec))
# Runs fine the first time, throws the second time
- spack.binary_distribution.build_tarball(spec, '.', unsigned=True)
+ spack.binary_distribution._build_tarball(spec, '.', unsigned=True)
with pytest.raises(spack.binary_distribution.NoOverwriteException):
- spack.binary_distribution.build_tarball(spec, '.', unsigned=True)
+ spack.binary_distribution._build_tarball(spec, '.', unsigned=True)
# Should work fine with force=True
- spack.binary_distribution.build_tarball(
- spec, '.', force=True, unsigned=True)
+ spack.binary_distribution._build_tarball(spec, '.', force=True, unsigned=True)
# Remove the tarball and try again.
# This must *also* throw, because of the existing .spec.json file
@@ -38,4 +37,4 @@ def test_build_tarball_overwrite(
spack.binary_distribution.tarball_name(spec, '.spack')))
with pytest.raises(spack.binary_distribution.NoOverwriteException):
- spack.binary_distribution.build_tarball(spec, '.', unsigned=True)
+ spack.binary_distribution._build_tarball(spec, '.', unsigned=True)
diff --git a/lib/spack/spack/test/build_systems.py b/lib/spack/spack/test/build_systems.py
index b9105538b2..fec337eb8f 100644
--- a/lib/spack/spack/test/build_systems.py
+++ b/lib/spack/spack/test/build_systems.py
@@ -429,3 +429,19 @@ class TestXorgPackage(object):
assert pkg.urls[0] == 'https://www.x.org/archive/individual/' \
'util/util-macros-1.19.1.tar.bz2'
+
+
+def test_cmake_define_from_variant_conditional(config, mock_packages):
+ """Test that define_from_variant returns empty string when a condition on a variant
+ is not met. When this is the case, the variant is not set in the spec."""
+ s = Spec('cmake-conditional-variants-test').concretized()
+ assert 'example' not in s.variants
+ assert s.package.define_from_variant('EXAMPLE', 'example') == ''
+
+
+def test_autotools_args_from_conditional_variant(config, mock_packages):
+ """Test that _activate_or_not returns an empty string when a condition on a variant
+ is not met. When this is the case, the variant is not set in the spec."""
+ s = Spec('autotools-conditional-variants-test').concretized()
+ assert 'example' not in s.variants
+ assert len(s.package._activate_or_not('example', 'enable', 'disable')) == 0
diff --git a/lib/spack/spack/test/cc.py b/lib/spack/spack/test/cc.py
index 5381782480..f9cbe9fe17 100644
--- a/lib/spack/spack/test/cc.py
+++ b/lib/spack/spack/test/cc.py
@@ -102,7 +102,7 @@ common_compile_args = (
)
-@pytest.fixture(scope='session')
+@pytest.fixture(scope='function')
def wrapper_environment():
with set_env(
SPACK_CC=real_cc,
diff --git a/lib/spack/spack/test/cmd/audit.py b/lib/spack/spack/test/cmd/audit.py
index 2c9dc6b124..e7fe2d68da 100644
--- a/lib/spack/spack/test/cmd/audit.py
+++ b/lib/spack/spack/test/cmd/audit.py
@@ -41,7 +41,7 @@ def test_audit_packages_https(mutable_config, mock_packages):
assert audit.returncode == 1
# This uses http and should fail
- audit('packages-https', "preferred-test", fail_on_error=False)
+ audit('packages-https', "test-dependency", fail_on_error=False)
assert audit.returncode == 1
# providing one or more package names with https should work
diff --git a/lib/spack/spack/test/cmd/build_env.py b/lib/spack/spack/test/cmd/build_env.py
index d3d941f0b7..d7458e0d3c 100644
--- a/lib/spack/spack/test/cmd/build_env.py
+++ b/lib/spack/spack/test/cmd/build_env.py
@@ -15,12 +15,12 @@ build_env = SpackCommand('build-env')
('zlib',),
('zlib', '--')
])
-@pytest.mark.usefixtures('config')
+@pytest.mark.usefixtures('config', 'mock_packages', 'working_env')
def test_it_just_runs(pkg):
build_env(*pkg)
-@pytest.mark.usefixtures('config')
+@pytest.mark.usefixtures('config', 'mock_packages', 'working_env')
def test_error_when_multiple_specs_are_given():
output = build_env('libelf libdwarf', fail_on_error=False)
assert 'only takes one spec' in output
@@ -31,7 +31,7 @@ def test_error_when_multiple_specs_are_given():
('--',),
(),
])
-@pytest.mark.usefixtures('config')
+@pytest.mark.usefixtures('config', 'mock_packages', 'working_env')
def test_build_env_requires_a_spec(args):
output = build_env(*args, fail_on_error=False)
assert 'requires a spec' in output
@@ -40,7 +40,7 @@ def test_build_env_requires_a_spec(args):
_out_file = 'env.out'
-@pytest.mark.usefixtures('config')
+@pytest.mark.usefixtures('config', 'mock_packages', 'working_env')
def test_dump(tmpdir):
with tmpdir.as_cwd():
build_env('--dump', _out_file, 'zlib')
@@ -48,7 +48,7 @@ def test_dump(tmpdir):
assert(any(line.startswith('PATH=') for line in f.readlines()))
-@pytest.mark.usefixtures('config')
+@pytest.mark.usefixtures('config', 'mock_packages', 'working_env')
def test_pickle(tmpdir):
with tmpdir.as_cwd():
build_env('--pickle', _out_file, 'zlib')
diff --git a/lib/spack/spack/test/cmd/checksum.py b/lib/spack/spack/test/cmd/checksum.py
index 6c05b03d59..c741377ce6 100644
--- a/lib/spack/spack/test/cmd/checksum.py
+++ b/lib/spack/spack/test/cmd/checksum.py
@@ -30,7 +30,7 @@ def test_checksum_args(arguments, expected):
@pytest.mark.parametrize('arguments,expected', [
- (['--batch', 'preferred-test'], 'versions of preferred-test'),
+ (['--batch', 'preferred-test'], 'version of preferred-test'),
(['--latest', 'preferred-test'], 'Found 1 version'),
(['--preferred', 'preferred-test'], 'Found 1 version'),
])
@@ -47,7 +47,7 @@ def test_checksum_interactive(
monkeypatch.setattr(tty, 'get_number', _get_number)
output = spack_checksum('preferred-test')
- assert 'versions of preferred-test' in output
+ assert 'version of preferred-test' in output
assert 'version(' in output
diff --git a/lib/spack/spack/test/cmd/ci.py b/lib/spack/spack/test/cmd/ci.py
index c17ab1b4d5..b9bf02adb2 100644
--- a/lib/spack/spack/test/cmd/ci.py
+++ b/lib/spack/spack/test/cmd/ci.py
@@ -12,8 +12,8 @@ import pytest
from jsonschema import ValidationError, validate
import spack
+import spack.binary_distribution
import spack.ci as ci
-import spack.cmd.buildcache as buildcache
import spack.compilers as compilers
import spack.config
import spack.environment as ev
@@ -897,11 +897,11 @@ spack:
set_env_var('SPACK_COMPILER_ACTION', 'NONE')
set_env_var('SPACK_REMOTE_MIRROR_URL', mirror_url)
- def fake_dl_method(spec, dest, require_cdashid, m_url=None):
+ def fake_dl_method(spec, *args, **kwargs):
print('fake download buildcache {0}'.format(spec.name))
monkeypatch.setattr(
- buildcache, 'download_buildcache_files', fake_dl_method)
+ spack.binary_distribution, 'download_single_spec', fake_dl_method)
ci_out = ci_cmd('rebuild', output=str)
@@ -970,8 +970,7 @@ spack:
install_cmd('--keep-stage', json_path)
# env, json_path, mirror_url, sign_binaries
- ci.push_mirror_contents(
- env, concrete_spec, json_path, mirror_url, True)
+ ci.push_mirror_contents(env, json_path, mirror_url, True)
ci.write_cdashid_to_mirror('42', concrete_spec, mirror_url)
@@ -1063,23 +1062,20 @@ spack:
def test_push_mirror_contents_exceptions(monkeypatch, capsys):
- def faked(env, spec_file=None, packages=None, add_spec=True,
- add_deps=True, output_location=os.getcwd(),
- signing_key=None, force=False, make_relative=False,
- unsigned=False, allow_root=False, rebuild_index=False):
+ def failing_access(*args, **kwargs):
raise Exception('Error: Access Denied')
- import spack.cmd.buildcache as buildcache
- monkeypatch.setattr(buildcache, '_createtarball', faked)
+ monkeypatch.setattr(spack.ci, '_push_mirror_contents', failing_access)
+ # Input doesn't matter, as we are faking exceptional output
url = 'fakejunk'
- ci.push_mirror_contents(None, None, None, url, None)
+ ci.push_mirror_contents(None, None, url, None)
captured = capsys.readouterr()
std_out = captured[0]
expect_msg = 'Permission problem writing to {0}'.format(url)
- assert(expect_msg in std_out)
+ assert expect_msg in std_out
def test_ci_generate_override_runner_attrs(tmpdir, mutable_mock_env_path,
diff --git a/lib/spack/spack/test/cmd/dev_build.py b/lib/spack/spack/test/cmd/dev_build.py
index 765d4dc81b..4c59e300be 100644
--- a/lib/spack/spack/test/cmd/dev_build.py
+++ b/lib/spack/spack/test/cmd/dev_build.py
@@ -134,7 +134,7 @@ def mock_module_noop(*args):
def test_dev_build_drop_in(tmpdir, mock_packages, monkeypatch,
- install_mockery):
+ install_mockery, working_env):
monkeypatch.setattr(os, 'execvp', print_spack_cc)
monkeypatch.setattr(spack.build_environment, 'module', mock_module_noop)
diff --git a/lib/spack/spack/test/cmd/mirror.py b/lib/spack/spack/test/cmd/mirror.py
index 8716d59e05..4c0238deb2 100644
--- a/lib/spack/spack/test/cmd/mirror.py
+++ b/lib/spack/spack/test/cmd/mirror.py
@@ -155,7 +155,7 @@ def test_mirror_crud(tmp_scope, capsys):
# no-op
output = mirror('set-url', '--scope', tmp_scope,
'mirror', 'http://spack.io')
- assert 'Url already set' in output
+ assert 'No changes made' in output
output = mirror('set-url', '--scope', tmp_scope,
'--push', 'mirror', 's3://spack-public')
@@ -164,7 +164,32 @@ def test_mirror_crud(tmp_scope, capsys):
# no-op
output = mirror('set-url', '--scope', tmp_scope,
'--push', 'mirror', 's3://spack-public')
- assert 'Url already set' in output
+ assert 'No changes made' in output
+
+ output = mirror('remove', '--scope', tmp_scope, 'mirror')
+ assert 'Removed mirror' in output
+
+ # Test S3 connection info token
+ mirror('add', '--scope', tmp_scope,
+ '--s3-access-token', 'aaaaaazzzzz',
+ 'mirror', 's3://spack-public')
+
+ output = mirror('remove', '--scope', tmp_scope, 'mirror')
+ assert 'Removed mirror' in output
+
+ # Test S3 connection info id/key
+ mirror('add', '--scope', tmp_scope,
+ '--s3-access-key-id', 'foo', '--s3-access-key-secret', 'bar',
+ 'mirror', 's3://spack-public')
+
+ output = mirror('remove', '--scope', tmp_scope, 'mirror')
+ assert 'Removed mirror' in output
+
+ # Test S3 connection info with endpoint URL
+ mirror('add', '--scope', tmp_scope,
+ '--s3-access-token', 'aaaaaazzzzz',
+ '--s3-endpoint-url', 'http://localhost/',
+ 'mirror', 's3://spack-public')
output = mirror('remove', '--scope', tmp_scope, 'mirror')
assert 'Removed mirror' in output
diff --git a/lib/spack/spack/test/cmd/style.py b/lib/spack/spack/test/cmd/style.py
index 29cde14400..4e8b3b9784 100644
--- a/lib/spack/spack/test/cmd/style.py
+++ b/lib/spack/spack/test/cmd/style.py
@@ -41,7 +41,8 @@ pytestmark = pytest.mark.skipif(not has_develop_branch(),
# The style tools have requirements to use newer Python versions. We simplify by
# requiring Python 3.6 or higher to run spack style.
skip_old_python = pytest.mark.skipif(
- sys.version_info < (3, 6), reason='requires Python 3.6 or higher')
+ sys.version_info < (3, 6), reason='requires Python 3.6 or higher'
+)
@pytest.fixture(scope="function")
@@ -164,18 +165,6 @@ def test_style_is_package(tmpdir):
assert not spack.cmd.style.is_package("lib/spack/external/pytest.py")
-@skip_old_python
-def test_bad_bootstrap(monkeypatch):
- """Ensure we fail gracefully when we can't bootstrap spack style."""
- monkeypatch.setattr(spack.cmd.style, "tool_order", [
- ("isort", "py-isort@4.3:4.0"), # bad spec to force concretization failure
- ])
- # zero out path to ensure we don't find isort
- with pytest.raises(spack.error.SpackError) as e:
- style(env={"PATH": ""})
- assert "Couldn't bootstrap isort" in str(e)
-
-
@pytest.fixture
def external_style_root(flake8_package_with_errors, tmpdir):
"""Create a mock git repository for running spack style."""
diff --git a/lib/spack/spack/test/cmd/test.py b/lib/spack/spack/test/cmd/test.py
index 9ee117b281..f82f767bc8 100644
--- a/lib/spack/spack/test/cmd/test.py
+++ b/lib/spack/spack/test/cmd/test.py
@@ -11,6 +11,7 @@ import pytest
import spack.cmd.install
import spack.config
import spack.package
+import spack.store
from spack.cmd.test import has_test_method
from spack.main import SpackCommand
@@ -231,3 +232,31 @@ def test_has_test_method_fails(capsys):
captured = capsys.readouterr()[1]
assert 'is not a class' in captured
+
+
+def test_hash_change(mock_test_stage, mock_packages, mock_archive, mock_fetch,
+ install_mockery_mutable_config):
+ """Ensure output printed from pkgs is captured by output redirection."""
+ install('printing-package')
+ spack_test('run', '--alias', 'printpkg', 'printing-package')
+
+ stage_files = os.listdir(mock_test_stage)
+
+ # Grab test stage directory contents
+ testdir = os.path.join(mock_test_stage, stage_files[0])
+
+ outfile = os.path.join(testdir, 'test_suite.lock')
+ with open(outfile, 'r') as f:
+ output = f.read()
+ changed_hash = output.replace(
+ spack.store.db.query('printing-package')[0].full_hash(),
+ 'fakehash492ucwhwvzhxfbmcc45x49ha')
+ with open(outfile, 'w') as f:
+ f.write(changed_hash)
+
+ # The find command should show the contents
+ find_output = spack_test('find')
+ assert 'printpkg' in find_output
+ # The results should be obtainable
+ results_output = spack_test('results')
+ assert 'PASSED' in results_output
diff --git a/lib/spack/spack/test/cmd/unit_test.py b/lib/spack/spack/test/cmd/unit_test.py
index 1a273ff244..9811bdbfbd 100644
--- a/lib/spack/spack/test/cmd/unit_test.py
+++ b/lib/spack/spack/test/cmd/unit_test.py
@@ -22,7 +22,10 @@ def test_list_with_pytest_arg():
def test_list_with_keywords():
- output = spack_test('--list', '-k', 'cmd/unit_test.py')
+ # Here we removed querying with a "/" to separate directories
+ # since the behavior is inconsistent across different pytest
+ # versions, see https://stackoverflow.com/a/48814787/771663
+ output = spack_test('--list', '-k', 'unit_test.py')
assert output.strip() == cmd_test_py
diff --git a/lib/spack/spack/test/cmd/url.py b/lib/spack/spack/test/cmd/url.py
index f9179720b1..249ea6ec57 100644
--- a/lib/spack/spack/test/cmd/url.py
+++ b/lib/spack/spack/test/cmd/url.py
@@ -3,7 +3,6 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
-import sys
import pytest
@@ -69,14 +68,6 @@ def test_url_with_no_version_fails():
url('parse', 'http://www.netlib.org/voronoi/triangle.zip')
-skip_python_26 = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason="Python 2.6 tests are run in a container, where "
- "networking is super slow"
-)
-
-
-@skip_python_26
def test_url_list(mock_packages):
out = url('list')
total_urls = len(out.split('\n'))
@@ -106,7 +97,6 @@ def test_url_list(mock_packages):
assert 0 < correct_version_urls < total_urls
-@skip_python_26
def test_url_summary(mock_packages):
"""Test the URL summary command."""
# test url_summary, the internal function that does the work
@@ -133,7 +123,6 @@ def test_url_summary(mock_packages):
assert out_correct_versions == correct_versions
-@skip_python_26
def test_url_stats(capfd, mock_packages):
with capfd.disabled():
output = url('stats')
diff --git a/lib/spack/spack/test/compilers/detection.py b/lib/spack/spack/test/compilers/detection.py
index 8bc3285d43..6eaced6b6d 100644
--- a/lib/spack/spack/test/compilers/detection.py
+++ b/lib/spack/spack/test/compilers/detection.py
@@ -204,6 +204,11 @@ def test_intel_version_detection(version_str, expected_version):
'Copyright (C) 1985-2021 Intel Corporation. All rights reserved.',
'2021.4.0'
),
+ ( # IFX
+ 'ifx (IFORT) 2022.0.0 20211123\n'
+ 'Copyright (C) 1985-2021 Intel Corporation. All rights reserved.',
+ '2022.0.0'
+ ),
])
def test_oneapi_version_detection(version_str, expected_version):
version = spack.compilers.oneapi.Oneapi.extract_version_from_output(
diff --git a/lib/spack/spack/test/concretize_preferences.py b/lib/spack/spack/test/concretize_preferences.py
index 733e3efa77..80d9b0eb94 100644
--- a/lib/spack/spack/test/concretize_preferences.py
+++ b/lib/spack/spack/test/concretize_preferences.py
@@ -176,16 +176,16 @@ class TestConcretizePreferences(object):
def test_preferred(self):
""""Test packages with some version marked as preferred=True"""
- spec = Spec('preferred-test')
+ spec = Spec('python')
spec.concretize()
- assert spec.version == Version('0.2.15')
+ assert spec.version == Version('2.7.11')
# now add packages.yaml with versions other than preferred
# ensure that once config is in place, non-preferred version is used
- update_packages('preferred-test', 'version', ['0.2.16'])
- spec = Spec('preferred-test')
+ update_packages('python', 'version', ['3.5.0'])
+ spec = Spec('python')
spec.concretize()
- assert spec.version == Version('0.2.16')
+ assert spec.version == Version('3.5.0')
def test_develop(self):
"""Test concretization with develop-like versions"""
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
index 9a6dc0b13b..d502cf1db1 100644
--- a/lib/spack/spack/test/conftest.py
+++ b/lib/spack/spack/test/conftest.py
@@ -430,8 +430,14 @@ def _skip_if_missing_executables(request):
"""Permits to mark tests with 'require_executables' and skip the
tests if the executables passed as arguments are not found.
"""
- if request.node.get_marker('requires_executables'):
- required_execs = request.node.get_marker('requires_executables').args
+ if hasattr(request.node, 'get_marker'):
+ # TODO: Remove the deprecated API as soon as we drop support for Python 2.6
+ marker = request.node.get_marker('requires_executables')
+ else:
+ marker = request.node.get_closest_marker('requires_executables')
+
+ if marker:
+ required_execs = marker.args
missing_execs = [
x for x in required_execs if spack.util.executable.which(x) is None
]
@@ -1453,7 +1459,7 @@ def invalid_spec(request):
return request.param
-@pytest.fixture("module")
+@pytest.fixture(scope='module')
def mock_test_repo(tmpdir_factory):
"""Create an empty repository."""
repo_namespace = 'mock_test_repo'
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
index 57a03a5db9..925acc83d3 100644
--- a/lib/spack/spack/test/database.py
+++ b/lib/spack/spack/test/database.py
@@ -909,3 +909,18 @@ def test_database_works_with_empty_dir(tmpdir):
db.query()
# Check that reading an empty directory didn't create a new index.json
assert not os.path.exists(db._index_path)
+
+
+@pytest.mark.parametrize('query_arg,exc_type,msg_str', [
+ (['callpath'], spack.store.MatchError, 'matches multiple packages'),
+ (['tensorflow'], spack.store.MatchError, 'does not match any')
+])
+def test_store_find_failures(database, query_arg, exc_type, msg_str):
+ with pytest.raises(exc_type) as exc_info:
+ spack.store.find(query_arg, multiple=False)
+ assert msg_str in str(exc_info.value)
+
+
+def test_store_find_accept_string(database):
+ result = spack.store.find('callpath', multiple=True)
+ assert len(result) == 3
diff --git a/lib/spack/spack/test/directives.py b/lib/spack/spack/test/directives.py
index 31038ebb47..ad7c98bdb9 100644
--- a/lib/spack/spack/test/directives.py
+++ b/lib/spack/spack/test/directives.py
@@ -51,3 +51,12 @@ def test_constraints_from_context_are_merged(mock_packages):
assert pkg_cls.dependencies
assert spack.spec.Spec('@0.14:15 ^b@3.8:4.0') in pkg_cls.dependencies['c']
+
+
+@pytest.mark.regression('27754')
+def test_extends_spec(config, mock_packages):
+ extender = spack.spec.Spec('extends-spec').concretized()
+ extendee = spack.spec.Spec('extendee').concretized()
+
+ assert extender.dependencies
+ assert extender.package.extends(extendee)
diff --git a/lib/spack/spack/test/llnl/util/tty/log.py b/lib/spack/spack/test/llnl/util/tty/log.py
index 78d4e24fa4..c3cb96da8a 100644
--- a/lib/spack/spack/test/llnl/util/tty/log.py
+++ b/lib/spack/spack/test/llnl/util/tty/log.py
@@ -62,6 +62,24 @@ def test_log_python_output_without_echo(capfd, tmpdir):
assert capfd.readouterr()[0] == ''
+def test_log_python_output_with_invalid_utf8(capfd, tmpdir):
+ with tmpdir.as_cwd():
+ with log_output('foo.txt'):
+ sys.stdout.buffer.write(b'\xc3\x28\n')
+
+ # python2 and 3 treat invalid UTF-8 differently
+ if sys.version_info.major == 2:
+ expected = b'\xc3(\n'
+ else:
+ expected = b'<line lost: output was not encoded as UTF-8>\n'
+ with open('foo.txt', 'rb') as f:
+ written = f.read()
+ assert written == expected
+
+ # nothing on stdout or stderr
+ assert capfd.readouterr()[0] == ''
+
+
def test_log_python_output_and_echo_output(capfd, tmpdir):
with tmpdir.as_cwd():
# echo two lines
@@ -429,10 +447,6 @@ def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
(mock_shell_v_v, nullcontext),
(mock_shell_v_v_no_termios, no_termios),
])
-@pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason="Python 2.6 tests are run in a container, where this fails often"
-)
def test_foreground_background_output(
test_fn, capfd, termios_on_or_off, tmpdir):
"""Tests hitting 'v' toggles output, and that force_echo works."""
diff --git a/lib/spack/spack/test/monitor.py b/lib/spack/spack/test/monitor.py
index b060c725ee..e5888231e5 100644
--- a/lib/spack/spack/test/monitor.py
+++ b/lib/spack/spack/test/monitor.py
@@ -18,18 +18,13 @@ from spack.monitor import SpackMonitorClient
install = SpackCommand('install')
-def get_client(host, prefix="ms1", disable_auth=False, allow_fail=False, tags=None,
- save_local=False):
+def get_client(host, prefix="ms1", allow_fail=False, tags=None, save_local=False):
"""
We replicate this function to avoid creating a global client.
"""
cli = SpackMonitorClient(host=host, prefix=prefix, allow_fail=allow_fail,
tags=tags, save_local=save_local)
- # If we don't disable auth, environment credentials are required
- if not disable_auth and not save_local:
- cli.require_auth()
-
# We will exit early if the monitoring service is not running, but
# only if we aren't doing a local save
if not save_local:
@@ -131,20 +126,17 @@ def mock_monitor_request(monkeypatch):
def test_spack_monitor_auth(mock_monitor_request):
- with pytest.raises(SystemExit):
- get_client(host="http://127.0.0.1")
-
os.environ["SPACKMON_TOKEN"] = "xxxxxxxxxxxxxxxxx"
os.environ["SPACKMON_USER"] = "spackuser"
get_client(host="http://127.0.0.1")
def test_spack_monitor_without_auth(mock_monitor_request):
- get_client(host="hostname", disable_auth=True)
+ get_client(host="hostname")
def test_spack_monitor_build_env(mock_monitor_request, install_mockery_mutable_config):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
assert hasattr(monitor, "build_environment")
for key in ["host_os", "platform", "host_target", "hostname", "spack_version",
"kernel_version"]:
@@ -157,7 +149,7 @@ def test_spack_monitor_build_env(mock_monitor_request, install_mockery_mutable_c
def test_spack_monitor_basic_auth(mock_monitor_request):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
# Headers should be empty
assert not monitor.headers
@@ -167,7 +159,7 @@ def test_spack_monitor_basic_auth(mock_monitor_request):
def test_spack_monitor_new_configuration(mock_monitor_request, install_mockery):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
spec = spack.spec.Spec("dttop")
spec.concretize()
response = monitor.new_configuration([spec])
@@ -178,7 +170,7 @@ def test_spack_monitor_new_configuration(mock_monitor_request, install_mockery):
def test_spack_monitor_new_build(mock_monitor_request, install_mockery_mutable_config,
install_mockery):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
spec = spack.spec.Spec("dttop")
spec.concretize()
response = monitor.new_build(spec)
@@ -190,7 +182,7 @@ def test_spack_monitor_new_build(mock_monitor_request, install_mockery_mutable_c
def test_spack_monitor_update_build(mock_monitor_request, install_mockery,
install_mockery_mutable_config):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
spec = spack.spec.Spec("dttop")
spec.concretize()
response = monitor.update_build(spec, status="SUCCESS")
@@ -200,7 +192,7 @@ def test_spack_monitor_update_build(mock_monitor_request, install_mockery,
def test_spack_monitor_fail_task(mock_monitor_request, install_mockery,
install_mockery_mutable_config):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
spec = spack.spec.Spec("dttop")
spec.concretize()
response = monitor.fail_task(spec)
@@ -215,7 +207,7 @@ def test_spack_monitor_send_analyze_metadata(monkeypatch, mock_monitor_request,
def buildid(*args, **kwargs):
return 1
monkeypatch.setattr(spack.monitor.SpackMonitorClient, "get_build_id", buildid)
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
spec = spack.spec.Spec("dttop")
spec.concretize()
response = monitor.send_analyze_metadata(spec.package, metadata={"boop": "beep"})
@@ -226,7 +218,7 @@ def test_spack_monitor_send_analyze_metadata(monkeypatch, mock_monitor_request,
def test_spack_monitor_send_phase(mock_monitor_request, install_mockery,
install_mockery_mutable_config):
- monitor = get_client(host="hostname", disable_auth=True)
+ monitor = get_client(host="hostname")
def get_build_id(*args, **kwargs):
return 1
diff --git a/lib/spack/spack/test/relocate.py b/lib/spack/spack/test/relocate.py
index 1e4b771fa6..61a605c976 100644
--- a/lib/spack/spack/test/relocate.py
+++ b/lib/spack/spack/test/relocate.py
@@ -2,7 +2,6 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-import collections
import os.path
import re
import shutil
@@ -73,47 +72,6 @@ def source_file(tmpdir, is_relocatable):
return src
-@pytest.fixture(params=['which_found', 'installed', 'to_be_installed'])
-def expected_patchelf_path(request, mutable_database, monkeypatch):
- """Prepare the stage to tests different cases that can occur
- when searching for patchelf.
- """
- case = request.param
-
- # Mock the which function
- which_fn = {
- 'which_found': lambda x: collections.namedtuple(
- '_', ['path']
- )('/usr/bin/patchelf')
- }
- monkeypatch.setattr(
- spack.util.executable, 'which',
- which_fn.setdefault(case, lambda x: None)
- )
- if case == 'which_found':
- return '/usr/bin/patchelf'
-
- # TODO: Mock a case for Darwin architecture
-
- spec = spack.spec.Spec('patchelf')
- spec.concretize()
-
- patchelf_cls = type(spec.package)
- do_install = patchelf_cls.do_install
- expected_path = os.path.join(spec.prefix.bin, 'patchelf')
-
- def do_install_mock(self, **kwargs):
- do_install(self, fake=True)
- with open(expected_path):
- pass
-
- monkeypatch.setattr(patchelf_cls, 'do_install', do_install_mock)
- if case == 'installed':
- spec.package.do_install()
-
- return expected_path
-
-
@pytest.fixture()
def mock_patchelf(tmpdir, mock_executable):
def _factory(output):
@@ -227,6 +185,7 @@ def copy_binary():
@pytest.mark.requires_executables(
'/usr/bin/gcc', 'patchelf', 'strings', 'file'
)
+@skip_unless_linux
def test_file_is_relocatable(source_file, is_relocatable):
compiler = spack.util.executable.Executable('/usr/bin/gcc')
executable = str(source_file).replace('.c', '.x')
@@ -240,8 +199,9 @@ def test_file_is_relocatable(source_file, is_relocatable):
@pytest.mark.requires_executables('patchelf', 'strings', 'file')
+@skip_unless_linux
def test_patchelf_is_relocatable():
- patchelf = spack.relocate._patchelf()
+ patchelf = os.path.realpath(spack.relocate._patchelf())
assert llnl.util.filesystem.is_exe(patchelf)
assert spack.relocate.file_is_relocatable(patchelf)
@@ -263,12 +223,6 @@ def test_file_is_relocatable_errors(tmpdir):
assert 'is not an absolute path' in str(exc_info.value)
-@skip_unless_linux
-def test_search_patchelf(expected_patchelf_path):
- current = spack.relocate._patchelf()
- assert current == expected_patchelf_path
-
-
@pytest.mark.parametrize('patchelf_behavior,expected', [
('echo ', []),
('echo /opt/foo/lib:/opt/foo/lib64', ['/opt/foo/lib', '/opt/foo/lib64']),
diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py
index b337486e7f..0c6292af9d 100644
--- a/lib/spack/spack/test/stage.py
+++ b/lib/spack/spack/test/stage.py
@@ -10,7 +10,6 @@ import getpass
import os
import shutil
import stat
-import tempfile
import pytest
@@ -825,29 +824,6 @@ class TestStage(object):
assert os.path.exists(test_path)
shutil.rmtree(test_path)
- def test_get_stage_root_in_spack(self, clear_stage_root):
- """Ensure an instance path is an accessible build stage path."""
- base = canonicalize_path(os.path.join('$spack', '.spack-test-stage'))
- mkdirp(base)
- test_path = tempfile.mkdtemp(dir=base)
-
- try:
- with spack.config.override('config:build_stage', test_path):
- path = spack.stage.get_stage_root()
-
- assert 'spack' in path.split(os.path.sep)
-
- # Make sure cached stage path value was changed appropriately
- assert spack.stage._stage_root in (
- test_path, os.path.join(test_path, getpass.getuser()))
-
- # Make sure the directory exists
- assert os.path.isdir(spack.stage._stage_root)
-
- finally:
- # Clean up regardless of outcome
- shutil.rmtree(base)
-
def test_stage_constructor_no_fetcher(self):
"""Ensure Stage constructor with no URL or fetch strategy fails."""
with pytest.raises(ValueError):
diff --git a/lib/spack/spack/test/web.py b/lib/spack/spack/test/web.py
index 2ccbf51225..41aa1e6121 100644
--- a/lib/spack/spack/test/web.py
+++ b/lib/spack/spack/test/web.py
@@ -2,9 +2,9 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+import collections
import os
-import ordereddict_backport
import pytest
import llnl.util.tty as tty
@@ -152,7 +152,7 @@ def test_get_header():
# If lookup has to fall back to fuzzy matching and there is more than one
# fuzzy match, the result depends on the internal ordering of the given
# mapping
- headers = ordereddict_backport.OrderedDict()
+ headers = collections.OrderedDict()
headers['Content-type'] = 'text/plain'
headers['contentType'] = 'text/html'
@@ -161,7 +161,7 @@ def test_get_header():
assert(spack.util.web.get_header(headers, 'CONTENT_TYPE') == 'text/html')
# Same as above, but different ordering
- headers = ordereddict_backport.OrderedDict()
+ headers = collections.OrderedDict()
headers['contentType'] = 'text/html'
headers['Content-type'] = 'text/plain'
@@ -246,10 +246,33 @@ class MockS3Client(object):
raise self.ClientError
+def test_gather_s3_information(monkeypatch, capfd):
+ mock_connection_data = {"access_token": "AAAAAAA",
+ "profile": "SPacKDeV",
+ "access_pair": ("SPA", "CK"),
+ "endpoint_url": "https://127.0.0.1:8888"}
+
+ session_args, client_args = spack.util.s3.get_mirror_s3_connection_info(mock_connection_data) # noqa: E501
+
+ # Session args are used to create the S3 Session object
+ assert "aws_session_token" in session_args
+ assert session_args.get("aws_session_token") == "AAAAAAA"
+ assert "aws_access_key_id" in session_args
+ assert session_args.get("aws_access_key_id") == "SPA"
+ assert "aws_secret_access_key" in session_args
+ assert session_args.get("aws_secret_access_key") == "CK"
+ assert "profile_name" in session_args
+ assert session_args.get("profile_name") == "SPacKDeV"
+
+ # In addition to the session object, use the client_args to create the S3
+ # client object
+ assert "endpoint_url" in client_args
+
+
def test_remove_s3_url(monkeypatch, capfd):
fake_s3_url = 's3://my-bucket/subdirectory/mirror'
- def mock_create_s3_session(url):
+ def mock_create_s3_session(url, connection={}):
return MockS3Client()
monkeypatch.setattr(
@@ -269,7 +292,7 @@ def test_remove_s3_url(monkeypatch, capfd):
def test_s3_url_exists(monkeypatch, capfd):
- def mock_create_s3_session(url):
+ def mock_create_s3_session(url, connection={}):
return MockS3Client()
monkeypatch.setattr(
spack.util.s3, 'create_s3_session', mock_create_s3_session)
diff --git a/lib/spack/spack/util/mock_package.py b/lib/spack/spack/util/mock_package.py
index d8b3ec468b..ab7b8439e5 100644
--- a/lib/spack/spack/util/mock_package.py
+++ b/lib/spack/spack/util/mock_package.py
@@ -2,10 +2,8 @@
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
"""Infrastructure used by tests for mocking packages and repos."""
-
-import ordereddict_backport
+import collections
import spack.provider_index
import spack.util.naming
@@ -149,7 +147,7 @@ class MockPackageMultiRepo(object):
MockPackage._repo = self
# set up dependencies
- MockPackage.dependencies = ordereddict_backport.OrderedDict()
+ MockPackage.dependencies = collections.OrderedDict()
for dep, dtype in zip(dependencies, dependency_types):
d = Dependency(MockPackage, Spec(dep.name), type=dtype)
if not conditions or dep.name not in conditions:
diff --git a/lib/spack/spack/util/s3.py b/lib/spack/spack/util/s3.py
index b9b56e0498..82d51eb72e 100644
--- a/lib/spack/spack/util/s3.py
+++ b/lib/spack/spack/util/s3.py
@@ -11,6 +11,15 @@ import spack
import spack.util.url as url_util
+def get_mirror_connection(url, url_type="push"):
+ connection = {}
+ # Try to find a mirror for potential connection information
+ for mirror in spack.mirror.MirrorCollection().values():
+ if "%s://%s" % (url.scheme, url.netloc) == mirror.push_url:
+ connection = mirror.to_dict()[url_type]
+ return connection
+
+
def _parse_s3_endpoint_url(endpoint_url):
if not urllib_parse.urlparse(endpoint_url, scheme='').scheme:
endpoint_url = '://'.join(('https', endpoint_url))
@@ -18,7 +27,31 @@ def _parse_s3_endpoint_url(endpoint_url):
return endpoint_url
-def create_s3_session(url):
+def get_mirror_s3_connection_info(connection):
+ s3_connection = {}
+
+ s3_connection_is_dict = connection and isinstance(connection, dict)
+ if s3_connection_is_dict:
+ if connection.get("access_token"):
+ s3_connection["aws_session_token"] = connection["access_token"]
+ if connection.get("access_pair"):
+ s3_connection["aws_access_key_id"] = connection["access_pair"][0]
+ s3_connection["aws_secret_access_key"] = connection["access_pair"][1]
+ if connection.get("profile"):
+ s3_connection["profile_name"] = connection["profile"]
+
+ s3_client_args = {"use_ssl": spack.config.get('config:verify_ssl')}
+
+ endpoint_url = os.environ.get('S3_ENDPOINT_URL')
+ if endpoint_url:
+ s3_client_args['endpoint_url'] = _parse_s3_endpoint_url(endpoint_url)
+ elif s3_connection_is_dict and connection.get("endpoint_url"):
+ s3_client_args["endpoint_url"] = _parse_s3_endpoint_url(connection["endpoint_url"]) # noqa: E501
+
+ return (s3_connection, s3_client_args)
+
+
+def create_s3_session(url, connection={}):
url = url_util.parse(url)
if url.scheme != 's3':
raise ValueError(
@@ -31,14 +64,9 @@ def create_s3_session(url):
from boto3 import Session
from botocore.exceptions import ClientError
- session = Session()
-
- s3_client_args = {"use_ssl": spack.config.get('config:verify_ssl')}
-
- endpoint_url = os.environ.get('S3_ENDPOINT_URL')
- if endpoint_url:
- s3_client_args['endpoint_url'] = _parse_s3_endpoint_url(endpoint_url)
+ s3_connection, s3_client_args = get_mirror_s3_connection_info(connection)
+ session = Session(**s3_connection)
# if no access credentials provided above, then access anonymously
if not session.get_credentials():
from botocore import UNSIGNED
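A sketch of how a mirror's connection dict is split between the boto3 session and client, mirroring the assertions in test_gather_s3_information earlier in this diff:

import spack.util.s3

connection = {
    'access_token': 'AAAAAAA',
    'access_pair': ('SPA', 'CK'),
    'profile': 'SPacKDeV',
    'endpoint_url': 'https://127.0.0.1:8888',
}
session_args, client_args = spack.util.s3.get_mirror_s3_connection_info(connection)
# session_args: aws_session_token, aws_access_key_id, aws_secret_access_key,
#               profile_name -> fed to boto3.Session(**session_args)
# client_args:  use_ssl plus endpoint_url (S3_ENDPOINT_URL, if set, wins)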
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index a67b6491f8..9812943a79 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -12,13 +12,13 @@
default unordered dict.
"""
+import collections
import ctypes
import re
import sys
from typing import List # novm
import ruamel.yaml as yaml
-from ordereddict_backport import OrderedDict
from ruamel.yaml import RoundTripDumper, RoundTripLoader
from six import StringIO, string_types
@@ -39,7 +39,7 @@ __all__ = ['load', 'dump', 'SpackYAMLError']
# Also, use OrderedDict instead of just dict.
-class syaml_dict(OrderedDict):
+class syaml_dict(collections.OrderedDict):
def __repr__(self):
mappings = ('%r: %r' % (k, v) for k, v in self.items())
return '{%s}' % ', '.join(mappings)
diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index f1b01ae310..2db91b8080 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -193,7 +193,8 @@ def push_to_url(
while remote_path.startswith('/'):
remote_path = remote_path[1:]
- s3 = s3_util.create_s3_session(remote_url)
+ s3 = s3_util.create_s3_session(remote_url,
+ connection=s3_util.get_mirror_connection(remote_url)) # noqa: E501
s3.upload_file(local_file_path, remote_url.netloc,
remote_path, ExtraArgs=extra_args)
@@ -219,7 +220,9 @@ def url_exists(url):
return os.path.exists(local_path)
if url.scheme == 's3':
- s3 = s3_util.create_s3_session(url)
+ # Check for URL-specific connection information
+ s3 = s3_util.create_s3_session(url, connection=s3_util.get_mirror_connection(url)) # noqa: E501
+
try:
s3.get_object(Bucket=url.netloc, Key=url.path.lstrip('/'))
return True
@@ -263,7 +266,8 @@ def remove_url(url, recursive=False):
return
if url.scheme == 's3':
- s3 = s3_util.create_s3_session(url)
+ # Try to find a mirror for potential connection information
+ s3 = s3_util.create_s3_session(url, connection=s3_util.get_mirror_connection(url)) # noqa: E501
bucket = url.netloc
if recursive:
# Because list_objects_v2 can only return up to 1000 items