From c49508648a5c4d36fe3985b1e4eb39b4cfa3b128 Mon Sep 17 00:00:00 2001
From: Dom Heinzeller
Date: Mon, 9 May 2022 19:35:17 +0200
Subject: Get timeout for web requests with urllib from spack config, same as
 for curl (#30468)

---
 lib/spack/spack/util/web.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index 6896c308cf..4bd0d92586 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -49,9 +49,6 @@ else:
     class HTMLParseError(Exception):
         pass
 
-# Timeout in seconds for web requests
-_timeout = 10
-
 
 class LinkParser(HTMLParser):
     """This parser just takes an HTML page and strips out the hrefs on the
@@ -100,6 +97,9 @@ def read_from_url(url, accept_content_type=None):
 
     verify_ssl = spack.config.get('config:verify_ssl')
 
+    # Timeout in seconds for web requests
+    timeout = spack.config.get('config:connect_timeout', 10)
+
     # Don't even bother with a context unless the URL scheme is one that uses
     # SSL certs.
     if uses_ssl(url):
@@ -131,7 +131,7 @@ def read_from_url(url, accept_content_type=None):
         # one round-trip.  However, most servers seem to ignore the header
         # if you ask for a tarball with Accept: text/html.
         req.get_method = lambda: "HEAD"
-        resp = _urlopen(req, timeout=_timeout, context=context)
+        resp = _urlopen(req, timeout=timeout, context=context)
 
         content_type = get_header(resp.headers, 'Content-type')
 
@@ -139,7 +139,7 @@ def read_from_url(url, accept_content_type=None):
     req.get_method = lambda: "GET"
 
     try:
-        response = _urlopen(req, timeout=_timeout, context=context)
+        response = _urlopen(req, timeout=timeout, context=context)
     except URLError as err:
         raise SpackWebError('Download failed: {ERROR}'.format(
             ERROR=str(err)))
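
For readers outside the Spack codebase, the sketch below shows the pattern this
patch introduces in isolation: look up a timeout in a config store with a
fallback default, then pass it to urllib instead of a hard-coded module-level
constant. The _config dict and the config_get/fetch_headers helpers are
hypothetical stand-ins for illustration; only the 'config:connect_timeout' key,
the 10-second fallback, and the timeout argument to urlopen come from the patch
itself.

    from urllib.error import URLError
    from urllib.request import Request, urlopen

    # Hypothetical stand-in for spack.config: a flat mapping keyed 'section:option'.
    _config = {'config:connect_timeout': 30}


    def config_get(key, default=None):
        """Return a configured value, or the given default when the key is unset."""
        return _config.get(key, default)


    def fetch_headers(url):
        """HEAD-request a URL, honoring the configured connect timeout."""
        # Same lookup the patch adds: prefer config:connect_timeout, and fall
        # back to the previously hard-coded 10 seconds when it is not set.
        timeout = config_get('config:connect_timeout', 10)

        req = Request(url)
        # Override the method the same way web.py does, so only headers come back.
        req.get_method = lambda: "HEAD"
        try:
            return urlopen(req, timeout=timeout)
        except URLError as err:
            raise RuntimeError('Download failed: {0}'.format(err))


    resp = fetch_headers('https://example.com/')
    print(resp.headers.get('Content-type'))

The upshot of the change: an operator can raise the timeout for slow mirrors by
setting config:connect_timeout, matching what was already possible for curl,
while the old 10-second behavior is preserved as the default.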