author     Adam J. Stewart <ajstewart426@gmail.com>   2017-03-31 20:19:18 -0500
committer  Todd Gamblin <tgamblin@llnl.gov>           2017-04-01 15:10:45 -0700
commit     28d6d375b46273779e377b844ddb80739f393514 (patch)
tree       61b26e9fe2e731d1db39ff363b0014d12db931bb
parent     b67e2db159cf6a572ea8059ad1f6e57854639b23 (diff)
Fix Python 3 support in spack versions
- Add missing import; fixes `spack versions` in Python 2
- Fix `spack versions` in Python 3
-rw-r--r--  lib/spack/spack/url.py       2
-rw-r--r--  lib/spack/spack/util/web.py  7
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/lib/spack/spack/url.py b/lib/spack/spack/url.py
index 7502f23994..38ff74f7bc 100644
--- a/lib/spack/spack/url.py
+++ b/lib/spack/spack/url.py
@@ -513,7 +513,7 @@ def wildcard_version(path):
     name_parts = re.split(name_re, path)
 
     # Even elements in the array did *not* match the name
-    for i in xrange(0, len(name_parts), 2):
+    for i in range(0, len(name_parts), 2):
         # Split each part by things that look like versions.
         vparts = re.split(v.wildcard(), name_parts[i])
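
Why this hunk works on both interpreters: xrange() was removed in Python 3,
while range() exists on both (on Python 2 it builds a list rather than a lazy
range, which is harmless for stepping over a short list of name parts). A
minimal sketch of the difference, not part of the commit; the sample list is
hypothetical:

    # Python 3: 'xrange' raises NameError; range() works on both versions.
    name_parts = ['gmp-', '6.1.2', '.tar.bz2']  # hypothetical re.split() result
    for i in range(0, len(name_parts), 2):
        print(name_parts[i])  # even indices: 'gmp-' and '.tar.bz2'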
diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index 0d7d0d3792..09bf2c34e1 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -28,6 +28,7 @@ import sys
 
 from six.moves.urllib.request import urlopen, Request
 from six.moves.urllib.error import URLError
+from six.moves.urllib.parse import urljoin
 from multiprocessing import Pool
 
 try:
@@ -38,7 +39,7 @@ except ImportError:
     from html.parser import HTMLParser
 
     # Also, HTMLParseError is deprecated and never raised.
-    class HTMLParseError:
+    class HTMLParseError(Exception):
         pass
 
 import llnl.util.tty as tty
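
Background for this change: Python 3 only allows raising and catching classes
that derive from BaseException, so the bare class HTMLParseError could be
defined but never actually raised or caught. A small illustration of the rule,
assuming a throwaway class name:

    # Illustration only, not from the commit.
    class BareError:  # does not inherit from Exception
        pass

    try:
        raise BareError()
    except TypeError as e:
        print(e)  # Python 3: "exceptions must derive from BaseException"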
@@ -110,7 +111,7 @@ def _spider(args):
         response_url = response.geturl()
 
         # Read the page and stick it in the map we'll return
-        page = response.read()
+        page = response.read().decode('utf-8')
         pages[response_url] = page
 
         # Parse out the links in the page
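
Background for this hunk: on Python 3, urlopen(...).read() returns bytes,
while the HTMLParser-based link parser downstream expects str; the patch
decodes the page up front, assuming UTF-8 (a sensible default for HTML
directory listings). A sketch of the behavior, with a placeholder URL:

    from six.moves.urllib.request import urlopen

    response = urlopen('https://example.com/')  # placeholder URL
    raw = response.read()       # bytes on Python 3, str on Python 2
    page = raw.decode('utf-8')  # text on both, safe to feed to HTMLParser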
@@ -120,7 +121,7 @@ def _spider(args):
 
         while link_parser.links:
             raw_link = link_parser.links.pop()
-            abs_link = urlparse.urljoin(response_url, raw_link.strip())
+            abs_link = urljoin(response_url, raw_link.strip())
             links.add(abs_link)
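
Why the change above is needed: Python 3 folded the urlparse module into
urllib.parse, so the old urlparse.urljoin reference fails there; the six.moves
import added earlier resolves to the right module on both interpreters. A
usage sketch with a made-up base URL:

    from six.moves.urllib.parse import urljoin

    # Relative links scraped from a page resolve against the page's own URL.
    abs_link = urljoin('https://example.com/pub/', '../gmp-6.1.2.tar.bz2')
    print(abs_link)  # https://example.com/gmp-6.1.2.tar.bz2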