From 6f1fe3904cdda29cb6a373be7d1b3aac2adbc753 Mon Sep 17 00:00:00 2001
From: Justin S <3630356+codeandkey@users.noreply.github.com>
Date: Fri, 19 Apr 2019 19:39:13 -0500
Subject: Fix outdated R packages failing to fetch (#11039)

PR #10758 made a slight change to find_versions_of_archive(), which
included archive_url in the search process. While this fixed `spack
create` and `spack checksum` when command-line arguments were missing,
it caused `spack install` to prefer those URLs over the ones found by
the scrape process. As a result, the package's url was treated as a
list_url, causing all R packages to stop fetching once the package was
updated on CRAN.

This patch is more selective about including the archive_url in the
remote versions: the archive_url is still considered, but it is
explicitly overridden by any matching versions found by the scraper.
---
 lib/spack/spack/util/web.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/lib/spack/spack/util/web.py b/lib/spack/spack/util/web.py
index 959d03781e..99078b203a 100644
--- a/lib/spack/spack/util/web.py
+++ b/lib/spack/spack/util/web.py
@@ -304,9 +304,8 @@ def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
         list_urls.update(additional_list_urls)
 
     # Grab some web pages to scrape.
-    # Start with any links already given.
     pages = {}
-    links = set(archive_urls)
+    links = set()
     for lurl in list_urls:
         pg, lnk = spider(lurl, depth=list_depth)
         pages.update(pg)
@@ -345,8 +344,10 @@ def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
         regexes.append(url_regex)
 
     # Build a dict version -> URL from any links that match the wildcards.
+    # Walk through archive_url links first.
+    # Any conflicting versions will be overwritten by the list_url links.
     versions = {}
-    for url in sorted(links):
+    for url in archive_urls + sorted(links):
         if any(re.search(r, url) for r in regexes):
             try:
                 ver = spack.url.parse_version(url)
--
cgit v1.2.3-70-g09d2
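
The fix relies on ordinary Python dict semantics: when the same version
key is assigned twice, the later assignment wins. Iterating over
archive_urls before the scraped links therefore implements the
"scraper wins" policy with no extra bookkeeping. Below is a minimal
standalone sketch of that behavior, not Spack's actual code:
parse_version and the URLs are made-up stand-ins for
spack.url.parse_version and real mirror links.

    import re

    def parse_version(url):
        # Hypothetical stand-in for spack.url.parse_version():
        # pull a dotted version string out of a URL, or return None.
        m = re.search(r'(\d+(?:\.\d+)+)', url)
        return m.group(1) if m else None

    # The package's own url (archive_url) and links scraped off list_url.
    archive_urls = ['https://cloud.example.org/src/pkg-1.0.tar.gz']
    scraped_links = ['https://cloud.example.org/old/pkg-1.0.tar.gz',
                     'https://cloud.example.org/src/pkg-1.1.tar.gz']

    versions = {}
    for url in archive_urls + sorted(scraped_links):
        ver = parse_version(url)
        if ver is not None:
            versions[ver] = url  # a scraped link overwrites the archive_url

    print(versions)

Here version 1.0 appears in both lists, and the scraped 'old/' location
wins; that is what lets an outdated R package keep fetching after CRAN
moves its tarball to the archive directory.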