author     Harmen Stoppels <me@harmenstoppels.nl>  2024-08-14 17:19:45 +0200
committer  GitHub <noreply@github.com>             2024-08-14 17:19:45 +0200
commit     29b50527a614862727e4a62f23379d8a13a62ee7 (patch)
tree       dc6e995e9b5caa7adfc7b65ec02db860d6baeabf
parent     94961ffe0a0c12dab154a435a74c8dc9ac1054a8 (diff)
spack buildcache push: parallel in general (#45682)
Make spack buildcache push parallel for the non-OCI case as well, and make --update-index more efficient.
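
For context, the non-OCI push now reuses the pattern the OCI path already followed: each spec's tarball/specfile upload is submitted to a concurrent executor, and failures are collected per spec instead of aborting the whole push. A minimal sketch of that pattern follows; the upload_one callable and the stdlib ThreadPoolExecutor are illustrative stand-ins, not the exact helpers used in binary_distribution.py (which obtains its executor from spack.util.parallel.make_concurrent_executor()).

import concurrent.futures
from typing import Callable, List, Tuple

def parallel_push(
    items: List[str],
    upload_one: Callable[[str], None],  # hypothetical per-item upload function
    max_workers: int = 8,
) -> Tuple[List[str], List[Tuple[str, BaseException]]]:
    """Upload items concurrently; collect successes and per-item errors
    rather than raising on the first failure."""
    pushed: List[str] = []
    errors: List[Tuple[str, BaseException]] = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(upload_one, item) for item in items]
        for item, future in zip(items, futures):
            # exception() waits for the future to finish; it returns None on success
            error = future.exception()
            if error is None:
                pushed.append(item)
            else:
                errors.append((item, error))
    return pushed, errors

In the diff below, _push() applies this shape to specs (with FancyProgress handling terminal output), and push_or_raise() turns the collected errors into a single PushToBuildCacheError.
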
-rw-r--r--  lib/spack/spack/binary_distribution.py        | 946
-rw-r--r--  lib/spack/spack/ci.py                         |  23
-rw-r--r--  lib/spack/spack/cmd/buildcache.py             | 512
-rw-r--r--  lib/spack/spack/cmd/gpg.py                    |  10
-rw-r--r--  lib/spack/spack/hooks/autopush.py             |   7
-rw-r--r--  lib/spack/spack/oci/oci.py                    |  11
-rw-r--r--  lib/spack/spack/test/bindist.py               |  23
-rw-r--r--  lib/spack/spack/test/build_distribution.py    |  56
-rw-r--r--  lib/spack/spack/test/ci.py                    |   2
-rw-r--r--  lib/spack/spack/test/cmd/buildcache.py        |  13
-rw-r--r--  lib/spack/spack/test/cmd/ci.py                |  10
-rw-r--r--  lib/spack/spack/test/install.py               |   4
-rw-r--r--  lib/spack/spack/test/oci/integration_test.py  |   9
-rw-r--r--  lib/spack/spack/util/gpg.py                   |  10
-rw-r--r--  share/spack/spack-completion.bash             |   2
-rw-r--r--  share/spack/spack-completion.fish             |   6
16 files changed, 837 insertions, 807 deletions
diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py
index 26b6396f26..adcc5f9b5c 100644
--- a/lib/spack/spack/binary_distribution.py
+++ b/lib/spack/spack/binary_distribution.py
@@ -5,6 +5,9 @@
import codecs
import collections
+import concurrent.futures
+import contextlib
+import copy
import hashlib
import io
import itertools
@@ -22,7 +25,7 @@ import urllib.parse
import urllib.request
import warnings
from contextlib import closing
-from typing import Dict, Iterable, NamedTuple, Optional, Set, Tuple
+from typing import Dict, Generator, Iterable, List, NamedTuple, Optional, Set, Tuple, Union
import llnl.util.filesystem as fsys
import llnl.util.lang
@@ -35,6 +38,7 @@ import spack.cmd
import spack.config as config
import spack.database as spack_db
import spack.error
+import spack.hash_types as ht
import spack.hooks
import spack.hooks.sbang
import spack.mirror
@@ -44,19 +48,39 @@ import spack.oci.opener
import spack.platforms
import spack.relocate as relocate
import spack.repo
+import spack.spec
import spack.stage
import spack.store
+import spack.user_environment
import spack.util.archive
import spack.util.crypto
import spack.util.file_cache as file_cache
import spack.util.gpg
+import spack.util.parallel
import spack.util.path
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.timer as timer
import spack.util.url as url_util
import spack.util.web as web_util
+from spack import traverse
from spack.caches import misc_cache_location
+from spack.oci.image import (
+ Digest,
+ ImageReference,
+ default_config,
+ default_index_tag,
+ default_manifest,
+ default_tag,
+ tag_is_spec,
+)
+from spack.oci.oci import (
+ copy_missing_layers_with_retry,
+ get_manifest_and_config_with_retry,
+ list_tags,
+ upload_blob_with_retry,
+ upload_manifest_with_retry,
+)
from spack.package_prefs import get_package_dir_permissions, get_package_group
from spack.relocate_text import utf8_paths_to_single_binary_regex
from spack.spec import Spec
@@ -744,34 +768,25 @@ def tarball_path_name(spec, ext):
return os.path.join(tarball_directory_name(spec), tarball_name(spec, ext))
-def select_signing_key(key=None):
- if key is None:
- keys = spack.util.gpg.signing_keys()
- if len(keys) == 1:
- key = keys[0]
-
- if len(keys) > 1:
- raise PickKeyException(str(keys))
-
- if len(keys) == 0:
- raise NoKeyException(
- "No default key available for signing.\n"
- "Use spack gpg init and spack gpg create"
- " to create a default key."
- )
- return key
-
+def select_signing_key() -> str:
+ keys = spack.util.gpg.signing_keys()
+ num = len(keys)
+ if num > 1:
+ raise PickKeyException(str(keys))
+ elif num == 0:
+ raise NoKeyException(
+ "No default key available for signing.\n"
+ "Use spack gpg init and spack gpg create"
+ " to create a default key."
+ )
+ return keys[0]
-def sign_specfile(key, force, specfile_path):
- signed_specfile_path = "%s.sig" % specfile_path
- if os.path.exists(signed_specfile_path):
- if force:
- os.remove(signed_specfile_path)
- else:
- raise NoOverwriteException(signed_specfile_path)
- key = select_signing_key(key)
+def sign_specfile(key: str, specfile_path: str) -> str:
+ """sign and return the path to the signed specfile"""
+ signed_specfile_path = f"{specfile_path}.sig"
spack.util.gpg.sign(key, specfile_path, signed_specfile_path, clearsign=True)
+ return signed_specfile_path
def _read_specs_and_push_index(file_list, read_method, cache_prefix, db, temp_dir, concurrency):
@@ -879,7 +894,7 @@ def _specs_from_cache_aws_cli(cache_prefix):
return file_list, read_fn
-def _specs_from_cache_fallback(cache_prefix):
+def _specs_from_cache_fallback(url: str):
"""Use spack.util.web module to get a list of all the specs at the remote url.
Args:
@@ -903,25 +918,25 @@ def _specs_from_cache_fallback(cache_prefix):
try:
file_list = [
- url_util.join(cache_prefix, entry)
- for entry in web_util.list_url(cache_prefix)
+ url_util.join(url, entry)
+ for entry in web_util.list_url(url)
if entry.endswith("spec.json") or entry.endswith("spec.json.sig")
]
read_fn = url_read_method
except Exception as err:
# If we got some kind of S3 (access denied or other connection error), the first non
# boto-specific class in the exception is Exception. Just print a warning and return
- tty.warn(f"Encountered problem listing packages at {cache_prefix}: {err}")
+ tty.warn(f"Encountered problem listing packages at {url}: {err}")
return file_list, read_fn
-def _spec_files_from_cache(cache_prefix):
+def _spec_files_from_cache(url: str):
"""Get a list of all the spec files in the mirror and a function to
read them.
Args:
- cache_prefix (str): Base url of mirror (location of spec files)
+ url: Base url of mirror (location of spec files)
Return:
A tuple where the first item is a list of absolute file paths or
@@ -930,56 +945,49 @@ def _spec_files_from_cache(cache_prefix):
returning the spec read from that location.
"""
callbacks = []
- if cache_prefix.startswith("s3"):
+ if url.startswith("s3://"):
callbacks.append(_specs_from_cache_aws_cli)
callbacks.append(_specs_from_cache_fallback)
for specs_from_cache_fn in callbacks:
- file_list, read_fn = specs_from_cache_fn(cache_prefix)
+ file_list, read_fn = specs_from_cache_fn(url)
if file_list:
return file_list, read_fn
- raise ListMirrorSpecsError("Failed to get list of specs from {0}".format(cache_prefix))
+ raise ListMirrorSpecsError("Failed to get list of specs from {0}".format(url))
-def generate_package_index(cache_prefix, concurrency=32):
+def generate_package_index(url: str, tmpdir: str, concurrency: int = 32):
"""Create or replace the build cache index on the given mirror. The
buildcache index contains an entry for each binary package under the
cache_prefix.
Args:
- cache_prefix(str): Base url of binary mirror.
- concurrency: (int): The desired threading concurrency to use when
- fetching the spec files from the mirror.
+ url: Base url of binary mirror.
+ concurrency: The desired threading concurrency to use when fetching the spec files from
+ the mirror.
Return:
None
"""
+ url = url_util.join(url, build_cache_relative_path())
try:
- file_list, read_fn = _spec_files_from_cache(cache_prefix)
+ file_list, read_fn = _spec_files_from_cache(url)
except ListMirrorSpecsError as e:
raise GenerateIndexError(f"Unable to generate package index: {e}") from e
- tty.debug(f"Retrieving spec descriptor files from {cache_prefix} to build index")
-
- tmpdir = tempfile.mkdtemp()
+ tty.debug(f"Retrieving spec descriptor files from {url} to build index")
db = BuildCacheDatabase(tmpdir)
- db.root = None
- db_root_dir = db.database_directory
try:
- _read_specs_and_push_index(file_list, read_fn, cache_prefix, db, db_root_dir, concurrency)
+ _read_specs_and_push_index(file_list, read_fn, url, db, db.database_directory, concurrency)
except Exception as e:
- raise GenerateIndexError(
- f"Encountered problem pushing package index to {cache_prefix}: {e}"
- ) from e
- finally:
- shutil.rmtree(tmpdir, ignore_errors=True)
+ raise GenerateIndexError(f"Encountered problem pushing package index to {url}: {e}") from e
-def generate_key_index(key_prefix, tmpdir=None):
+def generate_key_index(key_prefix: str, tmpdir: str) -> None:
"""Create the key index page.
Creates (or replaces) the "index.json" page at the location given in key_prefix. This page
@@ -997,36 +1005,23 @@ def generate_key_index(key_prefix, tmpdir=None):
except Exception as e:
raise CannotListKeys(f"Encountered problem listing keys at {key_prefix}: {e}") from e
- remove_tmpdir = False
-
- keys_local = url_util.local_file_path(key_prefix)
- if keys_local:
- target = os.path.join(keys_local, "index.json")
- else:
- if not tmpdir:
- tmpdir = tempfile.mkdtemp()
- remove_tmpdir = True
- target = os.path.join(tmpdir, "index.json")
+ target = os.path.join(tmpdir, "index.json")
index = {"keys": dict((fingerprint, {}) for fingerprint in sorted(set(fingerprints)))}
with open(target, "w") as f:
sjson.dump(index, f)
- if not keys_local:
- try:
- web_util.push_to_url(
- target,
- url_util.join(key_prefix, "index.json"),
- keep_original=False,
- extra_args={"ContentType": "application/json"},
- )
- except Exception as e:
- raise GenerateIndexError(
- f"Encountered problem pushing key index to {key_prefix}: {e}"
- ) from e
- finally:
- if remove_tmpdir:
- shutil.rmtree(tmpdir, ignore_errors=True)
+ try:
+ web_util.push_to_url(
+ target,
+ url_util.join(key_prefix, "index.json"),
+ keep_original=False,
+ extra_args={"ContentType": "application/json"},
+ )
+ except Exception as e:
+ raise GenerateIndexError(
+ f"Encountered problem pushing key index to {key_prefix}: {e}"
+ ) from e
def tarfile_of_spec_prefix(tar: tarfile.TarFile, prefix: str) -> None:
@@ -1077,127 +1072,653 @@ def _do_create_tarball(tarfile_path: str, binaries_dir: str, buildinfo: dict):
return inner_checksum.hexdigest(), outer_checksum.hexdigest()
-class PushOptions(NamedTuple):
- #: Overwrite existing tarball/metadata files in buildcache
- force: bool = False
+class ExistsInBuildcache(NamedTuple):
+ signed: bool
+ unsigned: bool
+ tarball: bool
- #: Regenerated indices after pushing
- regenerate_index: bool = False
- #: Whether to sign or not.
- unsigned: bool = False
+class BuildcacheFiles:
+ def __init__(self, spec: Spec, local: str, remote: str):
+ """
+ Args:
+ spec: The spec whose tarball and specfile are being managed.
+ local: The local path to the buildcache.
+ remote: The remote URL to the buildcache.
+ """
+ self.local = local
+ self.remote = remote
+ self.spec = spec
+
+ def remote_specfile(self, signed: bool) -> str:
+ return url_util.join(
+ self.remote,
+ build_cache_relative_path(),
+ tarball_name(self.spec, ".spec.json.sig" if signed else ".spec.json"),
+ )
- #: What key to use for signing
- key: Optional[str] = None
+ def remote_tarball(self) -> str:
+ return url_util.join(
+ self.remote, build_cache_relative_path(), tarball_path_name(self.spec, ".spack")
+ )
+ def local_specfile(self) -> str:
+ return os.path.join(self.local, f"{self.spec.dag_hash()}.spec.json")
-def push_or_raise(spec: Spec, out_url: str, options: PushOptions):
- """
- Build a tarball from given spec and put it into the directory structure
- used at the mirror (following <tarball_directory_name>).
+ def local_tarball(self) -> str:
+ return os.path.join(self.local, f"{self.spec.dag_hash()}.tar.gz")
- This method raises :py:class:`NoOverwriteException` when ``force=False`` and the tarball or
- spec.json file already exist in the buildcache. It raises :py:class:`PushToBuildCacheError`
- when the tarball or spec.json file cannot be pushed to the buildcache.
- """
- if not spec.concrete:
- raise ValueError("spec must be concrete to build tarball")
- with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
- _build_tarball_in_stage_dir(spec, out_url, stage_dir=tmpdir, options=options)
+def _exists_in_buildcache(spec: Spec, tmpdir: str, out_url: str) -> ExistsInBuildcache:
+ """returns a tuple of bools (signed, unsigned, tarball) indicating whether specfiles/tarballs
+ exist in the buildcache"""
+ files = BuildcacheFiles(spec, tmpdir, out_url)
+ signed = web_util.url_exists(files.remote_specfile(signed=True))
+ unsigned = web_util.url_exists(files.remote_specfile(signed=False))
+ tarball = web_util.url_exists(files.remote_tarball())
+ return ExistsInBuildcache(signed, unsigned, tarball)
-def _build_tarball_in_stage_dir(spec: Spec, out_url: str, stage_dir: str, options: PushOptions):
- cache_prefix = build_cache_prefix(stage_dir)
- tarfile_name = tarball_name(spec, ".spack")
- tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
- tarfile_path = os.path.join(tarfile_dir, tarfile_name)
- spackfile_path = os.path.join(cache_prefix, tarball_path_name(spec, ".spack"))
- remote_spackfile_path = url_util.join(out_url, os.path.relpath(spackfile_path, stage_dir))
+def _upload_tarball_and_specfile(
+ spec: Spec, tmpdir: str, out_url: str, exists: ExistsInBuildcache, signing_key: Optional[str]
+):
+ files = BuildcacheFiles(spec, tmpdir, out_url)
+ tarball = files.local_tarball()
+ checksum, _ = _do_create_tarball(tarball, spec.prefix, get_buildinfo_dict(spec))
+ spec_dict = spec.to_dict(hash=ht.dag_hash)
+ spec_dict["buildcache_layout_version"] = CURRENT_BUILD_CACHE_LAYOUT_VERSION
+ spec_dict["binary_cache_checksum"] = {"hash_algorithm": "sha256", "hash": checksum}
- mkdirp(tarfile_dir)
- if web_util.url_exists(remote_spackfile_path):
- if options.force:
- web_util.remove_url(remote_spackfile_path)
- else:
- raise NoOverwriteException(url_util.format(remote_spackfile_path))
+ if exists.tarball:
+ web_util.remove_url(files.remote_tarball())
+ if exists.signed:
+ web_util.remove_url(files.remote_specfile(signed=True))
+ if exists.unsigned:
+ web_util.remove_url(files.remote_specfile(signed=False))
+ web_util.push_to_url(tarball, files.remote_tarball(), keep_original=False)
- # need to copy the spec file so the build cache can be downloaded
- # without concretizing with the current spack packages
- # and preferences
+ specfile = files.local_specfile()
+ with open(specfile, "w") as f:
+ # Note: when using gpg clear sign, we need to avoid long lines (19995 chars).
+ # If lines are longer, they are truncated without error. Thanks GPG!
+ # So, here we still add newlines, but no indent, so save on file size and
+ # line length.
+ json.dump(spec_dict, f, indent=0, separators=(",", ":"))
- spec_file = spack.store.STORE.layout.spec_file_path(spec)
- specfile_name = tarball_name(spec, ".spec.json")
- specfile_path = os.path.realpath(os.path.join(cache_prefix, specfile_name))
- signed_specfile_path = "{0}.sig".format(specfile_path)
+ # sign the tarball and spec file with gpg
+ if signing_key:
+ specfile = sign_specfile(signing_key, specfile)
- remote_specfile_path = url_util.join(
- out_url, os.path.relpath(specfile_path, os.path.realpath(stage_dir))
+ web_util.push_to_url(
+ specfile, files.remote_specfile(signed=bool(signing_key)), keep_original=False
)
- remote_signed_specfile_path = "{0}.sig".format(remote_specfile_path)
-
- # If force and exists, overwrite. Otherwise raise exception on collision.
- if options.force:
- if web_util.url_exists(remote_specfile_path):
- web_util.remove_url(remote_specfile_path)
- if web_util.url_exists(remote_signed_specfile_path):
- web_util.remove_url(remote_signed_specfile_path)
- elif web_util.url_exists(remote_specfile_path) or web_util.url_exists(
- remote_signed_specfile_path
- ):
- raise NoOverwriteException(url_util.format(remote_specfile_path))
- binaries_dir = spec.prefix
- # create info for later relocation and create tar
- buildinfo = get_buildinfo_dict(spec)
+def _format_spec(spec: Spec) -> str:
+ return spec.cformat("{name}{@version}{/hash:7}")
+
+
+@contextlib.contextmanager
+def default_push_context() -> Generator[Tuple[str, concurrent.futures.Executor], None, None]:
+ with tempfile.TemporaryDirectory(
+ dir=spack.stage.get_stage_root()
+ ) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
+ yield tmpdir, executor
- checksum, _ = _do_create_tarball(tarfile_path, binaries_dir, buildinfo)
- # add sha256 checksum to spec.json
- with open(spec_file, "r") as inputfile:
- content = inputfile.read()
- if spec_file.endswith(".json"):
- spec_dict = sjson.load(content)
+def push_or_raise(
+ specs: List[Spec],
+ out_url: str,
+ signing_key: Optional[str],
+ force: bool = False,
+ update_index: bool = False,
+) -> List[Spec]:
+ """Same as push, but raises an exception on error. Returns a list of skipped specs already
+ present in the build cache when force=False."""
+ skipped, errors = push(specs, out_url, signing_key, force, update_index)
+ if errors:
+ raise PushToBuildCacheError(
+ f"Failed to push {len(errors)} specs to {out_url}:\n"
+ + "\n".join(f"Failed to push {_format_spec(spec)}: {error}" for spec, error in errors)
+ )
+ return skipped
+
+
+def push(
+ specs: List[Spec],
+ out_url: str,
+ signing_key: Optional[str],
+ force: bool = False,
+ update_index: bool = False,
+) -> Tuple[List[Spec], List[Tuple[Spec, BaseException]]]:
+ """Pushes to the provided build cache, and returns a list of skipped specs that were already
+ present (when force=False). Does not raise on error."""
+ with default_push_context() as (tmpdir, executor):
+ return _push(specs, out_url, signing_key, force, update_index, tmpdir, executor)
+
+
+class FancyProgress:
+ def __init__(self, total: int):
+ self.n = 0
+ self.total = total
+ self.running = False
+ self.enable = sys.stdout.isatty()
+ self.pretty_spec: str = ""
+ self.pre = ""
+
+ def _clear(self):
+ if self.enable and self.running:
+ sys.stdout.write("\033[F\033[K")
+
+ def _progress(self):
+ if self.total > 1:
+ digits = len(str(self.total))
+ return f"[{self.n:{digits}}/{self.total}] "
+ return ""
+
+ def start(self, spec: Spec, running: bool) -> None:
+ self.n += 1
+ self.running = running
+ self.pre = self._progress()
+ self.pretty_spec = _format_spec(spec)
+ if self.enable and self.running:
+ tty.info(f"{self.pre}Pushing {self.pretty_spec}...")
+
+ def ok(self, msg: Optional[str] = None) -> None:
+ self._clear()
+ msg = msg or f"Pushed {self.pretty_spec}"
+ tty.info(f"{self.pre}{msg}")
+
+ def fail(self) -> None:
+ self._clear()
+ tty.info(f"{self.pre}Failed to push {self.pretty_spec}")
+
+
+def _push(
+ specs: List[Spec],
+ out_url: str,
+ signing_key: Optional[str],
+ force: bool,
+ update_index: bool,
+ tmpdir: str,
+ executor: concurrent.futures.Executor,
+) -> Tuple[List[Spec], List[Tuple[Spec, BaseException]]]:
+ """Pushes to the provided build cache, and returns a list of skipped specs that were already
+ present (when force=False), and a list of errors. Does not raise on error."""
+ skipped: List[Spec] = []
+ errors: List[Tuple[Spec, BaseException]] = []
+
+ exists_futures = [
+ executor.submit(_exists_in_buildcache, spec, tmpdir, out_url) for spec in specs
+ ]
+
+ exists = {
+ spec.dag_hash(): exists_future.result()
+ for spec, exists_future in zip(specs, exists_futures)
+ }
+
+ if not force:
+ specs_to_upload = []
+
+ for spec in specs:
+ signed, unsigned, tarball = exists[spec.dag_hash()]
+ if (signed or unsigned) and tarball:
+ skipped.append(spec)
+ else:
+ specs_to_upload.append(spec)
+ else:
+ specs_to_upload = specs
+
+ if not specs_to_upload:
+ return skipped, errors
+
+ total = len(specs_to_upload)
+
+ if total != len(specs):
+ tty.info(f"{total} specs need to be pushed to {out_url}")
+
+ upload_futures = [
+ executor.submit(
+ _upload_tarball_and_specfile,
+ spec,
+ tmpdir,
+ out_url,
+ exists[spec.dag_hash()],
+ signing_key,
+ )
+ for spec in specs_to_upload
+ ]
+
+ uploaded_any = False
+ fancy_progress = FancyProgress(total)
+
+ for spec, upload_future in zip(specs_to_upload, upload_futures):
+ fancy_progress.start(spec, upload_future.running())
+ error = upload_future.exception()
+ if error is None:
+ uploaded_any = True
+ fancy_progress.ok()
else:
- raise ValueError("{0} not a valid spec file type".format(spec_file))
- spec_dict["buildcache_layout_version"] = CURRENT_BUILD_CACHE_LAYOUT_VERSION
- spec_dict["binary_cache_checksum"] = {"hash_algorithm": "sha256", "hash": checksum}
+ fancy_progress.fail()
+ errors.append((spec, error))
- with open(specfile_path, "w") as outfile:
- # Note: when using gpg clear sign, we need to avoid long lines (19995 chars).
- # If lines are longer, they are truncated without error. Thanks GPG!
- # So, here we still add newlines, but no indent, so save on file size and
- # line length.
- json.dump(spec_dict, outfile, indent=0, separators=(",", ":"))
+ # don't bother pushing keys / index if all failed to upload
+ if not uploaded_any:
+ return skipped, errors
+
+ if signing_key:
+ keys_tmpdir = os.path.join(tmpdir, "keys")
+ os.mkdir(keys_tmpdir)
+ push_keys(out_url, keys=[signing_key], update_index=update_index, tmpdir=keys_tmpdir)
+
+ if update_index:
+ index_tmpdir = os.path.join(tmpdir, "index")
+ os.mkdir(index_tmpdir)
+ generate_package_index(out_url, index_tmpdir)
+
+ return skipped, errors
+
+
+def _oci_upload_success_msg(spec: Spec, digest: Digest, size: int, elapsed: float):
+ elapsed = max(elapsed, 0.001) # guard against division by zero
+ return (
+ f"Pushed {_format_spec(spec)}: {digest} ({elapsed:.2f}s, "
+ f"{size / elapsed / 1024 / 1024:.2f} MB/s)"
+ )
- # sign the tarball and spec file with gpg
- if not options.unsigned:
- key = select_signing_key(options.key)
- sign_specfile(key, options.force, specfile_path)
+def _oci_get_blob_info(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
+ """Get the spack tarball layer digests and size if it exists"""
try:
- # push tarball and signed spec json to remote mirror
- web_util.push_to_url(spackfile_path, remote_spackfile_path, keep_original=False)
- web_util.push_to_url(
- signed_specfile_path if not options.unsigned else specfile_path,
- remote_signed_specfile_path if not options.unsigned else remote_specfile_path,
- keep_original=False,
+ manifest, config = get_manifest_and_config_with_retry(image_ref)
+
+ return spack.oci.oci.Blob(
+ compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
+ uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
+ size=manifest["layers"][-1]["size"],
)
- except Exception as e:
- raise PushToBuildCacheError(
- f"Encountered problem pushing binary {remote_spackfile_path}: {e}"
- ) from e
+ except Exception:
+ return None
+
+
+def _oci_push_pkg_blob(
+ image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str
+) -> Tuple[spack.oci.oci.Blob, float]:
+ """Push a package blob to the registry and return the blob info and the time taken"""
+ filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
+
+ # Create an oci.image.layer aka tarball of the package
+ compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
+
+ blob = spack.oci.oci.Blob(
+ Digest.from_sha256(compressed_tarfile_checksum),
+ Digest.from_sha256(tarfile_checksum),
+ os.path.getsize(filename),
+ )
+
+ # Upload the blob
+ start = time.time()
+ upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
+ elapsed = time.time() - start
+
+ # delete the file
+ os.unlink(filename)
- # push the key to the build cache's _pgp directory so it can be
- # imported
- if not options.unsigned:
- push_keys(out_url, keys=[key], regenerate_index=options.regenerate_index, tmpdir=stage_dir)
+ return blob, elapsed
- # create an index.json for the build_cache directory so specs can be
- # found
- if options.regenerate_index:
- generate_package_index(url_util.join(out_url, os.path.relpath(cache_prefix, stage_dir)))
+
+def _oci_retrieve_env_dict_from_config(config: dict) -> dict:
+ """Retrieve the environment variables from the image config file.
+ Sets a default value for PATH if it is not present.
+
+ Args:
+ config (dict): The image config file.
+
+ Returns:
+ dict: The environment variables.
+ """
+ env = {"PATH": "/bin:/usr/bin"}
+
+ if "Env" in config.get("config", {}):
+ for entry in config["config"]["Env"]:
+ key, value = entry.split("=", 1)
+ env[key] = value
+ return env
+
+
+def _oci_archspec_to_gooarch(spec: spack.spec.Spec) -> str:
+ name = spec.target.family.name
+ name_map = {"aarch64": "arm64", "x86_64": "amd64"}
+ return name_map.get(name, name)
+
+
+def _oci_put_manifest(
+ base_images: Dict[str, Tuple[dict, dict]],
+ checksums: Dict[str, spack.oci.oci.Blob],
+ image_ref: ImageReference,
+ tmpdir: str,
+ extra_config: Optional[dict],
+ annotations: Optional[dict],
+ *specs: spack.spec.Spec,
+):
+ architecture = _oci_archspec_to_gooarch(specs[0])
+
+ expected_blobs: List[Spec] = [
+ s
+ for s in traverse.traverse_nodes(specs, order="topo", deptype=("link", "run"), root=True)
+ if not s.external
+ ]
+ expected_blobs.reverse()
+
+ base_manifest, base_config = base_images[architecture]
+ env = _oci_retrieve_env_dict_from_config(base_config)
+
+ # If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
+ # This is because Singularity / Apptainer is very strict about not mixing them.
+ base_manifest_mediaType = base_manifest.get(
+ "mediaType", "application/vnd.oci.image.manifest.v1+json"
+ )
+ use_docker_format = (
+ base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
+ )
+
+ spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
+
+ # Create an oci.image.config file
+ config = copy.deepcopy(base_config)
+
+ # Add the diff ids of the blobs
+ for s in expected_blobs:
+ # If a layer for a dependency has gone missing (due to removed manifest in the registry, a
+ # failed push, or a local forced uninstall), we cannot create a runnable container image.
+ # If an OCI registry is only used for storage, this is not a hard error, but for now we
+ # raise an exception unconditionally, until someone requests a more lenient behavior.
+ checksum = checksums.get(s.dag_hash())
+ if not checksum:
+ raise MissingLayerError(f"missing layer for {_format_spec(s)}")
+ config["rootfs"]["diff_ids"].append(str(checksum.uncompressed_digest))
+
+ # Set the environment variables
+ config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
+
+ if extra_config:
+ # From the OCI v1.0 spec:
+ # > Any extra fields in the Image JSON struct are considered implementation
+ # > specific and MUST be ignored by any implementations which are unable to
+ # > interpret them.
+ config.update(extra_config)
+
+ config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
+
+ with open(config_file, "w") as f:
+ json.dump(config, f, separators=(",", ":"))
+
+ config_file_checksum = Digest.from_sha256(
+ spack.util.crypto.checksum(hashlib.sha256, config_file)
+ )
+
+ # Upload the config file
+ upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
+
+ manifest = {
+ "mediaType": base_manifest_mediaType,
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": base_manifest["config"]["mediaType"],
+ "digest": str(config_file_checksum),
+ "size": os.path.getsize(config_file),
+ },
+ "layers": [
+ *(layer for layer in base_manifest["layers"]),
+ *(
+ {
+ "mediaType": (
+ "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ if use_docker_format
+ else "application/vnd.oci.image.layer.v1.tar+gzip"
+ ),
+ "digest": str(checksums[s.dag_hash()].compressed_digest),
+ "size": checksums[s.dag_hash()].size,
+ }
+ for s in expected_blobs
+ ),
+ ],
+ }
+
+ if not use_docker_format and annotations:
+ manifest["annotations"] = annotations
+
+ # Finally upload the manifest
+ upload_manifest_with_retry(image_ref, manifest=manifest)
+
+ # delete the config file
+ os.unlink(config_file)
+
+
+def _oci_update_base_images(
+ *,
+ base_image: Optional[ImageReference],
+ target_image: ImageReference,
+ spec: spack.spec.Spec,
+ base_image_cache: Dict[str, Tuple[dict, dict]],
+):
+ """For a given spec and base image, copy the missing layers of the base image with matching
+ arch to the registry of the target image. If no base image is specified, create a dummy
+ manifest and config file."""
+ architecture = _oci_archspec_to_gooarch(spec)
+ if architecture in base_image_cache:
+ return
+ if base_image is None:
+ base_image_cache[architecture] = (
+ default_manifest(),
+ default_config(architecture, "linux"),
+ )
+ else:
+ base_image_cache[architecture] = copy_missing_layers_with_retry(
+ base_image, target_image, architecture
+ )
+
+
+def _push_oci(
+ *,
+ target_image: ImageReference,
+ base_image: Optional[ImageReference],
+ installed_specs_with_deps: List[Spec],
+ tmpdir: str,
+ executor: concurrent.futures.Executor,
+ force: bool = False,
+) -> Tuple[
+ List[Spec],
+ Dict[str, Tuple[dict, dict]],
+ Dict[str, spack.oci.oci.Blob],
+ List[Tuple[Spec, BaseException]],
+]:
+
+ # Spec dag hash -> blob
+ checksums: Dict[str, spack.oci.oci.Blob] = {}
+
+ # arch -> (manifest, config)
+ base_images: Dict[str, Tuple[dict, dict]] = {}
+
+ # Specs not uploaded because they already exist
+ skipped: List[Spec] = []
+
+ if not force:
+ tty.info("Checking for existing specs in the buildcache")
+ blobs_to_upload = []
+
+ tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
+ available_blobs = executor.map(_oci_get_blob_info, tags_to_check)
+
+ for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
+ if maybe_blob is not None:
+ checksums[spec.dag_hash()] = maybe_blob
+ skipped.append(spec)
+ else:
+ blobs_to_upload.append(spec)
+ else:
+ blobs_to_upload = installed_specs_with_deps
+
+ if not blobs_to_upload:
+ return skipped, base_images, checksums, []
+
+ if len(blobs_to_upload) != len(installed_specs_with_deps):
+ tty.info(
+ f"{len(blobs_to_upload)} specs need to be pushed to "
+ f"{target_image.domain}/{target_image.name}"
+ )
+
+ blob_progress = FancyProgress(len(blobs_to_upload))
+
+ # Upload blobs
+ blob_futures = [
+ executor.submit(_oci_push_pkg_blob, target_image, spec, tmpdir) for spec in blobs_to_upload
+ ]
+
+ manifests_to_upload: List[Spec] = []
+ errors: List[Tuple[Spec, BaseException]] = []
+
+ # And update the spec to blob mapping for successful uploads
+ for spec, blob_future in zip(blobs_to_upload, blob_futures):
+ blob_progress.start(spec, blob_future.running())
+ error = blob_future.exception()
+ if error is None:
+ blob, elapsed = blob_future.result()
+ blob_progress.ok(
+ _oci_upload_success_msg(spec, blob.compressed_digest, blob.size, elapsed)
+ )
+ manifests_to_upload.append(spec)
+ checksums[spec.dag_hash()] = blob
+ else:
+ blob_progress.fail()
+ errors.append((spec, error))
+
+ # Copy base images if necessary
+ for spec in manifests_to_upload:
+ _oci_update_base_images(
+ base_image=base_image,
+ target_image=target_image,
+ spec=spec,
+ base_image_cache=base_images,
+ )
+
+ def extra_config(spec: Spec):
+ spec_dict = spec.to_dict(hash=ht.dag_hash)
+ spec_dict["buildcache_layout_version"] = CURRENT_BUILD_CACHE_LAYOUT_VERSION
+ spec_dict["binary_cache_checksum"] = {
+ "hash_algorithm": "sha256",
+ "hash": checksums[spec.dag_hash()].compressed_digest.digest,
+ }
+ return spec_dict
+
+ # Upload manifests
+ tty.info("Uploading manifests")
+ manifest_futures = [
+ executor.submit(
+ _oci_put_manifest,
+ base_images,
+ checksums,
+ target_image.with_tag(default_tag(spec)),
+ tmpdir,
+ extra_config(spec),
+ {"org.opencontainers.image.description": spec.format()},
+ spec,
+ )
+ for spec in manifests_to_upload
+ ]
+
+ manifest_progress = FancyProgress(len(manifests_to_upload))
+
+ # Print the image names of the top-level specs
+ for spec, manifest_future in zip(manifests_to_upload, manifest_futures):
+ error = manifest_future.exception()
+ manifest_progress.start(spec, manifest_future.running())
+ if error is None:
+ manifest_progress.ok(
+ f"Tagged {_format_spec(spec)} as {target_image.with_tag(default_tag(spec))}"
+ )
+ else:
+ manifest_progress.fail()
+ errors.append((spec, error))
+
+ return skipped, base_images, checksums, errors
+
+
+def _oci_config_from_tag(image_ref_and_tag: Tuple[ImageReference, str]) -> Optional[dict]:
+ image_ref, tag = image_ref_and_tag
+ # Don't allow recursion here, since Spack itself always uploads
+ # vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
+ _, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)
+
+ # Do very basic validation: if "spec" is a key in the config, it
+ # must be a Spec object too.
+ return config if "spec" in config else None
+
+
+def _oci_update_index(
+ image_ref: ImageReference, tmpdir: str, pool: concurrent.futures.Executor
+) -> None:
+ tags = list_tags(image_ref)
+
+ # Fetch all image config files in parallel
+ spec_dicts = pool.map(
+ _oci_config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag))
+ )
+
+ # Populate the database
+ db_root_dir = os.path.join(tmpdir, "db_root")
+ db = BuildCacheDatabase(db_root_dir)
+
+ for spec_dict in spec_dicts:
+ spec = Spec.from_dict(spec_dict)
+ db.add(spec, directory_layout=None)
+ db.mark(spec, "in_buildcache", True)
+
+ # Create the index.json file
+ index_json_path = os.path.join(tmpdir, "index.json")
+ with open(index_json_path, "w") as f:
+ db._write_to_file(f)
+
+ # Create an empty config.json file
+ empty_config_json_path = os.path.join(tmpdir, "config.json")
+ with open(empty_config_json_path, "wb") as f:
+ f.write(b"{}")
+
+ # Upload the index.json file
+ index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
+ upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)
+
+ # Upload the config.json file
+ empty_config_digest = Digest.from_sha256(
+ spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
+ )
+ upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)
+
+ # Push a manifest file that references the index.json file as a layer
+ # Notice that we push this as if it is an image, which it of course is not.
+ # When the ORAS spec becomes official, we can use that instead of a fake image.
+ # For now we just use the OCI image spec, so that we don't run into issues with
+ # automatic garbage collection of blobs that are not referenced by any image manifest.
+ oci_manifest = {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "schemaVersion": 2,
+ # Config is just an empty {} file for now, and irrelevant
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": str(empty_config_digest),
+ "size": os.path.getsize(empty_config_json_path),
+ },
+ # The buildcache index is the only layer, and is not a tarball, we lie here.
+ "layers": [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": str(index_shasum),
+ "size": os.path.getsize(index_json_path),
+ }
+ ],
+ }
+
+ upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
def try_verify(specfile_path):
@@ -2124,67 +2645,32 @@ def get_keys(install=False, trust=False, force=False, mirrors=None):
)
-def push_keys(*mirrors, **kwargs):
- """
- Upload pgp public keys to the given mirrors
- """
- keys = kwargs.get("keys")
- regenerate_index = kwargs.get("regenerate_index", False)
- tmpdir = kwargs.get("tmpdir")
- remove_tmpdir = False
+def push_keys(
+ *mirrors: Union[spack.mirror.Mirror, str],
+ keys: List[str],
+ tmpdir: str,
+ update_index: bool = False,
+):
+ """Upload pgp public keys to the given mirrors"""
+ keys = spack.util.gpg.public_keys(*(keys or ()))
+ files = [os.path.join(tmpdir, f"{key}.pub") for key in keys]
- keys = spack.util.gpg.public_keys(*(keys or []))
+ for key, file in zip(keys, files):
+ spack.util.gpg.export_keys(file, [key])
- try:
- for mirror in mirrors:
- push_url = getattr(mirror, "push_url", mirror)
- keys_url = url_util.join(
- push_url, BUILD_CACHE_RELATIVE_PATH, BUILD_CACHE_KEYS_RELATIVE_PATH
- )
- keys_local = url_util.local_file_path(keys_url)
+ for mirror in mirrors:
+ push_url = mirror if isinstance(mirror, str) else mirror.push_url
+ keys_url = url_util.join(
+ push_url, BUILD_CACHE_RELATIVE_PATH, BUILD_CACHE_KEYS_RELATIVE_PATH
+ )
- verb = "Writing" if keys_local else "Uploading"
- tty.debug("{0} public keys to {1}".format(verb, url_util.format(push_url)))
+ tty.debug(f"Pushing public keys to {url_util.format(push_url)}")
- if keys_local: # mirror is local, don't bother with the tmpdir
- prefix = keys_local
- mkdirp(keys_local)
- else:
- # A tmp dir is created for the first mirror that is non-local.
- # On the off-hand chance that all the mirrors are local, then
- # we can avoid the need to create a tmp dir.
- if tmpdir is None:
- tmpdir = tempfile.mkdtemp()
- remove_tmpdir = True
- prefix = tmpdir
-
- for fingerprint in keys:
- tty.debug(" " + fingerprint)
- filename = fingerprint + ".pub"
-
- export_target = os.path.join(prefix, filename)
-
- # Export public keys (private is set to False)
- spack.util.gpg.export_keys(export_target, [fingerprint])
-
- # If mirror is local, the above export writes directly to the
- # mirror (export_target points directly to the mirror).
- #
- # If not, then export_target is a tmpfile that needs to be
- # uploaded to the mirror.
- if not keys_local:
- spack.util.web.push_to_url(
- export_target, url_util.join(keys_url, filename), keep_original=False
- )
+ for key, file in zip(keys, files):
+ web_util.push_to_url(file, url_util.join(keys_url, os.path.basename(file)))
- if regenerate_index:
- if keys_local:
- generate_key_index(keys_url)
- else:
- generate_key_index(keys_url, tmpdir)
- finally:
- if remove_tmpdir:
- shutil.rmtree(tmpdir)
+ if update_index:
+ generate_key_index(keys_url, tmpdir=tmpdir)
def needs_rebuild(spec, mirror_url):
@@ -2610,3 +3096,7 @@ class CannotListKeys(GenerateIndexError):
class PushToBuildCacheError(spack.error.SpackError):
"""Raised when unable to push objects to binary mirror"""
+
+
+class MissingLayerError(spack.error.SpackError):
+ """Raised when a required layer for a dependency is missing in an OCI registry."""
diff --git a/lib/spack/spack/ci.py b/lib/spack/spack/ci.py
index 95e23cc64f..57a15bbd54 100644
--- a/lib/spack/spack/ci.py
+++ b/lib/spack/spack/ci.py
@@ -38,6 +38,7 @@ import spack.mirror
import spack.paths
import spack.repo
import spack.spec
+import spack.stage
import spack.util.git
import spack.util.gpg as gpg_util
import spack.util.spack_yaml as syaml
@@ -1370,15 +1371,6 @@ def can_verify_binaries():
return len(gpg_util.public_keys()) >= 1
-def _push_to_build_cache(spec: spack.spec.Spec, sign_binaries: bool, mirror_url: str) -> None:
- """Unchecked version of the public API, for easier mocking"""
- bindist.push_or_raise(
- spec,
- spack.mirror.Mirror.from_url(mirror_url).push_url,
- bindist.PushOptions(force=True, unsigned=not sign_binaries),
- )
-
-
def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: bool) -> bool:
"""Push one or more binary packages to the mirror.
@@ -1389,20 +1381,13 @@ def push_to_build_cache(spec: spack.spec.Spec, mirror_url: str, sign_binaries: b
sign_binaries: If True, spack will attempt to sign binary package before pushing.
"""
tty.debug(f"Pushing to build cache ({'signed' if sign_binaries else 'unsigned'})")
+ signing_key = bindist.select_signing_key() if sign_binaries else None
try:
- _push_to_build_cache(spec, sign_binaries, mirror_url)
+ bindist.push_or_raise([spec], out_url=mirror_url, signing_key=signing_key)
return True
except bindist.PushToBuildCacheError as e:
- tty.error(str(e))
+ tty.error(f"Problem writing to {mirror_url}: {e}")
return False
- except Exception as e:
- # TODO (zackgalbreath): write an adapter for boto3 exceptions so we can catch a specific
- # exception instead of parsing str(e)...
- msg = str(e)
- if any(x in msg for x in ["Access Denied", "InvalidAccessKeyId"]):
- tty.error(f"Permission problem writing to {mirror_url}: {msg}")
- return False
- raise
def remove_other_mirrors(mirrors_to_keep, scope=None):
diff --git a/lib/spack/spack/cmd/buildcache.py b/lib/spack/spack/cmd/buildcache.py
index 78d8e7a00c..2da4a561f0 100644
--- a/lib/spack/spack/cmd/buildcache.py
+++ b/lib/spack/spack/cmd/buildcache.py
@@ -3,16 +3,13 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
-import concurrent.futures
-import copy
import glob
-import hashlib
import json
import os
import shutil
import sys
import tempfile
-from typing import Dict, List, Optional, Tuple
+from typing import List, Tuple
import llnl.util.tty as tty
from llnl.string import plural
@@ -24,7 +21,6 @@ import spack.config
import spack.deptypes as dt
import spack.environment as ev
import spack.error
-import spack.hash_types as ht
import spack.mirror
import spack.oci.oci
import spack.oci.opener
@@ -41,22 +37,7 @@ import spack.util.web as web_util
from spack import traverse
from spack.cmd import display_specs
from spack.cmd.common import arguments
-from spack.oci.image import (
- Digest,
- ImageReference,
- default_config,
- default_index_tag,
- default_manifest,
- default_tag,
- tag_is_spec,
-)
-from spack.oci.oci import (
- copy_missing_layers_with_retry,
- get_manifest_and_config_with_retry,
- list_tags,
- upload_blob_with_retry,
- upload_manifest_with_retry,
-)
+from spack.oci.image import ImageReference
from spack.spec import Spec, save_dependency_specfiles
description = "create, download and install binary packages"
@@ -340,13 +321,6 @@ def _format_spec(spec: Spec) -> str:
return spec.cformat("{name}{@version}{/hash:7}")
-def _progress(i: int, total: int):
- if total > 1:
- digits = len(str(total))
- return f"[{i+1:{digits}}/{total}] "
- return ""
-
-
def _skip_no_redistribute_for_public(specs):
remaining_specs = list()
removed_specs = list()
@@ -372,7 +346,7 @@ class PackagesAreNotInstalledError(spack.error.SpackError):
def __init__(self, specs: List[Spec]):
super().__init__(
"Cannot push non-installed packages",
- ", ".join(elide_list(list(_format_spec(s) for s in specs), 5)),
+ ", ".join(elide_list([_format_spec(s) for s in specs], 5)),
)
@@ -380,10 +354,6 @@ class PackageNotInstalledError(spack.error.SpackError):
"""Raised when a spec is not installed but picked to be packaged."""
-class MissingLayerError(spack.error.SpackError):
- """Raised when a required layer for a dependency is missing in an OCI registry."""
-
-
def _specs_to_be_packaged(
requested: List[Spec], things_to_install: str, build_deps: bool
) -> List[Spec]:
@@ -394,7 +364,7 @@ def _specs_to_be_packaged(
deptype = dt.ALL
else:
deptype = dt.RUN | dt.LINK | dt.TEST
- return [
+ specs = [
s
for s in traverse.traverse_nodes(
requested,
@@ -405,6 +375,8 @@ def _specs_to_be_packaged(
)
if not s.external
]
+ specs.reverse()
+ return specs
def push_fn(args):
@@ -445,6 +417,10 @@ def push_fn(args):
"Code signing is currently not supported for OCI images. "
"Use --unsigned to silence this warning."
)
+ unsigned = True
+
+ # Select a signing key, or None if unsigned.
+ signing_key = None if unsigned else (args.key or bindist.select_signing_key())
specs = _specs_to_be_packaged(
roots,
@@ -471,13 +447,10 @@ def push_fn(args):
(s, PackageNotInstalledError("package not installed")) for s in not_installed
)
- # TODO: move into bindist.push_or_raise
- if target_image:
- base_image = ImageReference.from_string(args.base_image) if args.base_image else None
- with tempfile.TemporaryDirectory(
- dir=spack.stage.get_stage_root()
- ) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
- skipped, base_images, checksums, upload_errors = _push_oci(
+ with bindist.default_push_context() as (tmpdir, executor):
+ if target_image:
+ base_image = ImageReference.from_string(args.base_image) if args.base_image else None
+ skipped, base_images, checksums, upload_errors = bindist._push_oci(
target_image=target_image,
base_image=base_image,
installed_specs_with_deps=specs,
@@ -495,46 +468,28 @@ def push_fn(args):
tagged_image = target_image.with_tag(args.tag)
# _push_oci may not populate base_images if binaries were already in the registry
for spec in roots:
- _update_base_images(
+ bindist._oci_update_base_images(
base_image=base_image,
target_image=target_image,
spec=spec,
base_image_cache=base_images,
)
- _put_manifest(base_images, checksums, tagged_image, tmpdir, None, None, *roots)
- tty.info(f"Tagged {tagged_image}")
-
- else:
- skipped = []
-
- for i, spec in enumerate(specs):
- try:
- bindist.push_or_raise(
- spec,
- push_url,
- bindist.PushOptions(
- force=args.force,
- unsigned=unsigned,
- key=args.key,
- regenerate_index=args.update_index,
- ),
+ bindist._oci_put_manifest(
+ base_images, checksums, tagged_image, tmpdir, None, None, *roots
)
+ tty.info(f"Tagged {tagged_image}")
- msg = f"{_progress(i, len(specs))}Pushed {_format_spec(spec)}"
- if len(specs) == 1:
- msg += f" to {push_url}"
- tty.info(msg)
-
- except bindist.NoOverwriteException:
- skipped.append(_format_spec(spec))
-
- # Catch any other exception unless the fail fast option is set
- except Exception as e:
- if args.fail_fast or isinstance(
- e, (bindist.PickKeyException, bindist.NoKeyException)
- ):
- raise
- failed.append((spec, e))
+ else:
+ skipped, upload_errors = bindist._push(
+ specs,
+ out_url=push_url,
+ force=args.force,
+ update_index=args.update_index,
+ signing_key=signing_key,
+ tmpdir=tmpdir,
+ executor=executor,
+ )
+ failed.extend(upload_errors)
if skipped:
if len(specs) == 1:
@@ -567,409 +522,12 @@ def push_fn(args):
),
)
- # Update the index if requested
- # TODO: remove update index logic out of bindist; should be once after all specs are pushed
- # not once per spec.
+ # Update the OCI index if requested
if target_image and len(skipped) < len(specs) and args.update_index:
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
- _update_index_oci(target_image, tmpdir, executor)
-
-
-def _get_spack_binary_blob(image_ref: ImageReference) -> Optional[spack.oci.oci.Blob]:
- """Get the spack tarball layer digests and size if it exists"""
- try:
- manifest, config = get_manifest_and_config_with_retry(image_ref)
-
- return spack.oci.oci.Blob(
- compressed_digest=Digest.from_string(manifest["layers"][-1]["digest"]),
- uncompressed_digest=Digest.from_string(config["rootfs"]["diff_ids"][-1]),
- size=manifest["layers"][-1]["size"],
- )
- except Exception:
- return None
-
-
-def _push_single_spack_binary_blob(image_ref: ImageReference, spec: spack.spec.Spec, tmpdir: str):
- filename = os.path.join(tmpdir, f"{spec.dag_hash()}.tar.gz")
-
- # Create an oci.image.layer aka tarball of the package
- compressed_tarfile_checksum, tarfile_checksum = spack.oci.oci.create_tarball(spec, filename)
-
- blob = spack.oci.oci.Blob(
- Digest.from_sha256(compressed_tarfile_checksum),
- Digest.from_sha256(tarfile_checksum),
- os.path.getsize(filename),
- )
-
- # Upload the blob
- upload_blob_with_retry(image_ref, file=filename, digest=blob.compressed_digest)
-
- # delete the file
- os.unlink(filename)
-
- return blob
-
-
-def _retrieve_env_dict_from_config(config: dict) -> dict:
- """Retrieve the environment variables from the image config file.
- Sets a default value for PATH if it is not present.
-
- Args:
- config (dict): The image config file.
-
- Returns:
- dict: The environment variables.
- """
- env = {"PATH": "/bin:/usr/bin"}
-
- if "Env" in config.get("config", {}):
- for entry in config["config"]["Env"]:
- key, value = entry.split("=", 1)
- env[key] = value
- return env
-
-
-def _archspec_to_gooarch(spec: spack.spec.Spec) -> str:
- name = spec.target.family.name
- name_map = {"aarch64": "arm64", "x86_64": "amd64"}
- return name_map.get(name, name)
-
-
-def _put_manifest(
- base_images: Dict[str, Tuple[dict, dict]],
- checksums: Dict[str, spack.oci.oci.Blob],
- image_ref: ImageReference,
- tmpdir: str,
- extra_config: Optional[dict],
- annotations: Optional[dict],
- *specs: spack.spec.Spec,
-):
- architecture = _archspec_to_gooarch(specs[0])
-
- expected_blobs: List[Spec] = [
- s
- for s in traverse.traverse_nodes(specs, order="topo", deptype=("link", "run"), root=True)
- if not s.external
- ]
- expected_blobs.reverse()
-
- base_manifest, base_config = base_images[architecture]
- env = _retrieve_env_dict_from_config(base_config)
-
- # If the base image uses `vnd.docker.distribution.manifest.v2+json`, then we use that too.
- # This is because Singularity / Apptainer is very strict about not mixing them.
- base_manifest_mediaType = base_manifest.get(
- "mediaType", "application/vnd.oci.image.manifest.v1+json"
- )
- use_docker_format = (
- base_manifest_mediaType == "application/vnd.docker.distribution.manifest.v2+json"
- )
-
- spack.user_environment.environment_modifications_for_specs(*specs).apply_modifications(env)
-
- # Create an oci.image.config file
- config = copy.deepcopy(base_config)
-
- # Add the diff ids of the blobs
- for s in expected_blobs:
- # If a layer for a dependency has gone missing (due to removed manifest in the registry, a
- # failed push, or a local forced uninstall), we cannot create a runnable container image.
- # If an OCI registry is only used for storage, this is not a hard error, but for now we
- # raise an exception unconditionally, until someone requests a more lenient behavior.
- checksum = checksums.get(s.dag_hash())
- if not checksum:
- raise MissingLayerError(f"missing layer for {_format_spec(s)}")
- config["rootfs"]["diff_ids"].append(str(checksum.uncompressed_digest))
-
- # Set the environment variables
- config["config"]["Env"] = [f"{k}={v}" for k, v in env.items()]
-
- if extra_config:
- # From the OCI v1.0 spec:
- # > Any extra fields in the Image JSON struct are considered implementation
- # > specific and MUST be ignored by any implementations which are unable to
- # > interpret them.
- config.update(extra_config)
-
- config_file = os.path.join(tmpdir, f"{specs[0].dag_hash()}.config.json")
-
- with open(config_file, "w") as f:
- json.dump(config, f, separators=(",", ":"))
-
- config_file_checksum = Digest.from_sha256(
- spack.util.crypto.checksum(hashlib.sha256, config_file)
- )
-
- # Upload the config file
- upload_blob_with_retry(image_ref, file=config_file, digest=config_file_checksum)
-
- manifest = {
- "mediaType": base_manifest_mediaType,
- "schemaVersion": 2,
- "config": {
- "mediaType": base_manifest["config"]["mediaType"],
- "digest": str(config_file_checksum),
- "size": os.path.getsize(config_file),
- },
- "layers": [
- *(layer for layer in base_manifest["layers"]),
- *(
- {
- "mediaType": (
- "application/vnd.docker.image.rootfs.diff.tar.gzip"
- if use_docker_format
- else "application/vnd.oci.image.layer.v1.tar+gzip"
- ),
- "digest": str(checksums[s.dag_hash()].compressed_digest),
- "size": checksums[s.dag_hash()].size,
- }
- for s in expected_blobs
- ),
- ],
- }
-
- if not use_docker_format and annotations:
- manifest["annotations"] = annotations
-
- # Finally upload the manifest
- upload_manifest_with_retry(image_ref, manifest=manifest)
-
- # delete the config file
- os.unlink(config_file)
-
-
-def _update_base_images(
- *,
- base_image: Optional[ImageReference],
- target_image: ImageReference,
- spec: spack.spec.Spec,
- base_image_cache: Dict[str, Tuple[dict, dict]],
-):
- """For a given spec and base image, copy the missing layers of the base image with matching
- arch to the registry of the target image. If no base image is specified, create a dummy
- manifest and config file."""
- architecture = _archspec_to_gooarch(spec)
- if architecture in base_image_cache:
- return
- if base_image is None:
- base_image_cache[architecture] = (
- default_manifest(),
- default_config(architecture, "linux"),
- )
- else:
- base_image_cache[architecture] = copy_missing_layers_with_retry(
- base_image, target_image, architecture
- )
-
-
-def _push_oci(
- *,
- target_image: ImageReference,
- base_image: Optional[ImageReference],
- installed_specs_with_deps: List[Spec],
- tmpdir: str,
- executor: concurrent.futures.Executor,
- force: bool = False,
-) -> Tuple[
- List[str],
- Dict[str, Tuple[dict, dict]],
- Dict[str, spack.oci.oci.Blob],
- List[Tuple[Spec, BaseException]],
-]:
- """Push specs to an OCI registry
-
- Args:
- image_ref: The target OCI image
- base_image: Optional base image, which will be copied to the target registry.
- installed_specs_with_deps: The installed specs to push, excluding externals,
- including deps, ordered from roots to leaves.
- force: Whether to overwrite existing layers and manifests in the buildcache.
-
- Returns:
- A tuple consisting of the list of skipped specs already in the build cache,
- a dictionary mapping architectures to base image manifests and configs,
- a dictionary mapping each spec's dag hash to a blob,
- and a list of tuples of specs with errors of failed uploads.
- """
-
- # Reverse the order
- installed_specs_with_deps = list(reversed(installed_specs_with_deps))
-
- # Spec dag hash -> blob
- checksums: Dict[str, spack.oci.oci.Blob] = {}
-
- # arch -> (manifest, config)
- base_images: Dict[str, Tuple[dict, dict]] = {}
-
- # Specs not uploaded because they already exist
- skipped = []
-
- if not force:
- tty.info("Checking for existing specs in the buildcache")
- blobs_to_upload = []
-
- tags_to_check = (target_image.with_tag(default_tag(s)) for s in installed_specs_with_deps)
- available_blobs = executor.map(_get_spack_binary_blob, tags_to_check)
-
- for spec, maybe_blob in zip(installed_specs_with_deps, available_blobs):
- if maybe_blob is not None:
- checksums[spec.dag_hash()] = maybe_blob
- skipped.append(_format_spec(spec))
- else:
- blobs_to_upload.append(spec)
- else:
- blobs_to_upload = installed_specs_with_deps
-
- if not blobs_to_upload:
- return skipped, base_images, checksums, []
-
- tty.info(
- f"{len(blobs_to_upload)} specs need to be pushed to "
- f"{target_image.domain}/{target_image.name}"
- )
-
- # Upload blobs
- blob_futures = [
- executor.submit(_push_single_spack_binary_blob, target_image, spec, tmpdir)
- for spec in blobs_to_upload
- ]
-
- concurrent.futures.wait(blob_futures)
-
- manifests_to_upload: List[Spec] = []
- errors: List[Tuple[Spec, BaseException]] = []
-
- # And update the spec to blob mapping for successful uploads
- for spec, blob_future in zip(blobs_to_upload, blob_futures):
- error = blob_future.exception()
- if error is None:
- manifests_to_upload.append(spec)
- checksums[spec.dag_hash()] = blob_future.result()
- else:
- errors.append((spec, error))
-
- # Copy base images if necessary
- for spec in manifests_to_upload:
- _update_base_images(
- base_image=base_image,
- target_image=target_image,
- spec=spec,
- base_image_cache=base_images,
- )
-
- def extra_config(spec: Spec):
- spec_dict = spec.to_dict(hash=ht.dag_hash)
- spec_dict["buildcache_layout_version"] = bindist.CURRENT_BUILD_CACHE_LAYOUT_VERSION
- spec_dict["binary_cache_checksum"] = {
- "hash_algorithm": "sha256",
- "hash": checksums[spec.dag_hash()].compressed_digest.digest,
- }
- return spec_dict
-
- # Upload manifests
- tty.info("Uploading manifests")
- manifest_futures = [
- executor.submit(
- _put_manifest,
- base_images,
- checksums,
- target_image.with_tag(default_tag(spec)),
- tmpdir,
- extra_config(spec),
- {"org.opencontainers.image.description": spec.format()},
- spec,
- )
- for spec in manifests_to_upload
- ]
-
- concurrent.futures.wait(manifest_futures)
-
- # Print the image names of the top-level specs
- for spec, manifest_future in zip(manifests_to_upload, manifest_futures):
- error = manifest_future.exception()
- if error is None:
- tty.info(f"Pushed {_format_spec(spec)} to {target_image.with_tag(default_tag(spec))}")
- else:
- errors.append((spec, error))
-
- return skipped, base_images, checksums, errors
-
-
-def _config_from_tag(image_ref_and_tag: Tuple[ImageReference, str]) -> Optional[dict]:
- image_ref, tag = image_ref_and_tag
- # Don't allow recursion here, since Spack itself always uploads
- # vnd.oci.image.manifest.v1+json, not vnd.oci.image.index.v1+json
- _, config = get_manifest_and_config_with_retry(image_ref.with_tag(tag), tag, recurse=0)
-
- # Do very basic validation: if "spec" is a key in the config, it
- # must be a Spec object too.
- return config if "spec" in config else None
-
-
-def _update_index_oci(
- image_ref: ImageReference, tmpdir: str, pool: concurrent.futures.Executor
-) -> None:
- tags = list_tags(image_ref)
-
- # Fetch all image config files in parallel
- spec_dicts = pool.map(_config_from_tag, ((image_ref, tag) for tag in tags if tag_is_spec(tag)))
-
- # Populate the database
- db_root_dir = os.path.join(tmpdir, "db_root")
- db = bindist.BuildCacheDatabase(db_root_dir)
-
- for spec_dict in spec_dicts:
- spec = Spec.from_dict(spec_dict)
- db.add(spec, directory_layout=None)
- db.mark(spec, "in_buildcache", True)
-
- # Create the index.json file
- index_json_path = os.path.join(tmpdir, "index.json")
- with open(index_json_path, "w") as f:
- db._write_to_file(f)
-
- # Create an empty config.json file
- empty_config_json_path = os.path.join(tmpdir, "config.json")
- with open(empty_config_json_path, "wb") as f:
- f.write(b"{}")
-
- # Upload the index.json file
- index_shasum = Digest.from_sha256(spack.util.crypto.checksum(hashlib.sha256, index_json_path))
- upload_blob_with_retry(image_ref, file=index_json_path, digest=index_shasum)
-
- # Upload the config.json file
- empty_config_digest = Digest.from_sha256(
- spack.util.crypto.checksum(hashlib.sha256, empty_config_json_path)
- )
- upload_blob_with_retry(image_ref, file=empty_config_json_path, digest=empty_config_digest)
-
- # Push a manifest file that references the index.json file as a layer
- # Notice that we push this as if it is an image, which it of course is not.
- # When the ORAS spec becomes official, we can use that instead of a fake image.
- # For now we just use the OCI image spec, so that we don't run into issues with
- # automatic garbage collection of blobs that are not referenced by any image manifest.
- oci_manifest = {
- "mediaType": "application/vnd.oci.image.manifest.v1+json",
- "schemaVersion": 2,
- # Config is just an empty {} file for now, and irrelevant
- "config": {
- "mediaType": "application/vnd.oci.image.config.v1+json",
- "digest": str(empty_config_digest),
- "size": os.path.getsize(empty_config_json_path),
- },
- # The buildcache index is the only layer; it is not actually a tarball, but we label it as one.
- "layers": [
- {
- "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
- "digest": str(index_shasum),
- "size": os.path.getsize(index_json_path),
- }
- ],
- }
-
- upload_manifest_with_retry(image_ref.with_tag(default_index_tag), oci_manifest)
+ bindist._oci_update_index(target_image, tmpdir, executor)
def install_fn(args):
@@ -1251,13 +809,14 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
with tempfile.TemporaryDirectory(
dir=spack.stage.get_stage_root()
) as tmpdir, spack.util.parallel.make_concurrent_executor() as executor:
- _update_index_oci(image_ref, tmpdir, executor)
+ bindist._oci_update_index(image_ref, tmpdir, executor)
return
# Otherwise, assume a normal mirror.
url = mirror.push_url
- bindist.generate_package_index(url_util.join(url, bindist.build_cache_relative_path()))
+ with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
+ bindist.generate_package_index(url, tmpdir)
if update_keys:
keys_url = url_util.join(
@@ -1265,7 +824,8 @@ def update_index(mirror: spack.mirror.Mirror, update_keys=False):
)
try:
- bindist.generate_key_index(keys_url)
+ with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
+ bindist.generate_key_index(keys_url, tmpdir)
except bindist.CannotListKeys as e:
# Do not error out if listing keys went wrong. This usually means that the _gpg path
# does not exist. TODO: distinguish between this and other errors.
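
For reference, the non-OCI branch of update_index above now amounts to regenerating the package index (and optionally the key index) in a scratch directory before pushing it. A minimal sketch, assuming a hypothetical push URL and keys URL and using only the calls shown in this hunk:

import tempfile

import spack.binary_distribution as bindist
import spack.stage

def regenerate_indices(url: str, keys_url: str, update_keys: bool = False) -> None:
    # Build the package index in a temporary staging directory, then push it to `url`.
    with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
        bindist.generate_package_index(url, tmpdir)

    if update_keys:
        try:
            # Same pattern for the key index.
            with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
                bindist.generate_key_index(keys_url, tmpdir)
        except bindist.CannotListKeys:
            # Listing keys can fail when the _gpg prefix does not exist; not fatal.
            pass
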
diff --git a/lib/spack/spack/cmd/gpg.py b/lib/spack/spack/cmd/gpg.py
index d05db0a61e..d16b03b7bc 100644
--- a/lib/spack/spack/cmd/gpg.py
+++ b/lib/spack/spack/cmd/gpg.py
@@ -5,10 +5,12 @@
import argparse
import os
+import tempfile
import spack.binary_distribution
import spack.mirror
import spack.paths
+import spack.stage
import spack.util.gpg
import spack.util.url
from spack.cmd.common import arguments
@@ -115,6 +117,7 @@ def setup_parser(subparser):
help="URL of the mirror where keys will be published",
)
publish.add_argument(
+ "--update-index",
"--rebuild-index",
action="store_true",
default=False,
@@ -220,9 +223,10 @@ def gpg_publish(args):
elif args.mirror_url:
mirror = spack.mirror.Mirror(args.mirror_url, args.mirror_url)
- spack.binary_distribution.push_keys(
- mirror, keys=args.keys, regenerate_index=args.rebuild_index
- )
+ with tempfile.TemporaryDirectory(dir=spack.stage.get_stage_root()) as tmpdir:
+ spack.binary_distribution.push_keys(
+ mirror, keys=args.keys, tmpdir=tmpdir, update_index=args.update_index
+ )
def gpg(parser, args):
diff --git a/lib/spack/spack/hooks/autopush.py b/lib/spack/spack/hooks/autopush.py
index 1974c8d3ab..cb951b7b4b 100644
--- a/lib/spack/spack/hooks/autopush.py
+++ b/lib/spack/spack/hooks/autopush.py
@@ -23,9 +23,6 @@ def post_install(spec, explicit):
# Push the package to all autopush mirrors
for mirror in spack.mirror.MirrorCollection(binary=True, autopush=True).values():
- bindist.push_or_raise(
- spec,
- mirror.push_url,
- bindist.PushOptions(force=True, regenerate_index=False, unsigned=not mirror.signed),
- )
+ signing_key = bindist.select_signing_key() if mirror.signed else None
+ bindist.push_or_raise([spec], out_url=mirror.push_url, signing_key=signing_key, force=True)
tty.msg(f"{spec.name}: Pushed to build cache: '{mirror.name}'")
diff --git a/lib/spack/spack/oci/oci.py b/lib/spack/spack/oci/oci.py
index 09e79df347..bbe403400b 100644
--- a/lib/spack/spack/oci/oci.py
+++ b/lib/spack/spack/oci/oci.py
@@ -6,7 +6,6 @@
import hashlib
import json
import os
-import time
import urllib.error
import urllib.parse
import urllib.request
@@ -43,11 +42,6 @@ def create_tarball(spec: spack.spec.Spec, tarfile_path):
return spack.binary_distribution._do_create_tarball(tarfile_path, spec.prefix, buildinfo)
-def _log_upload_progress(digest: Digest, size: int, elapsed: float):
- elapsed = max(elapsed, 0.001) # guard against division by zero
- tty.info(f"Uploaded {digest} ({elapsed:.2f}s, {size / elapsed / 1024 / 1024:.2f} MB/s)")
-
-
def with_query_param(url: str, param: str, value: str) -> str:
"""Add a query parameter to a URL
@@ -141,8 +135,6 @@ def upload_blob(
if not force and blob_exists(ref, digest, _urlopen):
return False
- start = time.time()
-
with open(file, "rb") as f:
file_size = os.fstat(f.fileno()).st_size
@@ -167,7 +159,6 @@ def upload_blob(
# Created the blob in one go.
if response.status == 201:
- _log_upload_progress(digest, file_size, time.time() - start)
return True
# Otherwise, do another PUT request.
@@ -191,8 +182,6 @@ def upload_blob(
spack.oci.opener.ensure_status(request, response, 201)
- # print elapsed time and # MB/s
- _log_upload_progress(digest, file_size, time.time() - start)
return True
diff --git a/lib/spack/spack/test/bindist.py b/lib/spack/spack/test/bindist.py
index d80ddfe6e5..06cff7f5f7 100644
--- a/lib/spack/spack/test/bindist.py
+++ b/lib/spack/spack/test/bindist.py
@@ -337,7 +337,7 @@ def test_relative_rpaths_install_nondefault(mirror_dir):
buildcache_cmd("install", "-uf", cspec.name)
-def test_push_and_fetch_keys(mock_gnupghome):
+def test_push_and_fetch_keys(mock_gnupghome, tmp_path):
testpath = str(mock_gnupghome)
mirror = os.path.join(testpath, "mirror")
@@ -357,7 +357,7 @@ def test_push_and_fetch_keys(mock_gnupghome):
assert len(keys) == 1
fpr = keys[0]
- bindist.push_keys(mirror, keys=[fpr], regenerate_index=True)
+ bindist.push_keys(mirror, keys=[fpr], tmpdir=str(tmp_path), update_index=True)
# dir 2: import the key from the mirror, and confirm that its fingerprint
# matches the one created above
@@ -464,7 +464,7 @@ def test_generate_index_missing(monkeypatch, tmpdir, mutable_config):
assert "libelf" not in cache_list
-def test_generate_key_index_failure(monkeypatch):
+def test_generate_key_index_failure(monkeypatch, tmp_path):
def list_url(url, recursive=False):
if "fails-listing" in url:
raise Exception("Couldn't list the directory")
@@ -477,13 +477,13 @@ def test_generate_key_index_failure(monkeypatch):
monkeypatch.setattr(web_util, "push_to_url", push_to_url)
with pytest.raises(CannotListKeys, match="Encountered problem listing keys"):
- bindist.generate_key_index("s3://non-existent/fails-listing")
+ bindist.generate_key_index("s3://non-existent/fails-listing", str(tmp_path))
with pytest.raises(GenerateIndexError, match="problem pushing .* Couldn't upload"):
- bindist.generate_key_index("s3://non-existent/fails-uploading")
+ bindist.generate_key_index("s3://non-existent/fails-uploading", str(tmp_path))
-def test_generate_package_index_failure(monkeypatch, capfd):
+def test_generate_package_index_failure(monkeypatch, tmp_path, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Some HTTP error")
@@ -492,15 +492,16 @@ def test_generate_package_index_failure(monkeypatch, capfd):
test_url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
- bindist.generate_package_index(test_url)
+ bindist.generate_package_index(test_url, str(tmp_path))
assert (
- f"Warning: Encountered problem listing packages at {test_url}: Some HTTP error"
+ "Warning: Encountered problem listing packages at "
+ f"{test_url}/{bindist.BUILD_CACHE_RELATIVE_PATH}: Some HTTP error"
in capfd.readouterr().err
)
-def test_generate_indices_exception(monkeypatch, capfd):
+def test_generate_indices_exception(monkeypatch, tmp_path, capfd):
def mock_list_url(url, recursive=False):
raise Exception("Test Exception handling")
@@ -509,10 +510,10 @@ def test_generate_indices_exception(monkeypatch, capfd):
url = "file:///fake/keys/dir"
with pytest.raises(GenerateIndexError, match=f"Encountered problem listing keys at {url}"):
- bindist.generate_key_index(url)
+ bindist.generate_key_index(url, str(tmp_path))
with pytest.raises(GenerateIndexError, match="Unable to generate package index"):
- bindist.generate_package_index(url)
+ bindist.generate_package_index(url, str(tmp_path))
assert f"Encountered problem listing packages at {url}" in capfd.readouterr().err
diff --git a/lib/spack/spack/test/build_distribution.py b/lib/spack/spack/test/build_distribution.py
index 6ab68659e6..4ad621ab0d 100644
--- a/lib/spack/spack/test/build_distribution.py
+++ b/lib/spack/spack/test/build_distribution.py
@@ -13,34 +13,34 @@ import spack.main
import spack.spec
import spack.util.url
-install = spack.main.SpackCommand("install")
-
pytestmark = pytest.mark.not_on_windows("does not run on windows")
-def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmpdir):
- with tmpdir.as_cwd():
- spec = spack.spec.Spec("trivial-install-test-package").concretized()
- install(str(spec))
-
- # Runs fine the first time, throws the second time
- out_url = spack.util.url.path_to_file_url(str(tmpdir))
- bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
- with pytest.raises(bd.NoOverwriteException):
- bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
-
- # Should work fine with force=True
- bd.push_or_raise(spec, out_url, bd.PushOptions(force=True, unsigned=True))
-
- # Remove the tarball and try again.
- # This must *also* throw, because of the existing .spec.json file
- os.remove(
- os.path.join(
- bd.build_cache_prefix("."),
- bd.tarball_directory_name(spec),
- bd.tarball_name(spec, ".spack"),
- )
- )
-
- with pytest.raises(bd.NoOverwriteException):
- bd.push_or_raise(spec, out_url, bd.PushOptions(unsigned=True))
+def test_build_tarball_overwrite(install_mockery, mock_fetch, monkeypatch, tmp_path):
+ spec = spack.spec.Spec("trivial-install-test-package").concretized()
+ spec.package.do_install(fake=True)
+
+ specs = [spec]
+
+ # Runs fine the first time; the second time it's a no-op
+ out_url = spack.util.url.path_to_file_url(str(tmp_path))
+ skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+ assert not skipped
+
+ skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+ assert skipped == specs
+
+ # Should work fine with force=True
+ skipped = bd.push_or_raise(specs, out_url, signing_key=None, force=True)
+ assert not skipped
+
+ # Remove the tarball, which should cause push to upload it again.
+ os.remove(
+ tmp_path
+ / bd.BUILD_CACHE_RELATIVE_PATH
+ / bd.tarball_directory_name(spec)
+ / bd.tarball_name(spec, ".spack")
+ )
+
+ skipped = bd.push_or_raise(specs, out_url, signing_key=None)
+ assert not skipped
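
As a side note, the path this test removes follows the build cache layout helpers referenced above; a small sketch of locating a spec's tarball under a local mirror root (the `mirror_root` argument is hypothetical):

import os

import spack.binary_distribution as bd

def tarball_path(mirror_root: str, spec) -> str:
    # <mirror root>/<build cache prefix>/<per-spec directory>/<tarball>.spack
    return os.path.join(
        mirror_root,
        bd.BUILD_CACHE_RELATIVE_PATH,
        bd.tarball_directory_name(spec),
        bd.tarball_name(spec, ".spack"),
    )
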
diff --git a/lib/spack/spack/test/ci.py b/lib/spack/spack/test/ci.py
index f2cca09c5a..9eb0d45d2d 100644
--- a/lib/spack/spack/test/ci.py
+++ b/lib/spack/spack/test/ci.py
@@ -286,7 +286,7 @@ def test_ci_process_command_fail(repro_dir, monkeypatch):
def test_ci_create_buildcache(tmpdir, working_env, config, mock_packages, monkeypatch):
"""Test that create_buildcache returns a list of objects with the correct
keys and types."""
- monkeypatch.setattr(spack.ci, "_push_to_build_cache", lambda a, b, c: True)
+ monkeypatch.setattr(ci, "push_to_build_cache", lambda a, b, c: True)
results = ci.create_buildcache(
None, destination_mirror_urls=["file:///fake-url-one", "file:///fake-url-two"]
diff --git a/lib/spack/spack/test/cmd/buildcache.py b/lib/spack/spack/test/cmd/buildcache.py
index eee8c160f1..fb56bb7335 100644
--- a/lib/spack/spack/test/cmd/buildcache.py
+++ b/lib/spack/spack/test/cmd/buildcache.py
@@ -384,11 +384,14 @@ def test_correct_specs_are_pushed(
packages_to_push = []
- def fake_push(node, push_url, options):
- assert isinstance(node, Spec)
- packages_to_push.append(node.name)
-
- monkeypatch.setattr(spack.binary_distribution, "push_or_raise", fake_push)
+ def fake_push(specs, *args, **kwargs):
+ assert all(isinstance(s, Spec) for s in specs)
+ packages_to_push.extend(s.name for s in specs)
+ skipped = []
+ errors = []
+ return skipped, errors
+
+ monkeypatch.setattr(spack.binary_distribution, "_push", fake_push)
buildcache_create_args = ["create", "--unsigned"]
diff --git a/lib/spack/spack/test/cmd/ci.py b/lib/spack/spack/test/cmd/ci.py
index c101805032..1747c4a26a 100644
--- a/lib/spack/spack/test/cmd/ci.py
+++ b/lib/spack/spack/test/cmd/ci.py
@@ -797,7 +797,7 @@ def test_ci_rebuild_mock_failure_to_push(
def mock_success(*args, **kwargs):
return 0
- monkeypatch.setattr(spack.ci, "process_command", mock_success)
+ monkeypatch.setattr(ci, "process_command", mock_success)
# Mock failure to push to the build cache
def mock_push_or_raise(*args, **kwargs):
@@ -1256,15 +1256,15 @@ spack:
def test_push_to_build_cache_exceptions(monkeypatch, tmp_path, capsys):
- def _push_to_build_cache(spec, sign_binaries, mirror_url):
- raise Exception("Error: Access Denied")
+ def push_or_raise(*args, **kwargs):
+ raise spack.binary_distribution.PushToBuildCacheError("Error: Access Denied")
- monkeypatch.setattr(spack.ci, "_push_to_build_cache", _push_to_build_cache)
+ monkeypatch.setattr(spack.binary_distribution, "push_or_raise", push_or_raise)
# Input doesn't matter, as we are faking exceptional output
url = tmp_path.as_uri()
ci.push_to_build_cache(None, url, None)
- assert f"Permission problem writing to {url}" in capsys.readouterr().err
+ assert f"Problem writing to {url}: Error: Access Denied" in capsys.readouterr().err
@pytest.mark.parametrize("match_behavior", ["first", "merge"])
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index 02b0c31e0b..b141583031 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -612,9 +612,7 @@ def test_install_from_binary_with_missing_patch_succeeds(
# Push it to a binary cache
build_cache = tmp_path / "my_build_cache"
binary_distribution.push_or_raise(
- s,
- build_cache.as_uri(),
- binary_distribution.PushOptions(unsigned=True, regenerate_index=True),
+ [s], out_url=build_cache.as_uri(), signing_key=None, force=False
)
# Now re-install it.
diff --git a/lib/spack/spack/test/oci/integration_test.py b/lib/spack/spack/test/oci/integration_test.py
index cefd95f92d..fdb149c9f3 100644
--- a/lib/spack/spack/test/oci/integration_test.py
+++ b/lib/spack/spack/test/oci/integration_test.py
@@ -15,6 +15,7 @@ from contextlib import contextmanager
import pytest
+import spack.binary_distribution
import spack.cmd.buildcache
import spack.database
import spack.environment as ev
@@ -294,8 +295,8 @@ def test_uploading_with_base_image_in_docker_image_manifest_v2_format(
def test_best_effort_upload(mutable_database: spack.database.Database, monkeypatch):
"""Failure to upload a blob or manifest should not prevent others from being uploaded"""
- _push_blob = spack.cmd.buildcache._push_single_spack_binary_blob
- _push_manifest = spack.cmd.buildcache._put_manifest
+ _push_blob = spack.binary_distribution._oci_push_pkg_blob
+ _push_manifest = spack.binary_distribution._oci_put_manifest
def push_blob(image_ref, spec, tmpdir):
# fail to upload the blob of mpich
@@ -311,8 +312,8 @@ def test_best_effort_upload(mutable_database: spack.database.Database, monkeypat
base_images, checksums, image_ref, tmpdir, extra_config, annotations, *specs
)
- monkeypatch.setattr(spack.cmd.buildcache, "_push_single_spack_binary_blob", push_blob)
- monkeypatch.setattr(spack.cmd.buildcache, "_put_manifest", put_manifest)
+ monkeypatch.setattr(spack.binary_distribution, "_oci_push_pkg_blob", push_blob)
+ monkeypatch.setattr(spack.binary_distribution, "_oci_put_manifest", put_manifest)
registry = InMemoryOCIRegistry("example.com")
with oci_servers(registry):
diff --git a/lib/spack/spack/util/gpg.py b/lib/spack/spack/util/gpg.py
index 7c5218e236..99263d22d5 100644
--- a/lib/spack/spack/util/gpg.py
+++ b/lib/spack/spack/util/gpg.py
@@ -7,6 +7,7 @@ import errno
import functools
import os
import re
+from typing import List
import llnl.util.filesystem
@@ -124,8 +125,8 @@ def gnupghome_override(dir):
SOCKET_DIR, GNUPGHOME = _SOCKET_DIR, _GNUPGHOME
-def _parse_secret_keys_output(output):
- keys = []
+def _parse_secret_keys_output(output: str) -> List[str]:
+ keys: List[str] = []
found_sec = False
for line in output.split("\n"):
if found_sec:
@@ -195,9 +196,10 @@ Expire-Date: %(expires)s
@_autoinit
-def signing_keys(*args):
+def signing_keys(*args) -> List[str]:
"""Return the keys that can be used to sign binaries."""
- output = GPG("--list-secret-keys", "--with-colons", "--fingerprint", *args, output=str)
+ assert GPG
+ output: str = GPG("--list-secret-keys", "--with-colons", "--fingerprint", *args, output=str)
return _parse_secret_keys_output(output)
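
For completeness, a minimal sketch of consuming the now-typed signing_keys() helper; the warning text is illustrative only:

import spack.util.gpg

def have_signing_key() -> bool:
    # signing_keys() returns the fingerprints of secret keys usable for signing.
    fingerprints = spack.util.gpg.signing_keys()
    if not fingerprints:
        print("No GPG signing keys found; pushes would be unsigned")
    return bool(fingerprints)
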
diff --git a/share/spack/spack-completion.bash b/share/spack/spack-completion.bash
index 0e946f3f18..139715fef7 100644
--- a/share/spack/spack-completion.bash
+++ b/share/spack/spack-completion.bash
@@ -1272,7 +1272,7 @@ _spack_gpg_export() {
_spack_gpg_publish() {
if $list_options
then
- SPACK_COMPREPLY="-h --help -d --directory -m --mirror-name --mirror-url --rebuild-index"
+ SPACK_COMPREPLY="-h --help -d --directory -m --mirror-name --mirror-url --update-index --rebuild-index"
else
_keys
fi
diff --git a/share/spack/spack-completion.fish b/share/spack/spack-completion.fish
index 6ec431feca..ae8f16e7b9 100644
--- a/share/spack/spack-completion.fish
+++ b/share/spack/spack-completion.fish
@@ -1908,7 +1908,7 @@ complete -c spack -n '__fish_spack_using_command gpg export' -l secret -f -a sec
complete -c spack -n '__fish_spack_using_command gpg export' -l secret -d 'export secret keys'
# spack gpg publish
-set -g __fish_spack_optspecs_spack_gpg_publish h/help d/directory= m/mirror-name= mirror-url= rebuild-index
+set -g __fish_spack_optspecs_spack_gpg_publish h/help d/directory= m/mirror-name= mirror-url= update-index
complete -c spack -n '__fish_spack_using_command_pos_remainder 0 gpg publish' -f -a '(__fish_spack_gpg_keys)'
complete -c spack -n '__fish_spack_using_command gpg publish' -s h -l help -f -a help
complete -c spack -n '__fish_spack_using_command gpg publish' -s h -l help -d 'show this help message and exit'
@@ -1918,8 +1918,8 @@ complete -c spack -n '__fish_spack_using_command gpg publish' -s m -l mirror-nam
complete -c spack -n '__fish_spack_using_command gpg publish' -s m -l mirror-name -r -d 'name of the mirror where keys will be published'
complete -c spack -n '__fish_spack_using_command gpg publish' -l mirror-url -r -f -a mirror_url
complete -c spack -n '__fish_spack_using_command gpg publish' -l mirror-url -r -d 'URL of the mirror where keys will be published'
-complete -c spack -n '__fish_spack_using_command gpg publish' -l rebuild-index -f -a rebuild_index
-complete -c spack -n '__fish_spack_using_command gpg publish' -l rebuild-index -d 'regenerate buildcache key index after publishing key(s)'
+complete -c spack -n '__fish_spack_using_command gpg publish' -l update-index -l rebuild-index -f -a update_index
+complete -c spack -n '__fish_spack_using_command gpg publish' -l update-index -l rebuild-index -d 'regenerate buildcache key index after publishing key(s)'
# spack graph
set -g __fish_spack_optspecs_spack_graph h/help a/ascii d/dot s/static c/color i/installed deptype=