author    Scott Wittenburg <scott.wittenburg@kitware.com>  2019-05-28 17:21:55 -0600
committer Todd Gamblin <tgamblin@llnl.gov>  2019-09-13 22:57:15 -0700
commit    fabbb3d58aca35dda337141e56d3f18289204125 (patch)
tree      e31821b4db9cc5057961a70c24f7ff4deddfbd87 /lib
parent    73c161b7de4de6461bec03187ae03bf72bb3f39c (diff)
Refactor release-jobs cmd based on use of environments (no docker either)
Diffstat (limited to 'lib')
-rw-r--r--  lib/spack/spack/cmd/release_jobs.py             | 414
-rw-r--r--  lib/spack/spack/schema/os_container_mapping.py  |  50
2 files changed, 173 insertions(+), 291 deletions(-)
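
After this refactor, release-jobs reads its configuration from an active Spack environment rather than from CLI flags and os-container-mapping.yaml. A minimal sketch of the structure the command expects under the environment's top-level 'spack' key (only the key names come from this patch; every value is invented for illustration):

    yaml_root = {
        'gitlab-ci': {
            'mappings': [
                {'match': ['%gcc@5.5.0'],              # spec constraints to match
                 'runner-attributes': {
                     'tags': ['spack-k8s'],            # required runner tags
                     'image': 'spack/ubuntu-bionic',   # optional build image
                     'variables': {'SPACK_VERSION': 'develop'}}},  # optional
            ],
        },
        'cdash': {
            'build-group': 'Release Testing',
            'url': 'https://cdash.spack.io',
            'project': 'Spack',
            'site': 'my-ci-site',
        },
        'mirrors': {'my-mirror': 'https://mirror.example.com/build-cache'},
    }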
diff --git a/lib/spack/spack/cmd/release_jobs.py b/lib/spack/spack/cmd/release_jobs.py
index fd79acd573..e982c76562 100644
--- a/lib/spack/spack/cmd/release_jobs.py
+++ b/lib/spack/spack/cmd/release_jobs.py
@@ -3,26 +3,20 @@
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-import argparse
import json
-import os
-import shutil
-import tempfile
+import sys
-import subprocess
from jsonschema import validate, ValidationError
from six import iteritems
+from six.moves.urllib.request import build_opener, HTTPHandler, Request
+from six.moves.urllib.parse import urlencode
import llnl.util.tty as tty
-from spack.architecture import sys_type
+import spack.environment as ev
from spack.dependency import all_deptypes
-from spack.spec import Spec, CompilerSpec
-from spack.paths import spack_root
-from spack.error import SpackError
-from spack.schema.os_container_mapping import schema as mapping_schema
+from spack.spec import Spec
from spack.schema.specs_deps import schema as specs_deps_schema
-from spack.spec_set import CombinatorialSpecSet
import spack.util.spack_yaml as syaml
description = "generate release build set as .gitlab-ci.yml"
@@ -32,57 +26,101 @@ level = "long"
def setup_parser(subparser):
subparser.add_argument(
- '-s', '--spec-set', default=None,
- help="path to release spec-set yaml file")
-
- subparser.add_argument(
- '-m', '--mirror-url', default=None,
- help="url of binary mirror where builds should be pushed")
+ '-f', '--force', action='store_true', default=False,
+ help="Force re-concretization of environment first")
subparser.add_argument(
'-o', '--output-file', default=".gitlab-ci.yml",
help="path to output file to write")
subparser.add_argument(
- '-t', '--shared-runner-tag', default=None,
- help="tag to add to jobs for shared runner selection")
-
- subparser.add_argument(
'-k', '--signing-key', default=None,
help="hash of gpg key to use for package signing")
subparser.add_argument(
- '-c', '--cdash-url', default='https://cdash.spack.io',
- help="Base url of CDash instance jobs should communicate with")
-
- subparser.add_argument(
'-p', '--print-summary', action='store_true', default=False,
help="Print summary of staged jobs to standard output")
subparser.add_argument(
- '--resolve-deps-locally', action='store_true', default=False,
- help="Use only the current machine to concretize specs, " +
- "instead of iterating over items in os-container-mapping.yaml " +
- "and using docker run. Assumes the current machine architecure " +
- "is listed in the os-container-mapping.yaml config file.")
+ '-c', '--cdash-credentials', default=None,
+ help="Path to file containing CDash authentication token")
- subparser.add_argument(
- '--specs-deps-output', default='/dev/stdout',
- help="A file path to which spec deps should be written. This " +
- "argument is generally for internal use, and should not be " +
- "provided by end-users under normal conditions.")
- subparser.add_argument(
- 'specs', nargs=argparse.REMAINDER,
- help="These positional arguments are generally for internal use. " +
- "The --spec-set argument should be used to identify a yaml " +
- "file describing the set of release specs to include in the " +
- ".gitlab-ci.yml file.")
+def _create_buildgroup(opener, headers, url, project, group_name, group_type):
+ data = {
+ "newbuildgroup": group_name,
+ "project": project,
+ "type": group_type
+ }
+
+ request = Request(url, data=json.dumps(data), headers=headers)
+
+ response = opener.open(request)
+ response_code = response.getcode()
+
+ if response_code != 200 and response_code != 201:
+ print('Creating buildgroup failed (response code = {0})'.format(
+ response_code))
+ return None
+
+ response_text = response.read()
+ response_json = json.loads(response_text)
+ build_group_id = response_json['id']
+
+ return build_group_id
+
+def populate_buildgroup(job_names, group_name, project, site,
+ credentials, cdash_url, exit_on_fail=False):
+ url = "{0}/api/v1/buildgroup.php".format(cdash_url)
-def get_job_name(spec, osarch):
- return '{0} {1} {2} {3}'.format(spec.name, spec.version,
- spec.compiler, osarch)
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(credentials),
+ 'Content-Type': 'application/json',
+ }
+
+ opener = build_opener(HTTPHandler)
+
+ parent_group_id = _create_buildgroup(
+ opener, headers, url, project, group_name, 'Daily')
+ group_id = _create_buildgroup(
+ opener, headers, url, project, 'Latest {0}'.format(group_name),
+ 'Latest')
+
+ if not parent_group_id or not group_id:
+ print('Unable to create or retrieve the build groups')
+ sys.exit(1)
+
+ data = {
+ 'project': project,
+ 'buildgroupid': group_id,
+ 'dynamiclist': [{
+ 'match': name,
+ 'parentgroupid': parent_group_id,
+ 'site': site
+ } for name in job_names]
+ }
+
+ request = Request(url, data=json.dumps(data), headers=headers)
+ request.get_method = lambda: 'PUT'
+
+ response = opener.open(request)
+ response_code = response.getcode()
+
+ if response_code != 200 and exit_on_fail:
+ print('Unexpected response ({0}) when populating buildgroup'.format(
+ response_code))
+ sys.exit(1)
+
+
+def get_job_name(spec, osarch, build_group):
+ return '{0} {1} {2} {3} {4}'.format(
+ spec.name, spec.version, spec.compiler, osarch, build_group)
+
+
+def get_cdash_build_name(spec, build_group):
+ return '{0}@{1}%{2} arch={3} ({4})'.format(
+ spec.name, spec.version, spec.compiler, spec.architecture, build_group)
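
_create_buildgroup POSTs to CDash's buildgroup endpoint to create a 'Daily' parent group and a 'Latest' child group, and populate_buildgroup then PUTs a dynamic rule list that routes the named builds into them. A usage sketch (parameter names follow populate_buildgroup above; all values are placeholders):

    with open('/path/to/cdash-token') as fd:
        token = fd.read().strip()

    populate_buildgroup(
        job_names=['readline@7.0%gcc@5.5.0 arch=linux-ubuntu18.04-x86_64 (mygroup)'],
        group_name='mygroup',
        project='Spack',
        site='my-ci-site',
        credentials=token,
        cdash_url='https://cdash.spack.io')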
def get_spec_string(spec):
@@ -109,84 +147,8 @@ def _add_dependency(spec_label, dep_label, deps):
deps[spec_label].add(dep_label)
-def get_deps_using_container(specs, image):
- image_home_dir = '/home/spackuser'
- repo_mount_location = '{0}/spack'.format(image_home_dir)
- temp_dir = tempfile.mkdtemp(dir='/tmp')
-
- # The paths this module will see (from outside the container)
- temp_file = os.path.join(temp_dir, 'spec_deps.json')
- temp_err = os.path.join(temp_dir, 'std_err.log')
-
- # The paths the bash_command will see inside the container
- json_output = '/work/spec_deps.json'
- std_error = '/work/std_err.log'
-
- specs_arg = ' '.join([str(spec) for spec in specs])
-
- bash_command = " ".join(["source {0}/share/spack/setup-env.sh ;",
- "spack release-jobs",
- "--specs-deps-output {1}",
- "{2}",
- "2> {3}"]).format(
- repo_mount_location, json_output, specs_arg, std_error)
-
- docker_cmd_to_run = [
- 'docker', 'run', '--rm',
- '-v', '{0}:{1}'.format(spack_root, repo_mount_location),
- '-v', '{0}:{1}'.format(temp_dir, '/work'),
- '--entrypoint', 'bash',
- '-t', str(image),
- '-c',
- bash_command,
- ]
-
- tty.debug('Running subprocess command:')
- tty.debug(' '.join(docker_cmd_to_run))
-
- # Docker is going to merge the stdout/stderr from the script and write it
- # all to the stdout of the running container. For this reason, we won't
- # pipe any stdout/stderr from the docker command, but rather write the
- # output we care about to a file in a mounted directory. Similarly, any
- # errors from running the spack command inside the container are redirected
- # to another file in the mounted directory.
- proc = subprocess.Popen(docker_cmd_to_run)
- proc.wait()
-
- # Check for errors from spack command
- if os.path.exists(temp_err) and os.path.getsize(temp_err) > 0:
- # Spack wrote something to stderr inside the container. We will
- # print out whatever it is, but attempt to carry on with the process.
- tty.error('Encountered spack error running command in container:')
- with open(temp_err, 'r') as err:
- tty.error(err.read())
-
- spec_deps_obj = {}
-
- try:
- # Finally, try to read/parse the output we really care about: the
- # specs and dependency edges for the provided spec, as it was
- # concretized in the appropriate container.
- with open(temp_file, 'r') as fd:
- spec_deps_obj = json.loads(fd.read())
-
- except ValueError as val_err:
- tty.error('Failed to read json object from spec-deps output file:')
- tty.error(str(val_err))
- except IOError as io_err:
- tty.error('Problem reading from spec-deps json output file:')
- tty.error(str(io_err))
- finally:
- shutil.rmtree(temp_dir)
-
- return spec_deps_obj
-
-
-def get_spec_dependencies(specs, deps, spec_labels, image=None):
- if image:
- spec_deps_obj = get_deps_using_container(specs, image)
- else:
- spec_deps_obj = compute_spec_deps(specs)
+def get_spec_dependencies(specs, deps, spec_labels):
+ spec_deps_obj = compute_spec_deps(specs)
try:
validate(spec_deps_obj, specs_deps_schema)
@@ -210,7 +172,7 @@ def get_spec_dependencies(specs, deps, spec_labels, image=None):
_add_dependency(entry['spec'], entry['depends'], deps)
-def stage_spec_jobs(spec_set, containers, current_system=None):
+def stage_spec_jobs(specs):
"""Take a set of release specs along with a dictionary describing the
available docker containers and what compilers they have, and generate
a list of "stages", where the jobs in any stage are dependent only on
@@ -265,46 +227,7 @@ def stage_spec_jobs(spec_set, containers, current_system=None):
deps = {}
spec_labels = {}
- if current_system:
- if current_system not in containers:
- error_msg = ' '.join(['Current system ({0}) does not appear in',
- 'os_container_mapping.yaml, ignoring',
- 'request']).format(
- current_system)
- raise SpackError(error_msg)
- os_names = [current_system]
- else:
- os_names = [name for name in containers]
-
- container_specs = {}
- for name in os_names:
- container_specs[name] = {'image': None, 'specs': []}
-
- # Collect together all the specs that should be concretized in each
- # container so they can all be done at once, avoiding the need to
- # run the docker container for each spec separately.
- for spec in spec_set:
- for osname in os_names:
- container_info = containers[osname]
- image = None if current_system else container_info['image']
- if image:
- container_specs[osname]['image'] = image
- if 'compilers' in container_info:
- found_at_least_one = False
- for item in container_info['compilers']:
- container_compiler_spec = CompilerSpec(item['name'])
- if spec.compiler == container_compiler_spec:
- container_specs[osname]['specs'].append(spec)
- found_at_least_one = True
- if not found_at_least_one:
- tty.warn('No compiler in {0} satisfied {1}'.format(
- osname, spec.compiler))
-
- for osname in container_specs:
- if container_specs[osname]['specs']:
- image = container_specs[osname]['image']
- specs = container_specs[osname]['specs']
- get_spec_dependencies(specs, deps, spec_labels, image)
+ get_spec_dependencies(specs, deps, spec_labels)
# Save the original deps, as we need to return them at the end of the
# function. In the while loop below, the "dependencies" variable is
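
stage_spec_jobs partitions the spec DAG into stages in which every job depends only on jobs from earlier stages. A minimal sketch of that idea, assuming a simple label-to-dependencies dict rather than the full spec_labels bookkeeping used here:

    def stage_jobs(deps):
        # deps maps each job label to the set of labels it depends on.
        stages, staged = [], set()
        while len(staged) < len(deps):
            # Any job whose dependencies have all been staged can run next.
            ready = {job for job, prereqs in deps.items()
                     if job not in staged and prereqs <= staged}
            if not ready:
                raise ValueError('dependency cycle detected')
            stages.append(sorted(ready))
            staged |= ready
        return stages

    stage_jobs({'a': set(), 'b': {'a'}, 'c': {'a'}, 'd': {'b', 'c'}})
    # -> [['a'], ['b', 'c'], ['d']]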
@@ -452,49 +375,48 @@ def compute_spec_deps(spec_list, stream_like=None):
return deps_json_obj
-def release_jobs(parser, args):
- share_path = os.path.join(spack_root, 'share', 'spack', 'docker')
- os_container_mapping_path = os.path.join(
- share_path, 'os-container-mapping.yaml')
+def spec_matches(spec, match_string):
+ return spec.satisfies(match_string)
- with open(os_container_mapping_path, 'r') as fin:
- os_container_mapping = syaml.load(fin)
- try:
- validate(os_container_mapping, mapping_schema)
- except ValidationError as val_err:
- tty.error('Ill-formed os-container-mapping configuration object')
- tty.error(os_container_mapping)
- tty.debug(val_err)
- return
+def find_matching_config(spec, ci_mappings):
+ for ci_mapping in ci_mappings:
+ for match_string in ci_mapping['match']:
+ if spec_matches(spec, match_string):
+ return ci_mapping['runner-attributes']
+ return None
- containers = os_container_mapping['containers']
- if args.specs:
- # Just print out the spec labels and all dependency edges in
- # a json format.
- spec_list = [Spec(s) for s in args.specs]
- with open(args.specs_deps_output, 'w') as out:
- compute_spec_deps(spec_list, out)
- return
+def release_jobs(parser, args):
+ env = ev.get_env(args, 'release-jobs', required=True)
+ env.concretize(force=args.force)
- current_system = sys_type() if args.resolve_deps_locally else None
+ yaml_root = env.yaml['spack']
- release_specs_path = args.spec_set
- if not release_specs_path:
- raise SpackError('Must provide path to release spec-set')
+ if 'gitlab-ci' not in yaml_root:
+ tty.die('Environment yaml does not have "gitlab-ci" section')
- release_spec_set = CombinatorialSpecSet.from_file(release_specs_path)
+ ci_mappings = yaml_root['gitlab-ci']['mappings']
- mirror_url = args.mirror_url
+ ci_cdash = yaml_root['cdash']
+ build_group = ci_cdash['build-group']
+ cdash_url = ci_cdash['url']
+ cdash_project = ci_cdash['project']
+ proj_enc = urlencode({'project': cdash_project})
+ eq_idx = proj_enc.find('=') + 1
+ cdash_project_enc = proj_enc[eq_idx:]
+ cdash_site = ci_cdash['site']
+ cdash_auth_token = None
- if not mirror_url:
- raise SpackError('Must provide url of target binary mirror')
+ if args.cdash_credentials:
+ with open(args.cdash_credentials) as fd:
+ cdash_auth_token = fd.read()
+ cdash_auth_token = cdash_auth_token.strip()
- cdash_url = args.cdash_url
+ ci_mirrors = yaml_root['mirrors']
+ mirror_urls = ci_mirrors.values()
- spec_labels, dependencies, stages = stage_spec_jobs(
- release_spec_set, containers, current_system)
+ spec_labels, dependencies, stages = stage_spec_jobs(env.all_specs())
if not stages:
tty.msg('No jobs staged, exiting.')
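
find_matching_config returns the 'runner-attributes' of the first mapping containing a 'match' constraint the spec satisfies, or None (which makes release_jobs skip the spec with a warning below). For example, with values invented for illustration:

    ci_mappings = [
        {'match': ['%gcc@5.5.0', 'os=ubuntu18.04'],
         'runner-attributes': {'tags': ['spack-k8s']}},
    ]
    # Any spec satisfying one of the 'match' constraints gets the attributes.
    attribs = find_matching_config(release_spec, ci_mappings)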
@@ -503,6 +425,7 @@ def release_jobs(parser, args):
if args.print_summary:
print_staging_summary(spec_labels, dependencies, stages)
+ all_job_names = []
output_object = {}
job_count = 0
@@ -516,37 +439,56 @@ def release_jobs(parser, args):
release_spec = spec_labels[spec_label]['spec']
root_spec = spec_labels[spec_label]['rootSpec']
- pkg_compiler = release_spec.compiler
- pkg_hash = release_spec.dag_hash()
+ runner_attribs = find_matching_config(release_spec, ci_mappings)
+
+ if not runner_attribs:
+ tty.warn('No match found for {0}, skipping it'.format(
+ release_spec))
+ continue
+
+ tags = [tag for tag in runner_attribs['tags']]
+
+ variables = {}
+ if 'variables' in runner_attribs:
+ variables.update(runner_attribs['variables'])
+
+ build_image = None
+ if 'image' in runner_attribs:
+ build_image = runner_attribs['image']
osname = str(release_spec.architecture)
- job_name = get_job_name(release_spec, osname)
- container_info = containers[osname]
- build_image = container_info['image']
+ job_name = get_job_name(release_spec, osname, build_group)
+ cdash_build_name = get_cdash_build_name(release_spec, build_group)
- job_scripts = ['./bin/rebuild-package.sh']
+ all_job_names.append(cdash_build_name)
- if 'setup_script' in container_info:
- job_scripts.insert(
- 0, container_info['setup_script'] % pkg_compiler)
+ job_scripts = ['./bin/rebuild-package.sh']
job_dependencies = []
if spec_label in dependencies:
job_dependencies = (
- [get_job_name(spec_labels[dep_label]['spec'], osname)
- for dep_label in dependencies[spec_label]])
+ [get_job_name(spec_labels[d]['spec'], osname, build_group)
+ for d in dependencies[spec_label]])
+
+ job_variables = {
+ 'MIRROR_URL': mirror_urls[0],
+ 'CDASH_BASE_URL': cdash_url,
+ 'CDASH_PROJECT': cdash_project,
+ 'CDASH_PROJECT_ENC': cdash_project_enc,
+ 'CDASH_BUILD_NAME': cdash_build_name,
+ 'DEPENDENCIES': ';'.join(job_dependencies),
+ 'ROOT_SPEC': str(root_spec),
+ }
+
+ if args.signing_key:
+ job_variables['SIGN_KEY_HASH'] = args.signing_key
+
+ variables.update(job_variables)
job_object = {
'stage': stage_name,
- 'variables': {
- 'MIRROR_URL': mirror_url,
- 'CDASH_BASE_URL': cdash_url,
- 'HASH': pkg_hash,
- 'DEPENDENCIES': ';'.join(job_dependencies),
- 'ROOT_SPEC': str(root_spec),
- },
+ 'variables': variables,
'script': job_scripts,
- 'image': build_image,
'artifacts': {
'paths': [
'local_mirror/build_cache',
@@ -556,51 +498,41 @@ def release_jobs(parser, args):
'when': 'always',
},
'dependencies': job_dependencies,
+ 'tags': tags,
}
- # If we see 'compilers' in the container information, it's a
- # filter for the compilers this container can handle, else we
- # assume it can handle any compiler
- if 'compilers' in container_info:
- do_job = False
- for item in container_info['compilers']:
- container_compiler_spec = CompilerSpec(item['name'])
- if pkg_compiler == container_compiler_spec:
- do_job = True
- else:
- do_job = True
-
- if args.shared_runner_tag:
- job_object['tags'] = [args.shared_runner_tag]
+ if build_image:
+ job_object['image'] = build_image
- if args.signing_key:
- job_object['variables']['SIGN_KEY_HASH'] = args.signing_key
-
- if do_job:
- output_object[job_name] = job_object
- job_count += 1
+ output_object[job_name] = job_object
+ job_count += 1
stage += 1
tty.msg('{0} build jobs generated in {1} stages'.format(
job_count, len(stages)))
- final_stage = 'stage-rebuild-index'
+ # Use "all_job_names" to populate the build group for this set
+ if cdash_auth_token:
+ populate_buildgroup(all_job_names, build_group, cdash_project,
+ cdash_site, cdash_auth_token, cdash_url)
+ else:
+ tty.warn('Unable to populate buildgroup without CDash credentials')
+ # Add an extra, final job to regenerate the index
+ final_stage = 'stage-rebuild-index'
final_job = {
'stage': final_stage,
'variables': {
- 'MIRROR_URL': mirror_url,
+ 'MIRROR_URL': mirror_urls[0],
},
- 'image': build_image,
+ 'image': 'scottwittenburg/spack_ci_generator_alpine', # just needs some basic python image
'script': './bin/rebuild-index.sh',
+ 'tags': ['spack-k8s'] # may want a runner to handle this
}
-
- if args.shared_runner_tag:
- final_job['tags'] = [args.shared_runner_tag]
-
output_object['rebuild-index'] = final_job
stage_names.append(final_stage)
+
output_object['stages'] = stage_names
with open(args.output_file, 'w') as outf:
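
Each staged spec thus becomes one entry in the emitted .gitlab-ci.yml, shaped roughly as follows (the keys mirror job_object above; every value is invented):

    output_object['readline 7.0 gcc@5.5.0 linux-ubuntu18.04-x86_64 mygroup'] = {
        'stage': 'stage-0',
        'variables': {
            'MIRROR_URL': 'https://mirror.example.com/build-cache',
            'CDASH_BASE_URL': 'https://cdash.spack.io',
            'CDASH_PROJECT': 'Spack',
            'CDASH_PROJECT_ENC': 'Spack',
            'CDASH_BUILD_NAME': 'readline@7.0%gcc@5.5.0 arch=linux-ubuntu18.04-x86_64 (mygroup)',
            'DEPENDENCIES': '',
            'ROOT_SPEC': 'readline@7.0%gcc@5.5.0 arch=linux-ubuntu18.04-x86_64',
        },
        'script': ['./bin/rebuild-package.sh'],
        'artifacts': {'paths': ['local_mirror/build_cache'], 'when': 'always'},
        'dependencies': [],
        'tags': ['spack-k8s'],
    }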
diff --git a/lib/spack/spack/schema/os_container_mapping.py b/lib/spack/spack/schema/os_container_mapping.py
deleted file mode 100644
index a69549d51d..0000000000
--- a/lib/spack/spack/schema/os_container_mapping.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-"""Schema for os-container-mapping.yaml configuration file.
-
-.. literalinclude:: _spack_root/lib/spack/spack/schema/os_container_mapping.py
- :lines: 32-
-"""
-
-
-schema = {
- '$schema': 'http://json-schema.org/schema#',
- 'title': 'Spack release builds os/container mapping config file schema',
- 'type': 'object',
- 'additionalProperties': False,
- 'patternProperties': {
- r'containers': {
- 'type': 'object',
- 'default': {},
- 'patternProperties': {
- r'[\w\d\-_\.]+': {
- 'type': 'object',
- 'default': {},
- 'additionalProperties': False,
- 'required': ['image'],
- 'properties': {
- 'image': {'type': 'string'},
- 'setup_script': {'type': 'string'},
- 'compilers': {
- 'type': 'array',
- 'default': [],
- 'items': {
- 'type': 'object',
- 'default': {},
- 'additionalProperties': False,
- 'required': ['name'],
- 'properties': {
- 'name': {'type': 'string'},
- 'path': {'type': 'string'},
- },
- },
- },
- },
- },
- },
- },
- },
-}
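
The deleted schema was enforced with jsonschema in the old release_jobs code, using the same pattern release_jobs.py still applies to specs_deps_schema. A minimal sketch of the retired check, with an invented mapping document that the schema above would have accepted:

    from jsonschema import validate, ValidationError

    mapping = {'containers': {'ubuntu18.04': {
        'image': 'spack/ubuntu:18.04',
        'compilers': [{'name': 'gcc@5.5.0'}]}}}

    try:
        validate(mapping, schema)  # 'schema' is the dict defined above
    except ValidationError as err:
        print('Ill-formed os-container-mapping configuration object:', err)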