author     alalazo <massimiliano.culpo@googlemail.com>   2016-04-27 13:56:32 +0200
committer  alalazo <massimiliano.culpo@googlemail.com>   2016-04-27 13:56:32 +0200
commit     12dbd65f4cbf3218155b86b5f9b1281198a2453d (patch)
tree       218d378ab41849d07d997dd10c440151da88e96e /lib
parent     99b52e6e719efcee14cbba0a338c6593aad53422 (diff)
test-install: first draft that works
Diffstat (limited to 'lib')
-rw-r--r--  lib/spack/spack/cmd/test-install.py | 249
1 file changed, 125 insertions(+), 124 deletions(-)
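The command being modified is spack test-install, which installs a spec and records each build as a JUnit test case. A rough invocation, inferred from the argument handling below (the positional package argument is visible in the diff; the --output flag spelling is an assumption based on args.output):

    # Hypothetical invocation; the --output flag spelling is assumed.
    spack test-install --output=report.xml mpileaks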
diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py
index 733b58f252..c214cd3f66 100644
--- a/lib/spack/spack/cmd/test-install.py
+++ b/lib/spack/spack/cmd/test-install.py
@@ -24,10 +24,9 @@
##############################################################################
import argparse
import codecs
-import itertools
import os
-import re
import time
+import xml.dom.minidom
import xml.etree.ElementTree as ET
import llnl.util.tty as tty
@@ -58,54 +57,72 @@ def setup_parser(subparser):
subparser.add_argument('package', nargs=argparse.REMAINDER, help="spec of package to install")
-class JunitResultFormat(object):
- def __init__(self):
- self.root = ET.Element('testsuite')
- self.tests = []
-
- def add_test(self, buildId, testResult, buildInfo=None):
- self.tests.append((buildId, testResult, buildInfo))
-
- def write_to(self, stream):
- self.root.set('tests', '{0}'.format(len(self.tests)))
- for buildId, testResult, buildInfo in self.tests:
- testcase = ET.SubElement(self.root, 'testcase')
- testcase.set('classname', buildId.name)
- testcase.set('name', buildId.stringId())
- if testResult == TestResult.FAILED:
- failure = ET.SubElement(testcase, 'failure')
- failure.set('type', "Build Error")
- failure.text = buildInfo
- elif testResult == TestResult.SKIPPED:
- skipped = ET.SubElement(testcase, 'skipped')
- skipped.set('type', "Skipped Build")
- skipped.text = buildInfo
- ET.ElementTree(self.root).write(stream)
-
-
class TestResult(object):
PASSED = 0
FAILED = 1
SKIPPED = 2
+ ERRORED = 3
-class BuildId(object):
- def __init__(self, spec):
- self.name = spec.name
- self.version = spec.version
- self.hashId = spec.dag_hash()
-
- def stringId(self):
- return "-".join(str(x) for x in (self.name, self.version, self.hashId))
-
- def __hash__(self):
- return hash((self.name, self.version, self.hashId))
-
- def __eq__(self, other):
- if not isinstance(other, BuildId):
- return False
+class TestSuite(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self.root = ET.Element('testsuite')
+ self.tests = []
- return ((self.name, self.version, self.hashId) == (other.name, other.version, other.hashId))
+ def __enter__(self):
+ return self
+
+ def append(self, item):
+ if not isinstance(item, TestCase):
+ raise TypeError('only TestCase instances may be appended to a TestSuite instance')
+ self.tests.append(item)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Prepare the header for the entire test suite
+ number_of_errors = sum(x.result_type == TestResult.ERRORED for x in self.tests)
+ self.root.set('errors', str(number_of_errors))
+ number_of_failures = sum(x.result_type == TestResult.FAILED for x in self.tests)
+ self.root.set('failures', str(number_of_failures))
+ self.root.set('tests', str(len(self.tests)))
+
+ for item in self.tests:
+ self.root.append(item.element)
+
+ with open(self.filename, 'wb') as file:
+ xml_string = ET.tostring(self.root)
+ xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()
+ file.write(xml_string)
+
+
+class TestCase(object):
+
+ results = {
+ TestResult.PASSED: None,
+ TestResult.SKIPPED: 'skipped',
+ TestResult.FAILED: 'failure',
+ TestResult.ERRORED: 'error',
+ }
+
+ def __init__(self, classname, name, time=None):
+ self.element = ET.Element('testcase')
+ self.element.set('classname', str(classname))
+ self.element.set('name', str(name))
+ if time is not None:
+ self.element.set('time', str(time))
+ self.result_type = None
+
+ def set_result(self, result_type, message=None, error_type=None, text=None):
+ self.result_type = result_type
+ result = TestCase.results[self.result_type]
+ if result is not None:
+ subelement = ET.SubElement(self.element, result)
+ if error_type is not None:
+ subelement.set('type', error_type)
+ if message is not None:
+ subelement.set('message', str(message))
+ if text is not None:
+ subelement.text = text
def fetch_log(path):
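Taken together, TestSuite acts as a context manager that collects TestCase objects and, on exit, stamps the error/failure/test counts on the root element and writes pretty-printed JUnit XML. A minimal usage sketch (package names, timings and messages are made-up placeholders):

    # Sketch only: relies on the TestSuite / TestCase classes added above.
    with TestSuite('report.xml') as suite:
        good = TestCase('libelf', 'libelf@0.8.13', time=12.3)
        good.set_result(TestResult.PASSED)  # PASSED maps to None: no subelement
        suite.append(good)

        bad = TestCase('mpileaks', 'mpileaks@1.0', time=3.4)
        bad.set_result(TestResult.FAILED,
                       message='Installation failure',
                       text='...last lines of the build log...')
        suite.append(bad)
    # On __exit__ the suite writes, pretty-printed:
    #   <testsuite errors="0" failures="1" tests="2"> ... </testsuite>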
@@ -116,39 +133,7 @@ def fetch_log(path):
def failed_dependencies(spec):
- return set(childSpec for childSpec in spec.dependencies.itervalues() if not spack.repo.get(childSpec).installed)
-
-
-def create_test_output(top_spec, newInstalls, output, getLogFunc=fetch_log):
- # Post-order traversal is not strictly required but it makes sense to output
- # tests for dependencies first.
- for spec in top_spec.traverse(order='post'):
- if spec not in newInstalls:
- continue
-
- failedDeps = failed_dependencies(spec)
- package = spack.repo.get(spec)
- if failedDeps:
- result = TestResult.SKIPPED
- dep = iter(failedDeps).next()
- depBID = BuildId(dep)
- errOutput = "Skipped due to failed dependency: {0}".format(depBID.stringId())
- elif (not package.installed) and (not package.stage.source_path):
- result = TestResult.FAILED
- errOutput = "Failure to fetch package resources."
- elif not package.installed:
- result = TestResult.FAILED
- lines = getLogFunc(package.build_log_path)
- errMessages = list(line for line in lines if re.search('error:', line, re.IGNORECASE))
- errOutput = errMessages if errMessages else lines[-10:]
- errOutput = '\n'.join(itertools.chain([spec.to_yaml(), "Errors:"], errOutput, ["Build Log:",
- package.build_log_path]))
- else:
- result = TestResult.PASSED
- errOutput = None
-
- bId = BuildId(spec)
- output.add_test(bId, result, errOutput)
+ return set(item for item in spec.dependencies.itervalues() if not spack.repo.get(item).installed)
def get_top_spec_or_die(args):
@@ -159,6 +144,62 @@ def get_top_spec_or_die(args):
return top_spec
+def install_single_spec(spec, number_of_jobs):
+ package = spack.repo.get(spec)
+
+ # If it is already installed, skip the test
+ if spack.repo.get(spec).installed:
+ testcase = TestCase(package.name, package.spec.short_spec, time=0.0)
+ testcase.set_result(TestResult.SKIPPED, message='Skipped [already installed]', error_type='already_installed')
+ return testcase
+
+ # If any of its dependencies failed to install, skip it
+ if failed_dependencies(spec):
+ testcase = TestCase(package.name, package.spec.short_spec, time=0.0)
+ testcase.set_result(TestResult.SKIPPED, message='Skipped [failed dependencies]', error_type='dep_failed')
+ return testcase
+
+ # Otherwise try to install the spec
+ try:
+ start_time = time.time()
+ package.do_install(keep_prefix=False,
+ keep_stage=True,
+ ignore_deps=False,
+ make_jobs=number_of_jobs,
+ verbose=True,
+ fake=False)
+ duration = time.time() - start_time
+ testcase = TestCase(package.name, package.spec.short_spec, duration)
+ except InstallError:
+ # An InstallError is considered a failure (the recipe didn't work correctly)
+ duration = time.time() - start_time
+ # Try to get the log
+ lines = fetch_log(package.build_log_path)
+ text = '\n'.join(lines)
+ testcase = TestCase(package.name, package.spec.short_spec, duration)
+ testcase.set_result(TestResult.FAILED, message='Installation failure', text=text)
+
+ except FetchError:
+ # A FetchError is considered an error (we didn't even start building)
+ duration = time.time() - start_time
+ testcase = TestCase(package.name, package.spec.short_spec, duration)
+ testcase.set_result(TestResult.ERRORED, message='Unable to fetch package')
+
+ return testcase
+
+
+def get_filename(args, top_spec):
+ if not args.output:
+ fname = 'test-{x.name}-{x.version}-{hash}.xml'.format(x=top_spec, hash=top_spec.dag_hash())
+ output_directory = join_path(os.getcwd(), 'test-output')
+ if not os.path.exists(output_directory):
+ os.mkdir(output_directory)
+ output_filename = join_path(output_directory, fname)
+ else:
+ output_filename = args.output
+ return output_filename
+
+
def test_install(parser, args):
# Check the input
if not args.package:
@@ -173,51 +214,11 @@ def test_install(parser, args):
# Get the one and only top spec
top_spec = get_top_spec_or_die(args)
-
- if not args.output:
- bId = BuildId(top_spec)
- outputDir = join_path(os.getcwd(), "test-output")
- if not os.path.exists(outputDir):
- os.mkdir(outputDir)
- outputFpath = join_path(outputDir, "test-{0}.xml".format(bId.stringId()))
- else:
- outputFpath = args.output
-
- new_installs = set()
- for spec in top_spec.traverse(order='post'):
- # Calling do_install for the top-level package would be sufficient but
- # this attempts to keep going if any package fails (other packages which
- # are not dependents may succeed)
- package = spack.repo.get(spec)
-
- if not package.installed:
- new_installs.add(spec)
-
- duration = 0.0
- if (not failed_dependencies(spec)) and (not package.installed):
- try:
- start_time = time.time()
- package.do_install(keep_prefix=False,
- keep_stage=True,
- ignore_deps=False,
- make_jobs=args.jobs,
- verbose=True,
- fake=False)
- duration = time.time() - start_time
- except InstallError:
- pass
- except FetchError:
- pass
-
- jrf = JunitResultFormat()
- handled = {}
- create_test_output(top_spec, new_installs, jrf)
-
- with open(outputFpath, 'wb') as F:
- jrf.write_to(F)
-
- # with JunitTestSuite(filename) as test_suite:
- # for spec in top_spec.traverse(order='post'):
- # test_case = install_test(spec)
- # test_suite.append( test_case )
- #
+ # Compute the filename for the test report
+ output_filename = get_filename(args, top_spec)
+ # Each spec in the DAG becomes one JUnit test case
+ with TestSuite(output_filename) as test_suite:
+ # Traverse in post-order so that dependencies are reported first
+ for spec in top_spec.traverse(order='post'):
+ test_case = install_single_spec(spec, args.jobs)
+ test_suite.append(test_case)
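The report is ordinary JUnit XML, so it can be consumed by Jenkins or inspected directly with ElementTree. A minimal sketch, assuming the default location produced by get_filename for a hypothetical top spec (name mpileaks, version 1.0, dag hash abc123):

    import xml.etree.ElementTree as ET

    # Hypothetical path: test-output/test-<name>-<version>-<dag hash>.xml
    root = ET.parse('test-output/test-mpileaks-1.0-abc123.xml').getroot()
    print('%s tests, %s failures, %s errors' % (
        root.get('tests'), root.get('failures'), root.get('errors')))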