diff options
author | Peter Scheibel <scheibel1@llnl.gov> | 2015-10-15 11:52:08 -0700 |
---|---|---|
committer | Peter Scheibel <scheibel1@llnl.gov> | 2015-10-15 11:52:08 -0700 |
commit | b9bf0b942c4e2770de65282311876d02d00e7f5b (patch) | |
tree | dbf71604fcf37381570ca820b0ad8157eee287ab /lib | |
parent | f2b4341ad6d9dc925364214863440dde29d61b50 (diff) | |
download | spack-b9bf0b942c4e2770de65282311876d02d00e7f5b.tar.gz spack-b9bf0b942c4e2770de65282311876d02d00e7f5b.tar.bz2 spack-b9bf0b942c4e2770de65282311876d02d00e7f5b.tar.xz spack-b9bf0b942c4e2770de65282311876d02d00e7f5b.zip |
Use spec.traverse vs. recursive function.
Also, even though I calculated which installs are new (i.e., vs. packages that
have already been installed by a previous command), I forgot to make use of that
in create_test_output (so I was always generating test output even if a package
had been installed before running the test-install command).
Note to avoid confusion: the 'handled' variable (removed in this commit) did not
serve the same purpose as 'newInstalls': it was originally required because the
recursive approach would visit the same dependency twice if more than one
package depended on it.
Diffstat (limited to 'lib')
-rw-r--r-- | lib/spack/spack/cmd/test-install.py | 36 |
1 files changed, 16 insertions, 20 deletions
diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py index ec413115c0..6652a204fb 100644 --- a/lib/spack/spack/cmd/test-install.py +++ b/lib/spack/spack/cmd/test-install.py @@ -81,18 +81,21 @@ class BuildId(object): return "-".join(str(x) for x in (self.name, self.version, self.hashId)) -def create_test_output(spec, handled, output): - if spec in handled: - return handled[spec] - - childSuccesses = list(create_test_output(dep, handled, output) - for dep in spec.dependencies.itervalues()) - package = spack.db.get(spec) - handled[spec] = package.installed - - if all(childSuccesses): +def create_test_output(topSpec, newInstalls, output): + # Post-order traversal is not strictly required but it makes sense to output + # tests for dependencies first. + for spec in topSpec.traverse(order='post'): + if spec not in newInstalls: + continue + + if not all(spack.db.get(childSpec).installed for childSpec in + spec.dependencies.itervalues()): + #TODO: create a failed test if a dependency didn't install? + continue + bId = BuildId(spec.name, spec.version, spec.dag_hash()) + package = spack.db.get(spec) if package.installed: buildLogPath = spack.install_layout.build_log_path(spec) else: @@ -106,10 +109,7 @@ def create_test_output(spec, handled, output): # lines. It may be better to look for errors. output.add_test(bId, package.installed, buildLogPath + '\n' + spec.to_yaml() + buildLog) - #TODO: create a failed test if a dependency didn't install? 
- - return handled[spec] - + def test_install(parser, args): if not args.package: @@ -143,14 +143,10 @@ def test_install(parser, args): verbose=True, fake=False) finally: - #Find all packages that are not a dependency of another package - topLevelNewInstalls = newInstalls - set(itertools.chain.from_iterable( - spec.dependencies for spec in newInstalls)) - jrf = JunitResultFormat() handled = {} - for spec in topLevelNewInstalls: - create_test_output(spec, handled, jrf) + for spec in specs: + create_test_output(spec, newInstalls, jrf) with open(args.output, 'wb') as F: jrf.write_to(F) |