# -*- mode:python -*-
-
+#
+# Copyright (c) 2016 ARM Limited
+# All rights reserved
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# All rights reserved.
#
#
# Authors: Steve Reinhardt
# Kevin Lim
+# Andreas Sandberg
+
+from __future__ import print_function
+from SCons.Script.SConscript import SConsEnvironment
import os
+import pickle
import sys
-import glob
-from SCons.Script.SConscript import SConsEnvironment
+
+sys.path.insert(0, Dir(".").srcnode().abspath)
+import testing.tests as tests
+import testing.results as results
+from gem5_scons.util import get_termcap
Import('env')
-env['DIFFOUT'] = File('diff-out')
+# Terminal capability object used to colorize test status output
+termcap = get_termcap()
# Dict that accumulates lists of tests by category (quick, medium, long)
env.Tests = {}
+gpu_isa = env['TARGET_GPU_ISA'] if env['BUILD_GPU'] else None
+for cat in tests.all_categories:
+ env.Tests[cat] = tuple(
+ tests.get_tests(env["TARGET_ISA"],
+ categories=(cat, ),
+ ruby_protocol=env["PROTOCOL"],
+ gpu_isa=gpu_isa))
-def contents(node):
- return file(str(node)).read()
+def color_message(color, msg):
+ return color + msg + termcap.Normal
-def check_test(target, source, env):
- """Check output from running test.
+def run_test(target, source, env):
+ """Run a test and produce results as a pickle file.
Targets are as follows:
- target[0] : outdiff
- target[1] : statsdiff
- target[2] : status
+ target[0] : Pickle file
+
+ Sources are:
+ source[0] : gem5 binary
+ source[1] : tests/run.py script
+ source[2:] : reference files
"""
- # make sure target files are all gone
- for t in target:
- if os.path.exists(t.abspath):
- Execute(Delete(t.abspath))
- # Run diff on output & ref directories to find differences.
- # Exclude m5stats.txt since we will use diff-out on that.
- Execute(env.subst('diff -ubr ${SOURCES[0].dir} ${SOURCES[1].dir} ' +
- '-I "^command line:" ' + # for stdout file
- '-I "^M5 compiled " ' + # for stderr file
- '-I "^M5 started " ' + # for stderr file
- '-I "^M5 executing on " ' + # for stderr file
- '-I "^Simulation complete at" ' + # for stderr file
- '-I "^Listening for" ' + # for stderr file
- '-I "listening for remote gdb" ' + # for stderr file
- '--exclude=m5stats.txt --exclude=SCCS ' +
- '--exclude=${TARGETS[0].file} ' +
- '> ${TARGETS[0]}', target=target, source=source), None)
- print "===== Output differences ====="
- print contents(target[0])
- # Run diff-out on m5stats.txt file
- status = Execute(env.subst('$DIFFOUT $SOURCES > ${TARGETS[1]}',
- target=target, source=source),
- strfunction=None)
- print "===== Statistics differences ====="
- print contents(target[1])
- # Generate status file contents based on exit status of diff-out
- if status == 0:
- status_str = "passed."
- else:
- status_str = "FAILED!"
- f = file(str(target[2]), 'w')
- print >>f, env.subst('${TARGETS[2].dir}', target=target, source=source), \
- status_str
- f.close()
- # done
+ tgt_dir = os.path.dirname(str(target[0]))
+ config = tests.ClassicConfig(*tgt_dir.split('/')[-6:])
+ test = tests.ClassicTest(source[0].abspath, tgt_dir, config,
+ timeout=5*60*60,
+ skip_diff_out=True)
+
+ for ref in test.ref_files():
+ out_file = os.path.join(tgt_dir, ref)
+ if os.path.exists(out_file):
+ env.Execute(Delete(out_file))
+
+ with open(target[0].abspath, "wb") as fout:
+ formatter = results.Pickle(fout=fout)
+ formatter.dump_suites([ test.run() ])
+
return 0
-def check_test_string(target, source, env):
- return env.subst("Comparing outputs in ${TARGETS[0].dir}.",
+def run_test_string(target, source, env):
+ return env.subst("Running test in ${TARGETS[0].dir}.",
target=target, source=source)
-testAction = env.Action(check_test, check_test_string)
+testAction = env.Action(run_test, run_test_string)
def print_test(target, source, env):
- print '***** ' + contents(source[0])
+    """Print the status of a completed test read from its pickle file.
+
+ Targets are as follows:
+ target[*] : Dummy targets
+
+ Sources are:
+ source[0] : Pickle file
+
+ """
+ with open(source[0].abspath, "rb") as fin:
+ result = pickle.load(fin)
+
+ assert len(result) == 1
+ result = result[0]
+
+ formatter = None
+ if result.skipped():
+ status = color_message(termcap.Cyan, "skipped.")
+ elif result.changed():
+ status = color_message(termcap.Yellow, "CHANGED!")
+ formatter = results.Text()
+ elif result:
+ status = color_message(termcap.Green, "passed.")
+ else:
+ status = color_message(termcap.Red, "FAILED!")
+ formatter = results.Text()
+
+ if formatter:
+ formatter.dump_suites([result])
+
+ print("***** %s: %s" % (source[0].dir, status))
return 0
-printAction = env.Action(print_test, strfunction = None)
-
-# Static vars for update_test:
-# - long-winded message about ignored sources
-ignore_msg = '''
-Note: The following file(s) will not be copied. New non-standard
- output files must be copied manually once before update_ref will
- recognize them as outputs. Otherwise they are assumed to be
- inputs and are ignored.
-'''
-# - reference files always needed
-needed_files = set(['stdout', 'stderr', 'm5stats.txt', 'config.ini'])
-# - source files we always want to ignore
-known_ignores = set(['status', 'outdiff', 'statsdiff'])
+printAction = env.Action(print_test, strfunction=None)
def update_test(target, source, env):
- """Update reference test outputs.
+ """Update test reference data
- Target is phony. First two sources are the ref & new m5stats.txt
- files, respectively. We actually copy everything in the
- respective directories except the status & diff output files.
+ Targets are as follows:
+ target[0] : Dummy file
+ Sources are:
+ source[0] : Pickle file
"""
- dest_dir = str(source[0].get_dir())
- src_dir = str(source[1].get_dir())
- dest_files = set(os.listdir(dest_dir))
- src_files = set(os.listdir(src_dir))
- # Copy all of the required files plus any existing dest files.
- wanted_files = needed_files | dest_files
- missing_files = wanted_files - src_files
- if len(missing_files) > 0:
- print " WARNING: the following file(s) are missing " \
- "and will not be updated:"
- print " ", " ,".join(missing_files)
- copy_files = wanted_files - missing_files
- warn_ignored_files = (src_files - copy_files) - known_ignores
- if len(warn_ignored_files) > 0:
- print ignore_msg,
- print " ", ", ".join(warn_ignored_files)
- for f in copy_files:
- if f in dest_files:
- print " Replacing file", f
- dest_files.remove(f)
- else:
- print " Creating new file", f
- copyAction = Copy(os.path.join(dest_dir, f), os.path.join(src_dir, f))
- copyAction.strfunction = None
- Execute(copyAction)
+
+ src_dir = os.path.dirname(str(source[0]))
+ config = tests.ClassicConfig(*src_dir.split('/')[-6:])
+ test = tests.ClassicTest(source[0].abspath, src_dir, config)
+ ref_dir = test.ref_dir
+
+ with open(source[0].abspath, "rb") as fin:
+ result = pickle.load(fin)
+
+ assert len(result) == 1
+ result = result[0]
+
+ if result.skipped():
+ print("*** %s: %s: Test skipped, not updating." %
+ (source[0].dir, color_message(termcap.Yellow, "WARNING")))
+ return 0
+ elif result:
+ print("*** %s: %s: Test successful, not updating." %
+ (source[0].dir, color_message(termcap.Green, "skipped")))
+ return 0
+ elif result.failed_run():
+ print("*** %s: %s: Test failed, not updating." %
+ (source[0].dir, color_message(termcap.Red, "ERROR")))
+ return 1
+
+ print("** Updating %s" % test)
+ test.update_ref()
+
return 0
def update_test_string(target, source, env):
- return env.subst("Updating ${SOURCES[0].dir} from ${SOURCES[1].dir}",
+ return env.subst("Updating ${SOURCES[0].dir}",
target=target, source=source)
updateAction = env.Action(update_test, update_test_string)
-def test_builder(env, ref_dir):
+def test_builder(test_tuple):
"""Define a test."""
- (category, name, _ref, isa, opsys, config) = ref_dir.split('/')
- assert(_ref == 'ref')
-
- # target path (where test output goes) is the same except without
- # the 'ref' component
- tgt_dir = os.path.join(category, name, isa, opsys, config)
+ out_dir = "/".join(test_tuple)
+ binary = env.M5Binary.abspath
+ test = tests.ClassicTest(binary, out_dir, test_tuple)
- # prepend file name with tgt_dir
- def tgt(f):
- return os.path.join(tgt_dir, f)
+ def tgt(name):
+ return os.path.join(out_dir, name)
- ref_stats = os.path.join(ref_dir, 'm5stats.txt')
- new_stats = tgt('m5stats.txt')
- status_file = tgt('status')
+ def ref(name):
+ return os.path.join(test.ref_dir, name)
- # Base command for running test. We mess around with indirectly
- # referring to files via SOURCES and TARGETS so that scons can
- # mess with paths all it wants to and we still get the right
- # files.
- cmd = '${SOURCES[0]} -d $TARGET.dir'
- cmd += ' -re --stdout-file stdout --stderr-file stderr'
- cmd += ' ${SOURCES[1]} %s' % tgt_dir
+ pickle_file = tgt("status.pickle")
+ targets = [
+ pickle_file,
+ ]
- # Prefix test run with batch job submission command if appropriate.
- # Batch command also supports timeout arg (in seconds, not minutes).
- timeout = 15 * 60 # used to be a param, probably should be again
- if env['BATCH']:
- cmd = '%s -t %d %s' % (env['BATCH_CMD'], timeout, cmd)
+ sources = [
+ env.M5Binary,
+ "run.py",
+ ] + [ ref(f) for f in test.ref_files() ]
- env.Command([tgt('stdout'), tgt('stderr'), new_stats],
- [env.M5Binary, 'run.py'], cmd)
-
- # order of targets is important... see check_test
- env.Command([tgt('outdiff'), tgt('statsdiff'), status_file],
- [ref_stats, new_stats],
- testAction)
+ env.Command(targets, sources, testAction)
# phony target to echo status
- if env['update_ref']:
- p = env.Command(tgt('_update'),
- [ref_stats, new_stats, status_file],
- updateAction)
+ if GetOption('update_ref'):
+ p = env.Command(tgt("_update"), [pickle_file], updateAction)
else:
- p = env.Command(tgt('_print'), [status_file], printAction)
+ p = env.Command(tgt("_print"), [pickle_file], printAction)
env.AlwaysBuild(p)
+def list_tests(target, source, env):
+ """Create a list of tests
+
+ Targets are as follows:
+ target[0] : List file (e.g., tests/opt/all.list, tests/opt/quick.list)
+
+ Sources are: -
+
+ """
+
+ tgt_name = os.path.basename(str(target[0]))
+ base, ext = os.path.splitext(tgt_name)
+ categories = tests.all_categories if base == "all" else (base, )
+
+ with open(target[0].abspath, "w") as fout:
+ for cat in categories:
+ for test in env.Tests[cat]:
+ print("/".join(test), file=fout)
+
+ return 0
+
+testListAction = env.Action(list_tests, strfunction=None)
-# Figure out applicable configs based on build type
-configs = []
-if env['FULL_SYSTEM']:
- if env['TARGET_ISA'] == 'alpha':
- if not env['ALPHA_TLASER']:
- configs += ['tsunami-simple-atomic',
- 'tsunami-simple-timing',
- 'tsunami-simple-atomic-dual',
- 'tsunami-simple-timing-dual',
- 'twosys-tsunami-simple-atomic']
- if env['TARGET_ISA'] == 'sparc':
- configs += ['t1000-simple-atomic',
- 't1000-simple-timing']
-
-else:
- configs += ['simple-atomic', 'simple-timing', 'o3-timing', 'memtest']
-
-cwd = os.getcwd()
-os.chdir(str(Dir('.').srcdir))
-for config in configs:
- dirs = glob.glob('*/*/ref/%s/*/%s' % (env['TARGET_ISA'], config))
- for d in dirs:
- test_builder(env, d)
-os.chdir(cwd)
+env.Command("all.list", tuple(), testListAction)
+for cat, test_list in env.Tests.items():
+ env.Command("%s.list" % cat, tuple(), testListAction)
+ for test in test_list:
+ test_builder(test)