# Authors: Steve Reinhardt
# Kevin Lim
-import os
+import os, signal
import sys
import glob
from SCons.Script.SConscript import SConsEnvironment
def contents(node):
return file(str(node)).read()
-def check_test(target, source, env):
+# functions to parse return value from scons Execute()... not the same
+# as wait() etc., so python built-in os funcs don't work.
+def signaled(status):
+    return (status & 0x80) != 0
+
+def signum(status):
+    return (status & 0x7f)
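+
+# For illustration, given the masks above: a test killed by SIGSEGV
+# (signal 11) comes back from Execute() as 0x8b, so signaled() is true
+# and signum() returns 11.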
+
+# List of signals that indicate that we should retry the test rather
+# than consider it failed.
+retry_signals = (signal.SIGTERM, signal.SIGKILL, signal.SIGINT,
+ signal.SIGQUIT, signal.SIGHUP)
+
+# regular expressions of lines to ignore when diffing outputs
+output_ignore_regexes = (
+ '^command line:', # for stdout file
+ '^M5 compiled ', # for stderr file
+ '^M5 started ', # for stderr file
+ '^M5 executing on ', # for stderr file
+ '^Simulation complete at', # for stderr file
+ '^Listening for', # for stderr file
+ 'listening for remote gdb', # for stderr file
+ )
+
+output_ignore_args = ' '.join(["-I '"+s+"'" for s in output_ignore_regexes])
+
+output_ignore_args += ' --exclude=stats.txt --exclude=outdiff'
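+
+# (diff's -I option suppresses hunks whose changed lines all match the
+# given regex; combined with --exclude, the boilerplate above never
+# shows up as a spurious difference.)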
+
+def run_test(target, source, env):
"""Check output from running test.
Targets are as follows:
- target[0] : outdiff
- target[1] : statsdiff
- target[2] : status
+ target[0] : status
+
+ Sources are:
+ source[0] : M5 binary
+ source[1] : tests/run.py script
+ source[2] : reference stats file
"""
# make sure target files are all gone
for t in target:
if os.path.exists(t.abspath):
- Execute(Delete(t.abspath))
- # Run diff on output & ref directories to find differences.
- # Exclude m5stats.txt since we will use diff-out on that.
- Execute(env.subst('diff -ubr ${SOURCES[0].dir} ${SOURCES[1].dir} ' +
- '-I "^command line:" ' + # for stdout file
- '-I "^M5 compiled on" ' + # for stderr file
- '-I "^M5 simulation started" ' + # for stderr file
- '-I "^Simulation complete at" ' + # for stderr file
- '-I "^Listening for" ' + # for stderr file
- '--exclude=m5stats.txt --exclude=SCCS ' +
- '--exclude=${TARGETS[0].file} ' +
- '> ${TARGETS[0]}', target=target, source=source), None)
- print "===== Output differences ====="
- print contents(target[0])
- # Run diff-out on m5stats.txt file
- status = Execute(env.subst('$DIFFOUT $SOURCES > ${TARGETS[1]}',
- target=target, source=source),
- strfunction=None)
- print "===== Statistics differences ====="
- print contents(target[1])
- # Generate status file contents based on exit status of diff-out
+ env.Execute(Delete(t.abspath))
+
+ tgt_dir = os.path.dirname(str(target[0]))
+
+ # Base command for running test. We mess around with indirectly
+ # referring to files via SOURCES and TARGETS so that scons can mess
+ # with paths all it wants to and we still get the right files.
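+    # For example (binary path illustrative), the substituted command
+    # looks something like:
+    #   build/ALPHA_SE/m5.opt -d <tgt_dir> -re tests/run.py <tgt_dir>
+    # where -d sets the output directory and -re redirects the simulated
+    # stdout/stderr into files there.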
+ cmd = '${SOURCES[0]} -d %s -re ${SOURCES[1]} %s' % (tgt_dir, tgt_dir)
+
+ # Prefix test run with batch job submission command if appropriate.
+ # Batch command also supports timeout arg (in seconds, not minutes).
+ timeout = 15 * 60 # used to be a param, probably should be again
+ if env['BATCH']:
+ cmd = '%s -t %d %s' % (env['BATCH_CMD'], timeout, cmd)
+
+ status = env.Execute(env.subst(cmd, target=target, source=source))
+ if status == 0:
+ # M5 terminated normally.
+ # Run diff on output & ref directories to find differences.
+ # Exclude the stats file since we will use diff-out on that.
+ outdiff = os.path.join(tgt_dir, 'outdiff')
+ diffcmd = 'diff -ubr %s ${SOURCES[2].dir} %s > %s' \
+ % (output_ignore_args, tgt_dir, outdiff)
+ env.Execute(env.subst(diffcmd, target=target, source=source))
+ print "===== Output differences ====="
+ print contents(outdiff)
+ # Run diff-out on stats.txt file
+ statsdiff = os.path.join(tgt_dir, 'statsdiff')
+ diffcmd = '$DIFFOUT ${SOURCES[2]} %s > %s' \
+ % (os.path.join(tgt_dir, 'stats.txt'), statsdiff)
+ diffcmd = env.subst(diffcmd, target=target, source=source)
+ status = env.Execute(diffcmd, strfunction=None)
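+        # 'status' now holds diff-out's exit code; it drives the
+        # passed/FAILED decision below.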
+ print "===== Statistics differences ====="
+ print contents(statsdiff)
+
+ else: # m5 exit status != 0
+ # M5 did not terminate properly, so no need to check the output
+ if signaled(status):
+ print 'M5 terminated with signal', signum(status)
+ if signum(status) in retry_signals:
+ # Consider the test incomplete; don't create a 'status' output.
+ # Hand the return status to scons and let scons decide what
+ # to do about it (typically terminate unless run with -k).
+ return status
+ else:
+ print 'M5 exited with non-zero status', status
+ # complete but failed execution (call to exit() with non-zero
+ # status, SIGABORT due to assertion failure, etc.)... fall through
+ # and generate FAILED status as if output comparison had failed
+
+ # Generate status file contents based on exit status of m5 or diff-out
if status == 0:
status_str = "passed."
else:
status_str = "FAILED!"
- f = file(str(target[2]), 'w')
- print >>f, env.subst('${TARGETS[2].dir}', target=target, source=source), \
- status_str
+ f = file(str(target[0]), 'w')
+ print >>f, tgt_dir, status_str
f.close()
# done
return 0
-def check_test_string(target, source, env):
- return env.subst("Comparing outputs in ${TARGETS[0].dir}.",
+def run_test_string(target, source, env):
+ return env.subst("Running test in ${TARGETS[0].dir}.",
target=target, source=source)
-testAction = env.Action(check_test, check_test_string)
+testAction = env.Action(run_test, run_test_string)
def print_test(target, source, env):
print '***** ' + contents(source[0])
printAction = env.Action(print_test, strfunction = None)
+# Static vars for update_test:
+# - long-winded message about ignored sources
+ignore_msg = '''
+Note: The following file(s) will not be copied. New non-standard
+ output files must be copied manually once before update_ref will
+ recognize them as outputs. Otherwise they are assumed to be
+ inputs and are ignored.
+'''
+# - reference files always needed
+needed_files = set(['simout', 'simerr', 'stats.txt', 'config.ini'])
+# - source files we always want to ignore
+known_ignores = set(['status', 'outdiff', 'statsdiff'])
+
def update_test(target, source, env):
"""Update reference test outputs.
- Target is phony. First two sources are the ref & new m5stats.txt
+    Target is phony. First two sources are the ref & new stats.txt
files, respectively. We actually copy everything in the
respective directories except the status & diff output files.
"""
dest_dir = str(source[0].get_dir())
src_dir = str(source[1].get_dir())
- dest_files = os.listdir(dest_dir)
- src_files = os.listdir(src_dir)
- # Exclude status & diff outputs
- for f in ('outdiff', 'statsdiff', 'status'):
- if f in src_files:
- src_files.remove(f)
- for f in src_files:
+ dest_files = set(os.listdir(dest_dir))
+ src_files = set(os.listdir(src_dir))
+ # Copy all of the required files plus any existing dest files.
+ wanted_files = needed_files | dest_files
+ missing_files = wanted_files - src_files
+ if len(missing_files) > 0:
+ print " WARNING: the following file(s) are missing " \
+ "and will not be updated:"
+ print " ", " ,".join(missing_files)
+ copy_files = wanted_files - missing_files
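+    # (equivalently, copy_files == wanted_files & src_files)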
+ warn_ignored_files = (src_files - copy_files) - known_ignores
+ if len(warn_ignored_files) > 0:
+ print ignore_msg,
+ print " ", ", ".join(warn_ignored_files)
+ for f in copy_files:
if f in dest_files:
print " Replacing file", f
            dest_files.remove(f)
        else:
            print "  Creating new file", f
copyAction = Copy(os.path.join(dest_dir, f), os.path.join(src_dir, f))
copyAction.strfunction = None
- Execute(copyAction)
- # warn about any files in dest not overwritten (other than SCCS dir)
- if 'SCCS' in dest_files:
- dest_files.remove('SCCS')
- if dest_files:
- print "Warning: file(s) in", dest_dir, "not updated:",
- print ', '.join(dest_files)
+ env.Execute(copyAction)
return 0
def update_test_string(target, source, env):
    return env.subst("Updating ${SOURCES[0].dir} from ${SOURCES[1].dir}.",
                     target=target, source=source)

updateAction = env.Action(update_test, update_test_string)
-def test_builder(env, category, cpu_list=[], os_list=[], refdir='ref',
- timeout=15):
- """Define a test.
+def test_builder(env, ref_dir):
+ """Define a test."""
- Args:
- category -- string describing test category (e.g., 'quick')
- cpu_list -- list of CPUs to runs this test on (blank means all compiled CPUs)
- os_list -- list of OSs to run this test on
- refdir -- subdirectory containing reference output (default 'ref')
- timeout -- test timeout in minutes (only enforced on pool)
+ (category, name, _ref, isa, opsys, config) = ref_dir.split('/')
+ assert(_ref == 'ref')
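+    # e.g. ref_dir might be 'quick/00.hello/ref/alpha/linux/simple-atomic'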
- """
+ # target path (where test output goes) is the same except without
+ # the 'ref' component
+ tgt_dir = os.path.join(category, name, isa, opsys, config)
- default_refdir = False
- if refdir == 'ref':
- default_refdir = True
- valid_cpu_list = []
- if len(cpu_list) == 0:
- valid_cpu_list = env['CPU_MODELS']
- else:
- for i in cpu_list:
- if i in env['CPU_MODELS']:
- valid_cpu_list.append(i)
- cpu_list = valid_cpu_list
- if env['TEST_CPU_MODELS']:
- valid_cpu_list = []
- for i in env['TEST_CPU_MODELS']:
- if i in cpu_list:
- valid_cpu_list.append(i)
- cpu_list = valid_cpu_list
-# Code commented out that shows the general structure if we want to test
-# different OS's as well.
-# if len(os_list) == 0:
-# for test_cpu in cpu_list:
-# build_cpu_test(env, category, '', test_cpu, refdir, timeout)
-# else:
-# for test_os in os_list:
-# for test_cpu in cpu_list:
-# build_cpu_test(env, category, test_os, test_cpu, refdir,
-# timeout)
- # Loop through CPU models and generate proper options, ref directories
- for cpu in cpu_list:
- test_os = ''
- if cpu == "AtomicSimpleCPU":
- cpu_option = ('','atomic/')
- elif cpu == "TimingSimpleCPU":
- cpu_option = ('--timing','timing/')
- elif cpu == "O3CPU":
- cpu_option = ('--detailed','detailed/')
- else:
- raise TypeError, "Unknown CPU model specified"
-
- if default_refdir:
- # Reference stats located in ref/arch/os/cpu or ref/arch/cpu
- # if no OS specified
- test_refdir = os.path.join(refdir, env['TARGET_ISA'])
- if test_os != '':
- test_refdir = os.path.join(test_refdir, test_os)
- cpu_refdir = os.path.join(test_refdir, cpu_option[1])
-
- ref_stats = os.path.join(cpu_refdir, 'm5stats.txt')
-
- # base command for running test
- base_cmd = '${SOURCES[0]} -d $TARGET.dir ${SOURCES[1]}'
- base_cmd = base_cmd + ' ' + cpu_option[0]
- # stdout and stderr files
- cmd_stdout = '${TARGETS[0]}'
- cmd_stderr = '${TARGETS[1]}'
-
- stdout_string = cpu_option[1] + 'stdout'
- stderr_string = cpu_option[1] + 'stderr'
- m5stats_string = cpu_option[1] + 'm5stats.txt'
- outdiff_string = cpu_option[1] + 'outdiff'
- statsdiff_string = cpu_option[1] + 'statsdiff'
- status_string = cpu_option[1] + 'status'
-
- # Prefix test run with batch job submission command if appropriate.
- # Output redirection is also different for batch runs.
- # Batch command also supports timeout arg (in seconds, not minutes).
- if env['BATCH']:
- cmd = [env['BATCH_CMD'], '-t', str(timeout * 60),
- '-o', cmd_stdout, '-e', cmd_stderr, base_cmd]
- else:
- cmd = [base_cmd, '>', cmd_stdout, '2>', cmd_stderr]
-
- env.Command([stdout_string, stderr_string, m5stats_string],
- [env.M5Binary, 'run.py'], ' '.join(cmd))
-
- # order of targets is important... see check_test
- env.Command([outdiff_string, statsdiff_string, status_string],
- [ref_stats, m5stats_string],
- testAction)
-
- # phony target to echo status
- if env['update_ref']:
- p = env.Command(cpu_option[1] + '_update',
- [ref_stats, m5stats_string, status_string],
- updateAction)
- else:
- p = env.Command(cpu_option[1] + '_print', [status_string],
- printAction)
- env.AlwaysBuild(p)
+ # prepend file name with tgt_dir
+ def tgt(f):
+ return os.path.join(tgt_dir, f)
+
+ ref_stats = os.path.join(ref_dir, 'stats.txt')
+ new_stats = tgt('stats.txt')
+ status_file = tgt('status')
- env.Tests.setdefault(category, [])
- env.Tests[category] += p
+ env.Command([status_file],
+ [env.M5Binary, 'run.py', ref_stats],
+ testAction)
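+    # (listing the binary, run script, and reference stats as sources
+    # lets scons rerun the test whenever any of them changes)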
-# Make test_builder a "wrapper" function. See SCons wiki page at
-# http://www.scons.org/cgi-bin/wiki/WrapperFunctions.
-SConsEnvironment.Test = test_builder
+ # phony target to echo status
+ if env['update_ref']:
+ p = env.Command(tgt('_update'),
+ [ref_stats, new_stats, status_file],
+ updateAction)
+ else:
+ p = env.Command(tgt('_print'), [status_file], printAction)
+
+ env.AlwaysBuild(p)
+
+
+# Figure out applicable configs based on build type
+configs = []
+if env['FULL_SYSTEM']:
+ if env['TARGET_ISA'] == 'alpha':
+ configs += ['tsunami-simple-atomic',
+ 'tsunami-simple-timing',
+ 'tsunami-simple-atomic-dual',
+ 'tsunami-simple-timing-dual',
+ 'twosys-tsunami-simple-atomic',
+ 'tsunami-o3', 'tsunami-o3-dual']
+ if env['TARGET_ISA'] == 'sparc':
+ configs += ['t1000-simple-atomic',
+ 't1000-simple-timing']
+
+else:
+ configs += ['simple-atomic', 'simple-timing', 'o3-timing', 'memtest',
+ 'simple-atomic-mp', 'simple-timing-mp', 'o3-timing-mp',
+ 'inorder-timing']
+
+if env['RUBY']:
+ # Hack for Ruby
+ configs += [c + '-ruby' for c in configs]
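+    # (e.g. 'simple-atomic' is also tested as 'simple-atomic-ruby')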
cwd = os.getcwd()
os.chdir(str(Dir('.').srcdir))
-scripts = glob.glob('*/SConscript')
+for config in configs:
+ dirs = glob.glob('*/*/ref/%s/*/%s' % (env['TARGET_ISA'], config))
+ for d in dirs:
+ if not os.path.exists(os.path.join(d, 'skip')):
+ test_builder(env, d)
os.chdir(cwd)
-
-for s in scripts:
- SConscript(s, exports = 'env', duplicate = False)
-
-# Set up phony commands for various test categories
-allTests = []
-for (key, val) in env.Tests.iteritems():
- env.Command(key, val, env.NoAction)
- allTests += val
-
-# The 'all' target is redundant since just specifying the test
-# directory name (e.g., ALPHA_SE/test/opt) has the same effect.
-env.Command('all', allTests, env.NoAction)