-def test_builder(env, category, cpu_list=[], os_list=[], refdir='ref', timeout=15):
- """Define a test.
-
- Args:
- category -- string describing test category (e.g., 'quick')
- cpu_list -- list of CPUs to runs this test on (blank means all compiled CPUs)
- os_list -- list of OSs to run this test on
- refdir -- subdirectory containing reference output (default 'ref')
- timeout -- test timeout in minutes (only enforced on pool)
-
- """
-
- default_refdir = False
- if refdir == 'ref':
- default_refdir = True
- if len(cpu_list) == 0:
- cpu_list = env['CPU_MODELS']
-# if len(os_list) == 0:
-# raise RuntimeError, "No OS specified"
-# else:
-# for test_os in os_list:
-# build_cpu_test(env, category, test_os, cpu_list, refdir, timeout)
- # Loop through CPU models and generate proper options, ref directories for each
- for cpu in cpu_list:
- test_os = ''
- if cpu == "AtomicSimpleCPU":
- cpu_option = ('','atomic/')
- elif cpu == "TimingSimpleCPU":
- cpu_option = ('--timing','timing/')
- elif cpu == "O3CPU":
- cpu_option = ('--detailed','detailed/')
- else:
- raise TypeError, "Unknown CPU model specified"
-
- if default_refdir:
- # Reference stats located in ref/arch/os/cpu or ref/arch/cpu if no OS specified
- test_refdir = os.path.join(refdir, env['TARGET_ISA'])
- if test_os != '':
- test_refdir = os.path.join(test_refdir, test_os)
- cpu_refdir = os.path.join(test_refdir, cpu_option[1])
-
- ref_stats = os.path.join(cpu_refdir, 'm5stats.txt')
-
- # base command for running test
- base_cmd = '${SOURCES[0]} -d $TARGET.dir ${SOURCES[1]}'
- base_cmd = base_cmd + ' ' + cpu_option[0]
- # stdout and stderr files
- cmd_stdout = '${TARGETS[0]}'
- cmd_stderr = '${TARGETS[1]}'
-
- stdout_string = cpu_option[1] + 'stdout'
- stderr_string = cpu_option[1] + 'stderr'
- m5stats_string = cpu_option[1] + 'm5stats.txt'
- outdiff_string = cpu_option[1] + 'outdiff'
- statsdiff_string = cpu_option[1] + 'statsdiff'
- status_string = cpu_option[1] + 'status'
-
- # Prefix test run with batch job submission command if appropriate.
- # Output redirection is also different for batch runs.
- # Batch command also supports timeout arg (in seconds, not minutes).
- if env['BATCH']:
- cmd = [env['BATCH_CMD'], '-t', str(timeout * 60),
- '-o', cmd_stdout, '-e', cmd_stderr, base_cmd]
- else:
- cmd = [base_cmd, '>', cmd_stdout, '2>', cmd_stderr]
+def test_builder(env, ref_dir):
+ """Define a test.
+
+ Args:
+ env -- SCons construction environment (reads env['BATCH'] and
+ env['BATCH_CMD'] to decide how the test command is launched)
+ ref_dir -- reference-output path of the exact form
+ category/name/ref/isa/opsys/config ('/'-separated)
+
+ """
+
+ (category, name, _ref, isa, opsys, config) = ref_dir.split('/')
+ # NOTE(review): asserts are stripped under 'python -O'; a malformed
+ # ref_dir would then pass silently — consider raising instead.
+ assert(_ref == 'ref')
+
+ # target path (where test output goes) is the same except without
+ # the 'ref' component
+ tgt_dir = os.path.join(category, name, isa, opsys, config)
+
+ # prepend file name with tgt_dir
+ def tgt(f):
+ return os.path.join(tgt_dir, f)
+
+ # reference stats live under ref_dir; freshly generated stats and the
+ # pass/fail status marker go under tgt_dir
+ ref_stats = os.path.join(ref_dir, 'm5stats.txt')
+ new_stats = tgt('m5stats.txt')
+ status_file = tgt('status')
+
+ # Base command for running test. We mess around with indirectly
+ # referring to files via SOURCES and TARGETS so that scons can
+ # mess with paths all it wants to and we still get the right
+ # files.
+ base_cmd = '${SOURCES[0]} -d $TARGET.dir ${SOURCES[1]} %s' % tgt_dir
+ # stdout and stderr files
+ cmd_stdout = '${TARGETS[0]}'
+ cmd_stderr = '${TARGETS[1]}'
+
+ # Prefix test run with batch job submission command if appropriate.
+ # Output redirection is also different for batch runs.
+ # Batch command also supports timeout arg (in seconds, not minutes).
+ timeout = 15 # used to be a param, probably should be again
+ if env['BATCH']:
+ cmd = [env['BATCH_CMD'], '-t', str(timeout * 60),
+ '-o', cmd_stdout, '-e', cmd_stderr, base_cmd]
+ else:
+ cmd = [base_cmd, '>', cmd_stdout, '2>', cmd_stderr]