3 # Copyright 2018 Google, Inc.
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met: redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer;
9 # redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution;
12 # neither the name of the copyright holders nor the names of its
13 # contributors may be used to endorse or promote products derived from
14 # this software without specific prior written permission.
16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 from __future__
import print_function
39 import multiprocessing
.pool
# Where this script itself lives, resolved through the current stack frame.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
# The directory containing the script, and the config file that sits
# alongside it.
script_dir = os.path.split(script_path)[0]
config_path = os.path.join(script_dir, 'config.py')
# Locations of the systemc tests and their json manifest, expressed
# relative to the build directory.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(systemc_rel_path, 'tests', 'tests.json')
56 args
= ['scons'] + list(args
)
57 subprocess
.check_call(args
)
62 def __init__(self
, target
, suffix
, build_dir
, props
):
65 self
.build_dir
= build_dir
68 for key
, val
in props
.iteritems():
69 self
.set_prop(key
, val
)
def set_prop(self, key, val):
    """Install the property *key* as an attribute of this test, set to *val*."""
    setattr(self, key, val)
76 return os
.path
.join(self
.build_dir
, tests_rel_path
, self
.path
)
79 return os
.path
.join(script_dir
, self
.path
)
def expected_returncode_file(self):
    """Path of the file holding this test's expected exit code."""
    src = self.src_dir()
    return os.path.join(src, 'expected_returncode')
85 return os
.path
.join(self
.src_dir(), 'golden')
88 return '.'.join([self
.name
, self
.suffix
])
91 return os
.path
.join(self
.dir(), self
.bin())
94 return os
.path
.join(self
.dir(), 'm5out.' + self
.suffix
)
def returncode_file(self):
    """Path of the file where the test's actual exit code is recorded."""
    out_dir = self.m5out_dir()
    return os.path.join(out_dir, 'returncode')
# Registry mapping a phase name to the class implementing it; populated
# automatically by TestPhaseMeta as phase classes are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass which auto-registers each concrete test phase class.

    Classes carrying an 'abstract' marker (consumed here, so it never
    becomes a class attribute) are left out of the registry.
    """
    def __init__(cls, name, bases, d):
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls
        super(TestPhaseMeta, cls).__init__(name, bases, d)
110 class TestPhaseBase(object):
111 __metaclass__
= TestPhaseMeta
114 def __init__(self
, main_args
, *args
):
115 self
.main_args
= main_args
def __lt__(self, other):
    """Order phases by their fixed sequence number."""
    my_number = self.number
    return my_number < other.number
121 class CompilePhase(TestPhaseBase
):
125 def run(self
, tests
):
126 targets
= list([test
.full_path() for test
in tests
])
128 parser
= argparse
.ArgumentParser()
129 parser
.add_argument('-j', type=int, default
=0)
130 args
, leftovers
= parser
.parse_known_args(self
.args
)
132 self
.args
= ('-j', str(self
.main_args
.j
)) + self
.args
134 scons_args
= [ 'USE_SYSTEMC=1' ] + list(self
.args
) + targets
137 class RunPhase(TestPhaseBase
):
141 def run(self
, tests
):
142 parser
= argparse
.ArgumentParser()
143 parser
.add_argument('--timeout', type=int, metavar
='SECONDS',
144 help='Time limit for each run in seconds, '
147 parser
.add_argument('-j', type=int, default
=0,
148 help='How many tests to run in parallel.')
149 args
= parser
.parse_args(self
.args
)
153 '--kill-after', str(args
.timeout
* 2),
160 cmd
.extend(timeout_cmd
)
163 '-rd', os
.path
.abspath(test
.m5out_dir()),
164 '--listener-mode=off',
168 os
.path
.dirname(test
.src_dir())
170 # Ensure the output directory exists.
171 if not os
.path
.exists(test
.m5out_dir()):
172 os
.makedirs(test
.m5out_dir())
174 subprocess
.check_call(cmd
)
175 except subprocess
.CalledProcessError
, error
:
176 returncode
= error
.returncode
180 with
open(test
.returncode_file(), 'w') as rc
:
181 rc
.write('%d\n' % returncode
)
183 j
= self
.main_args
.j
if args
.j
== 0 else args
.j
185 runnable
= filter(lambda t
: not t
.compile_only
, tests
)
187 map(run_test
, runnable
)
189 tp
= multiprocessing
.pool
.ThreadPool(j
)
190 map(lambda t
: tp
.apply_async(run_test
, (t
,)), runnable
)
194 class Checker(object):
195 def __init__(self
, ref
, test
, tag
):
201 with
open(self
.test
) as test_f
, open(self
.ref
) as ref_f
:
202 return test_f
.read() == ref_f
.read()
def tagged_filt(tag, num):
    """Regex text matching one numbered, tagged report block.

    Matches a line like '\\n<tag>: (<initial><num>) ...' together with
    its optional 'In file:' and 'In process:' continuation lines.
    """
    template = (r'\n{}: \({}{}\) .*\n(In file: .*\n)?'
                r'(In process: [\w.]* @ .*\n)?')
    return template.format(tag, tag[0], num)
209 return tagged_filt('Error', num
)
def warning_filt(num):
    """Build the log-filter regex for warning number *num*."""
    tag = 'Warning'
    return tagged_filt(tag, num)
215 return tagged_filt('Info', num
)
217 class DiffingChecker(Checker
):
def __init__(self, ref, test, tag, out_dir):
    """Checker which writes a diff of any mismatch under *out_dir*.

    ref, test and tag are passed through to the Checker base class;
    out_dir is where the .diff file is placed when a comparison fails.
    """
    super(DiffingChecker, self).__init__(ref, test, tag)
    self.out_dir = out_dir
222 def diffing_check(self
, ref_lines
, test_lines
):
223 test_file
= os
.path
.basename(self
.test
)
224 ref_file
= os
.path
.basename(self
.ref
)
226 diff_file
= '.'.join([ref_file
, 'diff'])
227 diff_path
= os
.path
.join(self
.out_dir
, diff_file
)
228 if test_lines
!= ref_lines
:
229 with
open(diff_path
, 'w') as diff_f
:
230 for line
in difflib
.unified_diff(
231 ref_lines
, test_lines
,
237 if os
.path
.exists(diff_path
):
241 class LogChecker(DiffingChecker
):
def merge_filts(*filts):
    """Combine individual filter regexes into one MULTILINE alternation."""
    wrapped = ['(' + f + ')' for f in filts]
    combined = '|'.join(wrapped)
    return re.compile(combined, flags=re.MULTILINE)
247 # The reporting mechanism will print the actual filename when running in
248 # gem5, and the "golden" output will say "<removed by verify.pl>". We want
249 # to strip out both versions to make comparing the output sensible.
250 in_file_filt
= r
'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'
252 ref_filt
= merge_filts(
253 r
'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
254 r
'^SystemC Simulation\n',
255 r
'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
256 r
'You can turn off(.*\n){7}',
257 r
'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
258 r
' sc_clock\(const char(.*\n){3}',
264 test_filt
= merge_filts(
265 r
'^Global frequency set at \d* ticks per second\n',
266 r
'^info: Entering event queue @ \d*\. Starting simulation\.\.\.\n',
267 r
'warn: [^(]+\([^)]*\)( \[with [^]]*\])? not implemented\.\n',
268 r
'warn: Ignoring request to set stack size\.\n',
def apply_filters(self, data, filts):
    """Return *data* with every regex in *filts* stripped out.

    The previous body discarded re.sub's return value and referenced an
    undefined name ('filt' instead of anything in *filts*), so it
    silently did nothing; each filter is now applied in turn and the
    filtered text is returned.
    """
    for filt in filts:
        data = re.sub(filt, '', data)
    return data
277 with
open(self
.test
) as test_f
, open(self
.ref
) as ref_f
:
278 test
= re
.sub(self
.test_filt
, '', test_f
.read())
279 ref
= re
.sub(self
.ref_filt
, '', ref_f
.read())
280 return self
.diffing_check(ref
.splitlines(True),
281 test
.splitlines(True))
283 class VcdChecker(DiffingChecker
):
285 with
open (self
.test
) as test_f
, open(self
.ref
) as ref_f
:
286 ref
= ref_f
.read().splitlines(True)
287 test
= test_f
.read().splitlines(True)
288 # Strip off the first seven lines of the test output which are
289 # date and version information.
292 return self
.diffing_check(ref
, test
)
294 class GoldenDir(object):
295 def __init__(self
, path
, platform
):
297 self
.platform
= platform
299 contents
= os
.listdir(path
)
300 suffix
= '.' + platform
301 suffixed
= filter(lambda c
: c
.endswith(suffix
), contents
)
302 bases
= map(lambda t
: t
[:-len(platform
)], suffixed
)
303 common
= filter(lambda t
: not t
.startswith(tuple(bases
)), contents
)
307 def __init__(self
, e_path
):
309 self
.path
= os
.path
.join(path
, e_path
)
314 for entry
in contents
:
315 self
.entries
[entry
] = Entry(entry
)
317 def entry(self
, name
):
319 return (n
== name
) or n
.startswith(name
+ '.')
320 matches
= { n
: e
for n
, e
in self
.entries
.items() if match(n
) }
322 for match
in matches
.values():
325 platform_name
= '.'.join([ name
, self
.platform
])
326 if platform_name
in matches
:
327 return matches
[platform_name
].path
329 return matches
[name
].path
334 items
= self
.entries
.items()
335 items
= filter(lambda i
: not i
[1].used
, items
)
340 while i
< len(items
):
344 while i
< len(items
) and items
[i
][0].startswith(root
):
348 class VerifyPhase(TestPhaseBase
):
352 def reset_status(self
):
def passed(self, test):
    """Record *test* in the list of tests which verified successfully."""
    self._passed.append(test)
def failed(self, test, cause, note=''):
    """File *test* under the failure *cause*, attaching an optional note.

    The note is stored on the test itself (as its 'note' property) so
    it can be reported alongside the failure later.
    """
    test.set_prop('note', note)
    bucket = self._failed.setdefault(cause, [])
    bucket.append(test)
def print_status(self):
    """Print a one-line passed/failed summary of the recorded results."""
    total_passed = len(self._passed)
    total_failed = sum(len(tests) for tests in self._failed.values())
    print('Passed: {passed:4} - Failed: {failed:4}'.format(
        passed=total_passed, failed=total_failed))
370 def write_result_file(self
, path
):
372 'passed': map(lambda t
: t
.props
, self
._passed
),
374 cause
: map(lambda t
: t
.props
, tests
) for
375 cause
, tests
in self
._failed
.iteritems()
378 with
open(path
, 'w') as rf
:
379 json
.dump(results
, rf
)
381 def print_results(self
):
384 for path
in sorted(list([ t
.path
for t
in self
._passed
])):
391 for cause
, tests
in sorted(self
._failed
.items()):
392 block
= ' ' + cause
.capitalize() + ':\n'
393 for test
in sorted(tests
, key
=lambda t
: t
.path
):
394 block
+= ' ' + test
.path
396 block
+= ' - ' + test
.note
400 print('\n'.join(causes
))
402 def run(self
, tests
):
403 parser
= argparse
.ArgumentParser()
404 result_opts
= parser
.add_mutually_exclusive_group()
405 result_opts
.add_argument('--result-file', action
='store_true',
406 help='Create a results.json file in the current directory.')
407 result_opts
.add_argument('--result-file-at', metavar
='PATH',
408 help='Create a results json file at the given path.')
409 parser
.add_argument('--no-print-results', action
='store_true',
410 help='Don\'t print a list of tests that passed or failed')
411 args
= parser
.parse_args(self
.args
)
415 runnable
= filter(lambda t
: not t
.compile_only
, tests
)
416 compile_only
= filter(lambda t
: t
.compile_only
, tests
)
418 for test
in compile_only
:
419 if os
.path
.exists(test
.full_path()):
422 self
.failed(test
, 'compile failed')
424 for test
in runnable
:
425 with
open(test
.returncode_file()) as rc
:
426 returncode
= int(rc
.read())
428 expected_returncode
= 0
429 if os
.path
.exists(test
.expected_returncode_file()):
430 with
open(test
.expected_returncode_file()) as erc
:
431 expected_returncode
= int(erc
.read())
433 if returncode
== 124:
434 self
.failed(test
, 'time out')
436 elif returncode
!= expected_returncode
:
437 if expected_returncode
== 0:
438 self
.failed(test
, 'abort')
440 self
.failed(test
, 'missed abort')
443 out_dir
= test
.m5out_dir()
445 Diff
= collections
.namedtuple(
446 'Diff', 'ref, test, tag, ref_filter')
450 gd
= GoldenDir(test
.golden_dir(), 'linux64')
453 log_file
= '.'.join([test
.name
, 'log'])
454 log_path
= gd
.entry(log_file
)
455 simout_path
= os
.path
.join(out_dir
, 'simout')
456 if not os
.path
.exists(simout_path
):
457 missing
.append('log output')
459 diffs
.append(LogChecker(log_path
, simout_path
,
462 for name
in gd
.unused():
463 test_path
= os
.path
.join(out_dir
, name
)
464 ref_path
= gd
.entry(name
)
465 if not os
.path
.exists(test_path
):
467 elif name
.endswith('.vcd'):
468 diffs
.append(VcdChecker(ref_path
, test_path
,
471 diffs
.append(Checker(ref_path
, test_path
, name
))
474 self
.failed(test
, 'missing output', ' '.join(missing
))
477 failed_diffs
= filter(lambda d
: not d
.check(), diffs
)
479 tags
= map(lambda d
: d
.tag
, failed_diffs
)
480 self
.failed(test
, 'failed diffs', ' '.join(tags
))
485 if not args
.no_print_results
:
492 result_path
= os
.path
.join(os
.getcwd(), 'results.json')
493 elif args
.result_file_at
:
494 result_path
= args
.result_file_at
497 self
.write_result_file(result_path
)
500 parser
= argparse
.ArgumentParser(description
='SystemC test utility')
502 parser
.add_argument('build_dir', metavar
='BUILD_DIR',
503 help='The build directory (ie. build/ARM).')
505 parser
.add_argument('--update-json', action
='store_true',
506 help='Update the json manifest of tests.')
508 parser
.add_argument('--flavor', choices
=['debug', 'opt', 'fast'],
510 help='Flavor of binary to test.')
512 parser
.add_argument('--list', action
='store_true',
513 help='List the available tests')
515 parser
.add_argument('-j', type=int, default
=1,
516 help='Default level of parallelism, can be overriden '
517 'for individual stages')
519 filter_opts
= parser
.add_mutually_exclusive_group()
520 filter_opts
.add_argument('--filter', default
='True',
521 help='Python expression which filters tests based '
522 'on their properties')
523 filter_opts
.add_argument('--filter-file', default
=None,
524 type=argparse
.FileType('r'),
525 help='Same as --filter, but read from a file')
# Split argv into a main argument group plus one group per '--phase'
# marker, parse the main group, and instantiate the named phase classes.
# NOTE(review): several original lines (531-532, 534-535, 539) are not
# visible in this chunk; 'name' and 'phases' are bound on those lines.
527 def collect_phases(args
):
528 phase_groups
= [list(g
) for k
, g
in
529 itertools
.groupby(args
, lambda x
: x
!= '--phase') if k
]
530 main_args
= parser
.parse_args(phase_groups
[0][1:])
533 for group
in phase_groups
[1:]:
# NOTE(review): RuntimeException is not a builtin and is not defined in
# this file — raising it would itself fail with NameError. Presumably
# RuntimeError was intended; TODO confirm and fix.
536 raise RuntimeException('Phase %s specified more than once' % name
)
537 phase
= test_phase_classes
[name
]
538 phases
.append(phase(main_args
, *group
[1:]))
540 return main_args
, phases
542 main_args
, phases
= collect_phases(sys
.argv
)
546 CompilePhase(main_args
),
548 VerifyPhase(main_args
)
553 json_path
= os
.path
.join(main_args
.build_dir
, json_rel_path
)
555 if main_args
.update_json
:
556 scons(os
.path
.join(json_path
))
558 with
open(json_path
) as f
:
559 test_data
= json
.load(f
)
561 if main_args
.filter_file
:
562 f
= main_args
.filter_file
563 filt
= compile(f
.read(), f
.name
, 'eval')
565 filt
= compile(main_args
.filter, '<string>', 'eval')
568 target
: props
for (target
, props
) in
569 test_data
.iteritems() if eval(filt
, dict(props
))
572 if len(filtered_tests
) == 0:
573 print('All tests were filtered out.')
577 for target
, props
in sorted(filtered_tests
.iteritems()):
578 print('%s.%s' % (target
, main_args
.flavor
))
579 for key
, val
in props
.iteritems():
580 print(' %s: %s' % (key
, val
))
581 print('Total tests: %d' % len(filtered_tests
))
583 tests_to_run
= list([
584 Test(target
, main_args
.flavor
, main_args
.build_dir
, props
) for
585 target
, props
in sorted(filtered_tests
.iteritems())
589 phase
.run(tests_to_run
)