3 # Copyright 2018 Google, Inc.
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met: redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer;
9 # redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution;
12 # neither the name of the copyright holders nor the names of its
13 # contributors may be used to endorse or promote products derived from
14 # this software without specific prior written permission.
16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 from __future__
import print_function
39 import multiprocessing
.pool
# Absolute path of this script, resolved from the currently-executing
# frame, and the directory it lives in; config.py sits next to it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

# Where the systemc tests and their json manifest live, relative to a
# build directory (joined with main_args.build_dir later in the file).
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
# Body of the scons() helper (its def line is not visible in this
# chunk): prepend the 'scons' executable name to the caller-supplied
# argument list and run it, letting subprocess.CalledProcessError
# propagate on a non-zero exit status.
args = ['scons'] + list(args)
subprocess.check_call(args)
62 def __init__(self
, target
, suffix
, build_dir
, props
):
65 self
.build_dir
= build_dir
68 for key
, val
in props
.iteritems():
69 self
.set_prop(key
, val
)
def set_prop(self, key, val):
    # Attach an arbitrary named property to this test object; used by
    # __init__ to mirror every entry of the manifest's props dict.
    setattr(self, key, val)
76 return os
.path
.join(self
.build_dir
, tests_rel_path
, self
.path
)
79 return os
.path
.join(script_dir
, self
.path
)
82 return os
.path
.join(self
.src_dir(), 'golden')
85 return '.'.join([self
.name
, self
.suffix
])
88 return os
.path
.join(self
.dir(), self
.bin())
91 return os
.path
.join(self
.dir(), 'm5out.' + self
.suffix
)
def returncode_file(self):
    '''Path of the file recording the simulation's exit code.'''
    out_dir = self.m5out_dir()
    return os.path.join(out_dir, 'returncode')
# Registry of concrete test phase classes keyed by their 'name'
# attribute; populated by TestPhaseMeta as each phase class is defined.
test_phase_classes = {}
class TestPhaseMeta(type):
    '''Metaclass which records every concrete test phase class in the
    module-level test_phase_classes registry, keyed by the phase's
    'name' class attribute. Classes marked abstract=True are skipped
    (and the marker is popped so it is not inherited via d).'''
    def __init__(cls, name, bases, d):
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls
        super(TestPhaseMeta, cls).__init__(name, bases, d)
107 class TestPhaseBase(object):
108 __metaclass__
= TestPhaseMeta
111 def __init__(self
, main_args
, *args
):
112 self
.main_args
= main_args
115 def __lt__(self
, other
):
116 return self
.number
< other
.number
118 class CompilePhase(TestPhaseBase
):
122 def run(self
, tests
):
123 targets
= list([test
.full_path() for test
in tests
])
124 scons_args
= list(self
.args
) + targets
127 class RunPhase(TestPhaseBase
):
131 def run(self
, tests
):
132 parser
= argparse
.ArgumentParser()
133 parser
.add_argument('--timeout', type=int, metavar
='SECONDS',
134 help='Time limit for each run in seconds.',
136 parser
.add_argument('-j', type=int, default
=1,
137 help='How many tests to run in parallel.')
138 args
= parser
.parse_args(self
.args
)
142 '--kill-after', str(args
.timeout
* 2),
148 cmd
.extend(timeout_cmd
)
151 '-red', test
.m5out_dir(),
152 '--listener-mode=off',
156 # Ensure the output directory exists.
157 if not os
.path
.exists(test
.m5out_dir()):
158 os
.makedirs(test
.m5out_dir())
160 subprocess
.check_call(cmd
)
161 except subprocess
.CalledProcessError
, error
:
162 returncode
= error
.returncode
165 with
open(test
.returncode_file(), 'w') as rc
:
166 rc
.write('%d\n' % returncode
)
168 runnable
= filter(lambda t
: not t
.compile_only
, tests
)
170 map(run_test
, runnable
)
172 tp
= multiprocessing
.pool
.ThreadPool(args
.j
)
173 map(lambda t
: tp
.apply_async(run_test
, (t
,)), runnable
)
class Checker(object):
    '''Compares a test output file against its golden reference file by
    exact content equality.

    ref: path of the golden reference file.
    test: path of the file the test produced.
    tag: short label identifying which output is being compared.
    '''
    # NOTE(review): the original __init__ body was not visible in this
    # chunk; storing the three constructor arguments verbatim is
    # inferred from their use in check() and in LogChecker -- confirm.
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        '''Return True if the test file's contents match the reference.'''
        # BUG FIX: was open(self.text), but no 'text' attribute is ever
        # set -- the test output path is stored as self.test, so every
        # call raised AttributeError.
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
187 class LogChecker(Checker
):
def merge_filts(*filts):
    '''Combine several regex sources into one MULTILINE alternation.

    Each filter is parenthesized and the results are joined with '|',
    so a match of any individual filter matches the merged pattern.
    '''
    wrapped = ['(' + f + ')' for f in filts]
    return re.compile('|'.join(wrapped), flags=re.MULTILINE)
# Benign noise stripped from the golden reference log before diffing:
# the user-stop notice, the SystemC banner line, and warning/in-file
# line pairs. (The closing parens were not visible in this chunk and
# are reconstructed -- the following assignment forces exactly one.)
ref_filt = merge_filts(
    r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
    r'^SystemC Simulation\n',
    r'^\nWarning: .*\nIn file: .*\n'
)
# Noise stripped from the test's own simulation output before diffing.
test_filt = merge_filts(
    r'^Global frequency set at \d* ticks per second\n'
)
def __init__(self, ref, test, tag, out_dir):
    '''Checker for log files; diff artifacts are written into out_dir.'''
    self.out_dir = out_dir
    super(LogChecker, self).__init__(ref, test, tag)
def apply_filters(self, data, filts):
    '''Return data with every match of each filter pattern removed.'''
    # BUG FIX: the original body was 're.sub(filt, '', data)' -- it
    # referenced the undefined name 'filt' (NameError when called),
    # ignored the substitution result, and returned None. Apply each
    # filter in turn and return the filtered text.
    for filt in filts:
        data = re.sub(filt, '', data)
    return data
210 test_file
= os
.path
.basename(self
.test
)
211 ref_file
= os
.path
.basename(self
.ref
)
212 with
open(self
.test
) as test_f
, open(self
.ref
) as ref_f
:
213 test
= re
.sub(self
.test_filt
, '', test_f
.read())
214 ref
= re
.sub(self
.ref_filt
, '', ref_f
.read())
215 diff_file
= '.'.join([ref_file
, 'diff'])
216 diff_path
= os
.path
.join(self
.out_dir
, diff_file
)
218 with
open(diff_path
, 'w') as diff_f
:
219 for line
in difflib
.unified_diff(
220 ref
.splitlines(True), test
.splitlines(True),
226 if os
.path
.exists(diff_path
):
230 class GoldenDir(object):
231 def __init__(self
, path
, platform
):
233 self
.platform
= platform
235 contents
= os
.listdir(path
)
236 suffix
= '.' + platform
237 suffixed
= filter(lambda c
: c
.endswith(suffix
), contents
)
238 bases
= map(lambda t
: t
[:-len(platform
)], suffixed
)
239 common
= filter(lambda t
: not t
.startswith(tuple(bases
)), contents
)
243 def __init__(self
, e_path
):
245 self
.path
= os
.path
.join(path
, e_path
)
250 for entry
in contents
:
251 self
.entries
[entry
] = Entry(entry
)
253 def entry(self
, name
):
255 return (n
== name
) or n
.startswith(name
+ '.')
256 matches
= { n
: e
for n
, e
in self
.entries
.items() if match(n
) }
258 for match
in matches
.values():
261 platform_name
= '.'.join([ name
, self
.platform
])
262 if platform_name
in matches
:
263 return matches
[platform_name
].path
265 return matches
[name
].path
270 items
= self
.entries
.items()
271 items
= filter(lambda i
: not i
[1].used
, items
)
276 while i
< len(items
):
280 while i
< len(items
) and items
[i
][0].startswith(root
):
284 class VerifyPhase(TestPhaseBase
):
288 def reset_status(self
):
def passed(self, test):
    # Record a test that verified successfully.
    self._passed.append(test)
def failed(self, test, cause, note=''):
    '''Record a failing test under *cause*, annotating it with *note*
    (stored on the test as its 'note' property).'''
    test.set_prop('note', note)
    bucket = self._failed.setdefault(cause, [])
    bucket.append(test)
def print_status(self):
    '''Print a one-line tally of how many tests passed and failed.'''
    num_passed = len(self._passed)
    num_failed = sum(len(ts) for ts in self._failed.values())
    print('Passed: {passed:4} - Failed: {failed:4}'.format(
        passed=num_passed, failed=num_failed))
def write_result_file(self, path):
    '''Serialize the recorded results to *path* as JSON.

    The file maps 'passed' to a list of the passing tests' property
    dicts, and 'failed' to a dict of {cause: [property dicts]}.
    '''
    # List/dict comprehensions replace the original map()/iteritems()
    # calls: identical output on Python 2, and still JSON-serializable
    # on Python 3 where map() returns a non-serializable iterator.
    results = {
        'passed': [t.props for t in self._passed],
        'failed': {
            cause: [t.props for t in tests]
            for cause, tests in self._failed.items()
        }
    }
    with open(path, 'w') as rf:
        json.dump(results, rf)
317 def print_results(self
):
320 for path
in sorted(list([ t
.path
for t
in self
._passed
])):
327 for cause
, tests
in sorted(self
._failed
.items()):
328 block
= ' ' + cause
.capitalize() + ':\n'
329 for test
in sorted(tests
, key
=lambda t
: t
.path
):
330 block
+= ' ' + test
.path
332 block
+= ' - ' + test
.note
336 print('\n'.join(causes
))
338 def run(self
, tests
):
339 parser
= argparse
.ArgumentParser()
340 result_opts
= parser
.add_mutually_exclusive_group()
341 result_opts
.add_argument('--result-file', action
='store_true',
342 help='Create a results.json file in the current directory.')
343 result_opts
.add_argument('--result-file-at', metavar
='PATH',
344 help='Create a results json file at the given path.')
345 parser
.add_argument('--print-results', action
='store_true',
346 help='Print a list of tests that passed or failed')
347 args
= parser
.parse_args(self
.args
)
351 runnable
= filter(lambda t
: not t
.compile_only
, tests
)
352 compile_only
= filter(lambda t
: t
.compile_only
, tests
)
354 for test
in compile_only
:
355 if os
.path
.exists(test
.full_path()):
358 self
.failed(test
, 'compile failed')
360 for test
in runnable
:
361 with
open(test
.returncode_file()) as rc
:
362 returncode
= int(rc
.read())
364 if returncode
== 124:
365 self
.failed(test
, 'time out')
367 elif returncode
!= 0:
368 self
.failed(test
, 'abort')
371 out_dir
= test
.m5out_dir()
373 Diff
= collections
.namedtuple(
374 'Diff', 'ref, test, tag, ref_filter')
378 gd
= GoldenDir(test
.golden_dir(), 'linux64')
381 log_file
= '.'.join([test
.name
, 'log'])
382 log_path
= gd
.entry(log_file
)
383 simout_path
= os
.path
.join(out_dir
, 'simout')
384 if not os
.path
.exists(simout_path
):
385 missing
.append('log output')
387 diffs
.append(LogChecker(log_path
, simout_path
,
390 for name
in gd
.unused():
391 test_path
= os
.path
.join(out_dir
, name
)
392 ref_path
= gd
.entry(name
)
393 if not os
.path
.exists(test_path
):
396 diffs
.append(Checker(ref_path
, test_path
, name
))
399 self
.failed(test
, 'missing output', ' '.join(missing
))
402 failed_diffs
= filter(lambda d
: not d
.check(), diffs
)
404 tags
= map(lambda d
: d
.tag
, failed_diffs
)
405 self
.failed(test
, 'failed diffs', ' '.join(tags
))
410 if args
.print_results
:
417 result_path
= os
.path
.join(os
.getcwd(), 'results.json')
418 elif args
.result_file_at
:
419 result_path
= args
.result_file_at
422 self
.write_result_file(result_path
)
425 parser
= argparse
.ArgumentParser(description
='SystemC test utility')
427 parser
.add_argument('build_dir', metavar
='BUILD_DIR',
428 help='The build directory (ie. build/ARM).')
430 parser
.add_argument('--update-json', action
='store_true',
431 help='Update the json manifest of tests.')
433 parser
.add_argument('--flavor', choices
=['debug', 'opt', 'fast'],
435 help='Flavor of binary to test.')
437 parser
.add_argument('--list', action
='store_true',
438 help='List the available tests')
440 filter_opts
= parser
.add_mutually_exclusive_group()
441 filter_opts
.add_argument('--filter', default
='True',
442 help='Python expression which filters tests based '
443 'on their properties')
444 filter_opts
.add_argument('--filter-file', default
=None,
445 type=argparse
.FileType('r'),
446 help='Same as --filter, but read from a file')
def collect_phases(args):
    '''Split a command line into main arguments and per-phase groups.

    Groups are delimited by the literal argument '--phase'; the first
    group (minus the program name) holds the main arguments, and each
    later group is a phase name followed by that phase's own arguments.
    Returns (main_args, list of constructed phase instances).
    '''
    phase_groups = [list(g) for k, g in
        itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    # NOTE(review): the lines declaring the accumulators and the
    # duplicate-name condition were not visible in this chunk; a seen
    # set reproduces the check implied by the error message -- confirm.
    seen = set()
    for group in phase_groups[1:]:
        name = group[0]
        if name in seen:
            # BUG FIX: was 'raise RuntimeException(...)' -- no such
            # name exists in Python, so the error path itself crashed
            # with a NameError instead of reporting the duplicate.
            raise RuntimeError('Phase %s specified more than once' % name)
        seen.add(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    return main_args, phases
463 main_args
, phases
= collect_phases(sys
.argv
)
467 CompilePhase(main_args
),
469 VerifyPhase(main_args
)
474 json_path
= os
.path
.join(main_args
.build_dir
, json_rel_path
)
476 if main_args
.update_json
:
477 scons(os
.path
.join(json_path
))
479 with
open(json_path
) as f
:
480 test_data
= json
.load(f
)
482 if main_args
.filter_file
:
483 f
= main_args
.filter_file
484 filt
= compile(f
.read(), f
.name
, 'eval')
486 filt
= compile(main_args
.filter, '<string>', 'eval')
489 target
: props
for (target
, props
) in
490 test_data
.iteritems() if eval(filt
, dict(props
))
494 for target
, props
in sorted(filtered_tests
.iteritems()):
495 print('%s.%s' % (target
, main_args
.flavor
))
496 for key
, val
in props
.iteritems():
497 print(' %s: %s' % (key
, val
))
498 print('Total tests: %d' % len(filtered_tests
))
500 tests_to_run
= list([
501 Test(target
, main_args
.flavor
, main_args
.build_dir
, props
) for
502 target
, props
in sorted(filtered_tests
.iteritems())
506 phase
.run(tests_to_run
)