[gem5.git] / src / systemc / tests / verify.py
#!/usr/bin/env python2
#
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

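# Build, run, and verify the gem5 SystemC regression tests. The work is
# split into three phases (compile, execute, verify) which can be selected
# individually via --phase or all run in order by default. Tests are listed
# in a json manifest produced by scons, and their outputs are checked
# against golden reference files kept next to each test's sources.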
from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')


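# Thin wrapper that shells out to scons with the given arguments.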
def scons(*args):
    args = ['scons'] + list(args)
    subprocess.check_call(args)


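# A single test from the json manifest: its scons target, binary flavor
# suffix, and helpers for locating its sources, build products, golden
# reference directory, and m5out output directory.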
class Test(object):
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        return os.path.join(self.m5out_dir(), 'returncode')


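# Registry of test phases keyed by phase name. TestPhaseMeta fills it in
# automatically for every concrete subclass of TestPhaseBase.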
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number

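# Phase 1: build the test binaries by asking scons for each test's target.
# Any extra phase arguments are passed through to scons.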
class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])
        scons_args = [ 'USE_SYSTEMC=1' ] + list(self.args) + targets
        scons(*scons_args)

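# Phase 2: run every test that isn't compile-only, optionally under a
# timeout and in parallel using a thread pool. Each run's exit status is
# recorded in the test's m5out directory for the verify phase to read.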
class RunPhase(TestPhaseBase):
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError, error:
                returncode = error.returncode
            else:
                returncode = 0
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()

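# Compares one test output file against its golden reference, byte for byte.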
class Checker(object):
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()

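# Regex fragments matching tagged SystemC report lines (Error/Warning/Info)
# so that known-noisy messages can be stripped from the logs before diffing.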
def tagged_filt(tag, num):
    return (r'^\n{}: \({}{}\) .*\n(In file: .*\n)?'
            r'(In process: [\w.]* @ .*\n)?').format(tag, tag[0], num)

def error_filt(num):
    return tagged_filt('Error', num)

def warning_filt(num):
    return tagged_filt('Warning', num)

def info_filt(num):
    return tagged_filt('Info', num)

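# Compares simulation logs after filtering out lines that legitimately
# differ (banners, deprecation notices, certain tagged reports). On a
# mismatch, a unified diff is written next to the test's output.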
class LogChecker(Checker):
    def merge_filts(*filts):
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
        r'You can turn off(.*\n){7}',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
        r' sc_clock\(const char(.*\n){3}',
        warning_filt(540),
        warning_filt(569),
        warning_filt(571),
        error_filt(541),
        error_filt(542),
        error_filt(543),
        info_filt(804),
    )
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n',
        info_filt(804),
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        return re.sub(filts, '', data)

    def check(self):
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = re.sub(self.test_filt, '', test_f.read())
            ref = re.sub(self.ref_filt, '', ref_f.read())
            diff_file = '.'.join([ref_file, 'diff'])
            diff_path = os.path.join(self.out_dir, diff_file)
            if test != ref:
                with open(diff_path, 'w') as diff_f:
                    for line in difflib.unified_diff(
                            ref.splitlines(True), test.splitlines(True),
                            fromfile=ref_file,
                            tofile=test_file):
                        diff_f.write(line)
                return False
            else:
                if os.path.exists(diff_path):
                    os.unlink(diff_path)
        return True

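# Wraps a test's directory of golden reference outputs. Entries may carry a
# platform suffix (e.g. '.linux64'); entry() prefers the platform-specific
# version and records which entries were consumed so unused() can report
# what still needs to be checked.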
class GoldenDir(object):
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        contents = os.listdir(path)
        suffix = '.' + platform
        suffixed = filter(lambda c: c.endswith(suffix), contents)
        bases = map(lambda t: t[:-len(platform)], suffixed)
        common = filter(lambda t: not t.startswith(tuple(bases)), contents)

        self.entries = {}
        class Entry(object):
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        for entry in contents:
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for match in matches.values():
            match.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        else:
            return None

    def unused(self):
        items = self.entries.items()
        items = filter(lambda i: not i[1].used, items)

        items.sort()
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources

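# Phase 3: decide pass/fail for every test. Compile-only tests pass if their
# binary exists; runnable tests must exit with the expected return code and
# their outputs must match the golden references.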
class VerifyPhase(TestPhaseBase):
    name = 'verify'
    number = 3

    def reset_status(self):
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print(' ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = ' ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += ' ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

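    # Walk every test. A return code of 124 (from the timeout utility) is
    # reported as a time out; any other unexpected code is an abort, or a
    # missed abort if a non-zero code was expected. Remaining outputs are
    # then diffed against the golden directory entries.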
    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            Diff = collections.namedtuple(
                    'Diff', 'ref, test, tag, ref_filter')

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)


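# Top-level command line: a build directory plus options shared by every
# phase. Phase-specific arguments are given separately after each --phase
# marker. The --filter expression is evaluated against each test's manifest
# properties, so for example --filter 'not compile_only' keeps only tests
# that actually run.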
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                              'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')

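# Split argv on '--phase' markers: the first group is parsed with the main
# parser above, and each following group names a phase plus that phase's
# own arguments. A typical invocation might look like this (paths and
# values are illustrative only):
#
#   ./verify.py build/ARM --flavor opt \
#       --phase compile -j8 \
#       --phase execute --timeout 300 -j 8 \
#       --phase verify --print-results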
def collect_phases(args):
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases

main_args, phases = collect_phases(sys.argv)

if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]


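# Load the manifest of available tests, regenerating it with scons first if
# requested, then build the filter expression used to select tests.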
json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

if main_args.filter_file:
    f = main_args.filter_file
    filt = compile(f.read(), f.name, 'eval')
else:
    filt = compile(main_args.filter, '<string>', 'eval')

filtered_tests = {
    target: props for (target, props) in
    test_data.iteritems() if eval(filt, dict(props))
}

if len(filtered_tests) == 0:
    print('All tests were filtered out.')
    exit()

if main_args.list:
    for target, props in sorted(filtered_tests.iteritems()):
        print('%s.%s' % (target, main_args.flavor))
        for key, val in props.iteritems():
            print(' %s: %s' % (key, val))
    print('Total tests: %d' % len(filtered_tests))
else:
    tests_to_run = list([
        Test(target, main_args.flavor, main_args.build_dir, props) for
        target, props in sorted(filtered_tests.iteritems())
    ])

    for phase in phases:
        phase.run(tests_to_run)