systemc: Teach verify.py how to verify vcd files.
[gem5.git] src/systemc/tests/verify.py
#!/usr/bin/env python2
#
# Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

from __future__ import print_function

import argparse
import collections
import difflib
import functools
import inspect
import itertools
import json
import multiprocessing.pool
import os
import re
import subprocess
import sys

script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')



def scons(*args):
    args = ['scons'] + list(args)
    subprocess.check_call(args)



class Test(object):
    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        return os.path.join(script_dir, self.path)

    def expected_returncode_file(self):
        return os.path.join(self.src_dir(), 'expected_returncode')

    def golden_dir(self):
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        return os.path.join(self.m5out_dir(), 'returncode')

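# A rough sketch of the paths a Test computes, for a hypothetical test whose
# 'path' prop is 'misc/example' and whose 'name' prop is 'example', built in
# build/ARM with the 'opt' flavor (all names here are illustrative):
#
#   full_path()  -> build/ARM/systemc/tests/misc/example/example.opt
#   m5out_dir()  -> build/ARM/systemc/tests/misc/example/m5out.opt
#   golden_dir() -> src/systemc/tests/misc/example/golden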
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)

class TestPhaseBase(object):
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number
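# The metaclass above registers every concrete TestPhaseBase subclass in
# test_phase_classes under its 'name' attribute. A hypothetical extra phase
# could be added just by defining one (sketch only, not part of this script):
#
#   class LintPhase(TestPhaseBase):
#       name = 'lint'
#       number = 4
#
#       def run(self, tests):
#           pass
#
# and it would then be selectable on the command line as '--phase lint'.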
class CompilePhase(TestPhaseBase):
    name = 'compile'
    number = 1

    def run(self, tests):
        targets = list([test.full_path() for test in tests])

        parser = argparse.ArgumentParser()
        parser.add_argument('-j', type=int, default=0)
        args, leftovers = parser.parse_known_args(self.args)
        if args.j == 0:
            self.args = ('-j', str(self.main_args.j)) + self.args

        scons_args = [ 'USE_SYSTEMC=1' ] + list(self.args) + targets
        scons(*scons_args)
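# The compile phase ends up invoking scons roughly like this (the build
# directory, parallelism, and test target are illustrative):
#
#   scons USE_SYSTEMC=1 -j 4 build/ARM/systemc/tests/misc/example/example.opt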
class RunPhase(TestPhaseBase):
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds, '
                            '0 to disable.',
                            default=60)
        parser.add_argument('-j', type=int, default=0,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        curdir = os.getcwd()
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-rd', os.path.abspath(test.m5out_dir()),
                '--listener-mode=off',
                '--quiet',
                config_path,
                '--working-dir',
                os.path.dirname(test.src_dir())
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            os.chdir(curdir)
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        j = self.main_args.j if args.j == 0 else args.j

        runnable = filter(lambda t: not t.compile_only, tests)
        if j == 1:
            map(run_test, runnable)
        else:
            tp = multiprocessing.pool.ThreadPool(j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
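# An individual test run ends up invoking the test binary roughly like this
# (paths are illustrative; 60 is the default --timeout value):
#
#   timeout --kill-after 120 60 \
#       build/ARM/systemc/tests/misc/example/example.opt \
#       -rd <abs path to m5out.opt> --listener-mode=off --quiet \
#       src/systemc/tests/config.py --working-dir src/systemc/tests/misc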
class Checker(object):
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()

def tagged_filt(tag, num):
    return (r'\n{}: \({}{}\) .*\n(In file: .*\n)?'
            r'(In process: [\w.]* @ .*\n)?').format(tag, tag[0], num)

def error_filt(num):
    return tagged_filt('Error', num)

def warning_filt(num):
    return tagged_filt('Warning', num)

def info_filt(num):
    return tagged_filt('Info', num)
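# As an illustration, warning_filt(540) builds a pattern that strips report
# blocks of this shape from the logs before they are diffed (the text in
# angle brackets is placeholder, not actual output):
#
#   Warning: (W540) <message text>
#   In file: <some source file>
#   In process: <process name> @ <time>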
class DiffingChecker(Checker):
    def __init__(self, ref, test, tag, out_dir):
        super(DiffingChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def diffing_check(self, ref_lines, test_lines):
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)

        diff_file = '.'.join([ref_file, 'diff'])
        diff_path = os.path.join(self.out_dir, diff_file)
        if test_lines != ref_lines:
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                        ref_lines, test_lines,
                        fromfile=ref_file,
                        tofile=test_file):
                    diff_f.write(line)
            return False
        else:
            if os.path.exists(diff_path):
                os.unlink(diff_path)
            return True

class LogChecker(DiffingChecker):
    def merge_filts(*filts):
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # The reporting mechanism will print the actual filename when running in
    # gem5, and the "golden" output will say "<removed by verify.py>". We want
    # to strip out both versions to make comparing the output sensible.
    in_file_filt = r'^In file: ((<removed by verify\.pl>)|([a-zA-Z0-9.:_/]*))$'

    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: ' +
        r'You can turn off(.*\n){7}',
        r'^\nInfo: \(I804\) /IEEE_Std_1666/deprecated: \n' +
        r' sc_clock\(const char(.*\n){3}',
        warning_filt(540),
        warning_filt(571),
        info_filt(804),
        in_file_filt,
    )
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n',
        r'^info: Entering event queue @ \d*\. Starting simulation\.\.\.\n',
        r'warn: [^(]+\([^)]*\)( \[with [^]]*\])? not implemented\.\n',
        r'warn: Ignoring request to set stack size\.\n',
        info_filt(804),
        in_file_filt,
    )

    def apply_filters(self, data, filts):
        return re.sub(filts, '', data)

    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = re.sub(self.test_filt, '', test_f.read())
            ref = re.sub(self.ref_filt, '', ref_f.read())
            return self.diffing_check(ref.splitlines(True),
                                      test.splitlines(True))
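# As an example of what these filters remove, lines like the following are
# stripped before diffing so fixed banners and harmless informational output
# don't cause spurious failures (the frequency value shown is illustrative):
#
#   SystemC Simulation
#   Info: /OSCI/SystemC: Simulation stopped by user.
#   Global frequency set at 1000000000000 ticks per second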
class VcdChecker(DiffingChecker):
    def check(self):
        with open(self.test) as test_f, open(self.ref) as ref_f:
            ref = ref_f.read().splitlines(True)
            test = test_f.read().splitlines(True)
            # Strip off the first seven lines of the test output which are
            # date and version information.
            test = test[7:]

            return self.diffing_check(ref, test)
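# A VCD trace opens with $date and $version sections whose contents change
# from run to run, which is why the checker above skips the start of the
# test's output before diffing. The header looks roughly like this (contents
# are illustrative):
#
#   $date
#      <timestamp of the run>
#   $end
#   $version
#      <tracer/simulator version string>
#   $end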
class GoldenDir(object):
    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        contents = os.listdir(path)
        suffix = '.' + platform
        suffixed = filter(lambda c: c.endswith(suffix), contents)
        bases = map(lambda t: t[:-len(platform)], suffixed)
        common = filter(lambda t: not t.startswith(tuple(bases)), contents)

        self.entries = {}
        class Entry(object):
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        for entry in contents:
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for match in matches.values():
            match.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        else:
            return None

    def unused(self):
        items = self.entries.items()
        items = filter(lambda i: not i[1].used, items)

        items.sort()
        sources = []
        i = 0
        while i < len(items):
            root = items[i][0]
            sources.append(root)
            i += 1
            while i < len(items) and items[i][0].startswith(root):
                i += 1
        return sources
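# As an illustration, a test's golden directory might contain (file names
# here are hypothetical):
#
#   example.log             - expected log output for all platforms
#   example.log.linux64     - platform specific override of the same log
#   trace.vcd               - expected VCD trace
#
# entry('example.log') returns the '.linux64' variant when one exists and
# falls back to the generic file otherwise, while unused() lists any golden
# entries the verify phase has not asked about yet so they still get checked.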
class VerifyPhase(TestPhaseBase):
    name = 'verify'
    number = 3

    def reset_status(self):
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--no-print-results', action='store_true',
                help='Don\'t print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            expected_returncode = 0
            if os.path.exists(test.expected_returncode_file()):
                with open(test.expected_returncode_file()) as erc:
                    expected_returncode = int(erc.read())

            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != expected_returncode:
                if expected_returncode == 0:
                    self.failed(test, 'abort')
                else:
                    self.failed(test, 'missed abort')
                continue

            out_dir = test.m5out_dir()

            Diff = collections.namedtuple(
                    'Diff', 'ref, test, tag, ref_filter')

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                elif name.endswith('.vcd'):
                    diffs.append(VcdChecker(ref_path, test_path,
                                            name, out_dir))
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if not args.no_print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)


parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (i.e. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests.')

parser.add_argument('-j', type=int, default=1,
                    help='Default level of parallelism, can be overridden '
                    'for individual stages.')

filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties.')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file.')
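# --filter is evaluated as a Python expression against each test's
# properties from tests.json. For example, to keep only tests that are not
# compile-only (compile_only is a property this script itself relies on,
# and the build directory is illustrative):
#
#   ./verify.py build/ARM --filter 'not compile_only'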
def collect_phases(args):
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            raise RuntimeError('Phase %s specified more than once' % name)
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
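# Phases and their per-phase arguments are separated on the command line
# with '--phase'. For instance, to only rebuild and re-verify (the build
# directory and -j value are illustrative):
#
#   ./verify.py build/ARM --phase compile -j 8 --phase verify --result-file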
main_args, phases = collect_phases(sys.argv)

if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

if main_args.filter_file:
    f = main_args.filter_file
    filt = compile(f.read(), f.name, 'eval')
else:
    filt = compile(main_args.filter, '<string>', 'eval')

filtered_tests = {
    target: props for (target, props) in
    test_data.iteritems() if eval(filt, dict(props))
}

if len(filtered_tests) == 0:
    print('All tests were filtered out.')
    exit()

if main_args.list:
    for target, props in sorted(filtered_tests.iteritems()):
        print('%s.%s' % (target, main_args.flavor))
        for key, val in props.iteritems():
            print('    %s: %s' % (key, val))
    print('Total tests: %d' % len(filtered_tests))
else:
    tests_to_run = list([
        Test(target, main_args.flavor, main_args.build_dir, props) for
        target, props in sorted(filtered_tests.iteritems())
    ])

    for phase in phases:
        phase.run(tests_to_run)
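# Typical end-to-end usage (build directory and parallelism are
# illustrative): compile, run, and verify every test with
#
#   ./verify.py build/ARM -j 8
#
# or pass --list to just print the tests the current filter selects.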