systemc: Make verify.py compare non-output-log reference files.
[gem5.git] / src / systemc / tests / verify.py
1 #!/usr/bin/env python2
2 #
3 # Copyright 2018 Google, Inc.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met: redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer;
9 # redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution;
12 # neither the name of the copyright holders nor the names of its
13 # contributors may be used to endorse or promote products derived from
14 # this software without specific prior written permission.
15 #
16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #
28 # Authors: Gabe Black
29
30 from __future__ import print_function
31
32 import argparse
33 import collections
34 import difflib
35 import functools
36 import inspect
37 import itertools
38 import json
39 import multiprocessing.pool
40 import os
41 import re
42 import subprocess
43 import sys
44
# Absolute path of this script and the directory containing it; used to
# locate the gem5 config for the tests and the test source tree.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script each test binary is run with.
config_path = os.path.join(script_dir, 'config.py')

# Locations of the systemc tests and their json manifest, relative to a
# gem5 build directory.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given command line arguments.

    Raises subprocess.CalledProcessError if the build fails.
    """
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """One test target at a particular build flavor (suffix).

    Every property from the json manifest is exposed both as an attribute
    of the test object and through its props dict.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() instead of the python 2 only iteritems() so this script
        # can also run under python 3.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property both as an attribute and in the props dict."""
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory the test builds and runs in, under build_dir."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's sources."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding this test's golden reference outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """File name of the test binary, e.g. 'name.opt'."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        """Absolute path of the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Directory the test's simulation output is written into."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File which records the exit status of the test's run."""
        return os.path.join(self.m5out_dir(), 'returncode')
96
97
# Registry of concrete test phase classes, keyed by their 'name'
# attribute. Filled in by the metaclass below as classes are defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Classes which declare themselves abstract are skipped; everyone
        # else must provide a 'name' and gets registered under it.
        is_abstract = d.pop('abstract', False)
        if not is_abstract:
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Base class for test phases.

    Concrete subclasses define 'name' (the command line phase name) and
    'number' (the order the phase runs in).
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, rhs):
        # Phases sort by sequence number so they execute in order.
        return self.number < rhs.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds the requested test binaries with scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # One scons invocation for all targets, so the build parallelizes.
        targets = [test.full_path() for test in tests]
        scons(*(list(self.args) + targets))
126
class RunPhase(TestPhaseBase):
    """Phase which runs the compiled test binaries and records results."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Command prefix enforcing the time limit. The process is sent
        # SIGKILL if it's still alive (timeout * 2) seconds after the
        # initial signal.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            # 'as' rather than the python 2 only comma form, so this file
            # stays parsable by python 3.
            except subprocess.CalledProcessError as error:
                returncode = error.returncode
            else:
                returncode = 0
            # The verify phase reads this back; 'timeout' reports a time
            # out with return code 124.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Build a real list (filter is lazy in python 3).
        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            # Explicit loop instead of map(): map is evaluated lazily in
            # python 3, which would silently skip the side effects.
            for test in runnable:
                run_test(test)
        else:
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares one test output file against a golden reference file.

    Subclasses may override check() to filter the contents first.
    """

    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test output matches the reference exactly."""
        # The output path is stored in self.test (the original mistakenly
        # read a nonexistent self.text attribute, so every non-log
        # comparison raised AttributeError).
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Compares simulation log output to a golden log.

    Boilerplate which legitimately differs between runs (banners, warning
    file paths, the global frequency line) is filtered out of both sides
    first; a unified diff is written next to the output when they still
    disagree.
    """

    def merge_filts(*filts):
        # Combine the individual patterns into one alternation, compiled
        # in multiline mode so '^' matches at every line start.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # Boilerplate stripped from the golden reference log.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nWarning: .*\nIn file: .*\n'
    )
    # Boilerplate stripped from the gem5 output log.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n'
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        """Return data with everything matching filts removed."""
        # The original referenced an undefined name ('filt') and threw
        # away the re.sub result; return the filtered text instead.
        return re.sub(filts, '', data)

    def check(self):
        """Compare filtered logs; write/clean up a .diff file as needed."""
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), self.test_filt)
            ref = self.apply_filters(ref_f.read(), self.ref_filt)
        diff_file = '.'.join([ref_file, 'diff'])
        diff_path = os.path.join(self.out_dir, diff_file)
        if test != ref:
            # Record a unified diff so the failure is easy to inspect.
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                        ref.splitlines(True), test.splitlines(True),
                        fromfile=ref_file,
                        tofile=test_file):
                    diff_f.write(line)
            return False
        else:
            # Remove any stale diff left over from an earlier failure.
            if os.path.exists(diff_path):
                os.unlink(diff_path)
            return True
229
class GoldenDir(object):
    """Tracks which golden reference files a test has consumed.

    Entries may be platform specific ('<name>.<platform>'); when both a
    generic and a platform specific version exist, the platform specific
    one is preferred.
    """

    def __init__(self, path, platform):
        self.path = path
        self.platform = platform

        class Entry(object):
            # One reference file, and whether any entry() call claimed it.
            def __init__(self, e_path):
                self.used = False
                self.path = os.path.join(path, e_path)

            def use(self):
                self.used = True

        self.entries = {}
        for entry in os.listdir(path):
            self.entries[entry] = Entry(entry)

    def entry(self, name):
        """Return the reference file path for name, or None.

        Marks all entries derived from name as used, and prefers the
        platform specific version of the file when one exists.
        """
        def match(n):
            return (n == name) or n.startswith(name + '.')
        matches = { n: e for n, e in self.entries.items() if match(n) }

        for m in matches.values():
            m.use()

        platform_name = '.'.join([ name, self.platform ])
        if platform_name in matches:
            return matches[platform_name].path
        if name in matches:
            return matches[name].path
        else:
            return None

    def unused(self):
        """Return the root names of entries nothing claimed via entry()."""
        # sorted() over a comprehension (the python 2 idiom of sorting a
        # filtered items() list in place breaks under python 3).
        names = sorted(n for n, e in self.entries.items() if not e.used)
        sources = []
        i = 0
        while i < len(names):
            # Keep the root name; skip its suffixed variants, which sort
            # immediately after it.
            root = names[i]
            sources.append(root)
            i += 1
            while i < len(names) and names[i].startswith(root):
                i += 1
        return sources
283
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results against the golden outputs."""
    name = 'verify'
    number = 3

    def reset_status(self):
        """Forget any pass/fail results from a previous run."""
        self._passed = []
        self._failed = {}

    def passed(self, test):
        """Record test as passing."""
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record test as failing for cause, with an optional note."""
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump all pass/fail results as json to path."""
        # Build real lists: under python 3, map() objects are not json
        # serializable, and iteritems() doesn't exist.
        results = {
            'passed': [t.props for t in self._passed],
            'failed': {
                cause: [t.props for t in tests] for
                cause, tests in self._failed.items()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the passing tests, then the failing ones by cause."""
        print()
        print('Passed:')
        for path in sorted([ t.path for t in self._passed ]):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '  ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '    ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        # Real lists, since filter() is lazy under python 3.
        runnable = [t for t in tests if not t.compile_only]
        compile_only = [t for t in tests if t.compile_only]

        # Compile-only tests pass iff their binary was built.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # The 'timeout' utility exits with 124 when the limit is hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            gd = GoldenDir(test.golden_dir(), 'linux64')

            missing = []
            log_file = '.'.join([test.name, 'log'])
            log_path = gd.entry(log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                missing.append('log output')
            elif log_path:
                diffs.append(LogChecker(log_path, simout_path,
                                        log_file, out_dir))

            # Every golden file the log check didn't claim must have a
            # matching output file which compares equal byte for byte.
            for name in gd.unused():
                test_path = os.path.join(out_dir, name)
                ref_path = gd.entry(name)
                if not os.path.exists(test_path):
                    missing.append(name)
                else:
                    diffs.append(Checker(ref_path, test_path, name))

            if missing:
                self.failed(test, 'missing output', ' '.join(missing))
                continue

            failed_diffs = [d for d in diffs if not d.check()]
            if failed_diffs:
                tags = [d.tag for d in failed_diffs]
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
423
424
# Top level command line arguments. Each phase parses its own arguments
# separately; the command line is split on '--phase' by collect_phases.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are mutually exclusive ways to supply a
# python expression which selects tests based on their properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
447
def collect_phases(args):
    """Split the command line into main arguments and phase objects.

    The command line is split on '--phase' separators. The first group
    (minus argv[0]) holds the main arguments; each later group names a
    phase followed by that phase's own arguments.

    Returns (main_args, phases) with phases sorted into execution order.
    Raises RuntimeError if a phase is specified more than once.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # RuntimeError, not the nonexistent 'RuntimeException' the
            # original raised (which would itself be a NameError).
            raise RuntimeError('Phase %s specified more than once' % name)
        # Track seen names; the original never appended, so the duplicate
        # check above could never fire.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
462
main_args, phases = collect_phases(sys.argv)

# With no explicit phases, run all of them in their natural order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Regenerate the test manifest first if requested.
if main_args.update_json:
    scons(json_path)

with open(json_path) as f:
    test_data = json.load(f)

# Compile the filter expression, from a file or the command line.
# NOTE: the expression is eval()ed against each test's properties below,
# so it must only come from a trusted source.
if main_args.filter_file:
    f = main_args.filter_file
    filt = compile(f.read(), f.name, 'eval')
else:
    filt = compile(main_args.filter, '<string>', 'eval')

# items() rather than the python 2 only iteritems(), so the script also
# works under python 3.
filtered_tests = {
    target: props for (target, props) in
    test_data.items() if eval(filt, dict(props))
}

if main_args.list:
    for target, props in sorted(filtered_tests.items()):
        print('%s.%s' % (target, main_args.flavor))
        for key, val in props.items():
            print('    %s: %s' % (key, val))
    print('Total tests: %d' % len(filtered_tests))
else:
    tests_to_run = [
        Test(target, main_args.flavor, main_args.build_dir, props) for
        target, props in sorted(filtered_tests.items())
    ]

    for phase in phases:
        phase.run(tests_to_run)