systemc: Teach verify.py to diff files when checking test results.
[gem5.git] / src / systemc / tests / verify.py
1 #!/usr/bin/env python2
2 #
3 # Copyright 2018 Google, Inc.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met: redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer;
9 # redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution;
12 # neither the name of the copyright holders nor the names of its
13 # contributors may be used to endorse or promote products derived from
14 # this software without specific prior written permission.
15 #
16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #
28 # Authors: Gabe Black
29
30 from __future__ import print_function
31
32 import argparse
33 import collections
34 import difflib
35 import functools
36 import inspect
37 import itertools
38 import json
39 import multiprocessing.pool
40 import os
41 import re
42 import subprocess
43 import sys
44
# Absolute location of this script; used to find files that live beside it.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
# The gem5 config script each test binary is run with.
config_path = os.path.join(script_dir, 'config.py')

# Paths relative to the build directory where the built tests and the
# generated json manifest of tests live.
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
52
53
54
def scons(*args):
    """Invoke scons with the given arguments, raising on a non-zero exit."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """A single test: where its binary and outputs live, plus the
    properties loaded for it from the json manifest.

    Every manifest property is exposed both as an attribute and through
    the self.props dict.
    """

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        # Binary flavor suffix (debug/opt/fast).
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}

        # items() instead of the Python-2-only iteritems(); identical
        # behavior on 2.7 and keeps the class importable under Python 3.
        for key, val in props.items():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        # Mirror each property as an attribute for convenient access.
        setattr(self, key, val)
        self.props[key] = val

    def dir(self):
        """Directory in the build tree which holds this test."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory in the source tree which holds this test."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Where the golden reference outputs for this test live."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """Name of the test binary: test name plus flavor suffix."""
        return '.'.join([self.name, self.suffix])

    def full_path(self):
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        # Per-flavor m5out directory so flavors don't clobber each other.
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File which records the exit status of the test run."""
        return os.path.join(self.m5out_dir(), 'returncode')
96
97
# Registry mapping phase name -> phase class, populated by the metaclass
# below as each concrete TestPhaseBase subclass is defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    def __init__(cls, name, bases, d):
        # Register every non-abstract phase under its 'name' class
        # attribute so phases can be looked up from the command line.
        # Note d.pop also keeps 'abstract' out of the class dict.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Base class for test phases.

    Stores the shared main arguments plus any phase-specific arguments,
    and orders phases by their 'number' class attribute. Uses the
    Python 2 style __metaclass__ hook to register concrete subclasses.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        # Sort phases into execution order (compile < execute < verify).
        return other.number > self.number
117
class CompilePhase(TestPhaseBase):
    """Build the test binaries by running scons on their target paths."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Any extra phase arguments are passed straight through to scons,
        # followed by the binary path of every test to build.
        targets = [test.full_path() for test in tests]
        scons(*(list(self.args) + targets))
126
class RunPhase(TestPhaseBase):
    # Runs each non-compile-only test binary, recording its exit status,
    # optionally under a timeout and with several tests in parallel.
    name = 'execute'
    number = 2

    def run(self, tests):
        # Phase-local options, parsed from the arguments which followed
        # this phase's name on the command line.
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in the 'timeout' utility; a SIGKILL follows at
        # twice the limit if the test ignores the initial signal.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]
        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            # gem5 invocation: redirect outputs into the per-test m5out
            # directory and run the shared config script.
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError, error:
                returncode = error.returncode
            else:
                returncode = 0
            # Record the exit status for the verify phase to read later.
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        runnable = filter(lambda t: not t.compile_only, tests)
        if args.j == 1:
            map(run_test, runnable)
        else:
            # NOTE(review): this relies on Python 2's eager map() to
            # submit the async jobs; under Python 3 this branch would
            # submit nothing.
            tp = multiprocessing.pool.ThreadPool(args.j)
            map(lambda t: tp.apply_async(run_test, (t,)), runnable)
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a test output file against a golden reference file.

    'ref' and 'test' are paths to the two files; 'tag' is a short label
    used when reporting which comparison failed. Subclasses may override
    check() to filter the file contents before comparing.
    """

    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True if the test file's contents match the reference's.

        Fixed: the original opened self.text, an attribute which is
        never set; the test output path is stored in self.test.
        """
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Diffs a test's simulation log against its golden log.

    Lines which legitimately differ between gem5 and the reference
    simulator (startup banners, etc.) are filtered out of each side
    first. On a mismatch, a unified diff is written into the test's
    output directory for inspection.
    """

    def merge_filts(*filts):
        # Combine the individual filter regexes into one alternation.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # Lines stripped from the golden (reference simulator) log.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n'
    )
    # Lines stripped from the gem5 (test) log.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n'
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        # Where to write the .diff file on a mismatch.
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        """Return data with all matches of the compiled filter removed.

        Fixed: the original referenced an undefined name 'filt' and
        discarded the substitution result instead of returning it.
        """
        return re.sub(filts, '', data)

    def check(self):
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), self.test_filt)
            ref = self.apply_filters(ref_f.read(), self.ref_filt)
        if test != ref:
            # Record a unified diff so failures are easy to inspect.
            diff_file = '.'.join([ref_file, 'diff'])
            diff_path = os.path.join(self.out_dir, diff_file)
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                        ref.splitlines(True), test.splitlines(True),
                        fromfile=ref_file,
                        tofile=test_file):
                    diff_f.write(line)
            return False
        return True
225
class VerifyPhase(TestPhaseBase):
    """Checks each test's results: the exit status recorded by the
    execute phase, and log diffs against the golden reference outputs.
    Reports a summary and optionally a json results file."""
    name = 'verify'
    number = 3

    def reset_status(self):
        # Tests which passed, and a dict mapping a failure cause to the
        # list of tests which failed for that reason.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        # 'note' carries extra detail, e.g. which diffs failed.
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump the results as json, keyed by pass/fail and cause."""
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the full lists of passing and failing tests."""
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print('    ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = '    ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += '        ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
                help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
                help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
                help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # Compile-only tests pass if their binary was built.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            # Exit status recorded by the execute phase.
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            if returncode == 124:
                # 124 is the exit status the 'timeout' utility uses to
                # signal that the time limit expired.
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            log_file = '.'.join([test.name, 'log'])
            log_path = os.path.join(test.golden_dir(), log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                self.failed(test, 'no log output')
                # Bug fix: without this continue, the same test could
                # also be counted as passed below, or crash the log
                # checker on the missing simout file.
                continue
            if os.path.exists(log_path):
                diffs.append(LogChecker(
                    log_path, simout_path, log_file, out_dir))

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
350
351
# Top level command line arguments, shared by all phases.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# Tests are selected either with an inline python expression or with one
# read from a file; the two options are mutually exclusive. The default
# 'True' selects every test.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
374
def collect_phases(args):
    """Split the command line into the main arguments and per-phase groups.

    '--phase' acts as a delimiter: the first group holds the main
    arguments, and each later group is a phase name followed by that
    phase's own arguments. Returns (main_args, phases) with the phases
    sorted into execution order.

    Raises RuntimeError if the same phase is named more than once.
    """
    phase_groups = [list(g) for k, g in
        itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: RuntimeException is not a Python name; raising it
            # would itself fail with a NameError.
            raise RuntimeError('Phase %s specified more than once' % name)
        # Bug fix: names was never appended to, so the duplicate check
        # above could never fire.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
389
main_args, phases = collect_phases(sys.argv)

# With no explicit '--phase' groups, run all three phases in order.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]



json_path = os.path.join(main_args.build_dir, json_rel_path)

# Optionally regenerate the test manifest through scons first.
if main_args.update_json:
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

# Compile the filter expression, either inline or from a file.
if main_args.filter_file:
    f = main_args.filter_file
    filt = compile(f.read(), f.name, 'eval')
else:
    filt = compile(main_args.filter, '<string>', 'eval')

# Keep only the tests whose properties satisfy the filter expression.
# NOTE(review): eval of a user supplied expression is deliberate here --
# the filter is a trusted, locally provided selection expression, not
# untrusted input.
filtered_tests = {
    target: props for (target, props) in
    test_data.iteritems() if eval(filt, dict(props))
}

if main_args.list:
    # Just describe the selected tests; don't run any phases.
    for target, props in sorted(filtered_tests.iteritems()):
        print('%s.%s' % (target, main_args.flavor))
        for key, val in props.iteritems():
            print('    %s: %s' % (key, val))
    print('Total tests: %d' % len(filtered_tests))
else:
    tests_to_run = list([
        Test(target, main_args.flavor, main_args.build_dir, props) for
        target, props in sorted(filtered_tests.iteritems())
    ])

    # Run each phase, in order, over the selected tests.
    for phase in phases:
        phase.run(tests_to_run)