822065cb940ad28ce9f521f828c9e2856a95c629
[gem5.git] / src / systemc / tests / verify.py
1 #!/usr/bin/env python2
2 #
3 # Copyright 2018 Google, Inc.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met: redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer;
9 # redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution;
12 # neither the name of the copyright holders nor the names of its
13 # contributors may be used to endorse or promote products derived from
14 # this software without specific prior written permission.
15 #
16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #
28 # Authors: Gabe Black
29
30 from __future__ import print_function
31
32 import argparse
33 import collections
34 import difflib
35 import functools
36 import inspect
37 import itertools
38 import json
39 import multiprocessing.pool
40 import os
41 import re
42 import subprocess
43 import sys
44
# Absolute path of this script and its directory; the test sources live
# alongside this script, and config.py is the gem5 config handed to every
# test binary on its command line.
script_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
script_dir = os.path.dirname(script_path)
config_path = os.path.join(script_dir, 'config.py')

# Locations of the systemc tests and their json manifest, relative to a
# gem5 build directory (e.g. build/ARM).
systemc_rel_path = 'systemc'
tests_rel_path = os.path.join(systemc_rel_path, 'tests')
json_rel_path = os.path.join(tests_rel_path, 'tests.json')
53
54
def scons(*args):
    """Run scons with the given arguments, raising CalledProcessError
    if the build fails."""
    subprocess.check_call(['scons'] + list(args))
58
59
60
class Test(object):
    """A single systemc test: its scons target, binary flavor suffix
    (debug/opt/fast), the gem5 build directory, and the properties read
    from the json manifest (exposed both as attributes and via props)."""

    def __init__(self, target, suffix, build_dir, props):
        self.target = target
        self.suffix = suffix
        self.build_dir = build_dir
        self.props = {}
        for key, val in props.iteritems():
            self.set_prop(key, val)

    def set_prop(self, key, val):
        """Record a property as both an attribute and a props entry."""
        self.props[key] = val
        setattr(self, key, val)

    def dir(self):
        """Directory this test builds and runs in, under the build dir."""
        return os.path.join(self.build_dir, tests_rel_path, self.path)

    def src_dir(self):
        """Directory holding this test's sources."""
        return os.path.join(script_dir, self.path)

    def golden_dir(self):
        """Directory holding this test's reference (golden) outputs."""
        return os.path.join(self.src_dir(), 'golden')

    def bin(self):
        """File name of the test binary, e.g. name.opt."""
        return '%s.%s' % (self.name, self.suffix)

    def full_path(self):
        """Absolute path of the test binary."""
        return os.path.join(self.dir(), self.bin())

    def m5out_dir(self):
        """Per-flavor m5out output directory for this test."""
        return os.path.join(self.dir(), 'm5out.' + self.suffix)

    def returncode_file(self):
        """File recording the exit code of the test run."""
        return os.path.join(self.m5out_dir(), 'returncode')
96
97
# Registry mapping phase name -> phase class, populated by TestPhaseMeta
# as each concrete phase class is defined.
test_phase_classes = {}

class TestPhaseMeta(type):
    """Metaclass that auto-registers test phase classes by name.

    Classes whose dict sets abstract = True are skipped; every other
    class is recorded in test_phase_classes under its 'name' attribute
    so phases can be looked up from command line arguments.
    """
    def __init__(cls, name, bases, d):
        # 'abstract' defaults to False when a class doesn't set it in
        # its own dict.
        if not d.pop('abstract', False):
            test_phase_classes[d['name']] = cls

        super(TestPhaseMeta, cls).__init__(name, bases, d)
106
class TestPhaseBase(object):
    """Common base for the compile/execute/verify test phases.

    Registered subclasses provide 'name' and 'number' class attributes;
    abstract = True keeps this base class itself out of the registry.
    Phases compare by 'number' so sorting puts them in pipeline order.
    NOTE: __metaclass__ is the Python 2 metaclass mechanism.
    """
    __metaclass__ = TestPhaseMeta
    abstract = True

    def __init__(self, main_args, *args):
        # main_args: parsed top-level options shared by all phases.
        # args: phase-specific arguments, parsed by the phase itself.
        self.main_args = main_args
        self.args = args

    def __lt__(self, other):
        return self.number < other.number
117
class CompilePhase(TestPhaseBase):
    """Phase which builds the test binaries with scons."""
    name = 'compile'
    number = 1

    def run(self, tests):
        # Hand every test binary path to a single scons invocation,
        # after any phase-specific scons arguments.
        binaries = [test.full_path() for test in tests]
        scons(*(list(self.args) + binaries))
126
class RunPhase(TestPhaseBase):
    """Phase which executes the compiled tests and records each exit
    code in the test's m5out directory for the verify phase."""
    name = 'execute'
    number = 2

    def run(self, tests):
        parser = argparse.ArgumentParser()
        parser.add_argument('--timeout', type=int, metavar='SECONDS',
                            help='Time limit for each run in seconds.',
                            default=0)
        parser.add_argument('-j', type=int, default=1,
                            help='How many tests to run in parallel.')
        args = parser.parse_args(self.args)

        # Wrap each run in coreutils' timeout(1). If the test ignores
        # the initial TERM signal, kill it outright after twice the
        # limit. timeout(1) exits with 124 on a timeout, which the
        # verify phase recognizes.
        timeout_cmd = [
            'timeout',
            '--kill-after', str(args.timeout * 2),
            str(args.timeout)
        ]

        def run_test(test):
            cmd = []
            if args.timeout:
                cmd.extend(timeout_cmd)
            cmd.extend([
                test.full_path(),
                '-red', test.m5out_dir(),
                '--listener-mode=off',
                '--quiet',
                config_path
            ])
            # Ensure the output directory exists.
            if not os.path.exists(test.m5out_dir()):
                os.makedirs(test.m5out_dir())
            try:
                subprocess.check_call(cmd)
            # Bug fix: 'except E, e' is Python 2-only syntax; the 'as'
            # form works on 2.6+ and 3.x.
            except subprocess.CalledProcessError as error:
                # Record the failure code instead of aborting the
                # phase; the verify phase interprets it.
                returncode = error.returncode
            else:
                returncode = 0
            with open(test.returncode_file(), 'w') as rc:
                rc.write('%d\n' % returncode)

        # Compile-only tests have no binary to execute.
        runnable = [t for t in tests if not t.compile_only]
        if args.j == 1:
            for test in runnable:
                run_test(test)
        else:
            # Explicit loops instead of side-effect-only map(), which
            # would silently do nothing if map were lazy.
            tp = multiprocessing.pool.ThreadPool(args.j)
            for test in runnable:
                tp.apply_async(run_test, (test,))
            tp.close()
            tp.join()
176
class Checker(object):
    """Compares a test output file against a golden reference file.

    ref/test are file paths; tag identifies which output this checker
    covers and is used in failure reports.
    """
    def __init__(self, ref, test, tag):
        self.ref = ref
        self.test = test
        self.tag = tag

    def check(self):
        """Return True iff the test output matches the reference
        byte for byte."""
        # Bug fix: this used to open self.text, an attribute that is
        # never set, so any non-log comparison raised AttributeError.
        with open(self.test) as test_f, open(self.ref) as ref_f:
            return test_f.read() == ref_f.read()
186
class LogChecker(Checker):
    """Compares a simulation log against a golden log, first stripping
    lines which legitimately differ between runs (SystemC banners and
    warnings with file paths, gem5's frequency line). When the logs
    differ, writes a unified diff next to the test output."""

    def merge_filts(*filts):
        # Combine the individual patterns into a single alternation
        # regex so each log is filtered in one re.sub pass.
        filts = map(lambda f: '(' + f + ')', filts)
        filts = '|'.join(filts)
        return re.compile(filts, flags=re.MULTILINE)

    # Noise to strip from the golden reference log.
    ref_filt = merge_filts(
        r'^\nInfo: /OSCI/SystemC: Simulation stopped by user.\n',
        r'^SystemC Simulation\n',
        r'^\nWarning: .*\nIn file: .*\n'
    )
    # Noise to strip from the gem5 test log.
    test_filt = merge_filts(
        r'^Global frequency set at \d* ticks per second\n'
    )

    def __init__(self, ref, test, tag, out_dir):
        super(LogChecker, self).__init__(ref, test, tag)
        self.out_dir = out_dir

    def apply_filters(self, data, filts):
        """Return data with every match of the compiled regex removed.

        Bug fix: this used to reference the undefined name 'filt' and
        discarded re.sub's result, so it both crashed if called and
        could never filter anything.
        """
        return re.sub(filts, '', data)

    def check(self):
        """Return True iff the filtered logs match; on mismatch, leave
        a <ref>.diff file in out_dir to aid debugging."""
        test_file = os.path.basename(self.test)
        ref_file = os.path.basename(self.ref)
        with open(self.test) as test_f, open(self.ref) as ref_f:
            test = self.apply_filters(test_f.read(), self.test_filt)
            ref = self.apply_filters(ref_f.read(), self.ref_filt)
        if test != ref:
            diff_file = '.'.join([ref_file, 'diff'])
            diff_path = os.path.join(self.out_dir, diff_file)
            with open(diff_path, 'w') as diff_f:
                for line in difflib.unified_diff(
                        ref.splitlines(True), test.splitlines(True),
                        fromfile=ref_file,
                        tofile=test_file):
                    diff_f.write(line)
            return False
        return True
226
class VerifyPhase(TestPhaseBase):
    """Phase which checks test results.

    Compile-only tests pass if their binary exists; runnable tests are
    checked against their recorded exit code (124 == timeout) and their
    log output is diffed against the golden reference when one exists.
    Results can be printed and/or written to a json file.
    """
    name = 'verify'
    number = 3

    def reset_status(self):
        # _passed: list of passing tests.
        # _failed: failure cause -> list of failing tests.
        self._passed = []
        self._failed = {}

    def passed(self, test):
        """Record a passing test."""
        self._passed.append(test)

    def failed(self, test, cause, note=''):
        """Record a failing test under a cause, with an optional note
        stored on the test itself so it appears in reports."""
        test.set_prop('note', note)
        self._failed.setdefault(cause, []).append(test)

    def print_status(self):
        """Print a one line pass/fail summary."""
        total_passed = len(self._passed)
        total_failed = sum(map(len, self._failed.values()))
        print()
        print('Passed: {passed:4} - Failed: {failed:4}'.format(
            passed=total_passed, failed=total_failed))

    def write_result_file(self, path):
        """Dump passed/failed test properties as json to path."""
        results = {
            'passed': map(lambda t: t.props, self._passed),
            'failed': {
                cause: map(lambda t: t.props, tests) for
                cause, tests in self._failed.iteritems()
            }
        }
        with open(path, 'w') as rf:
            json.dump(results, rf)

    def print_results(self):
        """Print the full list of passed and failed tests."""
        print()
        print('Passed:')
        for path in sorted(list([ t.path for t in self._passed ])):
            print(' ', path)

        print()
        print('Failed:')

        causes = []
        for cause, tests in sorted(self._failed.items()):
            block = ' ' + cause.capitalize() + ':\n'
            for test in sorted(tests, key=lambda t: t.path):
                block += ' ' + test.path
                if test.note:
                    block += ' - ' + test.note
                block += '\n'
            causes.append(block)

        print('\n'.join(causes))

    def run(self, tests):
        parser = argparse.ArgumentParser()
        result_opts = parser.add_mutually_exclusive_group()
        result_opts.add_argument('--result-file', action='store_true',
            help='Create a results.json file in the current directory.')
        result_opts.add_argument('--result-file-at', metavar='PATH',
            help='Create a results json file at the given path.')
        parser.add_argument('--print-results', action='store_true',
            help='Print a list of tests that passed or failed')
        args = parser.parse_args(self.args)

        self.reset_status()

        runnable = filter(lambda t: not t.compile_only, tests)
        compile_only = filter(lambda t: t.compile_only, tests)

        # A compile-only test passes if its binary was produced.
        for test in compile_only:
            if os.path.exists(test.full_path()):
                self.passed(test)
            else:
                self.failed(test, 'compile failed')

        for test in runnable:
            with open(test.returncode_file()) as rc:
                returncode = int(rc.read())

            # timeout(1) exits with 124 when the time limit was hit.
            if returncode == 124:
                self.failed(test, 'time out')
                continue
            elif returncode != 0:
                self.failed(test, 'abort')
                continue

            out_dir = test.m5out_dir()

            diffs = []

            log_file = '.'.join([test.name, 'log'])
            log_path = os.path.join(test.golden_dir(), log_file)
            simout_path = os.path.join(out_dir, 'simout')
            if not os.path.exists(simout_path):
                self.failed(test, 'no log output')
                # Bug fix: without this continue, a test with no output
                # would fall through and either crash the LogChecker
                # (opening the missing simout) or also be counted as
                # passed.
                continue
            if os.path.exists(log_path):
                diffs.append(LogChecker(
                    log_path, simout_path, log_file, out_dir))

            failed_diffs = filter(lambda d: not d.check(), diffs)
            if failed_diffs:
                tags = map(lambda d: d.tag, failed_diffs)
                self.failed(test, 'failed diffs', ' '.join(tags))
                continue

            self.passed(test)

        if args.print_results:
            self.print_results()

        self.print_status()

        result_path = None
        if args.result_file:
            result_path = os.path.join(os.getcwd(), 'results.json')
        elif args.result_file_at:
            result_path = args.result_file_at

        if result_path:
            self.write_result_file(result_path)
351
352
# Top-level command line interface. Anything after a '--phase' separator
# is handed to the named phase itself; see collect_phases below.
parser = argparse.ArgumentParser(description='SystemC test utility')

parser.add_argument('build_dir', metavar='BUILD_DIR',
                    help='The build directory (ie. build/ARM).')

parser.add_argument('--update-json', action='store_true',
                    help='Update the json manifest of tests.')

parser.add_argument('--flavor', choices=['debug', 'opt', 'fast'],
                    default='opt',
                    help='Flavor of binary to test.')

parser.add_argument('--list', action='store_true',
                    help='List the available tests')

# --filter and --filter-file are mutually exclusive sources of the same
# python expression, evaluated against each test's properties.
filter_opts = parser.add_mutually_exclusive_group()
filter_opts.add_argument('--filter', default='True',
                         help='Python expression which filters tests based '
                         'on their properties')
filter_opts.add_argument('--filter-file', default=None,
                         type=argparse.FileType('r'),
                         help='Same as --filter, but read from a file')
375
def collect_phases(args):
    """Split argv into top-level arguments and per-phase groups.

    Arguments are partitioned on the literal separator '--phase'. The
    first group (minus the program name) is parsed with the top-level
    parser; each later group names a phase followed by that phase's own
    arguments. Returns (main_args, phases) with phases sorted into
    pipeline order.

    Raises RuntimeError if the same phase is named more than once.
    """
    phase_groups = [list(g) for k, g in
                    itertools.groupby(args, lambda x: x != '--phase') if k]
    main_args = parser.parse_args(phase_groups[0][1:])
    phases = []
    names = []
    for group in phase_groups[1:]:
        name = group[0]
        if name in names:
            # Bug fix: RuntimeException is not a Python name, so the
            # original raise would itself fail with NameError.
            raise RuntimeError('Phase %s specified more than once' % name)
        # Bug fix: names was never appended to, so the duplicate check
        # above could never fire.
        names.append(name)
        phase = test_phase_classes[name]
        phases.append(phase(main_args, *group[1:]))
    phases.sort()
    return main_args, phases
390
main_args, phases = collect_phases(sys.argv)

# No explicit '--phase' sections on the command line: run the whole
# compile/execute/verify pipeline with default phase arguments.
if len(phases) == 0:
    phases = [
        CompilePhase(main_args),
        RunPhase(main_args),
        VerifyPhase(main_args)
    ]
399
400
401
# Location of the generated test manifest within the build directory.
json_path = os.path.join(main_args.build_dir, json_rel_path)

if main_args.update_json:
    # Ask scons to (re)generate the manifest before reading it.
    scons(os.path.join(json_path))

with open(json_path) as f:
    test_data = json.load(f)

# Compile the filter expression, read either from a file or from the
# --filter option (which defaults to 'True', selecting everything).
if main_args.filter_file:
    f = main_args.filter_file
    filt = compile(f.read(), f.name, 'eval')
else:
    filt = compile(main_args.filter, '<string>', 'eval')

# NOTE(review): the filter is eval()ed with each test's properties as
# its namespace. It is trusted, developer-supplied input; never feed it
# untrusted data.
filtered_tests = {
    target: props for (target, props) in
    test_data.iteritems() if eval(filt, dict(props))
}

if main_args.list:
    # --list only describes the selected tests; no phases are run.
    for target, props in sorted(filtered_tests.iteritems()):
        print('%s.%s' % (target, main_args.flavor))
        for key, val in props.iteritems():
            print(' %s: %s' % (key, val))
    print('Total tests: %d' % len(filtered_tests))
else:
    tests_to_run = list([
        Test(target, main_args.flavor, main_args.build_dir, props) for
        target, props in sorted(filtered_tests.iteritems())
    ])

    # Run each phase in pipeline order over the selected tests.
    for phase in phases:
        phase.run(tests_to_run)