Also decode strings in _DynamicStringTable.get_string() (#217)
[pyelftools.git] / test / run_readelf_tests.py
1 #!/usr/bin/env python
2 #-------------------------------------------------------------------------------
3 # test/run_readelf_tests.py
4 #
5 # Automatic test runner for elftools & readelf
6 #
7 # Eli Bendersky (eliben@gmail.com)
8 # This code is in the public domain
9 #-------------------------------------------------------------------------------
10 import argparse
11 from difflib import SequenceMatcher
12 import logging
13 from multiprocessing import Pool
14 import os
15 import platform
16 import re
17 import sys
18 import time
19
20 from utils import run_exe, is_in_rootdir, dump_output_to_temp_files
21
# Make it possible to run this file from the root dir of pyelftools without
# installing pyelftools; useful for Travis testing, etc.
sys.path.insert(0, '.')

# Module-wide logger: everything goes to stdout at DEBUG level so that CI
# logs capture the full comparison trace.
testlog = logging.getLogger('run_tests')
testlog.setLevel(logging.DEBUG)
testlog.addHandler(logging.StreamHandler(sys.stdout))

# Set the path for calling readelf. We carry our own version of readelf
# around, because binutils tend to change its output even between daily
# builds of the same minor release and keeping track is a headache.
if platform.system() == "Darwin":  # MacOS
    READELF_PATH = 'greadelf'
else:
    READELF_PATH = 'test/external_tools/readelf'
    if not os.path.exists(READELF_PATH):
        # Bundled binary not present - fall back to whatever is on PATH.
        READELF_PATH = 'readelf'
40
41
def discover_testfiles(rootdir):
    """ Discover test files in the given directory. Yield them one by one.

        A test file is any entry of rootdir whose extension is '.elf'; the
        yielded values are paths joined onto rootdir.
    """
    elf_names = [name for name in os.listdir(rootdir)
                 if os.path.splitext(name)[1] == '.elf']
    for name in elf_names:
        yield os.path.join(rootdir, name)
49
50
def run_test_on_file(filename, verbose=False):
    """ Runs a test on the given input filename. Return True if all test
        runs succeeded.

        Each readelf option below is executed through both the reference
        readelf binary and scripts/readelf.py; their outputs are compared
        with compare_output(). A non-zero exit code from either tool aborts
        immediately with False; a comparison mismatch marks failure but
        continues with the remaining options.
    """
    options = [
        '-e', '-d', '-s', '-n', '-r', '-x.text', '-p.shstrtab', '-V',
        '--debug-dump=info', '--debug-dump=decodedline',
        '--debug-dump=frames', '--debug-dump=frames-interp',
        '--debug-dump=aranges', '--debug-dump=pubtypes',
        '--debug-dump=pubnames',
    ]
    success = True
    testlog.info("Test file '%s'" % filename)
    for option in options:
        if verbose:
            testlog.info("..option='%s'" % option)

        # TODO(zlobober): this is a dirty hack to make tests work for ELF core
        # dump notes. Making it work properly requires a pretty deep
        # investigation of how original readelf formats the output.
        if "core" in filename and option == "-n":
            if verbose:
                testlog.warning("....will fail because corresponding part of readelf.py is not implemented yet")
            testlog.info('.......................SKIPPED')
            continue

        # Capture both tools' stdout: index 0 is the reference readelf,
        # index 1 is scripts/readelf.py.
        outputs = []
        for exe_path in (READELF_PATH, 'scripts/readelf.py'):
            run_args = [option, filename]
            if verbose:
                testlog.info("....executing: '%s %s'" % (
                    exe_path, ' '.join(run_args)))
            start = time.time()
            rc, stdout = run_exe(exe_path, run_args)
            if verbose:
                testlog.info("....elapsed: %s" % (time.time() - start,))
            if rc != 0:
                # A crashing tool means the whole file test is hopeless.
                testlog.error("@@ aborting - '%s' returned '%s'" % (exe_path, rc))
                return False
            outputs.append(stdout)

        if verbose:
            testlog.info('....comparing output...')
        start = time.time()
        matched, errmsg = compare_output(*outputs)
        if verbose:
            testlog.info("....elapsed: %s" % (time.time() - start,))
        if matched:
            if verbose:
                testlog.info('.......................SUCCESS')
        else:
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is readelf, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, *outputs)
    return success
103
104
def compare_output(s1, s2):
    """ Compare stdout strings s1 and s2.
        s1 is from readelf, s2 from elftools readelf.py
        Return pair success, errmsg. If comparison succeeds, success is True
        and errmsg is empty. Otherwise success is False and errmsg holds a
        description of the mismatch.

        Note: this function contains some rather horrible hacks to ignore
        differences which are not important for the verification of pyelftools.
        This is due to some intricacies of binutils's readelf which pyelftools
        doesn't currently implement, features that binutils doesn't support,
        or silly inconsistencies in the output of readelf, which I was reluctant
        to replicate. Read the documentation for more details.
    """
    def _significant_lines(text):
        # Lowercase everything and drop blank lines: the comparison is
        # case- and blank-line-insensitive.
        return [ln for ln in text.lower().splitlines() if ln.strip() != '']

    left = _significant_lines(s1)
    right = _significant_lines(s2)

    if len(left) != len(right):
        return False, 'Number of lines different: %s vs %s' % (
            len(left), len(right))

    in_symbol_table = False

    for i, (line1, line2) in enumerate(zip(left, right)):
        if 'symbol table' in line1:
            in_symbol_table = True

        # Compare ignoring whitespace
        parts1 = line1.split()
        parts2 = line2.split()
        if ''.join(parts1) == ''.join(parts2):
            continue

        ok = False

        try:
            # Ignore difference in precision of hex representation in the
            # last part (i.e. 008f3b vs 8f3b)
            if (''.join(parts1[:-1]) == ''.join(parts2[:-1]) and
                    int(parts1[-1], 16) == int(parts2[-1], 16)):
                ok = True
        except ValueError:
            pass

        matcher = SequenceMatcher()
        matcher.set_seqs(line1, line2)
        changes = matcher.get_opcodes()
        if in_symbol_table:
            # Detect readelf's adding @ with lib and version after
            # symbol name.
            if (len(changes) == 2 and changes[1][0] == 'delete' and
                    line1[changes[1][1]] == '@'):
                ok = True
        elif 'at_const_value' in line1:
            # On 32-bit machines, readelf doesn't correctly represent
            # some boundary LEB128 numbers
            val = parts2[-1]
            num2 = int(val, 16 if val.startswith('0x') else 10)
            if num2 <= -2**31 and '32' in platform.architecture()[0]:
                ok = True
        elif 'os/abi' in line1:
            if 'unix - gnu' in line1 and 'unix - linux' in line2:
                ok = True
        elif ('unknown at value' in line1 and
                'dw_at_apple' in line2):
            ok = True
        else:
            for marker in ('t (tls)', 'l (large)'):
                if marker in line1 or marker in line2:
                    ok = True
                    break

        if not ok:
            errmsg = 'Mismatch on line #%s:\n>>%s<<\n>>%s<<\n (%r)' % (
                i, line1, line2, changes)
            return False, errmsg
    return True, ''
183
184
def main():
    """ Entry point of the test runner.

        Parses command-line arguments, collects the test files (either the
        ones named on the command line or everything auto-discovered under
        test/testfiles_for_readelf), runs the readelf-comparison tests on
        them - optionally in parallel - and returns a process exit code
        (0 on full success, 1 otherwise).
    """
    if not is_in_rootdir():
        testlog.error('Error: Please run me from the root dir of pyelftools!')
        return 1

    argparser = argparse.ArgumentParser(
        usage='usage: %(prog)s [options] [file] [file] ...',
        prog='run_readelf_tests.py')
    argparser.add_argument('files', nargs='*', help='files to run tests on')
    argparser.add_argument(
        '--parallel', action='store_true',
        help='run tests in parallel; always runs all tests w/o verbose')
    argparser.add_argument('-V', '--verbose',
                           action='store_true', dest='verbose',
                           help='verbose output')
    argparser.add_argument(
        '-k', '--keep-going',
        action='store_true', dest='keep_going',
        help="Run all tests, don't stop at the first failure")
    args = argparser.parse_args()

    if args.parallel:
        # Parallel mode ignores these two flags; warn when the user asked
        # for behavior that won't be honored (verbose on, or keep-going off).
        if args.verbose or not args.keep_going:
            print('WARNING: parallel mode disables verbosity and always keeps going')

    if args.verbose:
        testlog.info('Running in verbose mode')
        testlog.info('Python executable = %s' % sys.executable)
        testlog.info('readelf path = %s' % READELF_PATH)
        testlog.info('Given list of files: %s' % args.files)

    # If file names are given as command-line arguments, only these files
    # are taken as inputs. Otherwise, autodiscovery is performed.
    if len(args.files) > 0:
        filenames = args.files
    else:
        filenames = sorted(discover_testfiles('test/testfiles_for_readelf'))

    if len(filenames) > 1 and args.parallel:
        pool = Pool()
        try:
            results = pool.map(
                run_test_on_file,
                filenames)
        finally:
            # Shut the worker processes down deterministically instead of
            # leaving cleanup to interpreter exit.
            pool.close()
            pool.join()
        failures = results.count(False)
    else:
        failures = 0
        for filename in filenames:
            if not run_test_on_file(filename, verbose=args.verbose):
                failures += 1
                if not args.keep_going:
                    break

    if failures == 0:
        testlog.info('\nConclusion: SUCCESS')
        return 0
    elif args.keep_going:
        testlog.info('\nConclusion: FAIL ({}/{})'.format(
            failures, len(filenames)))
        return 1
    else:
        testlog.info('\nConclusion: FAIL')
        return 1
247
248
249 if __name__ == '__main__':
250 sys.exit(main())