41fd9817e7cb0e17025bdd96040c76626ffdac36
[openpower-isa.git] / src / openpower / sv / sv_analysis.py
#!/usr/bin/env python3
#
# NOTE: this program requires python3 (it makes use of f-strings and
# enum.auto); please do not break that by adding incompatible syntax.
#
# Initial version written by lkcl Oct 2020
# This program analyses the Power 9 opcodes and looks at in/out register uses
# The results are displayed at:
# https://libre-soc.org/openpower/opcode_regs_deduped/
#
# It finds .csv files in the directory isatables/
# then goes through the categories and creates svp64 CSV augmentation
# tables on a per-opcode basis

import argparse
import csv
import enum
import os
from os.path import dirname, join
from glob import glob
from collections import defaultdict
from collections import OrderedDict
from openpower.decoder.power_svp64 import SVP64RM
from openpower.decoder.power_enums import find_wiki_file, get_csv


# Write an array of dictionaries to the CSV file name:
def write_csv(name, items, headers):
    file_path = find_wiki_file(name)
    with open(file_path, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, headers, lineterminator="\n")
        writer.writeheader()
        writer.writerows(items)

# Return True if every value in the row is blank (an empty string),
# i.e. the row carries no information and can be skipped.


def blank_key(row):
    # for v in row.values():
    #     if 'SPR' in v:  # skip all SPRs
    #         return True
    for v in row.values():
        if v:
            return False
    return True
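
# Example (illustrative): a row whose fields are all empty strings is
# considered blank and is skipped by the callers below.
#   >>> blank_key({'opcode': '', 'unit': '', 'comment': ''})
#   True
#   >>> blank_key({'opcode': '0b0000001', 'unit': 'ALU', 'comment': 'add'})
#   False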

# General-purpose registers have names like: RA, RT, R1, ...
# Floating-point registers have names like: FRT, FRA, FR1, ..., FRTp, ...
# Return True if the field names a register


def isreg(field):
    return (field.startswith('R') or field.startswith('FR') or
            field == 'SPR')
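
# Example (illustrative): note this is a simple prefix test, so CSV fields
# such as 'RA_OR_ZERO' also count as registers.
#   >>> isreg('RA'), isreg('FRTp'), isreg('SPR'), isreg('BF')
#   (True, True, True, False)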


# These are the attributes of the instructions,
# register names
keycolumns = ['unit', 'in1', 'in2', 'in3', 'out', 'CR in', 'CR out',
              ]  # don't think we need these: 'ldst len', 'rc', 'lk']

tablecols = ['unit', 'in', 'outcnt', 'CR in', 'CR out', 'imm'
             ]  # don't think we need these: 'ldst len', 'rc', 'lk']


def create_key(row):
    res = OrderedDict()
    #print ("row", row)
    for key in keycolumns:
        # registers IN - special-case: count number of regs RA/RB/RC/RS
        if key in ['in1', 'in2', 'in3']:
            if 'in' not in res:
                res['in'] = 0
            if row['unit'] == 'BRANCH':  # branches must not include Vector SPRs
                continue
            if isreg(row[key]):
                res['in'] += 1

        # registers OUT
        if key == 'out':
            # If upd is 1 then increment the count of outputs
            if 'outcnt' not in res:
                res['outcnt'] = 0
            if isreg(row[key]):
                res['outcnt'] += 1
            if row['upd'] == '1':
                res['outcnt'] += 1

        # CRs (Condition Register) (CR0 .. CR7)
        if key.startswith('CR'):
            if row[key].startswith('NONE'):
                res[key] = '0'
            else:
                res[key] = '1'
                if row['comment'].startswith('cr'):
                    res['crop'] = '1'
        # unit
        if key == 'unit':
            if row[key] == 'LDST':  # we care about LDST units
                res[key] = row[key]
            else:
                res[key] = 'OTHER'
        # LDST len (LoadStore length)
        if key.startswith('ldst'):
            if row[key].startswith('NONE'):
                res[key] = '0'
            else:
                res[key] = '1'
        # rc, lk
        if key in ['rc', 'lk']:
            if row[key] == 'ONE':
                res[key] = '1'
            elif row[key] == 'NONE':
                res[key] = '0'
            else:
                res[key] = 'R'
        if key == 'lk':
            res[key] = row[key]

    # Convert the numerics 'in' & 'outcnt' to strings
    res['in'] = str(res['in'])
    res['outcnt'] = str(res['outcnt'])

    # constants
    if row['in2'].startswith('CONST_'):
        res['imm'] = "1"  # row['in2'].split("_")[1]
    else:
        res['imm'] = ''

    return res
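
# Example (illustrative): for a typical ALU row such as "add RT,RA,RB"
# (unit=ALU, in1=RA, in2=RB, in3=NONE, out=RT, CR in=NONE, CR out=CR0,
# upd=0) create_key() returns an OrderedDict equivalent to:
#   {'unit': 'OTHER', 'in': '2', 'outcnt': '1',
#    'CR in': '0', 'CR out': '1', 'imm': ''}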

#


def dformat(d):
    res = []
    for k, v in d.items():
        res.append("%s: %s" % (k, v))
    return ' '.join(res)


def tformat(d):
    return ' | '.join(d) + " |"
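
# Example (illustrative): tformat() renders one markup table row, e.g.
# tformat(['unit', 'in']) returns "unit | in |"; these rows are printed
# inside the [[!table data=...]] blocks below.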


def keyname(row):
    res = []
    if row['unit'] != 'OTHER':
        res.append(row['unit'])
    if row['in'] != '0':
        res.append('%sR' % row['in'])
    if row['outcnt'] != '0':
        res.append('%sW' % row['outcnt'])
    if row['CR in'] == '1' and row['CR out'] == '1':
        if 'crop' in row:
            res.append("CR=2R1W")
        else:
            res.append("CRio")
    elif row['CR in'] == '1':
        res.append("CRi")
    elif row['CR out'] == '1':
        res.append("CRo")
    elif 'imm' in row and row['imm']:
        res.append("imm")
    return '-'.join(res)
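
# Example (illustrative), continuing the "add RT,RA,RB" key shown above:
# keyname() turns that key into the profile name '2R-1W-CRo', which the
# mapsto table below maps onto the 'RM-1P-2S1D' SVP64 RM category.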


class Format(enum.Enum):
    BINUTILS = enum.auto()
    VHDL = enum.auto()

    @classmethod
    def _missing_(cls, value):
        return {
            "binutils": Format.BINUTILS,
            "vhdl": Format.VHDL,
        }[value.lower()]

    def __str__(self):
        return self.name.lower()

    def declarations(self, values, lens):
        def declaration_binutils(value, width):
            yield f"/* TODO: implement binutils declaration (value={value!r}, width={width!r}) */"

        def declaration_vhdl(value, width):
            yield f" type sv_{value}_rom_array_t is " \
                f"array(0 to {width}) of sv_decode_rom_t;"

        for value in values:
            if value not in lens:
                todo = [f"TODO {value} (or no SVP64 augmentation)"]
                todo = self.wrap_comment(todo)
                yield from map(lambda line: f" {line}", todo)
            else:
                width = lens[value]
                yield from {
                    Format.BINUTILS: declaration_binutils,
                    Format.VHDL: declaration_vhdl,
                }[self](value, width)

    def definitions(self, entries_svp64, fullcols):
        def definitions_vhdl():
            for (value, entries) in entries_svp64.items():
                yield ""
                yield f" constant sv_{value}_decode_rom_array :"
                yield f" sv_{value}_rom_array_t := ("
                yield f" -- {' '.join(fullcols)}"

                for (op, insn, row) in entries:
                    yield f" {op:>13} => ({', '.join(row)}), -- {insn}"

                yield f" {'others':>13} => sv_illegal_inst"
                yield " );"
                yield ""

        def definitions_binutils():
            yield f"/* TODO: implement binutils definitions */"

        yield from {
            Format.BINUTILS: definitions_binutils,
            Format.VHDL: definitions_vhdl,
        }[self]()

    def wrap_comment(self, lines):
        def wrap_comment_binutils(lines):
            lines = tuple(lines)
            if len(lines) == 1:
                yield f"/* {lines[0]} */"
            else:
                yield "/*"
                yield from map(lambda line: f" * {line}", lines)
                yield " */"

        def wrap_comment_vhdl(lines):
            yield from map(lambda line: f"-- {line}", lines)

        yield from {
            Format.BINUTILS: wrap_comment_binutils,
            Format.VHDL: wrap_comment_vhdl,
        }[self](lines)

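# Example (illustrative): Format("vhdl") resolves via _missing_(), which is
# what allows argparse to accept the lowercase name on the command line, and
# wrap_comment() emits comments in the chosen output language:
#   >>> list(Format("vhdl").wrap_comment(["hello"]))
#   ['-- hello']
#   >>> list(Format("binutils").wrap_comment(["hello"]))
#   ['/* hello */']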

def process_csvs(format):
    csvs = {}
    csvs_svp64 = {}
    bykey = {}
    primarykeys = set()
    dictkeys = OrderedDict()
    immediates = {}
    insns = {}  # dictionary of CSV row, by instruction
    insn_to_csv = {}

    print("# OpenPOWER ISA register 'profile's")
    print('')
    print("this page is auto-generated, do not edit")
    print("created by http://libre-soc.org/openpower/sv_analysis.py")
    print('')

    # Expand that (all .csv files)
    pth = find_wiki_file("*.csv")

    # Ignore those containing: valid test sprs
    for fname in glob(pth):
        print("sv analysis checking", fname)
        _, name = os.path.split(fname)
        if '-' in name:
            continue
        if 'valid' in fname:
            continue
        if 'test' in fname:
            continue
        if fname.endswith('sprs.csv'):
            continue
        if fname.endswith('minor_19_valid.csv'):
            continue
        if 'RM' in fname:
            continue
        csvname = os.path.split(fname)[1]
        csvname_ = csvname.split(".")[0]
        # csvname is something like: minor_59.csv, fname the whole path
        csv = get_csv(fname)
        csvs[fname] = csv
        csvs_svp64[csvname_] = []
        for row in csv:
            if blank_key(row):
                continue
            print("row", row)
            insn_name = row['comment']
            condition = row['CONDITIONS']
            # skip instructions that are not suitable
            if insn_name.startswith("l") and insn_name.endswith("br"):
                continue  # skip pseudo-alias lxxxbr
            if insn_name in ['mcrxr', 'mcrxrx', 'darn']:
                continue
            if insn_name in ['bctar', 'bcctr']:
                continue
            if 'rfid' in insn_name:
                continue
            if insn_name in ['setvl', ]:  # SVP64 opcodes
                continue

            insns[(insn_name, condition)] = row  # accumulate csv data
            insn_to_csv[insn_name] = csvname_  # CSV file name by instruction
            dkey = create_key(row)
            key = tuple(dkey.values())
            # print("key=", key)
            dictkeys[key] = dkey
            primarykeys.add(key)
            if key not in bykey:
                bykey[key] = []
            bykey[key].append((csvname, row['opcode'], insn_name, condition,
                               row['form'].upper() + '-Form'))

            # detect immediates, collate them (useful info)
            if row['in2'].startswith('CONST_'):
                imm = row['in2'].split("_")[1]
                if key not in immediates:
                    immediates[key] = set()
                immediates[key].add(imm)

    primarykeys = list(primarykeys)
    primarykeys.sort()

    # mapping to old SVPrefix "Forms"
    mapsto = {'3R-1W-CRo': 'RM-1P-3S1D',
              '2R-1W-CRio': 'RM-1P-2S1D',
              '2R-1W-CRi': 'RM-1P-3S1D',
              '2R-1W-CRo': 'RM-1P-2S1D',
              '2R': 'non-SV',
              '2R-1W': 'RM-1P-2S1D',
              '1R-CRio': 'RM-2P-2S1D',
              '2R-CRio': 'RM-1P-2S1D',
              '2R-CRo': 'RM-1P-2S1D',
              '1R': 'non-SV',
              '1R-1W-CRio': 'RM-2P-1S1D',
              '1R-1W-CRo': 'RM-2P-1S1D',
              '1R-1W': 'RM-2P-1S1D',
              '1R-1W-imm': 'RM-2P-1S1D',
              '1R-CRo': 'RM-2P-1S1D',
              '1R-imm': 'non-SV',
              '1W-CRo': 'RM-1P-1D',
              '1W': 'non-SV',
              '1W-CRi': 'RM-2P-1S1D',
              'CRio': 'RM-2P-1S1D',
              'CR=2R1W': 'RM-1P-2S1D',
              'CRi': 'non-SV',
              'imm': 'non-SV',
              '': 'non-SV',
              'LDST-2R-imm': 'LDSTRM-2P-2S',
              'LDST-2R-1W-imm': 'LDSTRM-2P-2S1D',
              'LDST-2R-1W': 'LDSTRM-2P-2S1D',
              'LDST-2R-2W': 'LDSTRM-2P-2S1D',
              'LDST-1R-1W-imm': 'LDSTRM-2P-1S1D',
              'LDST-1R-2W-imm': 'LDSTRM-2P-1S2D',
              'LDST-3R': 'LDSTRM-2P-3S',
              'LDST-3R-CRo': 'LDSTRM-2P-3S',  # st*x
              'LDST-3R-1W': 'LDSTRM-2P-2S1D',  # st*x
              }
    print("# map to old SV Prefix")
    print('')
    print('[[!table data="""')
    for key in primarykeys:
        name = keyname(dictkeys[key])
        value = mapsto.get(name, "-")
        print(tformat([name, value + " "]))
    print('"""]]')
    print('')

    print("# keys")
    print('')
    print('[[!table data="""')
    print(tformat(tablecols) + " imms | name |")

    # print out the keys and the table from which they're derived
    for key in primarykeys:
        name = keyname(dictkeys[key])
        row = tformat(dictkeys[key].values())
        imms = list(immediates.get(key, ""))
        imms.sort()
        row += " %s | " % ("/".join(imms))
        row += " %s |" % name
        print(row)
    print('"""]]')
    print('')

    # print out, by remap name, all the instructions under that category
    for key in primarykeys:
        name = keyname(dictkeys[key])
        value = mapsto.get(name, "-")
        print("## %s (%s)" % (name, value))
        print('')
        print('[[!table data="""')
        print(tformat(['CSV', 'opcode', 'asm', 'form']))
        rows = bykey[key]
        rows.sort()
        for row in rows:
            print(tformat(row))
        print('"""]]')
        print('')

    # for fname, csv in csvs.items():
    #    print (fname)

    # for insn, row in insns.items():
    #    print (insn, row)

    print("# svp64 remaps")
    svp64 = OrderedDict()
    # create a CSV file, per category, with SV "augmentation" info
    # XXX note: 'out2' not added here, needs to be added to CSV files
    # KEEP TRACK OF THESE https://bugs.libre-soc.org/show_bug.cgi?id=619
    csvcols = ['insn', 'mode', 'CONDITIONS', 'Ptype', 'Etype', ]
    csvcols += ['0', '1', '2', '3']
    csvcols += ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']  # temporary
    for key in primarykeys:
        # get the decoded key containing row-analysis, and name/value
        dkey = dictkeys[key]
        name = keyname(dkey)
        value = mapsto.get(name, "-")
        if value == 'non-SV':
            continue

        # print out svp64 tables by category
        print("* **%s**: %s" % (name, value))

        # store csv entries by svp64 RM category
        if value not in svp64:
            svp64[value] = []

        rows = bykey[key]
        rows.sort()

        for row in rows:
            # for idx in range(len(row)):
            #     if row[idx] == 'NONE':
            #         row[idx] = ''
            # get the instruction
            print(key, row)
            insn_name = row[2]
            condition = row[3]
            insn = insns[(insn_name, condition)]

            # start constructing svp64 CSV row
            res = OrderedDict()
            res['insn'] = insn_name
            res['CONDITIONS'] = condition
            res['Ptype'] = value.split('-')[1]  # predication type (RM-xN-xxx)
            # get whether R_xxx_EXTRAn fields are 2-bit or 3-bit
            res['Etype'] = 'EXTRA2'
            # go through each register matching to Rxxxx_EXTRAx
            for k in ['0', '1', '2', '3']:
                res[k] = ''
            # create "fake" out2 (TODO, needs to be added to CSV files)
            # KEEP TRACK HERE https://bugs.libre-soc.org/show_bug.cgi?id=619
            res['out2'] = 'NONE'
            if insn['upd'] == '1':  # LD/ST with update has RA as out2
                res['out2'] = 'RA'

            # set the SVP64 mode to NORMAL, LDST, BRANCH or CROP
            crops = ['mfcr', 'mfocrf', 'mtcrf', 'mtocrf',
                     ]
            mode = 'NORMAL'
            if value.startswith('LDST'):
                mode = 'LDST'
            elif insn_name.startswith('bc'):
                mode = 'BRANCH'
            elif insn_name.startswith('cr') or insn_name in crops:
                mode = 'CROP'
            res['mode'] = mode

            # temporary useful info
            regs = []
            for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
                if insn[k].startswith('CONST'):
                    res[k] = ''
                    regs.append('')
                else:
                    res[k] = insn[k]
                    if insn[k] == 'RA_OR_ZERO':
                        regs.append('RA')
                    elif insn[k] != 'NONE':
                        regs.append(insn[k])
                    else:
                        regs.append('')

            print("regs", insn_name, regs)

            # for LD/ST FP, use FRT/FRS not RT/RS, and use CR1 not CR0
            if insn_name.startswith("lf"):
                dRT = 'd:FRT'
                dCR = 'd:CR1'
            else:
                dRT = 'd:RT'
                dCR = 'd:CR0'
            if insn_name.startswith("stf"):
                sRS = 's:FRS'
                dCR = 'd:CR1'
            else:
                sRS = 's:RS'
                dCR = 'd:CR0'

            # sigh now the fun begins. this isn't the sanest way to do it
            # but the patterns are pretty regular.

            if value == 'LDSTRM-2P-1S1D':
                res['Etype'] = 'EXTRA3'  # RM EXTRA3 type
                res['0'] = dRT  # RT: Rdest_EXTRA3
                res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3

            elif value == 'LDSTRM-2P-1S2D':
                res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                res['0'] = dRT  # RT: Rdest1_EXTRA2
                res['1'] = 'd:RA'  # RA: Rdest2_EXTRA2
                res['2'] = 's:RA'  # RA: Rsrc1_EXTRA2

            elif value == 'LDSTRM-2P-2S':
                # stw, std, sth, stb
                res['Etype'] = 'EXTRA3'  # RM EXTRA3 type
                res['0'] = sRS  # RS: Rsrc1_EXTRA3
                res['1'] = 's:RA'  # RA: Rsrc2_EXTRA3

            elif value == 'LDSTRM-2P-2S1D':
                if 'st' in insn_name and 'x' not in insn_name:  # stwu/stbu etc
                    res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                    res['0'] = 'd:RA'  # RA: Rdest1_EXTRA2
                    res['1'] = sRS  # RS: Rsrc1_EXTRA2
                    res['2'] = 's:RA'  # RA: Rsrc2_EXTRA2
                elif 'st' in insn_name and 'x' in insn_name:  # stwux
                    res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                    res['0'] = 'd:RA'  # RA: Rdest1_EXTRA2
                    # RS: Rdest2_EXTRA2, RA: Rsrc1_EXTRA2
                    res['1'] = sRS + ';s:RA'
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA2
                elif 'u' in insn_name:  # ldux etc.
                    res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                    res['0'] = dRT  # RT: Rdest1_EXTRA2
                    res['1'] = 'd:RA'  # RA: Rdest2_EXTRA2
                    res['2'] = 's:RB'  # RB: Rsrc1_EXTRA2
                else:
                    res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                    res['0'] = dRT  # RT: Rdest1_EXTRA2
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA2
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA2

            elif value == 'LDSTRM-2P-3S':
                res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                if 'cx' in insn_name:
                    res['0'] = sRS + ';' + dCR  # RS: Rsrc1_EXTRA2, CR0: dest
                else:
                    res['0'] = sRS  # RS: Rsrc1_EXTRA2
                res['1'] = 's:RA'  # RA: Rsrc2_EXTRA2
                res['2'] = 's:RB'  # RB: Rsrc3_EXTRA2

            elif value == 'RM-2P-1S1D':
                res['Etype'] = 'EXTRA3'  # RM EXTRA3 type
                if insn_name == 'mtspr':
                    res['0'] = 'd:SPR'  # SPR: Rdest1_EXTRA3
                    res['1'] = 's:RS'  # RS: Rsrc1_EXTRA3
                elif insn_name == 'mfspr':
                    res['0'] = 'd:RS'  # RS: Rdest1_EXTRA3
                    res['1'] = 's:SPR'  # SPR: Rsrc1_EXTRA3
                elif name == 'CRio' and insn_name == 'mcrf':
                    res['0'] = 'd:BF'  # BF: Rdest1_EXTRA3
                    res['1'] = 's:BFA'  # BFA: Rsrc1_EXTRA3
                elif 'mfcr' in insn_name or 'mfocrf' in insn_name:
                    res['0'] = 'd:RT'  # RT: Rdest1_EXTRA3
                    res['1'] = 's:CR'  # CR: Rsrc1_EXTRA3
                elif insn_name == 'setb':
                    res['0'] = 'd:RT'  # RT: Rdest1_EXTRA3
                    res['1'] = 's:BFA'  # BFA: Rsrc1_EXTRA3
                elif insn_name.startswith('cmp'):  # cmpi
                    res['0'] = 'd:BF'  # BF: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                elif regs == ['RA', '', '', 'RT', '', '']:
                    res['0'] = 'd:RT'  # RT: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                elif regs == ['RA', '', '', 'RT', '', 'CR0']:
                    res['0'] = 'd:RT;d:CR0'  # RT,CR0: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                elif (regs == ['RS', '', '', 'RA', '', 'CR0'] or
                      regs == ['', '', 'RS', 'RA', '', 'CR0']):
                    res['0'] = 'd:RA;d:CR0'  # RA,CR0: Rdest1_EXTRA3
                    res['1'] = 's:RS'  # RS: Rsrc1_EXTRA3
                elif regs == ['RS', '', '', 'RA', '', '']:
                    res['0'] = 'd:RA'  # RA: Rdest1_EXTRA3
                    res['1'] = 's:RS'  # RS: Rsrc1_EXTRA3
                elif regs == ['', 'FRB', '', 'FRT', '0', 'CR1']:
                    res['0'] = 'd:FRT;d:CR1'  # FRT,CR1: Rdest1_EXTRA3
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA3
                elif regs == ['', 'FRB', '', '', '', 'CR1']:
                    res['0'] = 'd:CR1'  # CR1: Rdest1_EXTRA3
                    res['1'] = 's:FRB'  # FRB: Rsrc1_EXTRA3
                elif regs == ['', 'FRB', '', '', '', 'BF']:
                    res['0'] = 'd:BF'  # BF: Rdest1_EXTRA3
                    res['1'] = 's:FRB'  # FRB: Rsrc1_EXTRA3
                elif regs == ['', 'FRB', '', 'FRT', '', 'CR1']:
                    res['0'] = 'd:FRT;d:CR1'  # FRT,CR1: Rdest1_EXTRA3
                    res['1'] = 's:FRB'  # FRB: Rsrc1_EXTRA3
                elif insn_name.startswith('bc'):
                    res['0'] = 'd:BI'  # BI: Rdest1_EXTRA3
                    res['1'] = 's:BI'  # BI: Rsrc1_EXTRA3
                else:
                    res['0'] = 'TODO'

            elif value == 'RM-1P-2S1D':
                res['Etype'] = 'EXTRA3'  # RM EXTRA3 type
                if insn_name.startswith('cr'):
                    res['0'] = 'd:BT'  # BT: Rdest1_EXTRA3
                    res['1'] = 's:BA'  # BA: Rsrc1_EXTRA3
                    res['2'] = 's:BB'  # BB: Rsrc2_EXTRA3
                elif regs == ['FRA', '', 'FRC', 'FRT', '', 'CR1']:
                    res['0'] = 'd:FRT;d:CR1'  # FRT,CR1: Rdest1_EXTRA3
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA3
                    res['2'] = 's:FRC'  # FRC: Rsrc2_EXTRA3
                # should be for fcmp
                elif regs == ['FRA', 'FRB', '', '', '', 'BF']:
                    res['0'] = 'd:BF'  # BF: Rdest1_EXTRA3
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA3
                    res['2'] = 's:FRB'  # FRB: Rsrc2_EXTRA3
                elif regs == ['FRA', 'FRB', '', 'FRT', '', '']:
                    res['0'] = 'd:FRT'  # FRT: Rdest1_EXTRA3
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA3
                    res['2'] = 's:FRB'  # FRB: Rsrc2_EXTRA3
                elif regs == ['FRA', 'FRB', '', 'FRT', '', 'CR1']:
                    res['0'] = 'd:FRT;d:CR1'  # FRT,CR1: Rdest1_EXTRA3
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA3
                    res['2'] = 's:FRB'  # FRB: Rsrc2_EXTRA3
                elif name == '2R-1W' or insn_name == 'cmpb':  # cmpb
                    if insn_name in ['bpermd', 'cmpb']:
                        res['0'] = 'd:RA'  # RA: Rdest1_EXTRA3
                        res['1'] = 's:RS'  # RS: Rsrc1_EXTRA3
                    else:
                        res['0'] = 'd:RT'  # RT: Rdest1_EXTRA3
                        res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA3
                elif insn_name.startswith('cmp'):  # cmp
                    res['0'] = 'd:BF'  # BF: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA3
                elif (regs == ['', 'RB', 'RS', 'RA', '', 'CR0'] or
                      regs == ['RS', 'RB', '', 'RA', '', 'CR0']):
                    res['0'] = 'd:RA;d:CR0'  # RA,CR0: Rdest1_EXTRA3
                    res['1'] = 's:RB'  # RB: Rsrc1_EXTRA3
                    res['2'] = 's:RS'  # RS: Rsrc2_EXTRA3
                elif regs == ['RA', 'RB', '', 'RT', '', 'CR0']:
                    res['0'] = 'd:RT;d:CR0'  # RT,CR0: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA3
                elif regs == ['RA', '', 'RS', 'RA', '', 'CR0']:
                    res['0'] = 'd:RA;d:CR0'  # RA,CR0: Rdest1_EXTRA3
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA3
                    res['2'] = 's:RS'  # RS: Rsrc2_EXTRA3
                else:
                    res['0'] = 'TODO'

            elif value == 'RM-2P-2S1D':
                res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                if insn_name.startswith('mt'):  # mtcrf
                    res['0'] = 'd:CR'  # CR: Rdest1_EXTRA2
                    res['1'] = 's:RS'  # RS: Rsrc1_EXTRA2
                    res['2'] = 's:CR'  # CR: Rsrc2_EXTRA2
                else:
                    res['0'] = 'TODO'

            elif value == 'RM-1P-3S1D':
                res['Etype'] = 'EXTRA2'  # RM EXTRA2 type
                if regs == ['RA', 'RB', 'RT', 'RT', '', 'CR0']:
                    res['0'] = 'd:RT;d:CR0'  # RT,CR0: Rdest1_EXTRA2
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA2
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA2
                    res['3'] = 's:RT'  # RT: Rsrc3_EXTRA2
                elif insn_name == 'isel':
                    res['0'] = 'd:RT'  # RT: Rdest1_EXTRA2
                    res['1'] = 's:RA'  # RA: Rsrc1_EXTRA2
                    res['2'] = 's:RB'  # RB: Rsrc2_EXTRA2
                    res['3'] = 's:BC'  # BC: Rsrc3_EXTRA2
                else:
                    res['0'] = 'd:FRT;d:CR1'  # FRT, CR1: Rdest1_EXTRA2
                    res['1'] = 's:FRA'  # FRA: Rsrc1_EXTRA2
                    res['2'] = 's:FRB'  # FRB: Rsrc2_EXTRA2
                    res['3'] = 's:FRC'  # FRC: Rsrc3_EXTRA2

            elif value == 'RM-1P-1D':
                res['Etype'] = 'EXTRA3'  # RM EXTRA3 type
                if insn_name == 'svstep':
                    res['0'] = 'd:RT;d:CR0'  # RT,CR0: Rdest1_EXTRA3

            # add to svp64 csvs
            # for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
            #     del res[k]
            # if res['0'] != 'TODO':
            for k in res:
                if k == 'CONDITIONS':
                    continue
                if res[k] == 'NONE' or res[k] == '':
                    res[k] = '0'
            svp64[value].append(res)
            # also add to by-CSV version
            csv_fname = insn_to_csv[insn_name]
            csvs_svp64[csv_fname].append(res)

    print('')

    # now write out the csv files
    for value, csv in svp64.items():
        if value == '-':
            from time import sleep
            print("WARNING, filename '-' should NOT exist. instrs missing")
            print("TODO: fix this (and put in the bugreport number here)")
            sleep(2)
            continue
        # print out svp64 tables by category
        print("## %s" % value)
        print('')
        print('[[!table format=csv file="openpower/isatables/%s.csv"]]' %
              value)
        print('')

        #csvcols = ['insn', 'Ptype', 'Etype', '0', '1', '2', '3']
        write_csv("%s.csv" % value, csv, csvcols + ['out2'])

    # okaaay, now we re-read them back in for producing microwatt SV

    # get SVP64 augmented CSV files
    svt = SVP64RM(microwatt_format=True)
    # Expand that (all .csv files)
    pth = find_wiki_file("*.csv")

    # Ignore those containing: valid test sprs
    for fname in glob(pth):
        print("post-checking", fname)
        _, name = os.path.split(fname)
        if '-' in name:
            continue
        if 'valid' in fname:
            continue
        if 'test' in fname:
            continue
        if fname.endswith('sprs.csv'):
            continue
        if fname.endswith('minor_19_valid.csv'):
            continue
        if 'RM' in fname:
            continue
        svp64_csv = svt.get_svp64_csv(fname)

    csvcols = ['insn', 'mode', 'Ptype', 'Etype']
    csvcols += ['in1', 'in2', 'in3', 'out', 'out2', 'CR in', 'CR out']

    if format is Format.VHDL:
        # and a nice microwatt VHDL file
        file_path = find_wiki_file("sv_decode.vhdl")
    elif format is Format.BINUTILS:
        file_path = find_wiki_file("binutils.c")

    with open(file_path, 'w') as stream:
        output(format, svt, csvcols, insns, csvs_svp64, stream)


def output_autogen_disclaimer(format, stream):
    lines = (
        "this file is auto-generated, do not edit",
        "http://libre-soc.org/openpower/sv_analysis.py",
        "part of Libre-SOC, sponsored by NLnet",
    )
    for line in format.wrap_comment(lines):
        stream.write(line)
        stream.write("\n")
    stream.write("\n")

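# For Format.VHDL the disclaimer above comes out as (illustrative):
#   -- this file is auto-generated, do not edit
#   -- http://libre-soc.org/openpower/sv_analysis.py
#   -- part of Libre-SOC, sponsored by NLnet
# followed by a blank line; Format.BINUTILS wraps the same lines in /* ... */.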

def output(format, svt, csvcols, insns, csvs_svp64, stream):
    lens = {
        'major': 63,
        'minor_4': 63,
        'minor_19': 7,
        'minor_30': 15,
        'minor_31': 1023,
        'minor_58': 63,
        'minor_59': 31,
        'minor_62': 63,
        'minor_63l': 511,
        'minor_63h': 16,
    }
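    # Note (assumption): each entry above appears to be the upper index of the
    # corresponding decode ROM, i.e. declarations() emits
    # "array(0 to N) of sv_decode_rom_t" with these values as N.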

    def svp64_canonicalize(item):
        (value, csv) = item
        value = value.lower().replace("-", "_")
        return (value, csv)

    csvs_svp64_canon = dict(map(svp64_canonicalize, csvs_svp64.items()))

    # disclaimer
    output_autogen_disclaimer(format, stream)

    # declarations
    for line in format.declarations(csvs_svp64_canon.keys(), lens):
        stream.write(f"{line}\n")

    # definitions
    sv_cols = ['sv_in1', 'sv_in2', 'sv_in3', 'sv_out', 'sv_out2',
               'sv_cr_in', 'sv_cr_out']
    fullcols = csvcols + sv_cols

    entries_svp64 = defaultdict(list)
    for (value, csv) in filter(lambda kv: kv[0] in lens, csvs_svp64_canon.items()):
        for entry in csv:
            insn = str(entry['insn'])
            condition = str(entry['CONDITIONS'])
            mode = str(entry['mode'])
            sventry = svt.svp64_instrs.get(insn, None)
            if sventry is not None:
                sventry['mode'] = mode
            op = insns[(insn, condition)]['opcode']
            # convert python-style "0b..." binary to VHDL "2#...#" notation
            if op.startswith("0b"):
                op = "2#%s#" % op[2:]
            row = []
            for colname in csvcols[1:]:
                re = entry[colname]
                # replace blank ('0') fields with NONE
                if re == '0':
                    re = 'NONE'
                # 1/2 predication
                re = re.replace("1P", "P1")
                re = re.replace("2P", "P2")
                row.append(re)
            print("sventry", sventry)
            for colname in sv_cols:
                if sventry is None:
                    re = 'NONE'
                else:
                    re = sventry[colname]
                row.append(re)
            entries_svp64[value].append((op, insn, row))

    for line in format.definitions(entries_svp64, fullcols):
        stream.write(f"{line}\n")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--format",
                        type=Format, choices=Format, default=Format.VHDL,
                        help="format to be used (binutils or VHDL)")
    args = parser.parse_args()
    process_csvs(args.format)


if __name__ == '__main__':
    # don't do anything other than call main() here, cuz this code is bypassed
    # by the sv_analysis command created by setup.py
    main()
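
# Example invocation (illustrative, not part of the original build flow):
#   python3 sv_analysis.py --format vhdl > sv_analysis_report.mdwn
#   python3 sv_analysis.py --format binutils
# The markdown report goes to stdout; the per-category CSVs and the
# sv_decode.vhdl / binutils.c outputs are written via find_wiki_file().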