1 #!/usr/bin/env python3
2 #
3 # NOTE that this program requires python3 (it uses f-strings and
4 # enum.auto(), which python2 does not support).
5 #
6 # Initial version written by lkcl Oct 2020
7 # This program analyses the Power 9 opcodes and looks at in/out register usage.
8 # The results are displayed at:
9 # https://libre-soc.org/openpower/opcode_regs_deduped/
10 #
11 # It finds .csv files in the directory isatables/
12 # then goes through the categories and creates svp64 CSV augmentation
13 # tables on a per-opcode basis
14
15 import argparse
16 import csv
17 import enum
18 import os
19 from os.path import dirname, join
20 from glob import glob
21 from collections import defaultdict
22 from collections import OrderedDict
23 from openpower.decoder.power_svp64 import SVP64RM
24 from openpower.decoder.power_enums import find_wiki_file, get_csv
25
26
27 # Write a list of dictionaries out to the named CSV file:
28 def write_csv(name, items, headers):
29 file_path = find_wiki_file(name)
30 with open(file_path, 'w') as csvfile:
31 writer = csv.DictWriter(csvfile, headers, lineterminator="\n")
32 writer.writeheader()
33 writer.writerows(items)
34
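# Illustrative usage (the file name is hypothetical; find_wiki_file resolves
# it within the wiki isatables directory):
#
#     write_csv("example.csv", [{'insn': 'add', 'Ptype': '1P'}],
#               ['insn', 'Ptype'])
#
# writes a header line "insn,Ptype" followed by one row "add,1P".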
35 # Return True if every value in the row is empty, i.e. the row is blank
36 # and should be skipped.
37
38
39 def blank_key(row):
40 # for v in row.values():
41 # if 'SPR' in v: # skip all SPRs
42 # return True
43 for v in row.values():
44 if v:
45 return False
46 return True
47
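# Illustrative behaviour (dicts are made up, shaped like the CSV rows below):
#
#     blank_key({'opcode': '', 'unit': '', 'comment': ''})          # True
#     blank_key({'opcode': '31', 'unit': 'ALU', 'comment': 'add'})  # False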
48 # General purpose registers have names like: RA, RT, R1, ...
49 # Floating point registers have names like: FRT, FRA, FR1, ..., FRTp, ...
50 # Return True if field is a register
51
52
53 def isreg(field):
54 return (field.startswith('R') or field.startswith('FR') or
55 field == 'SPR')
56
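# Illustrative examples (field names as they appear in the isatables CSVs):
#
#     isreg('RA')        # True
#     isreg('FRTp')      # True
#     isreg('SPR')       # True
#     isreg('CONST_UI')  # False (immediate, not a register)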
57
58 # These are the CSV columns (unit plus register and CR usage) from which
59 # each instruction's classification key is built
60 keycolumns = ['unit', 'in1', 'in2', 'in3', 'out', 'CR in', 'CR out',
61 ] # don't think we need these: 'ldst len', 'rc', 'lk']
62
63 tablecols = ['unit', 'in', 'outcnt', 'CR in', 'CR out', 'imm'
64 ] # don't think we need these: 'ldst len', 'rc', 'lk']
65
66
67 def create_key(row):
68 res = OrderedDict()
69 #print ("row", row)
70 for key in keycolumns:
71 # registers IN - special-case: count number of regs RA/RB/RC/RS
72 if key in ['in1', 'in2', 'in3']:
73 if 'in' not in res:
74 res['in'] = 0
75 if row['unit'] == 'BRANCH': # branches must not include Vector SPRs
76 continue
77 if isreg(row[key]):
78 res['in'] += 1
79
80 # registers OUT
81 if key == 'out':
82 # If upd is 1 then increment the count of outputs
83 if 'outcnt' not in res:
84 res['outcnt'] = 0
85 if isreg(row[key]):
86 res['outcnt'] += 1
87 if row['upd'] == '1':
88 res['outcnt'] += 1
89
90 # CRs (Condition Register) (CR0 .. CR7)
91 if key.startswith('CR'):
92 if row[key].startswith('NONE'):
93 res[key] = '0'
94 else:
95 res[key] = '1'
96 if row['comment'].startswith('cr'):
97 res['crop'] = '1'
98 # unit
99 if key == 'unit':
100 if row[key] == 'LDST': # we care about LDST units
101 res[key] = row[key]
102 else:
103 res[key] = 'OTHER'
104 # LDST len (LoadStore length)
105 if key.startswith('ldst'):
106 if row[key].startswith('NONE'):
107 res[key] = '0'
108 else:
109 res[key] = '1'
110 # rc, lk
111 if key in ['rc', 'lk']:
112 if row[key] == 'ONE':
113 res[key] = '1'
114 elif row[key] == 'NONE':
115 res[key] = '0'
116 else:
117 res[key] = 'R'
118 if key == 'lk':
119 res[key] = row[key]
120
121 # Convert the numerics 'in' & 'outcnt' to strings
122 res['in'] = str(res['in'])
123 res['outcnt'] = str(res['outcnt'])
124
125 # constants
126 if row['in2'].startswith('CONST_'):
127 res['imm'] = "1" # row['in2'].split("_")[1]
128 else:
129 res['imm'] = ''
130
131 return res
132
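# Illustrative result for a hypothetical row resembling "add." (two GPR
# sources, one GPR destination, Rc=1 so CR0 is written, no immediate):
#
#     {'unit': 'OTHER', 'in': '2', 'outcnt': '1',
#      'CR in': '0', 'CR out': '1', 'imm': ''}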
133 #
134
135
136 def dformat(d):
137 res = []
138 for k, v in d.items():
139 res.append("%s: %s" % (k, v))
140 return ' '.join(res)
141
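# Illustrative example:
#     dformat({'unit': 'LDST', 'in': '2'})  # -> "unit: LDST in: 2"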
142
143 def tformat(d):
144 return ' | '.join(d) + " |"
145
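# Illustrative example (one row of the [[!table]] markup printed below):
#     tformat(['LDST', '2', '1'])  # -> "LDST | 2 | 1 |"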
146
147 def keyname(row):
148 res = []
149 if row['unit'] != 'OTHER':
150 res.append(row['unit'])
151 if row['in'] != '0':
152 res.append('%sR' % row['in'])
153 if row['outcnt'] != '0':
154 res.append('%sW' % row['outcnt'])
155 if row['CR in'] == '1' and row['CR out'] == '1':
156 if 'crop' in row:
157 res.append("CR=2R1W")
158 else:
159 res.append("CRio")
160 elif row['CR in'] == '1':
161 res.append("CRi")
162 elif row['CR out'] == '1':
163 res.append("CRo")
164 elif 'imm' in row and row['imm']:
165 res.append("imm")
166 return '-'.join(res)
167
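# Illustrative examples (the resulting names are the keys of the mapsto
# table in process_csvs() below; the dicts themselves are made up):
#
#     {'unit': 'OTHER', 'in': '2', 'outcnt': '1',
#      'CR in': '0', 'CR out': '1'}                     # -> '2R-1W-CRo'
#     {'unit': 'LDST', 'in': '2', 'outcnt': '1',
#      'CR in': '0', 'CR out': '0', 'imm': '1'}         # -> 'LDST-2R-1W-imm'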
168
169 class Format(enum.Enum):
170 BINUTILS = enum.auto()
171 VHDL = enum.auto()
172
173 @classmethod
174 def _missing_(cls, value):
175 return {
176 "binutils": Format.BINUTILS,
177 "vhdl": Format.VHDL,
178 }[value.lower()]
179
180 def __str__(self):
181 return self.name.lower()
182
183 def declarations(self, values, lens):
184 def declaration_binutils(value, width):
185 yield f"/* TODO: implement binutils declaration (value={value!r}, width={width!r}) */"
186
187 def declaration_vhdl(value, width):
188 yield f" type sv_{value}_rom_array_t is " \
189 f"array(0 to {width}) of sv_decode_rom_t;"
190
191 for value in values:
192 if value not in lens:
193 todo = [f"TODO {value} (or no SVP64 augmentation)"]
194 todo = self.wrap_comment(todo)
195 yield from map(lambda line: f" {line}", todo)
196 else:
197 width = lens[value]
198 yield from {
199 Format.BINUTILS: declaration_binutils,
200 Format.VHDL: declaration_vhdl,
201 }[self](value, width)
202
203 def definitions(self, entries_svp64, fullcols):
204 def definitions_vhdl():
205 for (value, entries) in entries_svp64.items():
206 yield ""
207 yield f" constant sv_{value}_decode_rom_array :"
208 yield f" sv_{value}_rom_array_t := ("
209 yield f" -- {' '.join(fullcols)}"
210
211 for (op, insn, row) in entries:
212 yield f" {op:>13} => ({', '.join(row)}), -- {insn}"
213
214 yield f" {'others':>13} => sv_illegal_inst"
215 yield " );"
216 yield ""
217
218 def definitions_binutils():
219 yield f"/* TODO: implement binutils definitions */"
220
221 yield from {
222 Format.BINUTILS: definitions_binutils,
223 Format.VHDL: definitions_vhdl,
224 }[self]()
225
226 def wrap_comment(self, lines):
227 def wrap_comment_binutils(lines):
228 lines = tuple(lines)
229 if len(lines) == 1:
230 yield f"/* {lines[0]} */"
231 else:
232 yield "/*"
233 yield from map(lambda line: f" * {line}", lines)
234 yield " */"
235
236 def wrap_comment_vhdl(lines):
237 yield from map(lambda line: f"-- {line}", lines)
238
239 yield from {
240 Format.BINUTILS: wrap_comment_binutils,
241 Format.VHDL: wrap_comment_vhdl,
242 }[self](lines)
243
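# Illustrative behaviour (Format is also used as the argparse --format type):
#
#     fmt = Format("vhdl")                      # _missing_ maps to Format.VHDL
#     list(fmt.wrap_comment(["hello"]))         # ['-- hello']
#     list(Format("binutils").wrap_comment(["hello"]))  # ['/* hello */']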
244
245 def process_csvs(format):
246 csvs = {}
247 csvs_svp64 = {}
248 bykey = {}
249 primarykeys = set()
250 dictkeys = OrderedDict()
251 immediates = {}
252 insns = {} # dictionary of CSV row, by instruction
253 insn_to_csv = {}
254
255 print("# OpenPOWER ISA register 'profiles'")
256 print('')
257 print("this page is auto-generated, do not edit")
258 print("created by http://libre-soc.org/openpower/sv_analysis.py")
259 print('')
260
261 # glob all .csv files found via find_wiki_file
262 pth = find_wiki_file("*.csv")
263
264 # Ignore those containing: valid test sprs
265 for fname in glob(pth):
266 print("sv analysis checking", fname)
267 _, name = os.path.split(fname)
268 if '-' in name:
269 continue
270 if 'valid' in fname:
271 continue
272 if 'test' in fname:
273 continue
274 if fname.endswith('sprs.csv'):
275 continue
276 if fname.endswith('minor_19_valid.csv'):
277 continue
278 if 'RM' in fname:
279 continue
280 csvname = os.path.split(fname)[1]
281 csvname_ = csvname.split(".")[0]
282 # csvname is something like: minor_59.csv, fname the whole path
283 csv = get_csv(fname)
284 csvs[fname] = csv
285 csvs_svp64[csvname_] = []
286 for row in csv:
287 if blank_key(row):
288 continue
289 print("row", row)
290 insn_name = row['comment']
291 condition = row['CONDITIONS']
292 # skip instructions that are not suitable
293 if insn_name.startswith("l") and insn_name.endswith("br"):
294 continue # skip pseudo-alias lxxxbr
295 if insn_name in ['mcrxr', 'mcrxrx', 'darn']:
296 continue
297 if insn_name in ['bctar', 'bcctr']:
298 continue
299 if 'rfid' in insn_name:
300 continue
301 if insn_name in ['setvl', ]: # SVP64 opcodes
302 continue
303
304 insns[(insn_name, condition)] = row # accumulate csv data
305 insn_to_csv[insn_name] = csvname_ # CSV file name by instruction
306 dkey = create_key(row)
307 key = tuple(dkey.values())
308 # print("key=", key)
309 dictkeys[key] = dkey
310 primarykeys.add(key)
311 if key not in bykey:
312 bykey[key] = []
313 bykey[key].append((csvname, row['opcode'], insn_name, condition,
314 row['form'].upper() + '-Form'))
315
316 # detect immediates, collate them (useful info)
317 if row['in2'].startswith('CONST_'):
318 imm = row['in2'].split("_")[1]
319 if key not in immediates:
320 immediates[key] = set()
321 immediates[key].add(imm)
322
323 primarykeys = list(primarykeys)
324 primarykeys.sort()
325
326 # mapping to old SVPrefix "Forms"
327 mapsto = {'3R-1W-CRo': 'RM-1P-3S1D',
328 '2R-1W-CRio': 'RM-1P-2S1D',
329 '2R-1W-CRi': 'RM-1P-3S1D',
330 '2R-1W-CRo': 'RM-1P-2S1D',
331 '2R': 'non-SV',
332 '2R-1W': 'RM-1P-2S1D',
333 '1R-CRio': 'RM-2P-2S1D',
334 '2R-CRio': 'RM-1P-2S1D',
335 '2R-CRo': 'RM-1P-2S1D',
336 '1R': 'non-SV',
337 '1R-1W-CRio': 'RM-2P-1S1D',
338 '1R-1W-CRo': 'RM-2P-1S1D',
339 '1R-1W': 'RM-2P-1S1D',
340 '1R-1W-imm': 'RM-2P-1S1D',
341 '1R-CRo': 'RM-2P-1S1D',
342 '1R-imm': 'non-SV',
343 '1W-CRo': 'RM-1P-1D',
344 '1W': 'non-SV',
345 '1W-CRi': 'RM-2P-1S1D',
346 'CRio': 'RM-2P-1S1D',
347 'CR=2R1W': 'RM-1P-2S1D',
348 'CRi': 'non-SV',
349 'imm': 'non-SV',
350 '': 'non-SV',
351 'LDST-2R-imm': 'LDSTRM-2P-2S',
352 'LDST-2R-1W-imm': 'LDSTRM-2P-2S1D',
353 'LDST-2R-1W': 'LDSTRM-2P-2S1D',
354 'LDST-2R-2W': 'LDSTRM-2P-2S1D',
355 'LDST-1R-1W-imm': 'LDSTRM-2P-1S1D',
356 'LDST-1R-2W-imm': 'LDSTRM-2P-1S2D',
357 'LDST-3R': 'LDSTRM-2P-3S',
358 'LDST-3R-CRo': 'LDSTRM-2P-3S', # st*x
359 'LDST-3R-1W': 'LDSTRM-2P-2S1D', # st*x
360 }
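# Reading the RM names (illustrative note): the middle element is the
# predication type (1P single, 2P twin) and the last gives source/dest
# register counts, e.g. 'add RT,RA,RB' is keyed '2R-1W' above and maps
# to 'RM-1P-2S1D'.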
361 print("# map to old SV Prefix")
362 print('')
363 print('[[!table data="""')
364 for key in primarykeys:
365 name = keyname(dictkeys[key])
366 value = mapsto.get(name, "-")
367 print(tformat([name, value + " "]))
368 print('"""]]')
369 print('')
370
371 print("# keys")
372 print('')
373 print('[[!table data="""')
374 print(tformat(tablecols) + " imms | name |")
375
376 # print out the keys and the table from which they're derived
377 for key in primarykeys:
378 name = keyname(dictkeys[key])
379 row = tformat(dictkeys[key].values())
380 imms = list(immediates.get(key, ""))
381 imms.sort()
382 row += " %s | " % ("/".join(imms))
383 row += " %s |" % name
384 print(row)
385 print('"""]]')
386 print('')
387
388 # print out, by remap name, all the instructions under that category
389 for key in primarykeys:
390 name = keyname(dictkeys[key])
391 value = mapsto.get(name, "-")
392 print("## %s (%s)" % (name, value))
393 print('')
394 print('[[!table data="""')
395 print(tformat(['CSV', 'opcode', 'asm', 'form']))
396 rows = bykey[key]
397 rows.sort()
398 for row in rows:
399 print(tformat(row))
400 print('"""]]')
401 print('')
402
403 # for fname, csv in csvs.items():
404 # print (fname)
405
406 # for insn, row in insns.items():
407 # print (insn, row)
408
409 print("# svp64 remaps")
410 svp64 = OrderedDict()
411 # create a CSV file, per category, with SV "augmentation" info
412 # XXX note: 'out2' not added here, needs to be added to CSV files
413 # KEEP TRACK OF THESE https://bugs.libre-soc.org/show_bug.cgi?id=619
414 csvcols = ['insn', 'CONDITIONS', 'Ptype', 'Etype', '0', '1', '2', '3']
415 csvcols += ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out'] # temporary
416 for key in primarykeys:
417 # get the decoded key containing row-analysis, and name/value
418 dkey = dictkeys[key]
419 name = keyname(dkey)
420 value = mapsto.get(name, "-")
421 if value == 'non-SV':
422 continue
423
424 # print out svp64 tables by category
425 print("* **%s**: %s" % (name, value))
426
427 # store csv entries by svp64 RM category
428 if value not in svp64:
429 svp64[value] = []
430
431 rows = bykey[key]
432 rows.sort()
433
434 for row in rows:
435 # for idx in range(len(row)):
436 # if row[idx] == 'NONE':
437 # row[idx] = ''
438 # get the instruction
439 print(key, row)
440 insn_name = row[2]
441 condition = row[3]
442 insn = insns[(insn_name, condition)]
443 # start constructing svp64 CSV row
444 res = OrderedDict()
445 res['insn'] = insn_name
446 res['CONDITIONS'] = condition
447 res['Ptype'] = value.split('-')[1] # predication type (RM-xN-xxx)
448 # get whether R_xxx_EXTRAn fields are 2-bit or 3-bit
449 res['Etype'] = 'EXTRA2'
450 # go through each register matching to Rxxxx_EXTRAx
451 for k in ['0', '1', '2', '3']:
452 res[k] = ''
453 # create "fake" out2 (TODO, needs to be added to CSV files)
454 # KEEP TRACK HERE https://bugs.libre-soc.org/show_bug.cgi?id=619
455 res['out2'] = 'NONE'
456 if insn['upd'] == '1': # LD/ST with update has RA as out2
457 res['out2'] = 'RA'
458
459 # temporary useful info
460 regs = []
461 for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
462 if insn[k].startswith('CONST'):
463 res[k] = ''
464 regs.append('')
465 else:
466 res[k] = insn[k]
467 if insn[k] == 'RA_OR_ZERO':
468 regs.append('RA')
469 elif insn[k] != 'NONE':
470 regs.append(insn[k])
471 else:
472 regs.append('')
473
474 print("regs", insn_name, regs)
475
476 # for LD/ST FP, use FRT/FRS not RT/RS, and use CR1 not CR0
477 if insn_name.startswith("lf"):
478 dRT = 'd:FRT'
479 dCR = 'd:CR1'
480 else:
481 dRT = 'd:RT'
482 dCR = 'd:CR0'
483 if insn_name.startswith("stf"):
484 sRS = 's:FRS'
485 dCR = 'd:CR1'
486 else:
487 sRS = 's:RS'
488 # leave dCR as set above (d:CR1 for lf*, d:CR0 otherwise)
489
490 # sigh now the fun begins. this isn't the sanest way to do it
491 # but the patterns are pretty regular.
492
493 if value == 'LDSTRM-2P-1S1D':
494 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
495 res['0'] = dRT # RT: Rdest_EXTRA3
496 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
497
498 elif value == 'LDSTRM-2P-1S2D':
499 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
500 res['0'] = dRT # RT: Rdest1_EXTRA2
501 res['1'] = 'd:RA' # RA: Rdest2_EXTRA2
502 res['2'] = 's:RA' # RA: Rsrc1_EXTRA2
503
504 elif value == 'LDSTRM-2P-2S':
505 # stw, std, sth, stb
506 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
507 res['0'] = sRS # RS: Rsrc1_EXTRA3
508 res['1'] = 's:RA' # RA: Rsrc2_EXTRA3
509
510 elif value == 'LDSTRM-2P-2S1D':
511 if 'st' in insn_name and 'x' not in insn_name: # stwu/stbu etc
512 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
513 res['0'] = 'd:RA' # RA: Rdest1_EXTRA2
514 res['1'] = sRS # RS: Rsrc1_EXTRA2
515 res['2'] = 's:RA' # RA: Rsrc2_EXTRA2
516 elif 'st' in insn_name and 'x' in insn_name: # stwux
517 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
518 res['0'] = 'd:RA' # RA: Rdest1_EXTRA2
519 # RS: Rdest2_EXTRA2, RA: Rsrc1_EXTRA2
520 res['1'] = sRS+';s:RA'
521 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
522 elif 'u' in insn_name: # ldux etc.
523 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
524 res['0'] = dRT # RT: Rdest1_EXTRA2
525 res['1'] = 'd:RA' # RA: Rdest2_EXTRA2
526 res['2'] = 's:RB' # RB: Rsrc1_EXTRA2
527 else:
528 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
529 res['0'] = dRT # RT: Rdest1_EXTRA2
530 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
531 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
532
533 elif value == 'LDSTRM-2P-3S':
534 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
535 if 'cx' in insn_name:
536 res['0'] = sRS+';'+dCR # RS: Rsrc1_EXTRA2; CR0: dest
537 else:
538 res['0'] = sRS # RS: Rsrc1_EXTRA2
539 res['1'] = 's:RA' # RA: Rsrc2_EXTRA2
540 res['2'] = 's:RB' # RB: Rsrc3_EXTRA2
541
542 elif value == 'RM-2P-1S1D':
543 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
544 if insn_name == 'mtspr':
545 res['0'] = 'd:SPR' # SPR: Rdest1_EXTRA3
546 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
547 elif insn_name == 'mfspr':
548 res['0'] = 'd:RS' # RS: Rdest1_EXTRA3
549 res['1'] = 's:SPR' # SPR: Rsrc1_EXTRA3
550 elif name == 'CRio' and insn_name == 'mcrf':
551 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
552 res['1'] = 's:BFA' # BFA: Rsrc1_EXTRA3
553 elif 'mfcr' in insn_name or 'mfocrf' in insn_name:
554 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
555 res['1'] = 's:CR' # CR: Rsrc1_EXTRA3
556 elif insn_name == 'setb':
557 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
558 res['1'] = 's:BFA' # BFA: Rsrc1_EXTRA3
559 elif insn_name.startswith('cmp'): # cmpi
560 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
561 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
562 elif regs == ['RA', '', '', 'RT', '', '']:
563 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
564 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
565 elif regs == ['RA', '', '', 'RT', '', 'CR0']:
566 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
567 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
568 elif (regs == ['RS', '', '', 'RA', '', 'CR0'] or
569 regs == ['', '', 'RS', 'RA', '', 'CR0']):
570 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
571 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
572 elif regs == ['RS', '', '', 'RA', '', '']:
573 res['0'] = 'd:RA' # RA: Rdest1_EXTRA3
574 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
575 elif regs == ['', 'FRB', '', 'FRT', '0', 'CR1']:
576 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
577 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
578 elif regs == ['', 'FRB', '', '', '', 'CR1']:
579 res['0'] = 'd:CR1' # CR1: Rdest1_EXTRA3
580 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
581 elif regs == ['', 'FRB', '', '', '', 'BF']:
582 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
583 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
584 elif regs == ['', 'FRB', '', 'FRT', '', 'CR1']:
585 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
586 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
587 elif insn_name.startswith('bc'):
588 res['0'] = 'd:BI' # BI: Rdest1_EXTRA3
589 res['1'] = 's:BI' # BI: Rsrc1_EXTRA3
590 else:
591 res['0'] = 'TODO'
592
593 elif value == 'RM-1P-2S1D':
594 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
595 if insn_name.startswith('cr'):
596 res['0'] = 'd:BT' # BT: Rdest1_EXTRA3
597 res['1'] = 's:BA' # BA: Rsrc1_EXTRA3
598 res['2'] = 's:BB' # BB: Rsrc2_EXTRA3
599 elif regs == ['FRA', '', 'FRC', 'FRT', '', 'CR1']:
600 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
601 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
602 res['2'] = 's:FRC' # FRC: Rsrc2_EXTRA3
603 # should be for fcmp
604 elif regs == ['FRA', 'FRB', '', '', '', 'BF']:
605 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
606 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
607 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
608 elif regs == ['FRA', 'FRB', '', 'FRT', '', '']:
609 res['0'] = 'd:FRT' # FRT: Rdest1_EXTRA3
610 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
611 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
612 elif regs == ['FRA', 'FRB', '', 'FRT', '', 'CR1']:
613 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
614 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
615 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
616 elif name == '2R-1W' or insn_name == 'cmpb': # cmpb
617 if insn_name in ['bpermd', 'cmpb']:
618 res['0'] = 'd:RA' # RA: Rdest1_EXTRA3
619 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
620 else:
621 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
622 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
623 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
624 elif insn_name.startswith('cmp'): # cmp
625 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
626 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
627 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
628 elif (regs == ['', 'RB', 'RS', 'RA', '', 'CR0'] or
629 regs == ['RS', 'RB', '', 'RA', '', 'CR0']):
630 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
631 res['1'] = 's:RB' # RB: Rsrc1_EXTRA3
632 res['2'] = 's:RS' # RS: Rsrc2_EXTRA3
633 elif regs == ['RA', 'RB', '', 'RT', '', 'CR0']:
634 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
635 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
636 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
637 elif regs == ['RA', '', 'RS', 'RA', '', 'CR0']:
638 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
639 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
640 res['2'] = 's:RS' # RS: Rsrc2_EXTRA3
641 else:
642 res['0'] = 'TODO'
643
644 elif value == 'RM-2P-2S1D':
645 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
646 if insn_name.startswith('mt'): # mtcrf
647 res['0'] = 'd:CR' # CR: Rdest1_EXTRA2
648 res['1'] = 's:RS' # RS: Rsrc1_EXTRA2
649 res['2'] = 's:CR' # CR: Rsrc2_EXTRA2
650 else:
651 res['0'] = 'TODO'
652
653 elif value == 'RM-1P-3S1D':
654 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
655 if regs == ['RA', 'RB', 'RT', 'RT', '', 'CR0']:
656 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA2
657 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
658 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
659 res['3'] = 's:RT' # RT: Rsrc3_EXTRA2
660 elif insn_name == 'isel':
661 res['0'] = 'd:RT' # RT: Rdest1_EXTRA2
662 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
663 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
664 res['3'] = 's:BC' # BC: Rsrc3_EXTRA2
665 else:
666 res['0'] = 'd:FRT;d:CR1' # FRT, CR1: Rdest1_EXTRA2
667 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA2
668 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA2
669 res['3'] = 's:FRC' # FRC: Rsrc3_EXTRA2
670
671 elif value == 'RM-1P-1D':
672 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
673 if insn_name == 'svstep':
674 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
675
676 # add to svp64 csvs
677 # for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
678 # del res[k]
679 # if res['0'] != 'TODO':
680 for k in res:
681 if k == 'CONDITIONS':
682 continue
683 if res[k] == 'NONE' or res[k] == '':
684 res[k] = '0'
685 svp64[value].append(res)
686 # also add to by-CSV version
687 csv_fname = insn_to_csv[insn_name]
688 csvs_svp64[csv_fname].append(res)
689
690 print('')
691
692 # now write out the csv files
693 for value, csv in svp64.items():
694 # print out svp64 tables by category
695 print("## %s" % value)
696 print('')
697 print('[[!table format=csv file="openpower/isatables/%s.csv"]]' %
698 value)
699 print('')
700
701 #csvcols = ['insn', 'Ptype', 'Etype', '0', '1', '2', '3']
702 write_csv("%s.csv" % value, csv, csvcols + ['out2'])
703
704 # okaaay, now we re-read them back in for producing microwatt SV
705
706 # get SVP64 augmented CSV files
707 svt = SVP64RM(microwatt_format=True)
708 # glob all .csv files found via find_wiki_file
709 pth = find_wiki_file("*.csv")
710
711 # Ignore those containing: valid test sprs
712 for fname in glob(pth):
713 print("post-checking", fname)
714 _, name = os.path.split(fname)
715 if '-' in name:
716 continue
717 if 'valid' in fname:
718 continue
719 if 'test' in fname:
720 continue
721 if fname.endswith('sprs.csv'):
722 continue
723 if fname.endswith('minor_19_valid.csv'):
724 continue
725 if 'RM' in fname:
726 continue
727 svp64_csv = svt.get_svp64_csv(fname)
728
729 csvcols = ['insn', 'Ptype', 'Etype']
730 csvcols += ['in1', 'in2', 'in3', 'out', 'out2', 'CR in', 'CR out']
731
732 if format is Format.VHDL:
733 # and a nice microwatt VHDL file
734 file_path = find_wiki_file("sv_decode.vhdl")
735 elif format is Format.BINUTILS:
736 file_path = find_wiki_file("binutils.c")
737
738 with open(file_path, 'w') as stream:
739 output(format, svt, csvcols, insns, csvs_svp64, stream)
740
741
742 def output_autogen_disclaimer(format, stream):
743 lines = (
744 "this file is auto-generated, do not edit",
745 "http://libre-soc.org/openpower/sv_analysis.py",
746 "part of Libre-SOC, sponsored by NLnet",
747 )
748 for line in format.wrap_comment(lines):
749 stream.write(line)
750 stream.write("\n")
751 stream.write("\n")
752
753
754 def output(format, svt, csvcols, insns, csvs_svp64, stream):
755 lens = {
756 'major': 63,
757 'minor_4': 63,
758 'minor_19': 7,
759 'minor_30': 15,
760 'minor_31': 1023,
761 'minor_58': 63,
762 'minor_59': 31,
763 'minor_62': 63,
764 'minor_63l': 511,
765 'minor_63h': 16,
766 }
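# Illustrative reading of 'lens': each value is the top index of the decode
# ROM array generated for that CSV, i.e. the largest opcode it can hold;
# e.g. minor_31 has a 10-bit extended opcode, hence indices 0 to 1023.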
767
768 def svp64_canonicalize(item):
769 (value, csv) = item
770 value = value.lower().replace("-", "_")
771 return (value, csv)
772
773 csvs_svp64_canon = dict(map(svp64_canonicalize, csvs_svp64.items()))
774
775 # disclaimer
776 output_autogen_disclaimer(format, stream)
777
778 # declarations
779 for line in format.declarations(csvs_svp64_canon.keys(), lens):
780 stream.write(f"{line}\n")
781
782 # definitions
783 sv_cols = ['sv_in1', 'sv_in2', 'sv_in3', 'sv_out', 'sv_out2',
784 'sv_cr_in', 'sv_cr_out']
785 fullcols = csvcols + sv_cols
786
787 entries_svp64 = defaultdict(list)
788 for (value, csv) in filter(lambda kv: kv[0] in lens, csvs_svp64_canon.items()):
789 for entry in csv:
790 insn = str(entry['insn'])
791 condition = str(entry['CONDITIONS'])
792 sventry = svt.svp64_instrs.get(insn, None)
793 op = insns[(insn, condition)]['opcode']
794 # binary-to-vhdl-binary
795 if op.startswith("0b"):
796 op = "2#%s#" % op[2:]
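# e.g. a CSV opcode of '0b0101010' becomes the VHDL binary literal
# 2#0101010# (illustrative opcode value)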
797 row = []
798 for colname in csvcols[1:]:
799 re = entry[colname]
800 # zero replace with NONE
801 if re == '0':
802 re = 'NONE'
803 # 1/2 predication
804 re = re.replace("1P", "P1")
805 re = re.replace("2P", "P2")
806 row.append(re)
807 print("sventry", sventry)
808 for colname in sv_cols:
809 if sventry is None:
810 re = 'NONE'
811 else:
812 re = sventry[colname]
813 row.append(re)
814 entries_svp64[value].append((op, insn, row))
815
816 for line in format.definitions(entries_svp64, fullcols):
817 stream.write(f"{line}\n")
818
819
820 def main():
821 parser = argparse.ArgumentParser()
822 parser.add_argument("-f", "--format",
823 type=Format, choices=Format, default=Format.VHDL,
824 help="format to be used (binutils or VHDL)")
825 args = parser.parse_args()
826 process_csvs(args.format)
827
828
829 if __name__ == '__main__':
830 # don't do anything other than call main() here, because this code is bypassed
831 # by the sv_analysis command created by setup.py
832 main()