add fmvis as a new RM-1P-1S SVP64 RM type
[openpower-isa.git] src/openpower/sv/sv_analysis.py
1 #!/usr/bin/env python3
2 #
3 # NOTE that this program requires Python 3.6 or later (it uses f-strings
4 # and enum.auto); please do not add syntax that needs anything newer.
5 #
6 # Initial version written by lkcl Oct 2020
7 # This program analyses the Power 9 op codes and looks at in/out register uses
8 # The results are displayed:
9 # https://libre-soc.org/openpower/opcode_regs_deduped/
10 #
11 # It finds .csv files in the directory isatables/
12 # then goes through the categories and creates svp64 CSV augmentation
13 # tables on a per-opcode basis
14
15 import argparse
16 import csv
17 import enum
18 import os
19 from os.path import dirname, join
20 from glob import glob
21 from collections import defaultdict
22 from collections import OrderedDict
23 from openpower.decoder.power_svp64 import SVP64RM
24 from openpower.decoder.power_enums import find_wiki_file, get_csv
25 from openpower.util import log
26
27
28 # Write an array of dictionaries to the CSV file name:
29 def write_csv(name, items, headers):
30 file_path = find_wiki_file(name)
31 with open(file_path, 'w') as csvfile:
32 writer = csv.DictWriter(csvfile, headers, lineterminator="\n")
33 writer.writeheader()
34 writer.writerows(items)
35
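# illustrative call (hypothetical data), matching the per-category usage
# further below where the SVP64 augmentation tables are written out:
#   write_csv("RM-1P-2S1D.csv",
#             [{'insn': 'add', 'Ptype': '1P', 'Etype': 'EXTRA3'}],
#             ['insn', 'Ptype', 'Etype'])
# find_wiki_file() resolves the name to the isatables/ (wiki) copy.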
36 # Returns True if every value in the row is blank/empty, i.e. the row
37 # carries no information and should be skipped.
38
39
40 def blank_key(row):
41 # for v in row.values():
42 # if 'SPR' in v: # skip all SPRs
43 # return True
44 for v in row.values():
45 if v:
46 return False
47 return True
48
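# illustrative (hypothetical rows):
#   blank_key({'opcode': '', 'comment': ''})            -> True  (skip row)
#   blank_key({'opcode': '0b11010', 'comment': 'addi'}) -> False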
49 # General purpose registers have names like: RA, RT, R1, ...
50 # Floating point registers have names like: FRT, FRA, FR1, ..., FRTp, ...
51 # Return True if field is a register
52
53
54 def isreg(field):
55 return (field.startswith('R') or field.startswith('FR') or
56 field == 'SPR')
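# e.g. isreg('RA') -> True, isreg('FRT') -> True, isreg('SPR') -> True,
#      isreg('CONST_UI') -> False, isreg('NONE') -> False, isreg('BF') -> False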
57
58
59 # These are the CSV columns (instruction attributes: unit, register
60 # in/out fields, CR usage) from which the per-instruction key is built
61 keycolumns = ['unit', 'in1', 'in2', 'in3', 'out', 'CR in', 'CR out',
62 ] # don't think we need these: 'ldst len', 'rc', 'lk']
63
64 tablecols = ['unit', 'in', 'outcnt', 'CR in', 'CR out', 'imm'
65 ] # don't think we need these: 'ldst len', 'rc', 'lk']
66
67
68 def create_key(row):
69 res = OrderedDict()
70 #print ("row", row)
71 for key in keycolumns:
72 # registers IN - special-case: count number of regs RA/RB/RC/RS
73 if key in ['in1', 'in2', 'in3']:
74 if 'in' not in res:
75 res['in'] = 0
76 if row['unit'] == 'BRANCH': # branches must not include Vector SPRs
77 continue
78 if isreg(row[key]):
79 res['in'] += 1
80
81 # registers OUT
82 if key == 'out':
83 # If upd is 1 then increment the count of outputs
84 if 'outcnt' not in res:
85 res['outcnt'] = 0
86 if isreg(row[key]):
87 res['outcnt'] += 1
88 if row['upd'] == '1':
89 res['outcnt'] += 1
90
91 # CRs (Condition Register) (CR0 .. CR7)
92 if key.startswith('CR'):
93 if row[key].startswith('NONE'):
94 res[key] = '0'
95 else:
96 res[key] = '1'
97 if row['comment'].startswith('cr'):
98 res['crop'] = '1'
99 # unit
100 if key == 'unit':
101 if row[key] == 'LDST': # we care about LDST units
102 res[key] = row[key]
103 else:
104 res[key] = 'OTHER'
105 # LDST len (LoadStore length)
106 if key.startswith('ldst'):
107 if row[key].startswith('NONE'):
108 res[key] = '0'
109 else:
110 res[key] = '1'
111 # rc, lk
112 if key in ['rc', 'lk']:
113 if row[key] == 'ONE':
114 res[key] = '1'
115 elif row[key] == 'NONE':
116 res[key] = '0'
117 else:
118 res[key] = 'R'
119 if key == 'lk':
120 res[key] = row[key]
121
122 # Convert the numerics 'in' & 'outcnt' to strings
123 res['in'] = str(res['in'])
124 res['outcnt'] = str(res['outcnt'])
125
126 # constants
127 if row['in2'].startswith('CONST_'):
128 res['imm'] = "1" # row['in2'].split("_")[1]
129 else:
130 res['imm'] = ''
131
132 return res
133
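# worked example of create_key(), using a hypothetical "add"-style CSV row:
#   row = {'unit': 'ALU', 'in1': 'RA', 'in2': 'RB', 'in3': 'NONE',
#          'out': 'RT', 'CR in': 'NONE', 'CR out': 'CR0',
#          'upd': '0', 'comment': 'add', ...}
#   create_key(row) ->
#     OrderedDict([('unit', 'OTHER'), ('in', '2'), ('outcnt', '1'),
#                  ('CR in', '0'), ('CR out', '1'), ('imm', '')])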
134 #
135
136
137 def dformat(d):
138 res = []
139 for k, v in d.items():
140 res.append("%s: %s" % (k, v))
141 return ' '.join(res)
142
143
144 def tformat(d):
145 return "| " + ' | '.join(d) + " |"
146
147
148 def keyname(row):
149 res = []
150 if row['unit'] != 'OTHER':
151 res.append(row['unit'])
152 if row['in'] != '0':
153 res.append('%sR' % row['in'])
154 if row['outcnt'] != '0':
155 res.append('%sW' % row['outcnt'])
156 if row['CR in'] == '1' and row['CR out'] == '1':
157 if 'crop' in row:
158 res.append("CR=2R1W")
159 else:
160 res.append("CRio")
161 elif row['CR in'] == '1':
162 res.append("CRi")
163 elif row['CR out'] == '1':
164 res.append("CRo")
165 elif 'imm' in row and row['imm']:
166 res.append("imm")
167 return '-'.join(res)
168
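# continuing the worked example above: a hypothetical add-style key dict
#   {'unit': 'OTHER', 'in': '2', 'outcnt': '1',
#    'CR in': '0', 'CR out': '1', 'imm': ''}
# gives keyname(...) == '2R-1W-CRo', which the mapsto table further below
# translates to the SVP64 RM category 'RM-1P-2S1D'.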
169
170 class Format(enum.Enum):
171 BINUTILS = enum.auto()
172 VHDL = enum.auto()
173
174 @classmethod
175 def _missing_(cls, value):
176 return {
177 "binutils": Format.BINUTILS,
178 "vhdl": Format.VHDL,
179 }[value.lower()]
180
181 def __str__(self):
182 return self.name.lower()
183
184 def declarations(self, values, lens):
185 def declaration_binutils(value, width):
186 yield f"/* TODO: implement binutils declaration (value={value!r}, width={width!r}) */"
187
188 def declaration_vhdl(value, width):
189 yield f" type sv_{value}_rom_array_t is " \
190 f"array(0 to {width}) of sv_decode_rom_t;"
191
192 for value in values:
193 if value not in lens:
194 todo = [f"TODO {value} (or no SVP64 augmentation)"]
195 todo = self.wrap_comment(todo)
196 yield from map(lambda line: f" {line}", todo)
197 else:
198 width = lens[value]
199 yield from {
200 Format.BINUTILS: declaration_binutils,
201 Format.VHDL: declaration_vhdl,
202 }[self](value, width)
203
204 def definitions(self, entries_svp64, fullcols):
205 def definitions_vhdl():
206 for (value, entries) in entries_svp64.items():
207 yield ""
208 yield f" constant sv_{value}_decode_rom_array :"
209 yield f" sv_{value}_rom_array_t := ("
210 yield f" -- {' '.join(fullcols)}"
211
212 for (op, insn, row) in entries:
213 yield f" {op:>13} => ({', '.join(row)}), -- {insn}"
214
215 yield f" {'others':>13} => sv_illegal_inst"
216 yield " );"
217 yield ""
218
219 def definitions_binutils():
220 yield f"/* TODO: implement binutils definitions */"
221
222 yield from {
223 Format.BINUTILS: definitions_binutils,
224 Format.VHDL: definitions_vhdl,
225 }[self]()
226
227 def wrap_comment(self, lines):
228 def wrap_comment_binutils(lines):
229 lines = tuple(lines)
230 if len(lines) == 1:
231 yield f"/* {lines[0]} */"
232 else:
233 yield "/*"
234 yield from map(lambda line: f" * {line}", lines)
235 yield " */"
236
237 def wrap_comment_vhdl(lines):
238 yield from map(lambda line: f"-- {line}", lines)
239
240 yield from {
241 Format.BINUTILS: wrap_comment_binutils,
242 Format.VHDL: wrap_comment_vhdl,
243 }[self](lines)
244
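# example of the generated declarations: with Format.VHDL, value='minor_59'
# and width=31 (taken from the 'lens' table in output() below),
# declarations() yields:
#     type sv_minor_59_rom_array_t is array(0 to 31) of sv_decode_rom_t;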
245
246 def process_csvs(format):
247 csvs = {}
248 csvs_svp64 = {}
249 bykey = {}
250 primarykeys = set()
251 dictkeys = OrderedDict()
252 immediates = {}
253 insns = {} # dictionary of CSV row, by instruction
254 insn_to_csv = {}
255
256 print("# Draft SVP64 Power ISA register 'profile's")
257 print('')
258 print("this page is auto-generated, do not edit")
259 print("created by http://libre-soc.org/openpower/sv_analysis.py")
260 print('')
261
262 # expand the wildcard: pick up every .csv opcode table in the wiki directory
263 pth = find_wiki_file("*.csv")
264
265 # Ignore those containing: valid test sprs
266 for fname in glob(pth):
267 #print("sv analysis checking", fname)
268 _, name = os.path.split(fname)
269 if '-' in name:
270 continue
271 if 'valid' in fname:
272 continue
273 if 'test' in fname:
274 continue
275 if fname.endswith('sprs.csv'):
276 continue
277 if fname.endswith('minor_19_valid.csv'):
278 continue
279 if 'RM' in fname:
280 continue
281 csvname = os.path.split(fname)[1]
282 csvname_ = csvname.split(".")[0]
283 # csvname is something like: minor_59.csv, fname the whole path
284 csv = get_csv(fname)
285 csvs[fname] = csv
286 csvs_svp64[csvname_] = []
287 for row in csv:
288 if blank_key(row):
289 continue
290 #print("row", row)
291 insn_name = row['comment']
292 condition = row['CONDITIONS']
293 # skip instructions that are not suitable
294 if insn_name.startswith("l") and insn_name.endswith("br"):
295 continue # skip pseudo-alias lxxxbr
296 if insn_name in ['mcrxr', 'mcrxrx', 'darn']:
297 continue
298 if insn_name in ['bctar', 'bcctr']:
299 continue
300 if 'rfid' in insn_name:
301 continue
302 if insn_name in ['setvl', ]: # SVP64 opcodes
303 continue
304
305 insns[(insn_name, condition)] = row # accumulate csv data
306 insn_to_csv[insn_name] = csvname_ # CSV file name by instruction
307 dkey = create_key(row)
308 key = tuple(dkey.values())
309 #print("key=", key, dkey)
310 dictkeys[key] = dkey
311 primarykeys.add(key)
312 if key not in bykey:
313 bykey[key] = []
314 bykey[key].append((csvname, row['opcode'], insn_name, condition,
315 row['form'].upper() + '-Form'))
316
317 # detect immediates, collate them (useful info)
318 if row['in2'].startswith('CONST_'):
319 imm = row['in2'].split("_")[1]
320 if key not in immediates:
321 immediates[key] = set()
322 immediates[key].add(imm)
323
324 primarykeys = list(primarykeys)
325 primarykeys.sort()
326
327 # mapping to old SVPrefix "Forms"
328 mapsto = {'3R-1W-CRo': 'RM-1P-3S1D',
329 '2R-1W-CRio': 'RM-1P-2S1D',
330 '2R-1W-CRi': 'RM-1P-3S1D',
331 '2R-1W-CRo': 'RM-1P-2S1D',
332 '2R': 'non-SV',
333 '2R-1W': 'RM-1P-2S1D',
334 '1R-CRio': 'RM-2P-2S1D',
335 '2R-CRio': 'RM-1P-2S1D',
336 '2R-CRo': 'RM-1P-2S1D',
337 '1R': 'non-SV',
338 '1R-1W-CRio': 'RM-2P-1S1D',
339 '1R-1W-CRo': 'RM-2P-1S1D',
340 '1R-1W': 'RM-2P-1S1D',
341 '1R-1W-imm': 'RM-2P-1S1D',
342 '1R-CRo': 'RM-2P-1S1D',
343 '1R-imm': 'RM-1P-1S',
344 '1W-CRo': 'RM-1P-1D',
345 '1W': 'non-SV',
346 '1W-CRi': 'RM-2P-1S1D',
347 'CRio': 'RM-2P-1S1D',
348 'CR=2R1W': 'RM-1P-2S1D',
349 'CRi': 'non-SV',
350 'imm': 'non-SV',
351 '': 'non-SV',
352 'LDST-2R-imm': 'LDSTRM-2P-2S',
353 'LDST-2R-1W-imm': 'LDSTRM-2P-2S1D',
354 'LDST-2R-1W': 'LDSTRM-2P-2S1D',
355 'LDST-2R-2W': 'LDSTRM-2P-2S1D',
356 'LDST-1R-1W-imm': 'LDSTRM-2P-1S1D',
357 'LDST-1R-2W-imm': 'LDSTRM-2P-1S2D',
358 'LDST-3R': 'LDSTRM-2P-3S',
359 'LDST-3R-CRo': 'LDSTRM-2P-3S', # st*x
360 'LDST-3R-1W': 'LDSTRM-2P-2S1D', # st*x
361 }
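# naming convention: RM-<n>P-<s>S<d>D is an SVP64 "RM" category with <n>
# predicates (1P = single, 2P = twin predication), <s> EXTRA-augmented source
# registers and <d> destination registers; the LDSTRM- prefix marks the
# Load/Store variants, and 'non-SV' means no SVP64 augmentation is applied.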
362 print("# map to old SV Prefix")
363 print('')
364 print('|internal key | public name |')
365 print('|----- | ---------- |')
366 for key in primarykeys:
367 name = keyname(dictkeys[key])
368 value = mapsto.get(name, "-")
369 print(tformat([name, value + " "]))
370 print('')
371 print('')
372
373 print("# keys")
374 print('')
375 print(tformat(tablecols) + " imms | name |")
376 print(tformat([" - "] * (len(tablecols)+2)))
377
378 # print out the keys and the table from which they're derived
379 for key in primarykeys:
380 name = keyname(dictkeys[key])
381 row = tformat(dictkeys[key].values())
382 imms = list(immediates.get(key, ""))
383 imms.sort()
384 row += " %s | " % ("/".join(imms))
385 row += " %s |" % name
386 print(row)
387 print('')
388 print('')
389
390 # print out, by remap name, all the instructions under that category
391 for key in primarykeys:
392 name = keyname(dictkeys[key])
393 value = mapsto.get(name, "-")
394 print("## %s (%s)" % (name, value))
395 print('')
396 print(tformat(['CSV', 'opcode', 'asm', 'flags', 'form']))
397 print(tformat(['---', '------', '---', '-----', '----']))
398 rows = bykey[key]
399 rows.sort()
400 for row in rows:
401 print(tformat(row))
402 print('')
403 print('')
404
405 # for fname, csv in csvs.items():
406 # print (fname)
407
408 # for insn, row in insns.items():
409 # print (insn, row)
410
411 print("# svp64 remaps")
412 svp64 = OrderedDict()
413 # create a CSV file, per category, with SV "augmentation" info
414 # XXX note: 'out2' not added here, needs to be added to CSV files
415 # KEEP TRACK OF THESE https://bugs.libre-soc.org/show_bug.cgi?id=619
416 csvcols = ['insn', 'mode', 'CONDITIONS', 'Ptype', 'Etype',]
417 csvcols += ['0', '1', '2', '3']
418 csvcols += ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out'] # temporary
419 for key in primarykeys:
420 # get the decoded key containing row-analysis, and name/value
421 dkey = dictkeys[key]
422 name = keyname(dkey)
423 value = mapsto.get(name, "-")
424 if value == 'non-SV':
425 continue
426
427 # print out svp64 tables by category
428 print("* **%s**: %s" % (name, value))
429
430 # store csv entries by svp64 RM category
431 if value not in svp64:
432 svp64[value] = []
433
434 rows = bykey[key]
435 rows.sort()
436
437 for row in rows:
438 # for idx in range(len(row)):
439 # if row[idx] == 'NONE':
440 # row[idx] = ''
441 # get the instruction
442 #print(key, row)
443 insn_name = row[2]
444 condition = row[3]
445 insn = insns[(insn_name, condition)]
446
447 # start constructing svp64 CSV row
448 res = OrderedDict()
449 res['insn'] = insn_name
450 res['CONDITIONS'] = condition
451 res['Ptype'] = value.split('-')[1] # predication type (RM-xN-xxx)
452 # get whether R_xxx_EXTRAn fields are 2-bit or 3-bit
453 res['Etype'] = 'EXTRA2'
454 # go through each register matching to Rxxxx_EXTRAx
455 for k in ['0', '1', '2', '3']:
456 res[k] = ''
457 # create "fake" out2 (TODO, needs to be added to CSV files)
458 # KEEP TRACK HERE https://bugs.libre-soc.org/show_bug.cgi?id=619
459 res['out2'] = 'NONE'
460 if insn['upd'] == '1': # LD/ST with update has RA as out2
461 res['out2'] = 'RA'
462
463 # set the SVP64 mode to NORMAL, LDST, BRANCH or CROP
464 crops = ['mfcr', 'mfocrf', 'mtcrf', 'mtocrf',
465 ]
466 mode = 'NORMAL'
467 if value.startswith('LDST'):
468 mode = 'LDST'
469 elif insn_name.startswith('bc'):
470 mode = 'BRANCH'
471 elif insn_name.startswith('cr') or insn_name in crops:
472 mode = 'CROP'
473 res['mode'] = mode
474
475 # temporary useful info
476 regs = []
477 for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
478 if insn[k].startswith('CONST'):
479 res[k] = ''
480 regs.append('')
481 else:
482 res[k] = insn[k]
483 if insn[k] == 'RA_OR_ZERO':
484 regs.append('RA')
485 elif insn[k] != 'NONE':
486 regs.append(insn[k])
487 else:
488 regs.append('')
489
490 #print("regs", insn_name, regs)
491
492 # for LD/ST FP, use FRT/FRS not RT/RS, and use CR1 not CR0
493 if insn_name.startswith("lf"):
494 dRT = 'd:FRT'
495 dCR = 'd:CR1'
496 else:
497 dRT = 'd:RT'
498 dCR = 'd:CR0'
499 if insn_name.startswith("stf"):
500 sRS = 's:FRS'
501 dCR = 'd:CR1'
502 else:
503 sRS = 's:RS'
504 dCR = 'd:CR0'
505
506 # sigh now the fun begins. this isn't the sanest way to do it
507 # but the patterns are pretty regular.
508
509 if value == 'LDSTRM-2P-1S1D':
510 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
511 res['0'] = dRT # RT: Rdest_EXTRA3
512 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
513
514 elif value == 'LDSTRM-2P-1S2D':
515 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
516 res['0'] = dRT # RT: Rdest1_EXTRA2
517 res['1'] = 'd:RA' # RA: Rdest2_EXTRA2
518 res['2'] = 's:RA' # RA: Rsrc1_EXTRA2
519
520 elif value == 'LDSTRM-2P-2S':
521 # stw, std, sth, stb
522 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
523 res['0'] = sRS # RS: Rsrc1_EXTRA3
524 res['1'] = 's:RA' # RA: Rsrc2_EXTRA3
525
526 elif value == 'LDSTRM-2P-2S1D':
527 if 'st' in insn_name and 'x' not in insn_name: # stwu/stbu etc
528 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
529 res['0'] = 'd:RA' # RA: Rdest1_EXTRA2
530 res['1'] = sRS # RS: Rsrc1_EXTRA2
531 res['2'] = 's:RA' # RA: Rsrc2_EXTRA2
532 elif 'st' in insn_name and 'x' in insn_name: # stwux
533 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
534 res['0'] = 'd:RA' # RA: Rdest1_EXTRA2
535 # RS: Rdest2_EXTRA2, RA: Rsrc1_EXTRA2
536 res['1'] = sRS+';s:RA'
537 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
538 elif 'u' in insn_name: # ldux etc.
539 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
540 res['0'] = dRT # RT: Rdest1_EXTRA2
541 res['1'] = 'd:RA' # RA: Rdest2_EXTRA2
542 res['2'] = 's:RB' # RB: Rsrc1_EXTRA2
543 else:
544 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
545 res['0'] = dRT # RT: Rdest1_EXTRA2
546 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
547 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
548
549 elif value == 'LDSTRM-2P-3S':
550 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
551 if 'cx' in insn_name:
552 res['0'] = sRS+';'+dCR # RS: Rsrc1_EXTRA2; CR0/CR1: dest
553 else:
554 res['0'] = sRS # RS: Rsrc1_EXTRA2
555 res['1'] = 's:RA' # RA: Rsrc2_EXTRA2
556 res['2'] = 's:RB' # RB: Rsrc3_EXTRA2
557
558 elif value == 'RM-2P-1S1D':
559 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
560 if insn_name == 'mtspr':
561 res['0'] = 'd:SPR' # SPR: Rdest1_EXTRA3
562 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
563 elif insn_name == 'mfspr':
564 res['0'] = 'd:RS' # RS: Rdest1_EXTRA3
565 res['1'] = 's:SPR' # SPR: Rsrc1_EXTRA3
566 elif name == 'CRio' and insn_name == 'mcrf':
567 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
568 res['1'] = 's:BFA' # BFA: Rsrc1_EXTRA3
569 elif 'mfcr' in insn_name or 'mfocrf' in insn_name:
570 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
571 res['1'] = 's:CR' # CR: Rsrc1_EXTRA3
572 elif insn_name == 'setb':
573 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
574 res['1'] = 's:BFA' # BFA: Rsrc1_EXTRA3
575 elif insn_name.startswith('cmp'): # cmpi
576 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
577 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
578 elif regs == ['RA', '', '', 'RT', '', '']:
579 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
580 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
581 elif regs == ['RA', '', '', 'RT', '', 'CR0']:
582 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
583 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
584 elif (regs == ['RS', '', '', 'RA', '', 'CR0'] or
585 regs == ['', '', 'RS', 'RA', '', 'CR0']):
586 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
587 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
588 elif regs == ['RS', '', '', 'RA', '', '']:
589 res['0'] = 'd:RA' # RA: Rdest1_EXTRA3
590 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
591 elif regs == ['', 'FRB', '', 'FRT', '0', 'CR1']:
592 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
593 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
594 elif regs == ['', 'FRB', '', '', '', 'CR1']:
595 res['0'] = 'd:CR1' # CR1: Rdest1_EXTRA3
596 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
597 elif regs == ['', 'FRB', '', '', '', 'BF']:
598 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
599 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
600 elif regs == ['', 'FRB', '', 'FRT', '', 'CR1']:
601 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
602 res['1'] = 's:FRB' # FRB: Rsrc1_EXTRA3
603 elif insn_name.startswith('bc'):
604 res['0'] = 'd:BI' # BI: Rdest1_EXTRA3
605 res['1'] = 's:BI' # BI: Rsrc1_EXTRA3
606 else:
607 res['0'] = 'TODO'
608
609 elif value == 'RM-1P-2S1D':
610 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
611 if insn_name.startswith('cr'):
612 res['0'] = 'd:BT' # BT: Rdest1_EXTRA3
613 res['1'] = 's:BA' # BA: Rsrc1_EXTRA3
614 res['2'] = 's:BB' # BB: Rsrc2_EXTRA3
615 elif regs == ['FRA', '', 'FRC', 'FRT', '', 'CR1']:
616 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
617 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
618 res['2'] = 's:FRC' # FRC: Rsrc2_EXTRA3
619 # should be for fcmp
620 elif regs == ['FRA', 'FRB', '', '', '', 'BF']:
621 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
622 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
623 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
624 elif regs == ['FRA', 'FRB', '', 'FRT', '', '']:
625 res['0'] = 'd:FRT' # FRT: Rdest1_EXTRA3
626 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
627 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
628 elif regs == ['FRA', 'FRB', '', 'FRT', '', 'CR1']:
629 res['0'] = 'd:FRT;d:CR1' # FRT,CR1: Rdest1_EXTRA3
630 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA3
631 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA3
632 elif name == '2R-1W' or insn_name == 'cmpb': # cmpb
633 if insn_name in ['bpermd', 'cmpb']:
634 res['0'] = 'd:RA' # RA: Rdest1_EXTRA3
635 res['1'] = 's:RS' # RS: Rsrc1_EXTRA3
636 else:
637 res['0'] = 'd:RT' # RT: Rdest1_EXTRA3
638 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
639 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
640 elif insn_name.startswith('cmp'): # cmp
641 res['0'] = 'd:BF' # BF: Rdest1_EXTRA3
642 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
643 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
644 elif (regs == ['', 'RB', 'RS', 'RA', '', 'CR0'] or
645 regs == ['RS', 'RB', '', 'RA', '', 'CR0']):
646 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
647 res['1'] = 's:RB' # RB: Rsrc1_EXTRA3
648 res['2'] = 's:RS' # RS: Rsrc2_EXTRA3
649 elif regs == ['RA', 'RB', '', 'RT', '', 'CR0']:
650 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
651 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
652 res['2'] = 's:RB' # RB: Rsrc2_EXTRA3
653 elif regs == ['RA', '', 'RS', 'RA', '', 'CR0']:
654 res['0'] = 'd:RA;d:CR0' # RA,CR0: Rdest1_EXTRA3
655 res['1'] = 's:RA' # RA: Rsrc1_EXTRA3
656 res['2'] = 's:RS' # RS: Rsrc2_EXTRA3
657 else:
658 res['0'] = 'TODO'
659
660 elif value == 'RM-2P-2S1D':
661 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
662 if insn_name.startswith('mt'): # mtcrf
663 res['0'] = 'd:CR' # CR: Rdest1_EXTRA2
664 res['1'] = 's:RS' # RS: Rsrc1_EXTRA2
665 res['2'] = 's:CR' # CR: Rsrc2_EXTRA2
666 else:
667 res['0'] = 'TODO'
668
669 elif value == 'RM-1P-3S1D':
670 res['Etype'] = 'EXTRA2' # RM EXTRA2 type
671 if regs == ['RA', 'RB', 'RT', 'RT', '', 'CR0']:
672 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA2
673 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
674 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
675 res['3'] = 's:RT' # RT: Rsrc3_EXTRA2
676 elif insn_name == 'isel':
677 res['0'] = 'd:RT' # RT: Rdest1_EXTRA2
678 res['1'] = 's:RA' # RA: Rsrc1_EXTRA2
679 res['2'] = 's:RB' # RB: Rsrc2_EXTRA2
680 res['3'] = 's:BC' # BC: Rsrc3_EXTRA2
681 else:
682 res['0'] = 'd:FRT;d:CR1' # FRT, CR1: Rdest1_EXTRA2
683 res['1'] = 's:FRA' # FRA: Rsrc1_EXTRA2
684 res['2'] = 's:FRB' # FRB: Rsrc2_EXTRA2
685 res['3'] = 's:FRC' # FRC: Rsrc3_EXTRA2
686
687 elif value == 'RM-1P-1D':
688 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
689 if insn_name == 'svstep':
690 res['0'] = 'd:RT;d:CR0' # RT,CR0: Rdest1_EXTRA3
691
692 elif value == 'RM-1P-1S':
693 res['Etype'] = 'EXTRA3' # RM EXTRA3 type
694 if insn_name == 'fmvis':
695 res['0'] = 's:FRS0' # FRS: Rsrc1_EXTRA3
696
697 # add to svp64 csvs
698 # for k in ['in1', 'in2', 'in3', 'out', 'CR in', 'CR out']:
699 # del res[k]
700 # if res['0'] != 'TODO':
701 for k in res:
702 if k == 'CONDITIONS':
703 continue
704 if res[k] == 'NONE' or res[k] == '':
705 res[k] = '0'
706 svp64[value].append(res)
707 # also add to by-CSV version
708 csv_fname = insn_to_csv[insn_name]
709 csvs_svp64[csv_fname].append(res)
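# illustrative end result (hypothetical add-style row, RM-1P-2S1D category):
# after blank/NONE fields are normalised to '0', the appended res looks like
#   {'insn': 'add', 'CONDITIONS': '', 'Ptype': '1P', 'Etype': 'EXTRA3',
#    '0': 'd:RT;d:CR0', '1': 's:RA', '2': 's:RB', '3': '0',
#    'out2': '0', 'mode': 'NORMAL', 'in1': 'RA', 'in2': 'RB', 'in3': '0',
#    'out': 'RT', 'CR in': '0', 'CR out': 'CR0'}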
710
711 print('')
712
713 # now write out the csv files
714 for value, csv in svp64.items():
715 if value == '-':
716 from time import sleep
717 print("WARNING, filename '-' should NOT exist. instrs missing")
718 print("TODO: fix this (and put in the bugreport number here)")
719 sleep(2)
720 continue
721 # print out svp64 tables by category
722 print("## %s" % value)
723 print('')
724 cols = csvcols + ['out2']
725 print(tformat(cols))
726 print(tformat([" - "] * (len(cols))))
727 for d in csv:
728 row = []
729 for k in cols:
730 row.append(d[k])
731 print(tformat(row))
732 print('')
733
734 #csvcols = ['insn', 'Ptype', 'Etype', '0', '1', '2', '3']
735 write_csv("%s.csv" % value, csv, csvcols + ['out2'])
736
737 # okaaay, now we re-read them back in for producing microwatt SV
738
739 # get SVP64 augmented CSV files
740 svt = SVP64RM(microwatt_format=True)
741 # expand the wildcard again (all .csv files)
742 pth = find_wiki_file("*.csv")
743
744 # Ignore those containing: valid test sprs
745 for fname in glob(pth):
746 #print("post-checking", fname)
747 _, name = os.path.split(fname)
748 if '-' in name:
749 continue
750 if 'valid' in fname:
751 continue
752 if 'test' in fname:
753 continue
754 if fname.endswith('sprs.csv'):
755 continue
756 if fname.endswith('minor_19_valid.csv'):
757 continue
758 if 'RM' in fname:
759 continue
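# get_svp64_csv() is called here for its side-effect: it accumulates the
# per-instruction SVP64 entries into svt.svp64_instrs, which output()
# below uses to fill in the sv_in1/sv_out/sv_cr_* columns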
760 svp64_csv = svt.get_svp64_csv(fname)
761
762 csvcols = ['insn', 'mode', 'Ptype', 'Etype']
763 csvcols += ['in1', 'in2', 'in3', 'out', 'out2', 'CR in', 'CR out']
764
765 if format is Format.VHDL:
766 # and a nice microwatt VHDL file
767 file_path = find_wiki_file("sv_decode.vhdl")
768 elif format is Format.BINUTILS:
769 file_path = find_wiki_file("binutils.c")
770
771 with open(file_path, 'w') as stream:
772 output(format, svt, csvcols, insns, csvs_svp64, stream)
773
774
775 def output_autogen_disclaimer(format, stream):
776 lines = (
777 "this file is auto-generated, do not edit",
778 "http://libre-soc.org/openpower/sv_analysis.py",
779 "part of Libre-SOC, sponsored by NLnet",
780 )
781 for line in format.wrap_comment(lines):
782 stream.write(line)
783 stream.write("\n")
784 stream.write("\n")
785
786
787 def output(format, svt, csvcols, insns, csvs_svp64, stream):
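# 'lens': upper bound of each generated decode-ROM array, i.e. the highest
# opcode index per opcode-map CSV (e.g. minor_31 has a 10-bit extended
# opcode field, hence 1023); used as the array width in declarations()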
788 lens = {
789 'major': 63,
790 'minor_4': 63,
791 'minor_19': 7,
792 'minor_30': 15,
793 'minor_31': 1023,
794 'minor_58': 63,
795 'minor_59': 31,
796 'minor_62': 63,
797 'minor_63l': 511,
798 'minor_63h': 16,
799 }
800
801 def svp64_canonicalize(item):
802 (value, csv) = item
803 value = value.lower().replace("-", "_")
804 return (value, csv)
805
806 csvs_svp64_canon = dict(map(svp64_canonicalize, csvs_svp64.items()))
807
808 # disclaimer
809 output_autogen_disclaimer(format, stream)
810
811 # declarations
812 for line in format.declarations(csvs_svp64_canon.keys(), lens):
813 stream.write(f"{line}\n")
814
815 # definitions
816 sv_cols = ['sv_in1', 'sv_in2', 'sv_in3', 'sv_out', 'sv_out2',
817 'sv_cr_in', 'sv_cr_out']
818 fullcols = csvcols + sv_cols
819
820 entries_svp64 = defaultdict(list)
821 for (value, csv) in filter(lambda kv: kv[0] in lens, csvs_svp64_canon.items()):
822 for entry in csv:
823 insn = str(entry['insn'])
824 condition = str(entry['CONDITIONS'])
825 mode = str(entry['mode'])
826 sventry = svt.svp64_instrs.get(insn, None)
827 if sventry is not None:
828 sventry['mode'] = mode
829 op = insns[(insn, condition)]['opcode']
830 # binary-to-vhdl-binary
831 if op.startswith("0b"):
832 op = "2#%s#" % op[2:]
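# e.g. a hypothetical opcode string "0b0010101" becomes the VHDL
# binary literal 2#0010101#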
833 row = []
834 for colname in csvcols[1:]:
835 re = entry[colname]
836 # zero replace with NONE
837 if re == '0':
838 re = 'NONE'
839 # 1/2 predication
840 re = re.replace("1P", "P1")
841 re = re.replace("2P", "P2")
842 row.append(re)
843 #print("sventry", sventry)
844 for colname in sv_cols:
845 if sventry is None:
846 re = 'NONE'
847 else:
848 re = sventry[colname]
849 row.append(re)
850 entries_svp64[value].append((op, insn, row))
851
852 for line in format.definitions(entries_svp64, fullcols):
853 stream.write(f"{line}\n")
854
855
856 def main():
857 import os
858 os.environ['SILENCELOG'] = '1'
859 parser = argparse.ArgumentParser()
860 parser.add_argument("-f", "--format",
861 type=Format, choices=Format, default=Format.VHDL,
862 help="format to be used (binutils or VHDL)")
863 args = parser.parse_args()
864 process_csvs(args.format)
865
866
867 if __name__ == '__main__':
868 # don't do anything other than call main() here, cuz this code is bypassed
869 # by the sv_analysis command created by setup.py
870 main()