+# SPDX-License-Identifier: LGPL-3-or-later
"""Cascading Power ISA Decoder
License: LGPLv3+
"""
import gc
-from collections import namedtuple
-from nmigen import Module, Elaboratable, Signal, Cat, Mux
-from nmigen.cli import rtlil
+from collections import namedtuple, OrderedDict
+from nmigen import Module, Elaboratable, Signal, Cat, Mux, Const
+from nmigen.cli import rtlil, verilog
from openpower.decoder.power_enums import (Function, Form, MicrOp,
- In1Sel, In2Sel, In3Sel, OutSel,
- SVEXTRA, SVEtype, SVPtype, # Simple-V
- RC, LdstLen, LDSTMode, CryIn,
- single_bit_flags, CRInSel,
- CROutSel, get_signal_name,
- default_values, insns, asmidx)
+ In1Sel, In2Sel, In3Sel, OutSel,
+ SVEXTRA, SVEtype, SVPtype, # Simple-V
+ RCOE, LdstLen, LDSTMode, CryIn,
+ single_bit_flags, CRInSel,
+ CROutSel, get_signal_name,
+ default_values, insns, asmidx,
+ asmlen)
from openpower.decoder.power_fields import DecodeFields
from openpower.decoder.power_fieldsn import SigDecode, SignalBitRange
from openpower.decoder.power_svp64 import SVP64RM
"opcodes", # a dictionary of minor patterns to find
"opint", # true => the pattern must not be in "10----11" format
# the bits (as a range) against which "pattern" matches
- "bitsel",
+ "bitsel", # should be in MSB0 order but isn't! it's LSB0. um.
"suffix", # shift the opcode down before decoding
"subdecoders" # list of further subdecoders for *additional* matches,
# *ONLY* after "pattern" has *ALSO* been matched against.
power_op_types = {'function_unit': Function,
'internal_op': MicrOp,
'form': Form,
- 'asmcode': 8,
+ 'asmcode': asmlen,
'SV_Etype': SVEtype,
'SV_Ptype': SVPtype,
'in1_sel': In1Sel,
'sv_cr_out': SVEXTRA,
'ldst_len': LdstLen,
'upd': LDSTMode,
- 'rc_sel': RC,
+ 'rc_sel': RCOE,
'cry_in': CryIn
}
'cr_out': 'CR out',
'ldst_len': 'ldst len',
'upd': 'upd',
+ 'rsrv': 'rsrv', # atomic operation
'rc_sel': 'rc',
'cry_in': 'cry in',
}
# process the comment field, strip out "equals" for FP
if "=" in asmcode:
asmcode = asmcode.split("=")[-1]
- log ("asmcode stripping =", asmcode,
- asmcode in asmidx, hasattr(self, "asmcode"))
+ log("asmcode stripping =", asmcode,
+ asmcode in asmidx, hasattr(self, "asmcode"))
if hasattr(self, "asmcode") and asmcode in asmidx:
res.append(self.asmcode.eq(asmidx[asmcode]))
for bit in single_bit_flags:
"""
def __init__(self, width, dec, name=None, col_subset=None,
- row_subset=None, conditions=None):
+ row_subset=None, conditions=None):
if conditions is None:
- conditions = {}
+ # XXX conditions = {}
+ conditions = {
+ 'SVP64FFT': Const(0, 1),
+ }
self.actually_does_something = False
self.pname = name
self.conditions = conditions
for d in dec:
if d.suffix is not None and d.suffix >= width:
d.suffix = None
+
self.width = width
# create some case statement condition patterns for matching
self.ccases = {}
self.ckeys = list(conditions.keys())
self.ckeys.sort()
- cswitch = []
- for i, ckey in enumerate(self.ckeys):
- case = '-' * len(self.ckeys)
- case[i] = '1'
- self.ccases[ckey] = case
- cswitch.append(conditions[ckey])
- self.cswitch = cswitch
+
+    def find_conditions(self, opcodes):
+        """Group opcode rows by their CONDITIONS column.
+
+        Returns a list where each entry is either a plain row dict
+        (for unconditional opcodes) or, for conditional opcodes, a
+        dict mapping condition-name -> row.  A leading '~' on a
+        condition name means the condition is inverted.
+        """
+        rows = OrderedDict()  # keyed by opcode, preserves CSV order
+        for row in opcodes:
+            condition = row['CONDITIONS']
+            opcode = row['opcode']
+            if condition:
+                # condition (possibly ~-inverted) must be a known one
+                assert (condition in self.conditions or
+                        (condition[0] == '~' and
+                         condition[1:] in self.conditions)), \
+                    "condition %s not in %s" % (condition,
+                                                str(self.conditions))
+                if opcode not in rows:
+                    rows[opcode] = {}
+                rows[opcode][condition] = row
+            else:
+                # unconditional opcodes must be unique
+                assert opcode not in rows, \
+                    "opcode %s already in rows for %s" % \
+                    (opcode, self.pname)
+                rows[opcode] = row
+        # after checking for conditions, get just the values (ordered)
+        return list(rows.values())
def suffix_mask(self, d):
return ((1 << d.suffix) - 1)
eq.append(opcode_switch.eq(look_for))
if d.suffix:
opcodes = self.divide_opcodes(d)
+ # TODO opcodes = self.find_conditions(opcodes)
opc_in = Signal(d.suffix, reset_less=True)
eq.append(opc_in.eq(opcode_switch[:d.suffix]))
# begin the dynamic Switch statement here
subdecoder = PowerDecoder(width=32, dec=sd,
name=mname,
col_subset=self.col_subset,
- row_subset=self.row_subsetfn)
+ row_subset=self.row_subsetfn,
+ conditions=self.conditions)
if not subdecoder.tree_analyse():
del subdecoder
continue
if seqs:
case_does_something = True
eq += seqs
- for row in d.opcodes:
- opcode = row['opcode']
+ opcodes = self.find_conditions(d.opcodes)
+ for row in opcodes:
+                # HACK: when "conditions" are active, take the FIRST item
+                # (all entries share the same opcode); it must have the
+                # same unit and must also satisfy the other row-subset
+                # conditions.
+ if 'opcode' not in row: # must be a "CONDITIONS" dict...
+ is_conditions = True
+ _row = row[list(row.keys())[0]]
+ else:
+ is_conditions = False
+ _row = row
+ opcode = _row['opcode']
if d.opint and '-' not in opcode:
opcode = int(opcode, 0)
- if not row['unit']:
+ if not _row['unit']:
continue
if self.row_subsetfn:
- if not self.row_subsetfn(opcode, row):
+ if not self.row_subsetfn(opcode, _row):
continue
# add in the dynamic Case statement here
- switch_case[opcode] = self.op._eq(row)
+ if is_conditions:
+ switch_case[opcode] = {}
+ for k, crow in row.items():
+ # log("ordered", k, crow)
+ switch_case[opcode][k] = self.op._eq(crow)
+ else:
+ switch_case[opcode] = self.op._eq(row)
self.actually_does_something = True
case_does_something = True
subdecoder = PowerDecoder(self.width, dec,
name=mname,
col_subset=self.col_subset,
- row_subset=self.row_subsetfn)
- log ("subdecoder", mname, subdecoder)
+ row_subset=self.row_subsetfn,
+ conditions=self.conditions)
+ log("subdecoder", mname, subdecoder)
if not subdecoder.tree_analyse(): # doesn't do anything
- log ("analysed, DELETING", mname)
+ log("analysed, DELETING", mname)
del subdecoder
continue # skip
submodules[mname] = subdecoder
entries for a given opcode match. here we discern them.
"""
comb = m.d.comb
- with m.Switch(Cat(*self.ccswitch)):
- for ckey, eqs in cases.items():
- with m.Case(self.ccases[key]):
+ cswitch = []
+ ccases = []
+ for casekey, eqs in cases.items():
+ if casekey.startswith('~'):
+ with m.If(~self.conditions[casekey[1:]]):
+ comb += eqs
+ else:
+ with m.If(self.conditions[casekey]):
comb += eqs
def ports(self):
"""
def __init__(self, width, dec, name=None, col_subset=None,
- row_subset=None, conditions=None):
+ row_subset=None, conditions=None):
PowerDecoder.__init__(self, width, dec, name,
col_subset, row_subset, conditions)
self.fields = df = DecodeFields(SignalBitRange, [self.opcode_in])
return m
def ports(self):
- return [self.raw_opcode_in, self.bigendian] + PowerDecoder.ports(self)
+ res = [self.raw_opcode_in, self.bigendian] + PowerDecoder.ports(self)
+ for condition in self.conditions.values():
+ res.append(condition)
+ return res
#############################################################
# PRIMARY FUNCTION SPECIFYING ALTERNATIVE SVP64 POWER DECODER
def create_pdecode_svp64_ldst(name=None, col_subset=None, row_subset=None,
- include_fp=False):
+ include_fp=False):
"""create_pdecode - creates a cascading hierarchical POWER ISA decoder
subsetting of the PowerOp decoding is possible by setting col_subset
"""
- log ("create_pdecode_svp64_ldst", name, col_subset, row_subset, include_fp)
+ log("create_pdecode_svp64_ldst", name, col_subset, row_subset, include_fp)
# some alteration to the CSV files is required for SV so we use
# a class to do it
Subdecoder(pattern=58, opcodes=get_csv("svldst_minor_58.csv"),
opint=True, bitsel=(0, 2), suffix=None, subdecoders=[]),
# nope - needs 4-in regs
- #Subdecoder(pattern=62, opcodes=get_csv("svldst_minor_62.csv"),
+ # Subdecoder(pattern=62, opcodes=get_csv("svldst_minor_62.csv"),
# opint=True, bitsel=(0, 2), suffix=None, subdecoders=[]),
]
if False and include_fp:
pminor.append(
Subdecoder(pattern=63, opcodes=get_csv("minor_63.csv"),
- opint=False, bitsel=(1, 11), suffix=None,
- subdecoders=[]),
- )
+ opint=False, bitsel=(1, 11), suffix=None,
+ subdecoders=[]),
+ )
pminor.append(
Subdecoder(pattern=59, opcodes=get_csv("minor_59.csv"),
- opint=False, bitsel=(1, 11), suffix=None,
- subdecoders=[]),
- )
+ opint=False, bitsel=(1, 11), suffix=None,
+ subdecoders=[]),
+ )
# top level: extra merged with major
dec = []
# PRIMARY FUNCTION SPECIFYING THE FULL POWER DECODER
def create_pdecode(name=None, col_subset=None, row_subset=None,
- include_fp=False):
+ include_fp=False, conditions=None):
"""create_pdecode - creates a cascading hierarchical POWER ISA decoder
subsetting of the PowerOp decoding is possible by setting col_subset
+
+ NOTE (sigh) the bitsel patterns are in LSB0 order, they should be MSB0
"""
- log ("create_pdecode", name, col_subset, row_subset, include_fp)
+ log("create_pdecode", name, col_subset, row_subset, include_fp)
# some alteration to the CSV files is required for SV so we use
# a class to do it
subdecoders=[]))
# XXX problem with sub-decoders (can only handle one),
# sort this another time
- #m19.append(Subdecoder(pattern=19, opcodes=get_csv("minor_19_00000.csv"),
+ # m19.append(Subdecoder(pattern=19, opcodes=get_csv("minor_19_00000.csv"),
# opint=True, bitsel=(1, 6), suffix=None,
# subdecoders=[]))
pminor = [
m19,
Subdecoder(pattern=30, opcodes=get_csv("minor_30.csv"),
- opint=True, bitsel=(1, 5), suffix=None, subdecoders=[]),
+ opint=False, bitsel=(1, 5), suffix=None, subdecoders=[]),
Subdecoder(pattern=31, opcodes=get_csv("minor_31.csv"),
opint=True, bitsel=(1, 11), suffix=0b00101, subdecoders=[]),
Subdecoder(pattern=58, opcodes=get_csv("minor_58.csv"),
Subdecoder(pattern=62, opcodes=get_csv("minor_62.csv"),
opint=True, bitsel=(0, 2), suffix=None, subdecoders=[]),
Subdecoder(pattern=22, opcodes=get_csv("minor_22.csv"),
- opint=True, bitsel=(1, 5), suffix=None, subdecoders=[]),
+ opint=False, bitsel=(0, 11), suffix=None, subdecoders=[]),
+ Subdecoder(pattern=5, opcodes=get_csv("minor_5.csv"),
+ opint=True, bitsel=(0, 11), suffix=None, subdecoders=[]),
]
# FP 63L/H decoders. TODO: move mffsfamily to separate subdecoder
if include_fp:
pminor.append(
Subdecoder(pattern=63, opcodes=get_csv("minor_63.csv"),
- opint=False, bitsel=(1, 11), suffix=None,
- subdecoders=[]),
- )
+ opint=False, bitsel=(1, 11), suffix=None,
+ subdecoders=[]),
+ )
pminor.append(
Subdecoder(pattern=59, opcodes=get_csv("minor_59.csv"),
- opint=False, bitsel=(1, 11), suffix=None,
- subdecoders=[]),
- )
+ opint=False, bitsel=(1, 11), suffix=None,
+ subdecoders=[]),
+ )
# top level: extra merged with major
dec = []
bitsel=(0, 32), suffix=None, subdecoders=[]))
return TopPowerDecoder(32, dec, name=name, col_subset=col_subset,
- row_subset=row_subset)
+ row_subset=row_subset,
+ conditions=conditions)
+
+# test function from
+# https://github.com/apertus-open-source-cinema/naps/blob/9ebbc0/naps/soc/cli.py#L17
+
+
+def fragment_repr(original):
+    """Return a recursive textual repr of an nMigen Fragment.
+
+    Debug/comparison aid only: dumps the fragment's ports, drivers,
+    statements, attrs, generated, flatten, its clock domains, and all
+    subfragments (recursively) as an indented string.
+    """
+    from textwrap import indent
+    attrs_str = "\n"
+    # plain attributes are rendered via repr()
+    for attr in ['ports', 'drivers', 'statements', 'attrs',
+                 'generated', 'flatten']:
+        attrs_str += f"{attr}={repr(getattr(original, attr))},\n"
+
+    domains_str = "\n"
+    for name, domain in original.domains.items():
+        # TODO: this is not really sound because domains could be non local
+        domains_str += f"{name}: {domain.name}\n"
+    attrs_str += f"domains={{{indent(domains_str, '  ')}}},\n"
+
+    children_str = "\n"
+    # recurse into subfragments so the whole tree is printed
+    for child, name in original.subfragments:
+        children_str += f"[{name}, {fragment_repr(child)}]\n"
+    attrs_str += f"children=[{indent(children_str, '  ')}],\n"
+
+    return f"Fragment({indent(attrs_str, '  ')})"
if __name__ == '__main__':
def rowsubsetfn(opcode, row):
log("row_subset", opcode, row)
- return row['unit'] == 'FPU'
+ return row['unit'] in ['LDST', 'FPU']
+ conditions = {
+ 'SVP64FFT': Signal(name="svp64fft", reset_less=True),
+ }
pdecode = create_pdecode(name="rowsub",
col_subset={'opcode', 'function_unit',
- 'form'},
+ 'asmcode',
+ 'in2_sel', 'in3_sel'},
row_subset=rowsubsetfn,
- include_fp=True)
+ include_fp=True,
+ conditions=conditions)
vl = rtlil.convert(pdecode, ports=pdecode.ports())
with open("row_subset_decoder.il", "w") as f:
f.write(vl)
+ vl = verilog.convert(pdecode, ports=pdecode.ports())
+ with open("row_subset_decoder.v", "w") as f:
+ f.write(vl)
+
# col subset
- pdecode = create_pdecode(name="fusubset", col_subset={'function_unit'})
+ pdecode = create_pdecode(name="fusubset", col_subset={'function_unit'},
+ conditions=conditions)
vl = rtlil.convert(pdecode, ports=pdecode.ports())
with open("col_subset_decoder.il", "w") as f:
f.write(vl)
+ from nmigen.hdl.ir import Fragment
+ elaborated = Fragment.get(pdecode, platform=None)
+ elaborated_repr = fragment_repr(elaborated)
+ print(elaborated_repr)
+
+ exit(0)
+
+ exit(0)
+
# full decoder
pdecode = create_pdecode(include_fp=True)
vl = rtlil.convert(pdecode, ports=pdecode.ports())