from functools import reduce
from operator import or_
+
class PartitionPoints(dict):
"""Partition points and corresponding ``Value``s.
:attribute in2: the third input
:attribute sum: the sum output
:attribute carry: the carry output
+
+ Rather than do individual full adders (and have an array of them,
+ which would be very slow to simulate), this module can specify the
+ bit width of the inputs and outputs: in effect it performs multiple
+ Full 3-2 Add operations "in parallel".
"""
def __init__(self, width):
expanded_width += 1
expanded_width += 1
self._expanded_width = expanded_width
+ # XXX these have to remain here due to some horrible nmigen
+ # simulation bugs involving sync. it is *not* necessary to
+ # have them here, they should (under normal circumstances)
+ # be moved into elaborate, as they are entirely local
self._expanded_a = Signal(expanded_width)
self._expanded_b = Signal(expanded_width)
self._expanded_output = Signal(expanded_width)
m = Module()
expanded_index = 0
# store bits in a list, use Cat later. graphviz is much cleaner
- al = []
- bl = []
- ol = []
- ea = []
- eb = []
- eo = []
+ al, bl, ol, ea, eb, eo = [],[],[],[],[],[]
+
# partition points are "breaks" (extra zeros) in what would otherwise
# be a massive long add.
for i in range(self.width):
class ProductTerm(Elaboratable):
+ """ this class creates a single product term (a[..]*b[..]).
+    it has a design flaw in that it is the *output* that is selected,
+ where the multiplication(s) are combinatorially generated
+ all the time.
+ """
def __init__(self, width, twidth, pbwid, a_index, b_index):
self.a_index = a_index
class ProductTerms(Elaboratable):
-
+ """ creates a bank of product terms. also performs the actual bit-selection
+ this class is to be wrapped with a for-loop on the "a" operand.
+ it creates a second-level for-loop on the "b" operand.
+ """
def __init__(self, width, twidth, pbwid, a_index, blen):
self.a_index = a_index
self.blen = blen
return m
+class LSBNegTerm(Elaboratable):
+    """ computes the two correction terms needed to sign-extend one
+        partition's multiplication: a width-extended 1's-complement
+        ("not") term and a separate "+1" term.  together they implement
+        2's-complement negation (-x == ~x + 1) as two values that can be
+        thrown independently at the add-reduction tree.
+    """
+
+    def __init__(self, bit_width):
+        self.bit_width = bit_width
+        self.part = Signal(reset_less=True)    # partition-enable bit
+        self.signed = Signal(reset_less=True)  # whether signed mul requested
+        self.op = Signal(bit_width, reset_less=True)  # operand (one partition)
+        self.msb = Signal(reset_less=True)     # MSB of the partner operand
+        self.nt = Signal(bit_width*2, reset_less=True)  # out: 1's-complement term
+        self.nl = Signal(bit_width*2, reset_less=True)  # out: "+1" (LSB) term
+
+    def elaborate(self, platform):
+        m = Module()
+        comb = m.d.comb
+        bit_wid = self.bit_width
+        ext = Repl(0, bit_wid) # extend output to HI part
+
+        # determine sign of each incoming number *in this partition*
+        # (active only when the partition is enabled, the partner MSB is
+        # set, and a signed operation was requested)
+        enabled = Signal(reset_less=True)
+        m.d.comb += enabled.eq(self.part & self.msb & self.signed)
+
+        # for 8-bit values: form a * 0xFF00 by using -a * 0x100, the
+        # negation operation is split into a bitwise not and a +1.
+        # likewise for 16, 32, and 64-bit values.
+
+        # width-extended 1s complement if a is signed, otherwise zero
+        comb += self.nt.eq(Mux(enabled, Cat(ext, ~self.op), 0))
+
+        # add 1 if signed, otherwise add zero
+        # (the "1" lands at the LSB of the HI half, i.e. bit bit_wid)
+        comb += self.nl.eq(Cat(ext, enabled, Repl(0, bit_wid-1)))
+
+        return m
+
+
class Part(Elaboratable):
+ """ a key class which, depending on the partitioning, will determine
+ what action to take when parts of the output are signed or unsigned.
+
+ this requires 2 pieces of data *per operand, per partition*:
+ whether the MSB is HI/LO (per partition!), and whether a signed
+ or unsigned operation has been *requested*.
+
+ once that is determined, signed is basically carried out
+ by splitting 2's complement into 1's complement plus one.
+ 1's complement is just a bit-inversion.
+
+ the extra terms - as separate terms - are then thrown at the
+ AddReduce alongside the multiplication part-results.
+ """
def __init__(self, width, n_parts, n_levels, pbwid):
# inputs
m = Module()
pbs, parts, delayed_parts = self.pbs, self.parts, self.delayed_parts
+ # negated-temporary copy of partition bits
+ npbs = Signal.like(pbs, reset_less=True)
+ m.d.comb += npbs.eq(~pbs)
byte_count = 8 // len(parts)
for i in range(len(parts)):
pbl = []
- pbl.append(~pbs[i * byte_count - 1])
+ pbl.append(npbs[i * byte_count - 1])
for j in range(i * byte_count, (i + 1) * byte_count - 1):
pbl.append(pbs[j])
- pbl.append(~pbs[(i + 1) * byte_count - 1])
+ pbl.append(npbs[(i + 1) * byte_count - 1])
value = Signal(len(pbl), reset_less=True)
m.d.comb += value.eq(Cat(*pbl))
m.d.comb += parts[i].eq(~(value).bool())
self.not_a_term, self.neg_lsb_a_term, \
self.not_b_term, self.neg_lsb_b_term
- byte_width = 8 // len(parts)
- bit_width = 8 * byte_width
+ byte_width = 8 // len(parts) # byte width
+ bit_wid = 8 * byte_width # bit width
nat, nbt, nla, nlb = [], [], [], []
for i in range(len(parts)):
- be = parts[i] & self.a[(i + 1) * bit_width - 1] \
- & self.a_signed[i * byte_width]
- ae = parts[i] & self.b[(i + 1) * bit_width - 1] \
- & self.b_signed[i * byte_width]
- a_enabled = Signal(name="a_en_%d" % i, reset_less=True)
- b_enabled = Signal(name="b_en_%d" % i, reset_less=True)
- m.d.comb += a_enabled.eq(ae)
- m.d.comb += b_enabled.eq(be)
-
- # for 8-bit values: form a * 0xFF00 by using -a * 0x100, the
- # negation operation is split into a bitwise not and a +1.
- # likewise for 16, 32, and 64-bit values.
- nat.append(Mux(a_enabled,
- Cat(Repl(0, bit_width),
- ~self.a.bit_select(bit_width * i, bit_width)),
- 0))
-
- nla.append(Cat(Repl(0, bit_width), a_enabled,
- Repl(0, bit_width-1)))
-
- nbt.append(Mux(b_enabled,
- Cat(Repl(0, bit_width),
- ~self.b.bit_select(bit_width * i, bit_width)),
- 0))
-
- nlb.append(Cat(Repl(0, bit_width), b_enabled,
- Repl(0, bit_width-1)))
-
+ # work out bit-inverted and +1 term for a.
+ pa = LSBNegTerm(bit_wid)
+ setattr(m.submodules, "lnt_%d_a_%d" % (bit_wid, i), pa)
+ m.d.comb += pa.part.eq(parts[i])
+ m.d.comb += pa.op.eq(self.a.bit_select(bit_wid * i, bit_wid))
+ m.d.comb += pa.signed.eq(self.b_signed[i * byte_width]) # yes b
+ m.d.comb += pa.msb.eq(self.b[(i + 1) * bit_wid - 1]) # really, b
+ nat.append(pa.nt)
+ nla.append(pa.nl)
+
+ # work out bit-inverted and +1 term for b
+ pb = LSBNegTerm(bit_wid)
+ setattr(m.submodules, "lnt_%d_b_%d" % (bit_wid, i), pb)
+ m.d.comb += pb.part.eq(parts[i])
+ m.d.comb += pb.op.eq(self.b.bit_select(bit_wid * i, bit_wid))
+ m.d.comb += pb.signed.eq(self.a_signed[i * byte_width]) # yes a
+ m.d.comb += pb.msb.eq(self.a[(i + 1) * bit_wid - 1]) # really, a
+ nbt.append(pb.nt)
+ nlb.append(pb.nl)
+
+ # concatenate together and return all 4 results.
m.d.comb += [not_a_term.eq(Cat(*nat)),
not_b_term.eq(Cat(*nbt)),
neg_lsb_a_term.eq(Cat(*nla)),
class IntermediateOut(Elaboratable):
+ """ selects the HI/LO part of the multiplication, for a given bit-width
+ the output is also reconstructed in its SIMD (partition) lanes.
+ """
def __init__(self, width, out_wid, n_parts):
self.width = width
self.n_parts = n_parts
class FinalOut(Elaboratable):
+ """ selects the final output based on the partitioning.
+
+ each byte is selectable independently, i.e. it is possible
+ that some partitions requested 8-bit computation whilst others
+ requested 16 or 32 bit.
+ """
def __init__(self, out_wid):
# inputs
self.d8 = [Signal(name=f"d8_{i}", reset_less=True) for i in range(8)]
m = Module()
ol = []
for i in range(8):
+ # select one of the outputs: d8 selects i8, d16 selects i16
+ # d32 selects i32, and the default is i64.
+ # d8 and d16 are ORed together in the first Mux
+ # then the 2nd selects either i8 or i16.
+ # if neither d8 nor d16 are set, d32 selects either i32 or i64.
op = Signal(8, reset_less=True, name="op_%d" % i)
m.d.comb += op.eq(
Mux(self.d8[i] | self.d16[i // 2],
class OrMod(Elaboratable):
+ """ ORs four values together in a hierarchical tree
+ """
def __init__(self, wid):
self.wid = wid
self.orin = [Signal(wid, name="orin%d" % i, reset_less=True)
class Signs(Elaboratable):
+ """ determines whether a or b are signed numbers
+ based on the required operation type (OP_MUL_*)
+ """
def __init__(self):
self.part_ops = Signal(2, reset_less=True)
instruction.
"""
- def __init__(self, register_levels= ()):
+ def __init__(self, register_levels=()):
+ """ register_levels: specifies the points in the cascade at which
+ flip-flops are to be inserted.
+ """
# parameter(s)
self.register_levels = list(register_levels)
m.d.comb += io8.delayed_part_ops[i].eq(delayed_part_ops[-1][i])
# final output
- m.submodules.fo = fo = FinalOut(64)
+ m.submodules.finalout = finalout = FinalOut(64)
for i in range(len(part_8.delayed_parts[-1])):
- m.d.comb += fo.d8[i].eq(part_8.dplast[i])
+ m.d.comb += finalout.d8[i].eq(part_8.dplast[i])
for i in range(len(part_16.delayed_parts[-1])):
- m.d.comb += fo.d16[i].eq(part_16.dplast[i])
+ m.d.comb += finalout.d16[i].eq(part_16.dplast[i])
for i in range(len(part_32.delayed_parts[-1])):
- m.d.comb += fo.d32[i].eq(part_32.dplast[i])
- m.d.comb += fo.i8.eq(io8.output)
- m.d.comb += fo.i16.eq(io16.output)
- m.d.comb += fo.i32.eq(io32.output)
- m.d.comb += fo.i64.eq(io64.output)
- m.d.comb += self.output.eq(fo.out)
+ m.d.comb += finalout.d32[i].eq(part_32.dplast[i])
+ m.d.comb += finalout.i8.eq(io8.output)
+ m.d.comb += finalout.i16.eq(io16.output)
+ m.d.comb += finalout.i32.eq(io32.output)
+ m.d.comb += finalout.i64.eq(io64.output)
+ m.d.comb += self.output.eq(finalout.out)
return m