return m
-class MaskedFullAdder(FullAdder):
+class MaskedFullAdder(Elaboratable):
"""Masked Full Adder.
:attribute mask: the carry partition mask
FullAdders are always used with a "mask" on the output. To keep
the graphviz "clean", this class performs the masking here rather
than inside a large for-loop.
+
+ See the following discussion as to why this is no longer derived
+ from FullAdder. Each carry is shifted here *before* being ANDed
+ with the mask, so that an AOI cell may be used (which is more
+ gate-efficient).  See:
+ https://en.wikipedia.org/wiki/AND-OR-Invert
+ https://groups.google.com/d/msg/comp.arch/fcq-GLQqvas/vTxmcA0QAgAJ
"""
def __init__(self, width):
:param width: the bit width of the input and output
"""
- FullAdder.__init__(self, width)
- self.mask = Signal(width)
- self.mcarry = Signal(width)
+ self.width = width
+ self.mask = Signal(width, reset_less=True)
+ self.mcarry = Signal(width, reset_less=True)
+ self.in0 = Signal(width, reset_less=True)
+ self.in1 = Signal(width, reset_less=True)
+ self.in2 = Signal(width, reset_less=True)
+ self.sum = Signal(width, reset_less=True)
def elaborate(self, platform):
"""Elaborate this module."""
- m = FullAdder.elaborate(self, platform)
- m.d.comb += self.mcarry.eq((self.carry << 1) & self.mask)
+ m = Module()
+ s1 = Signal(self.width, reset_less=True)
+ s2 = Signal(self.width, reset_less=True)
+ s3 = Signal(self.width, reset_less=True)
+ c1 = Signal(self.width, reset_less=True)
+ c2 = Signal(self.width, reset_less=True)
+ c3 = Signal(self.width, reset_less=True)
+ m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
+ m.d.comb += s1.eq(Cat(0, self.in0))
+ m.d.comb += s2.eq(Cat(0, self.in1))
+ m.d.comb += s3.eq(Cat(0, self.in2))
+ m.d.comb += c1.eq(s1 & s2 & self.mask)
+ m.d.comb += c2.eq(s2 & s3 & self.mask)
+ m.d.comb += c3.eq(s3 & s1 & self.mask)
+ m.d.comb += self.mcarry.eq(c1 | c2 | c3)
return m
partition: .... P... P... P... P... (32 bits)
a : .... .... .... .... .... (32 bits)
b : .... .... .... .... .... (32 bits)
- exp-a : ....P....P....P....P.... (32+4 bits)
+ exp-a : ....P....P....P....P.... (32+4 bits, P=1 if no partition)
exp-b : ....0....0....0....0.... (32 bits plus 4 zeros)
- exp-o : ....xN...xN...xN...xN... (32+4 bits)
- o : .... N... N... N... N... (32 bits)
+ exp-o : ....xN...xN...xN...xN... (32+4 bits - x to be discarded)
+ o : .... N... N... N... N... (32 bits - x ignored, N is carry-over)
:attribute width: the bit width of the input and output. Read-only.
:attribute a: the first input to the adder
FULL_ADDER_INPUT_COUNT = 3
-class AddReduce(Elaboratable):
+class AddReduceSingle(Elaboratable):
"""Add list of numbers together.
:attribute inputs: input ``Signal``s to be summed. Modification not
supported, except for by ``Signal.eq``.
"""
- def __init__(self, inputs, output_width, register_levels, partition_points):
+ def __init__(self, inputs, output_width, register_levels, partition_points,
+ part_ops):
"""Create an ``AddReduce``.
:param inputs: input ``Signal``s to be summed.
pipeline registers.
:param partition_points: the input partition points.
"""
+ self.part_ops = part_ops
+ self.out_part_ops = [Signal(2, name=f"part_ops_{i}")
+ for i in range(len(part_ops))]
self.inputs = list(inputs)
self._resized_inputs = [
Signal(output_width, name=f"resized_inputs[{i}]")
if not self.partition_points.fits_in_width(output_width):
raise ValueError("partition_points doesn't fit in output_width")
self._reg_partition_points = self.partition_points.like()
- max_level = AddReduce.get_max_level(len(self.inputs))
+
+ max_level = AddReduceSingle.get_max_level(len(self.inputs))
for level in self.register_levels:
if level > max_level:
raise ValueError(
"not enough adder levels for specified register levels")
+ # This is annoying: we have to create the modules (and terms) here,
+ # because we need to know what they are (in order to set up the
+ # interconnects back in AddReduce), but we cannot do the m.d.comb +=
+ # etc. here because __init__ is not elaborate().
+ self.groups = AddReduceSingle.full_adder_groups(len(self.inputs))
+ self._intermediate_terms = []
+ if len(self.groups) != 0:
+ self.create_next_terms()
+
@staticmethod
def get_max_level(input_count):
"""Get the maximum level.
"""
retval = 0
while True:
- groups = AddReduce.full_adder_groups(input_count)
+ groups = AddReduceSingle.full_adder_groups(input_count)
if len(groups) == 0:
return retval
input_count %= FULL_ADDER_INPUT_COUNT
input_count += 2 * len(groups)
retval += 1
- def next_register_levels(self):
- """``Iterable`` of ``register_levels`` for next recursive level."""
- for level in self.register_levels:
- if level > 0:
- yield level - 1
-
@staticmethod
def full_adder_groups(input_count):
"""Get ``inputs`` indices for which a full adder should be built."""
# pipeline registers
resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
for i in range(len(self.inputs))]
+ copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i])
+ for i in range(len(self.part_ops))]
if 0 in self.register_levels:
+ m.d.sync += copy_part_ops
m.d.sync += resized_input_assignments
m.d.sync += self._reg_partition_points.eq(self.partition_points)
else:
+ m.d.comb += copy_part_ops
m.d.comb += resized_input_assignments
m.d.comb += self._reg_partition_points.eq(self.partition_points)
- groups = AddReduce.full_adder_groups(len(self.inputs))
+ for (value, term) in self._intermediate_terms:
+ m.d.comb += term.eq(value)
+
# if there are no full adders to create, then we handle the base cases
# and return, otherwise we go on to the recursive case
- if len(groups) == 0:
+ if len(self.groups) == 0:
if len(self.inputs) == 0:
# use 0 as the default output value
m.d.comb += self.output.eq(0)
# handle single input
m.d.comb += self.output.eq(self._resized_inputs[0])
else:
- # base case for adding 2 or more inputs, which get recursively
- # reduced to 2 inputs
+ # base case for adding 2 inputs
assert len(self.inputs) == 2
adder = PartitionedAdder(len(self.output),
self._reg_partition_points)
m.d.comb += adder.b.eq(self._resized_inputs[1])
m.d.comb += self.output.eq(adder.output)
return m
- # go on to handle recursive case
+
+ mask = self._reg_partition_points.as_mask(len(self.output))
+ m.d.comb += self.part_mask.eq(mask)
+
+ # add and link the intermediate term modules
+ for i, (iidx, adder_i) in enumerate(self.adders):
+ setattr(m.submodules, f"adder_{i}", adder_i)
+
+ m.d.comb += adder_i.in0.eq(self._resized_inputs[iidx])
+ m.d.comb += adder_i.in1.eq(self._resized_inputs[iidx + 1])
+ m.d.comb += adder_i.in2.eq(self._resized_inputs[iidx + 2])
+ m.d.comb += adder_i.mask.eq(self.part_mask)
+
+ return m
+
+ def create_next_terms(self):
+
+ # go on to prepare recursive case
intermediate_terms = []
+ _intermediate_terms = []
def add_intermediate_term(value):
intermediate_term = Signal(
len(self.output),
name=f"intermediate_terms[{len(intermediate_terms)}]")
+ _intermediate_terms.append((value, intermediate_term))
intermediate_terms.append(intermediate_term)
- m.d.comb += intermediate_term.eq(value)
# store mask in intermediary (simplifies graph)
- part_mask = Signal(len(self.output), reset_less=True)
- mask = self._reg_partition_points.as_mask(len(self.output))
- m.d.comb += part_mask.eq(mask)
+ self.part_mask = Signal(len(self.output), reset_less=True)
# create full adders for this recursive level.
# this shrinks N terms to 2 * (N // 3) plus the remainder
- for i in groups:
+ self.adders = []
+ for i in self.groups:
adder_i = MaskedFullAdder(len(self.output))
- setattr(m.submodules, f"adder_{i}", adder_i)
- m.d.comb += adder_i.in0.eq(self._resized_inputs[i])
- m.d.comb += adder_i.in1.eq(self._resized_inputs[i + 1])
- m.d.comb += adder_i.in2.eq(self._resized_inputs[i + 2])
- m.d.comb += adder_i.mask.eq(part_mask)
+ self.adders.append((i, adder_i))
+ # add both the sum and the masked-carry to the next level.
+ # 3 inputs have now been reduced to 2...
add_intermediate_term(adder_i.sum)
- # mask out carry bits to prevent carries between partitions
add_intermediate_term(adder_i.mcarry)
# handle the remaining inputs.
if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1:
add_intermediate_term(self._resized_inputs[-1])
else:
assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0
- # recursive invocation of ``AddReduce``
- next_level = AddReduce(intermediate_terms,
- len(self.output),
- self.next_register_levels(),
- self._reg_partition_points)
- m.submodules.next_level = next_level
+
+ self.intermediate_terms = intermediate_terms
+ self._intermediate_terms = _intermediate_terms
+
+
+class AddReduce(Elaboratable):
+    """Recursively add a list of numbers together.
+
+ :attribute inputs: input ``Signal``s to be summed. Modification not
+ supported, except for by ``Signal.eq``.
+ :attribute register_levels: List of nesting levels that should have
+ pipeline registers.
+ :attribute output: output sum.
+ :attribute partition_points: the input partition points. Modification not
+ supported, except for by ``Signal.eq``.
+ """
+
+ def __init__(self, inputs, output_width, register_levels, partition_points,
+ part_ops):
+ """Create an ``AddReduce``.
+
+ :param inputs: input ``Signal``s to be summed.
+ :param output_width: bit-width of ``output``.
+ :param register_levels: List of nesting levels that should have
+ pipeline registers.
+        :param partition_points: the input partition points.
+        :param part_ops: the partition operation codes, propagated
+            (and optionally pipeline-registered) alongside the sum.
+        """
+ self.inputs = inputs
+ self.part_ops = part_ops
+ self.out_part_ops = [Signal(2, name=f"part_ops_{i}")
+ for i in range(len(part_ops))]
+ self.output = Signal(output_width)
+ self.output_width = output_width
+ self.register_levels = register_levels
+ self.partition_points = partition_points
+
+ self.create_levels()
+
+ @staticmethod
+ def get_max_level(input_count):
+ return AddReduceSingle.get_max_level(input_count)
+
+ @staticmethod
+ def next_register_levels(register_levels):
+ """``Iterable`` of ``register_levels`` for next recursive level."""
+ for level in register_levels:
+ if level > 0:
+ yield level - 1
+
+ def create_levels(self):
+ """creates reduction levels"""
+
+ mods = []
+ next_levels = self.register_levels
+ partition_points = self.partition_points
+ inputs = self.inputs
+ part_ops = self.part_ops
+ while True:
+ next_level = AddReduceSingle(inputs, self.output_width, next_levels,
+ partition_points, part_ops)
+ mods.append(next_level)
+ if len(next_level.groups) == 0:
+ break
+ next_levels = list(AddReduce.next_register_levels(next_levels))
+ partition_points = next_level._reg_partition_points
+ inputs = next_level.intermediate_terms
+            # NOTE(review): this passes the *input* part_ops on to the next
+            # level rather than next_level.out_part_ops (the copies that get
+            # pipeline-registered when level 0 is in register_levels) --
+            # confirm that register-chaining of part_ops is not intended here.
+            part_ops = next_level.part_ops
+
+ self.levels = mods
+
+ def elaborate(self, platform):
+ """Elaborate this module."""
+ m = Module()
+
+ for i, next_level in enumerate(self.levels):
+ setattr(m.submodules, "next_level%d" % i, next_level)
+
+ # output comes from last module
m.d.comb += self.output.eq(next_level.output)
+ copy_part_ops = [self.out_part_ops[i].eq(next_level.out_part_ops[i])
+ for i in range(len(self.part_ops))]
+ m.d.comb += copy_part_ops
+
return m
bsb = Signal(self.width, reset_less=True)
a_index, b_index = self.a_index, self.b_index
pwidth = self.pwidth
- m.d.comb += bsa.eq(self.a.bit_select(a_index * pwidth, pwidth))
- m.d.comb += bsb.eq(self.b.bit_select(b_index * pwidth, pwidth))
+ m.d.comb += bsa.eq(self.a.part(a_index * pwidth, pwidth))
+ m.d.comb += bsb.eq(self.b.part(b_index * pwidth, pwidth))
m.d.comb += self.ti.eq(bsa * bsb)
m.d.comb += self.term.eq(get_term(self.ti, self.shift, self.enabled))
"""
asel = Signal(width, reset_less=True)
bsel = Signal(width, reset_less=True)
a_index, b_index = self.a_index, self.b_index
- m.d.comb += asel.eq(self.a.bit_select(a_index * pwidth, pwidth))
- m.d.comb += bsel.eq(self.b.bit_select(b_index * pwidth, pwidth))
+ m.d.comb += asel.eq(self.a.part(a_index * pwidth, pwidth))
+ m.d.comb += bsel.eq(self.b.part(b_index * pwidth, pwidth))
m.d.comb += bsa.eq(get_term(asel, self.shift, self.enabled))
m.d.comb += bsb.eq(get_term(bsel, self.shift, self.enabled))
m.d.comb += self.ti.eq(bsa * bsb)
return m
+
class LSBNegTerm(Elaboratable):
def __init__(self, bit_width):
pa = LSBNegTerm(bit_wid)
setattr(m.submodules, "lnt_%d_a_%d" % (bit_wid, i), pa)
m.d.comb += pa.part.eq(parts[i])
- m.d.comb += pa.op.eq(self.a.bit_select(bit_wid * i, bit_wid))
+ m.d.comb += pa.op.eq(self.a.part(bit_wid * i, bit_wid))
m.d.comb += pa.signed.eq(self.b_signed[i * byte_width]) # yes b
m.d.comb += pa.msb.eq(self.b[(i + 1) * bit_wid - 1]) # really, b
nat.append(pa.nt)
pb = LSBNegTerm(bit_wid)
setattr(m.submodules, "lnt_%d_b_%d" % (bit_wid, i), pb)
m.d.comb += pb.part.eq(parts[i])
- m.d.comb += pb.op.eq(self.b.bit_select(bit_wid * i, bit_wid))
+ m.d.comb += pb.op.eq(self.b.part(bit_wid * i, bit_wid))
m.d.comb += pb.signed.eq(self.a_signed[i * byte_width]) # yes a
m.d.comb += pb.msb.eq(self.a[(i + 1) * bit_wid - 1]) # really, a
nbt.append(pb.nt)
op = Signal(w, reset_less=True, name="op%d_%d" % (w, i))
m.d.comb += op.eq(
Mux(self.delayed_part_ops[sel * i] == OP_MUL_LOW,
- self.intermed.bit_select(i * w*2, w),
- self.intermed.bit_select(i * w*2 + w, w)))
+ self.intermed.part(i * w*2, w),
+ self.intermed.part(i * w*2 + w, w)))
ol.append(op)
m.d.comb += self.output.eq(Cat(*ol))
op = Signal(8, reset_less=True, name="op_%d" % i)
m.d.comb += op.eq(
Mux(self.d8[i] | self.d16[i // 2],
- Mux(self.d8[i], self.i8.bit_select(i * 8, 8),
- self.i16.bit_select(i * 8, 8)),
- Mux(self.d32[i // 4], self.i32.bit_select(i * 8, 8),
- self.i64.bit_select(i * 8, 8))))
+ Mux(self.d8[i], self.i8.part(i * 8, 8),
+ self.i16.part(i * 8, 8)),
+ Mux(self.d32[i // 4], self.i32.part(i * 8, 8),
+ self.i64.part(i * 8, 8))))
ol.append(op)
m.d.comb += self.out.eq(Cat(*ol))
return m
add_reduce = AddReduce(terms,
128,
self.register_levels,
- expanded_part_pts)
+ expanded_part_pts,
+ self.part_ops)
+
+        # alternative: out_part_ops = add_reduce.levels[-1].out_part_ops
+        # (the copies propagated through AddReduce itself); currently the
+        # externally-delayed part_ops are used instead -- confirm which
+        # source is intended before removing the other.
+ out_part_ops = delayed_part_ops[-1]
+
m.submodules.add_reduce = add_reduce
m.d.comb += self._intermediate_output.eq(add_reduce.output)
# create _output_64
m.submodules.io64 = io64 = IntermediateOut(64, 128, 1)
m.d.comb += io64.intermed.eq(self._intermediate_output)
for i in range(8):
- m.d.comb += io64.delayed_part_ops[i].eq(delayed_part_ops[-1][i])
+ m.d.comb += io64.delayed_part_ops[i].eq(out_part_ops[i])
# create _output_32
m.submodules.io32 = io32 = IntermediateOut(32, 128, 2)
m.d.comb += io32.intermed.eq(self._intermediate_output)
for i in range(8):
- m.d.comb += io32.delayed_part_ops[i].eq(delayed_part_ops[-1][i])
+ m.d.comb += io32.delayed_part_ops[i].eq(out_part_ops[i])
# create _output_16
m.submodules.io16 = io16 = IntermediateOut(16, 128, 4)
m.d.comb += io16.intermed.eq(self._intermediate_output)
for i in range(8):
- m.d.comb += io16.delayed_part_ops[i].eq(delayed_part_ops[-1][i])
+ m.d.comb += io16.delayed_part_ops[i].eq(out_part_ops[i])
# create _output_8
m.submodules.io8 = io8 = IntermediateOut(8, 128, 8)
m.d.comb += io8.intermed.eq(self._intermediate_output)
for i in range(8):
- m.d.comb += io8.delayed_part_ops[i].eq(delayed_part_ops[-1][i])
+ m.d.comb += io8.delayed_part_ops[i].eq(out_part_ops[i])
# final output
m.submodules.finalout = finalout = FinalOut(64)