-""" nmigen implementation of buffered pipeline stage, based on zipcpu:
+""" Pipeline and BufferedPipeline implementation, conforming to the same API.
+
+ eq:
+ --
+
+ a strategically very important function that is identical in function
+ to nmigen's Signal.eq function, except it may take objects, or a list
+ of objects, or a tuple of objects, and where objects may also be
+ Records.
+
+ Stage API:
+ ---------
+
+ stage requires compliance with a strict API that may be
+ implemented in several means, including as a static class.
+ the methods of a stage instance must be as follows:
+
+ * ispec() - Input data format specification
+ returns an object or a list or tuple of objects, or
+ a Record, each object having an "eq" function which
+ takes responsibility for copying by assignment all
+ sub-objects
+ * ospec() - Output data format specification
+                 requirements as for ispec
+ * process(m, i) - Processes an ispec-formatted object
+ returns a combinatorial block of a result that
+ may be assigned to the output, by way of the "eq"
+ function
+ * setup(m, i) - Optional function for setting up submodules
+ may be used for more complex stages, to link
+ the input (i) to submodules. must take responsibility
+ for adding those submodules to the module (m).
+ the submodules must be combinatorial blocks and
+ must have their inputs and output linked combinatorially.
+
+ StageChain:
+ ----------
+
+ A useful combinatorial wrapper around stages that chains them together
+ and then presents a Stage-API-conformant interface.
+
+ UnbufferedPipeline:
+ ------------------
+
+ A simple stalling clock-synchronised pipeline that has no buffering
+ (unlike BufferedPipeline). A stall anywhere along the line will
+ result in a stall back-propagating down the entire chain.
+
+ The BufferedPipeline by contrast will buffer incoming data, allowing
+ previous stages one clock cycle's grace before also having to stall.
+
+ BufferedPipeline:
+ ----------------
+
+ nmigen implementation of buffered pipeline stage, based on zipcpu:
https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
this module requires quite a bit of thought to understand how it works
it's quite a complex state machine!
"""
-from nmigen import Signal, Cat, Const, Mux, Module
+from nmigen import Signal, Cat, Const, Mux, Module, Array
from nmigen.cli import verilog, rtlil
+from nmigen.hdl.rec import Record, Layout
+
from collections.abc import Sequence
class PrevControl:
""" contains signals that come *from* the previous stage (both in and out)
- * i_valid: input from previous stage indicating incoming data is valid
+ * i_valid: previous stage indicating all incoming data is valid.
+ may be a multi-bit signal, where all bits are required
+ to be asserted to indicate "valid".
* o_ready: output to next stage indicating readiness to accept data
* i_data : an input - added by the user of this class
"""
- def __init__(self):
- self.i_valid = Signal(name="p_i_valid") # >>in
- self.o_ready = Signal(name="p_o_ready") # <<out
+ def __init__(self, i_width=1):
+ self.i_valid = Signal(i_width, name="p_i_valid") # prev >>in self
+ self.o_ready = Signal(name="p_o_ready") # prev <<out self
def connect_in(self, prev):
""" helper function to connect stage to an input source. do not
eq(self.i_data, prev.i_data),
]
+ def i_valid_logic(self):
+ vlen = len(self.i_valid)
+ if vlen > 1: # multi-bit case: valid only when i_valid is all 1s
+            all1s = Const(-1, (vlen, False))
+ return self.i_valid == all1s
+ # single-bit i_valid case
+ return self.i_valid
+
class NextControl:
""" contains the signals that go *to* the next stage (both in and out)
* o_data : an output - added by the user of this class
"""
def __init__(self):
- self.o_valid = Signal(name="n_o_valid") # out>>
- self.i_ready = Signal(name="n_i_ready") # <<in
+ self.o_valid = Signal(name="n_o_valid") # self out>> next
+ self.i_ready = Signal(name="n_i_ready") # self <<in next
def connect_to_next(self, nxt):
""" helper function to connect to the next stage data/valid/ready.
def eq(o, i):
""" makes signals equal: a helper routine which identifies if it is being
- passsed a list (or tuple) of objects, and calls the objects' eq
- function.
+ passed a list (or tuple) of objects, or signals, or Records, and calls
+ the objects' eq function.
+
+ complex objects (classes) can be used: they must follow the
+ convention of having an eq member function, which takes the
+ responsibility of further calling eq and returning a list of
+ eq assignments
+
+ Record is a special (unusual, recursive) case, where the input may be
+ specified as a dictionary (which may contain further dictionaries,
+ recursively), where the field names of the dictionary must match
+ the Record's field spec. Alternatively, an object with the same
+ member names as the Record may be assigned: it does not have to
+ *be* a Record.
"""
if not isinstance(o, Sequence):
o, i = [o], [i]
res = []
for (ao, ai) in zip(o, i):
- res.append(ao.eq(ai))
+ #print ("eq", ao, ai)
+ if isinstance(ao, Record):
+ for idx, (field_name, field_shape, _) in enumerate(ao.layout):
+ if isinstance(field_shape, Layout):
+ val = ai.fields
+ else:
+ val = ai
+ if hasattr(val, field_name): # check for attribute
+ val = getattr(val, field_name)
+ else:
+ val = val[field_name] # dictionary-style specification
+ rres = eq(ao.fields[field_name], val)
+ res += rres
+ else:
+ rres = ao.eq(ai)
+ if not isinstance(rres, Sequence):
+ rres = [rres]
+ res += rres
return res
+class StageChain:
+ """ pass in a list of stages, and they will automatically be
+ chained together via their input and output specs into a
+ combinatorial chain.
+
+ * input to this class will be the input of the first stage
+ * output of first stage goes into input of second
+ * output of second goes into input into third (etc. etc.)
+ * the output of this class will be the output of the last stage
+ """
+ def __init__(self, chain):
+ self.chain = chain
+
+ def ispec(self):
+ return self.chain[0].ispec()
+
+ def ospec(self):
+ return self.chain[-1].ospec()
+
+ def setup(self, m, i):
+ for (idx, c) in enumerate(self.chain):
+ if hasattr(c, "setup"):
+ c.setup(m, i) # stage may have some module stuff
+ o = self.chain[idx].ospec() # only the last assignment survives
+ m.d.comb += eq(o, c.process(i)) # process input into "o"
+ if idx != len(self.chain)-1:
+ ni = self.chain[idx+1].ispec() # becomes new input on next loop
+ m.d.comb += eq(ni, o) # assign output to next input
+ i = ni
+ self.o = o # last loop is the output
+
+ def process(self, i):
+ return self.o
+
+
class PipelineBase:
""" Common functions for Pipeline API
"""
- def __init__(self, stage):
+ def __init__(self, stage, in_multi=None, p_len=1, n_len=1):
""" pass in a "stage" which may be either a static class or a class
- instance, which has three functions:
+ instance, which has four functions (one optional):
* ispec: returns input signals according to the input specification
* ispec: returns output signals to the output specification
* process: takes an input instance and returns processed data
+ * setup: performs any module linkage if the stage uses one.
User must also:
* add i_data member to PrevControl and
self.stage = stage
# set up input and output IO ACK (prev/next ready/valid)
- self.p = PrevControl()
- self.n = NextControl()
-
- def connect_to_next(self, nxt):
+ p = []
+ n = []
+ for i in range(p_len):
+ p.append(PrevControl(in_multi))
+ for i in range(n_len):
+ n.append(NextControl())
+ if p_len > 1:
+ self.p = Array(p)
+ else:
+ self.p = p
+ if n_len > 1:
+ self.n = Array(n)
+ else:
+ self.n = n
+
+ def connect_to_next(self, nxt, p_idx=0, n_idx=0):
""" helper function to connect to the next stage data/valid/ready.
"""
- return self.n.connect_to_next(nxt.p)
+ return self.n[n_idx].connect_to_next(nxt.p[p_idx])
- def connect_in(self, prev):
+ def connect_in(self, prev, idx=0, prev_idx=None):
""" helper function to connect stage to an input source. do not
use to connect stage-to-stage!
"""
- return self.p.connect_in(prev.p)
+ if prev_idx is None:
+ return self.p[idx].connect_in(prev.p)
+ return self.p[idx].connect_in(prev.p[prev_idx])
- def connect_out(self, nxt):
+ def connect_out(self, nxt, idx=0, nxt_idx=None):
""" helper function to connect stage to an output source. do not
use to connect stage-to-stage!
"""
- return self.n.connect_out(nxt.n)
+ if nxt_idx is None:
+ return self.n[idx].connect_out(nxt.n)
+        return self.n[idx].connect_out(nxt.n[nxt_idx])
- def set_input(self, i):
+ def set_input(self, i, idx=0):
""" helper function to set the input data
"""
- return eq(self.p.i_data, i)
+ return eq(self.p[idx].i_data, i)
def ports(self):
- return [self.p.i_valid, self.n.i_ready,
- self.n.o_valid, self.p.o_ready,
- self.p.i_data, self.n.o_data
- ]
+ res = []
+ for i in range(len(self.p)):
+ res += [self.p[i].i_valid, self.p[i].o_ready,
+ self.p[i].i_data]# XXX need flattening!]
+ for i in range(len(self.n)):
+ res += [self.n[i].i_ready, self.n[i].o_valid,
+                    self.n[i].o_data] # XXX need flattening!]
+ return res
class BufferedPipeline(PipelineBase):
on the next cycle (as long as stall is not raised again) the
input may begin to be processed and transferred directly to output.
+
"""
- def __init__(self, stage):
- PipelineBase.__init__(self, stage)
+ def __init__(self, stage, n_len=1, p_len=1, p_mux=None, n_mux=None):
+ """ set up a BufferedPipeline (multi-input, multi-output)
+ NOTE: n_len > 1 and p_len > 1 is NOT supported
- # set up the input and output data
- self.p.i_data = stage.ispec() # input type
- self.r_data = stage.ospec() # all these are output type
- self.result = stage.ospec()
- self.n.o_data = stage.ospec()
-
- def update_buffer(self):
- """ copies the result into the intermediate register r_data,
- which will need to be outputted on a subsequent cycle
- prior to allowing "normal" operation.
- """
- return eq(self.r_data, self.result)
+ Arguments:
- def update_output(self):
- """ copies the (combinatorial) result into the output
+ * stage: see Stage API above
+ * p_len: number of inputs (PrevControls + data)
+ * n_len: number of outputs (NextControls + data)
+ * p_mux: optional multiplex selector for incoming data
+ * n_mux: optional multiplex router for outgoing data
"""
- return eq(self.n.o_data, self.result)
+        PipelineBase.__init__(self, stage, p_len=p_len, n_len=n_len)
+ self.p_mux = p_mux
+ self.n_mux = n_mux
- def flush_buffer(self):
- """ copies the *intermediate* register r_data into the output
- """
- return eq(self.n.o_data, self.r_data)
+ # set up the input and output data
+ for i in range(p_len):
+ self.p[i].i_data = stage.ispec() # input type
+ for i in range(n_len):
+ self.n[i].o_data = stage.ospec()
def elaborate(self, platform):
m = Module()
+
+ result = self.stage.ospec()
+ r_data = self.stage.ospec()
if hasattr(self.stage, "setup"):
- self.stage.setup(m, self.p.i_data)
+ for i in range(len(self.p)):
+ self.stage.setup(m, self.p[i].i_data)
+
+ pi = 0 # TODO: use p_mux to decide which to select
+        ni = 0 # TODO: use n_mux to decide which to select
# establish some combinatorial temporaries
o_n_validn = Signal(reset_less=True)
i_p_valid_o_p_ready = Signal(reset_less=True)
- m.d.comb += [o_n_validn.eq(~self.n.o_valid),
- i_p_valid_o_p_ready.eq(self.p.i_valid & self.p.o_ready),
+ p_i_valid = Signal(reset_less=True)
+ m.d.comb += [p_i_valid.eq(self.p[pi].i_valid_logic()),
+ o_n_validn.eq(~self.n[ni].o_valid),
+ i_p_valid_o_p_ready.eq(p_i_valid & self.p[pi].o_ready),
]
# store result of processing in combinatorial temporary
- with m.If(self.p.i_valid): # input is valid: process it
- m.d.comb += eq(self.result, self.stage.process(self.p.i_data))
+ m.d.comb += eq(result, self.stage.process(self.p[pi].i_data))
+
# if not in stall condition, update the temporary register
- with m.If(self.p.o_ready): # not stalled
- m.d.sync += self.update_buffer()
-
- #with m.If(self.p.i_rst): # reset
- # m.d.sync += self.n.o_valid.eq(0)
- # m.d.sync += self.p.o_ready.eq(0)
- with m.If(self.n.i_ready): # next stage is ready
- with m.If(self.p.o_ready): # not stalled
+ with m.If(self.p[pi].o_ready): # not stalled
+ m.d.sync += eq(r_data, result) # update buffer
+
+ with m.If(self.n[ni].i_ready): # next stage is ready
+ with m.If(self.p[pi].o_ready): # not stalled
# nothing in buffer: send (processed) input direct to output
- m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
- self.update_output(),
+ m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
+ eq(self.n[ni].o_data, result), # update output
]
with m.Else(): # p.o_ready is false, and something is in buffer.
# Flush the [already processed] buffer to the output port.
- m.d.sync += [self.n.o_valid.eq(1),
- self.flush_buffer(),
- # clear stall condition, declare register empty.
- self.p.o_ready.eq(1),
+ m.d.sync += [self.n[ni].o_valid.eq(1), # declare reg empty
+ eq(self.n[ni].o_data, r_data), # flush buffer
+ self.p[pi].o_ready.eq(1), # clear stall
]
# ignore input, since p.o_ready is also false.
# (n.i_ready) is false here: next stage is ready
with m.Elif(o_n_validn): # next stage being told "ready"
- m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
- self.p.o_ready.eq(1), # Keep the buffer empty
- # set the output data (from comb result)
- self.update_output(),
+ m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
+ self.p[pi].o_ready.eq(1), # Keep the buffer empty
+ eq(self.n[ni].o_data, result), # set output data
]
+
# (n.i_ready) false and (n.o_valid) true:
with m.Elif(i_p_valid_o_p_ready):
# If next stage *is* ready, and not stalled yet, accept input
- m.d.sync += self.p.o_ready.eq(~(self.p.i_valid & self.n.o_valid))
+ m.d.sync += self.p[pi].o_ready.eq(~(p_i_valid & self.n[ni].o_valid))
return m
"""
def ispec():
- return Signal(16)
+ return Signal(16, name="example_input_signal")
def ospec():
- return Signal(16)
+ return Signal(16, name="example_output_signal")
def process(i):
""" process the input data and returns it (adds 1)
return i + 1
+class ExampleStageCls:
+ """ an example of how to use the buffered pipeline, in a static class
+ fashion
+ """
+
+ def ispec(self):
+ return Signal(16, name="example_input_signal")
+
+ def ospec(self):
+ return Signal(16, name="example_output_signal")
+
+ def process(self, i):
+ """ process the input data and returns it (adds 1)
+ """
+ return i + 1
+
+
class ExampleBufPipe(BufferedPipeline):
""" an example of how to use the buffered pipeline.
"""
BufferedPipeline.__init__(self, ExampleStage)
-class CombPipe(PipelineBase):
- """A simple pipeline stage containing combinational logic that can execute
- completely in one clock cycle.
-
- Parameters:
- -----------
- input_shape : int or tuple or None
- the shape of ``input.data`` and ``comb_input``
- output_shape : int or tuple or None
- the shape of ``output.data`` and ``comb_output``
- name : str
- the name
-
- Attributes:
- -----------
- input : StageInput
- The pipeline input
- output : StageOutput
- The pipeline output
- comb_input : Signal, input_shape
- The input to the combinatorial logic
- comb_output: Signal, output_shape
- The output of the combinatorial logic
+class UnbufferedPipeline(PipelineBase):
+ """ A simple pipeline stage with single-clock synchronisation
+ and two-way valid/ready synchronised signalling.
+
+ Note that a stall in one stage will result in the entire pipeline
+ chain stalling.
+
+ Also that unlike BufferedPipeline, the valid/ready signalling does NOT
+ travel synchronously with the data: the valid/ready signalling
+ combines in a *combinatorial* fashion. Therefore, a long pipeline
+ chain will lengthen propagation delays.
+
+ Argument: stage. see Stage API, above
+
+ stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
+ stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
+ | |
+ r_data result
+ | |
+ +--process ->-+
+
+ Attributes:
+ -----------
+ p.i_data : StageInput, shaped according to ispec
+ The pipeline input
+ p.o_data : StageOutput, shaped according to ospec
+ The pipeline output
+ r_data : input_shape according to ispec
+ A temporary (buffered) copy of a prior (valid) input.
+ This is HELD if the output is not ready. It is updated
+ SYNCHRONOUSLY.
+ result: output_shape according to ospec
+ The output of the combinatorial logic. it is updated
+ COMBINATORIALLY (no clock dependence).
"""
- def __init__(self, stage):
- PipelineBase.__init__(self, stage)
+ def __init__(self, stage, p_len=1, n_len=1):
+        PipelineBase.__init__(self, stage, p_len=p_len, n_len=n_len)
self._data_valid = Signal()
# set up the input and output data
- self.p.i_data = stage.ispec() # input type
- self.n.o_data = stage.ospec() # output type
+ for i in range(p_len):
+ self.p[i].i_data = stage.ispec() # input type
+ for i in range(n_len):
+ self.n[i].o_data = stage.ospec()
def elaborate(self, platform):
m = Module()
- r_data = self.stage.ispec() # input type
+
+ r_data = []
result = self.stage.ospec() # output data
- if hasattr(self.stage, "setup"):
- self.stage.setup(m, r_data)
-
- m.d.comb += eq(result, self.stage.process(r_data))
- m.d.comb += self.n.o_valid.eq(self._data_valid)
- m.d.comb += self.p.o_ready.eq(~self._data_valid | self.n.i_ready)
- m.d.sync += self._data_valid.eq(self.p.i_valid | \
- (~self.n.i_ready & self._data_valid))
- with m.If(self.p.i_valid & self.p.o_ready):
- m.d.sync += eq(r_data, self.p.i_data)
- m.d.comb += eq(self.n.o_data, result)
+ for i in range(len(self.p)):
+ r = self.stage.ispec() # input type
+ r_data.append(r)
+ if hasattr(self.stage, "setup"):
+ self.stage.setup(m, r)
+ if len(r_data) > 1:
+ r_data = Array(r_data)
+
+ pi = 0 # TODO: use p_mux to decide which to select
+        ni = 0 # TODO: use n_mux to decide which to select
+
+ p_i_valid = Signal(reset_less=True)
+ m.d.comb += p_i_valid.eq(self.p[pi].i_valid_logic())
+ m.d.comb += eq(result, self.stage.process(r_data[pi]))
+ m.d.comb += self.n[ni].o_valid.eq(self._data_valid)
+ m.d.comb += self.p[pi].o_ready.eq(~self._data_valid | \
+ self.n[ni].i_ready)
+ m.d.sync += self._data_valid.eq(p_i_valid | \
+ (~self.n[ni].i_ready & self._data_valid))
+        with m.If(p_i_valid & self.p[pi].o_ready):
+ m.d.sync += eq(r_data[pi], self.p[pi].i_data)
+ m.d.comb += eq(self.n[ni].o_data, result)
return m
-class ExampleCombPipe(CombPipe):
+class ExamplePipeline(UnbufferedPipeline):
""" an example of how to use the combinatorial pipeline.
"""
def __init__(self):
- CombPipe.__init__(self, ExampleStage)
+ UnbufferedPipeline.__init__(self, ExampleStage)
if __name__ == '__main__':
with open("test_bufpipe.il", "w") as f:
f.write(vl)
- dut = ExampleCombPipe()
+ dut = ExamplePipeline()
vl = rtlil.convert(dut, ports=dut.ports())
with open("test_combpipe.il", "w") as f:
f.write(vl)