+# SPDX-License-Identifier: LGPL-3.0-or-later
""" Pipeline API. For multi-input and multi-output variants, see multipipe.
This work is funded through NLnet under Grant 2019-02-012
honestly it's a lot easier just to create a direct Records-based
class (see ExampleAddRecordStage)
"""
+
def __init__(self, in_shape, out_shape, processfn, setupfn=None):
self.in_shape = in_shape
self.out_shape = out_shape
self.__process = processfn
self.__setup = setupfn
+
def ispec(self): return Record(self.in_shape)
def ospec(self): return Record(self.out_shape)
def process(seif, i): return self.__process(i)
(many APIs would potentially use a static "wrap" method in e.g.
StageCls to achieve a similar effect)
"""
+
def __init__(self, iospecfn): self.iospecfn = iospecfn
def ispec(self): return self.iospecfn()
def ospec(self): return self.iospecfn()
*BYPASSES* a ControlBase instance ready/valid signalling, which
clearly should not be done without a really, really good reason.
"""
+
def __init__(self, stage=None, in_multi=None, stage_ctl=False, maskwid=0):
""" Base class containing ready/valid/data to previous and next stages
* n: contains ready/valid to the next stage
Except when calling Controlbase.connect(), user must also:
- * add data_i member to PrevControl (p) and
- * add data_o member to NextControl (n)
+ * add i_data member to PrevControl (p) and
+ * add o_data member to NextControl (n)
Calling ControlBase._new_data is a good way to do that.
"""
- print ("ControlBase", self, stage, in_multi, stage_ctl)
+ print("ControlBase", self, stage, in_multi, stage_ctl)
StageHelper.__init__(self, stage)
# set up input and output IO ACK (prev/next ready/valid)
self._new_data("data")
def _new_data(self, name):
- """ allocates new data_i and data_o
+ """ allocates new i_data and o_data
"""
- self.p.data_i, self.n.data_o = self.new_specs(name)
+ self.p.i_data, self.n.o_data = self.new_specs(name)
@property
def data_r(self):
- return self.process(self.p.data_i)
+ return self.process(self.p.i_data)
def connect_to_next(self, nxt):
""" helper function to connect to the next stage data/valid/ready.
v | v | v |
out---in out--in out---in
- Also takes care of allocating data_i/data_o, by looking up
+ Also takes care of allocating i_data/o_data, by looking up
the data spec for each end of the pipechain. i.e It is NOT
- necessary to allocate self.p.data_i or self.n.data_o manually:
+ necessary to allocate self.p.i_data or self.n.o_data manually:
this is handled AUTOMATICALLY, here.
Basically this function is the direct equivalent of StageChain,
"""
assert len(pipechain) > 0, "pipechain must be non-zero length"
assert self.stage is None, "do not use connect with a stage"
- eqs = [] # collated list of assignment statements
+ eqs = [] # collated list of assignment statements
# connect inter-chain
for i in range(len(pipechain)-1):
pipe1 = pipechain[i] # earlier
pipe2 = pipechain[i+1] # later (by 1)
- eqs += pipe1.connect_to_next(pipe2) # earlier n to later p
+ eqs += pipe1.connect_to_next(pipe2) # earlier n to later p
# connect front and back of chain to ourselves
front = pipechain[0] # first in chain
end = pipechain[-1] # last in chain
- self.set_specs(front, end) # sets up ispec/ospec functions
- self._new_data("chain") # NOTE: REPLACES existing data
+ self.set_specs(front, end) # sets up ispec/ospec functions
+ self._new_data("chain") # NOTE: REPLACES existing data
eqs += front._connect_in(self) # front p to our p
eqs += end._connect_out(self) # end n to our n
def set_input(self, i):
""" helper function to set the input data (used in unit tests)
"""
- return nmoperator.eq(self.p.data_i, i)
+ return nmoperator.eq(self.p.i_data, i)
def __iter__(self):
- yield from self.p # yields ready/valid/data (data also gets yielded)
- yield from self.n # ditto
+ yield from self.p # yields ready/valid/data (data also gets yielded)
+ yield from self.n # ditto
def ports(self):
return list(self)
m.submodules.p = self.p
m.submodules.n = self.n
- self.setup(m, self.p.data_i)
+ self.setup(m, self.p.i_data)
if not self.p.stage_ctl:
return m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
process --->----^
| |
+-- r_data ->-+
- input data p.data_i is read (only), is processed and goes into an
+ input data p.i_data is read (only), is processed and goes into an
intermediate result store [process()]. this is updated combinatorially.
in a non-stall condition, the intermediate result will go into the
por_pivn = Signal(reset_less=True)
npnn = Signal(reset_less=True)
self.m.d.comb += [p_i_valid.eq(self.p.i_valid_test),
- o_n_validn.eq(~self.n.o_valid),
- n_i_ready.eq(self.n.i_ready_test),
- nir_por.eq(n_i_ready & self.p._o_ready),
- nir_por_n.eq(n_i_ready & ~self.p._o_ready),
- nir_novn.eq(n_i_ready | o_n_validn),
- nirn_novn.eq(~n_i_ready & o_n_validn),
- npnn.eq(nir_por | nirn_novn),
- por_pivn.eq(self.p._o_ready & ~p_i_valid)
- ]
+ o_n_validn.eq(~self.n.o_valid),
+ n_i_ready.eq(self.n.i_ready_test),
+ nir_por.eq(n_i_ready & self.p._o_ready),
+ nir_por_n.eq(n_i_ready & ~self.p._o_ready),
+ nir_novn.eq(n_i_ready | o_n_validn),
+ nirn_novn.eq(~n_i_ready & o_n_validn),
+ npnn.eq(nir_por | nirn_novn),
+ por_pivn.eq(self.p._o_ready & ~p_i_valid)
+ ]
# store result of processing in combinatorial temporary
self.m.d.comb += nmoperator.eq(result, self.data_r)
# if not in stall condition, update the temporary register
- with self.m.If(self.p.o_ready): # not stalled
- self.m.d.sync += nmoperator.eq(r_data, result) # update buffer
+ with self.m.If(self.p.o_ready): # not stalled
+ self.m.d.sync += nmoperator.eq(r_data, result) # update buffer
# data pass-through conditions
with self.m.If(npnn):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
- self.m.d.sync += [self.n.o_valid.eq(p_i_valid), # valid if p_valid
- nmoperator.eq(self.n.data_o, data_o), # update out
- ]
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
+ self.m.d.sync += [self.n.o_valid.eq(p_i_valid), # valid if p_valid
+ # update out
+ nmoperator.eq(self.n.o_data, o_data),
+ ]
# buffer flush conditions (NOTE: can override data passthru conditions)
- with self.m.If(nir_por_n): # not stalled
+ with self.m.If(nir_por_n): # not stalled
# Flush the [already processed] buffer to the output port.
- data_o = self._postprocess(r_data) # XXX TBD, does nothing right now
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(r_data)
self.m.d.sync += [self.n.o_valid.eq(1), # reg empty
- nmoperator.eq(self.n.data_o, data_o), # flush
- ]
+ nmoperator.eq(self.n.o_data, o_data), # flush
+ ]
# output ready conditions
self.m.d.sync += self.p._o_ready.eq(nir_novn | por_pivn)
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
"""
+
def __init__(self, stage, maskwid, in_multi=None, stage_ctl=False):
ControlBase.__init__(self, stage, in_multi, stage_ctl, maskwid)
# XXX EXCEPTIONAL CIRCUMSTANCES: inspection of the data payload
# is NOT "normal" for the Stage API.
p_i_valid = Signal(reset_less=True)
- #print ("self.p.data_i", self.p.data_i)
+ #print ("self.p.i_data", self.p.i_data)
maskedout = Signal(len(self.p.mask_i), reset_less=True)
m.d.comb += maskedout.eq(self.p.mask_i & ~self.p.stop_i)
m.d.comb += p_i_valid.eq(maskedout.bool())
m.d.sync += self.n.o_valid.eq(p_i_valid)
m.d.sync += self.n.mask_o.eq(Mux(p_i_valid, maskedout, 0))
with m.If(p_i_valid):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
- m.d.sync += nmoperator.eq(self.n.data_o, data_o) # update output
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
+ m.d.sync += nmoperator.eq(self.n.o_data, o_data) # update output
# output valid if
# input always "ready"
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
"""
+
def __init__(self, stage, maskwid, in_multi=None, stage_ctl=False,
- dynamic=False):
+ dynamic=False):
ControlBase.__init__(self, stage, in_multi, stage_ctl, maskwid)
self.dynamic = dynamic
if dynamic:
# establish if the data should be passed on. cancellation is
# a global signal.
p_i_valid = Signal(reset_less=True)
- #print ("self.p.data_i", self.p.data_i)
+ #print ("self.p.i_data", self.p.i_data)
maskedout = Signal(len(self.p.mask_i), reset_less=True)
m.d.comb += maskedout.eq(self.p.mask_i & ~self.p.stop_i)
m.d.comb += [p_i_valid.eq(self.p.i_valid_test & maskedout.bool()),
n_i_ready.eq(self.n.i_ready_test),
p_i_valid_p_o_ready.eq(p_i_valid & self.p.o_ready),
- ]
+ ]
# if idmask nonzero, mask gets passed on (and register set).
# register is left as-is if idmask is zero, but out-mask is set to
m.d.sync += r_busy.eq(1) # output valid
# previous invalid or not ready, however next is accepting
with m.Elif(n_i_ready):
- m.d.sync += r_busy.eq(0) # ...so set output invalid
+ m.d.sync += r_busy.eq(0) # ...so set output invalid
# output set combinatorially from latch
- m.d.comb += nmoperator.eq(self.n.data_o, r_latch)
+ m.d.comb += nmoperator.eq(self.n.o_data, r_latch)
m.d.comb += self.n.o_valid.eq(r_busy)
# if next is ready, so is previous
m.d.comb += self.p._o_ready.eq(self.n.i_ready_test)
m.d.comb += self.n.stop_o.eq(self.p.stop_i)
m.d.comb += self.n.mask_o.eq(self.p.mask_i)
- m.d.comb += nmoperator.eq(self.n.data_o, data_r)
+ m.d.comb += nmoperator.eq(self.n.o_data, data_r)
return self.m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
Truth Table
------- - - - -
0 0 0 0 0 0 >0 0 reg
0 0 0 1 0 1 >1 0 reg
- 0 0 1 0 0 0 0 1 process(data_i)
- 0 0 1 1 0 0 0 1 process(data_i)
+ 0 0 1 0 0 0 0 1 process(i_data)
+ 0 0 1 1 0 0 0 1 process(i_data)
------- - - - -
0 1 0 0 0 0 >0 0 reg
0 1 0 1 0 1 >1 0 reg
- 0 1 1 0 0 0 0 1 process(data_i)
- 0 1 1 1 0 0 0 1 process(data_i)
+ 0 1 1 0 0 0 0 1 process(i_data)
+ 0 1 1 1 0 0 0 1 process(i_data)
------- - - - -
1 0 0 0 0 0 >0 0 reg
1 0 0 1 0 1 >1 0 reg
- 1 0 1 0 0 0 0 1 process(data_i)
- 1 0 1 1 0 0 0 1 process(data_i)
+ 1 0 1 0 0 0 0 1 process(i_data)
+ 1 0 1 1 0 0 0 1 process(i_data)
------- - - - -
- 1 1 0 0 1 0 1 0 process(data_i)
- 1 1 0 1 1 1 1 0 process(data_i)
- 1 1 1 0 1 0 1 1 process(data_i)
- 1 1 1 1 1 0 1 1 process(data_i)
+ 1 1 0 0 1 0 1 0 process(i_data)
+ 1 1 0 1 1 1 1 0 process(i_data)
+ 1 1 1 0 1 0 1 1 process(i_data)
+ 1 1 1 1 1 0 1 1 process(i_data)
------- - - - -
"""
m.d.comb += [p_i_valid.eq(self.p.i_valid_test),
n_i_ready.eq(self.n.i_ready_test),
p_i_valid_p_o_ready.eq(p_i_valid & self.p.o_ready),
- ]
+ ]
# store result of processing in combinatorial temporary
m.d.comb += nmoperator.eq(result, self.data_r)
# previous valid and ready
with m.If(p_i_valid_p_o_ready):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
m.d.sync += [r_busy.eq(1), # output valid
- nmoperator.eq(self.n.data_o, data_o), # update output
- ]
+ nmoperator.eq(self.n.o_data, o_data), # update output
+ ]
# previous invalid or not ready, however next is accepting
with m.Elif(n_i_ready):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
- m.d.sync += [nmoperator.eq(self.n.data_o, data_o)]
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
+ m.d.sync += [nmoperator.eq(self.n.o_data, o_data)]
# TODO: could still send data here (if there was any)
- #m.d.sync += self.n.o_valid.eq(0) # ...so set output invalid
- m.d.sync += r_busy.eq(0) # ...so set output invalid
+ # m.d.sync += self.n.o_valid.eq(0) # ...so set output invalid
+ m.d.sync += r_busy.eq(0) # ...so set output invalid
m.d.comb += self.n.o_valid.eq(r_busy)
# if next is ready, so is previous
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
r_data result
| |
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ p.o_data : StageOutput, shaped according to ospec
The pipeline output
r_data : input_shape according to ispec
A temporary (buffered) copy of a prior (valid) input.
1 0 1 0 0 1 1 reg
1 0 1 1 0 1 1 reg
------- - - -
- 1 1 0 0 0 1 1 process(data_i)
- 1 1 0 1 1 1 0 process(data_i)
- 1 1 1 0 0 1 1 process(data_i)
- 1 1 1 1 0 1 1 process(data_i)
+ 1 1 0 0 0 1 1 process(i_data)
+ 1 1 0 1 1 1 0 process(i_data)
+ 1 1 1 0 0 1 1 process(i_data)
+ 1 1 1 1 0 1 1 process(i_data)
------- - - -
Note: PoR is *NOT* involved in the above decision-making.
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- data_valid = Signal() # is data valid or not
- r_data = _spec(self.stage.ospec, "r_tmp") # output type
+ data_valid = Signal() # is data valid or not
+ r_data = _spec(self.stage.ospec, "r_tmp") # output type
# some temporaries
p_i_valid = Signal(reset_less=True)
with m.If(pv):
m.d.sync += nmoperator.eq(r_data, self.data_r)
- data_o = self._postprocess(r_data) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, data_o)
+ o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ m.d.comb += nmoperator.eq(self.n.o_data, o_data)
return self.m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| | |
+- process-> buf <-+
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ p.o_data : StageOutput, shaped according to ospec
The pipeline output
buf : output_shape according to ospec
A temporary (buffered) copy of a valid output
V R R V V R
------- - - -
- 0 0 0 0 0 0 1 process(data_i)
+ 0 0 0 0 0 0 1 process(i_data)
0 0 0 1 1 1 0 reg (odata, unchanged)
- 0 0 1 0 0 0 1 process(data_i)
- 0 0 1 1 0 0 1 process(data_i)
+ 0 0 1 0 0 0 1 process(i_data)
+ 0 0 1 1 0 0 1 process(i_data)
------- - - -
- 0 1 0 0 0 0 1 process(data_i)
+ 0 1 0 0 0 0 1 process(i_data)
0 1 0 1 1 1 0 reg (odata, unchanged)
- 0 1 1 0 0 0 1 process(data_i)
- 0 1 1 1 0 0 1 process(data_i)
+ 0 1 1 0 0 0 1 process(i_data)
+ 0 1 1 1 0 0 1 process(i_data)
------- - - -
- 1 0 0 0 0 1 1 process(data_i)
+ 1 0 0 0 0 1 1 process(i_data)
1 0 0 1 1 1 0 reg (odata, unchanged)
- 1 0 1 0 0 1 1 process(data_i)
- 1 0 1 1 0 1 1 process(data_i)
+ 1 0 1 0 0 1 1 process(i_data)
+ 1 0 1 1 0 1 1 process(i_data)
------- - - -
- 1 1 0 0 0 1 1 process(data_i)
+ 1 1 0 0 0 1 1 process(i_data)
1 1 0 1 1 1 0 reg (odata, unchanged)
- 1 1 1 0 0 1 1 process(data_i)
- 1 1 1 1 0 1 1 process(data_i)
+ 1 1 1 0 0 1 1 process(i_data)
+ 1 1 1 1 0 1 1 process(i_data)
------- - - -
Note: PoR is *NOT* involved in the above decision-making.
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- buf_full = Signal() # is data valid or not
- buf = _spec(self.stage.ospec, "r_tmp") # output type
+ buf_full = Signal() # is data valid or not
+ buf = _spec(self.stage.ospec, "r_tmp") # output type
# some temporaries
p_i_valid = Signal(reset_less=True)
m.d.comb += self.p._o_ready.eq(~buf_full)
m.d.sync += buf_full.eq(~self.n.i_ready_test & self.n.o_valid)
- data_o = Mux(buf_full, buf, self.data_r)
- data_o = self._postprocess(data_o) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, data_o)
- m.d.sync += nmoperator.eq(buf, self.n.data_o)
+ o_data = Mux(buf_full, buf, self.data_r)
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ m.d.comb += nmoperator.eq(self.n.o_data, o_data)
+ m.d.sync += nmoperator.eq(buf, self.n.o_data)
return self.m
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- r_data = _spec(self.stage.ospec, "r_tmp") # output type
+ r_data = _spec(self.stage.ospec, "r_tmp") # output type
# temporaries
p_i_valid = Signal(reset_less=True)
m.d.comb += p_i_valid.eq(self.p.i_valid_test)
m.d.comb += pvr.eq(p_i_valid & self.p.o_ready)
- m.d.comb += self.p.o_ready.eq(~self.n.o_valid | self.n.i_ready_test)
- m.d.sync += self.n.o_valid.eq(p_i_valid | ~self.p.o_ready)
+ m.d.comb += self.p.o_ready.eq(~self.n.o_valid | self.n.i_ready_test)
+ m.d.sync += self.n.o_valid.eq(p_i_valid | ~self.p.o_ready)
odata = Mux(pvr, self.data_r, r_data)
m.d.sync += nmoperator.eq(r_data, odata)
- r_data = self._postprocess(r_data) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, r_data)
+ r_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ m.d.comb += nmoperator.eq(self.n.o_data, r_data)
return m
class RegisterPipeline(UnbufferedPipeline):
""" A pipeline stage that delays by one clock cycle, creating a
- sync'd latch out of data_o and o_valid as an indirect byproduct
+ sync'd latch out of o_data and o_valid as an indirect byproduct
of using PassThroughStage
"""
+
def __init__(self, iospecfn):
UnbufferedPipeline.__init__(self, PassThroughStage(iospecfn))
""" FIFO Control. Uses Queue to store data, coincidentally
happens to have same valid/ready signalling as Stage API.
- data_i -> fifo.din -> FIFO -> fifo.dout -> data_o
+ i_data -> fifo.din -> FIFO -> fifo.dout -> o_data
"""
+
def __init__(self, depth, stage, in_multi=None, stage_ctl=False,
- fwft=True, pipe=False):
+ fwft=True, pipe=False):
""" FIFO Control
* :depth: number of entries in the FIFO
data is processed (and located) as follows:
self.p self.stage temp fn temp fn temp fp self.n
- data_i->process()->result->cat->din.FIFO.dout->cat(data_o)
+ i_data->process()->result->cat->din.FIFO.dout->cat(o_data)
yes, really: cat produces a Cat() which can be assigned to.
this is how the FIFO gets de-catted without needing a de-cat
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- # make a FIFO with a signal of equal width to the data_o.
- (fwidth, _) = nmoperator.shape(self.n.data_o)
+ # make a FIFO with a signal of equal width to the o_data.
+ (fwidth, _) = nmoperator.shape(self.n.o_data)
fifo = Queue(fwidth, self.fdepth, fwft=self.fwft, pipe=self.pipe)
m.submodules.fifo = fifo
- def processfn(data_i):
+ def processfn(i_data):
# store result of processing in combinatorial temporary
result = _spec(self.stage.ospec, "r_temp")
- m.d.comb += nmoperator.eq(result, self.process(data_i))
+ m.d.comb += nmoperator.eq(result, self.process(i_data))
return nmoperator.cat(result)
- ## prev: make the FIFO (Queue object) "look" like a PrevControl...
+ # prev: make the FIFO (Queue object) "look" like a PrevControl...
m.submodules.fp = fp = PrevControl()
- fp.i_valid, fp._o_ready, fp.data_i = fifo.w_en, fifo.w_rdy, fifo.w_data
+ fp.i_valid, fp._o_ready, fp.i_data = fifo.w_en, fifo.w_rdy, fifo.w_data
m.d.comb += fp._connect_in(self.p, fn=processfn)
# next: make the FIFO (Queue object) "look" like a NextControl...
m.submodules.fn = fn = NextControl()
- fn.o_valid, fn.i_ready, fn.data_o = fifo.r_rdy, fifo.r_en, fifo.r_data
+ fn.o_valid, fn.i_ready, fn.o_data = fifo.r_rdy, fifo.r_en, fifo.r_data
connections = fn._connect_out(self.n, fn=nmoperator.cat)
- valid_eq, ready_eq, data_o = connections
+ valid_eq, ready_eq, o_data = connections
# ok ok so we can't just do the ready/valid eqs straight:
# first 2 from connections are the ready/valid, 3rd is data.
if self.fwft:
- m.d.comb += [valid_eq, ready_eq] # combinatorial on next ready/valid
+ # combinatorial on next ready/valid
+ m.d.comb += [valid_eq, ready_eq]
else:
- m.d.sync += [valid_eq, ready_eq] # non-fwft mode needs sync
- data_o = self._postprocess(data_o) # XXX TBD, does nothing right now
- m.d.comb += data_o
+ m.d.sync += [valid_eq, ready_eq] # non-fwft mode needs sync
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ m.d.comb += o_data
return m
class UnbufferedPipeline(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 1, stage, in_multi, stage_ctl,
- fwft=True, pipe=False)
+ fwft=True, pipe=False)
# aka "BreakReadyStage" XXX had to set fwft=True to get it to work
+
+
class PassThroughHandshake(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 1, stage, in_multi, stage_ctl,
- fwft=True, pipe=True)
+ fwft=True, pipe=True)
# this is *probably* BufferedHandshake, although test #997 now succeeds.
+
+
class BufferedHandshake(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 2, stage, in_multi, stage_ctl,
- fwft=True, pipe=False)
+ fwft=True, pipe=False)
"""