may be a multi-bit signal, where all bits are required
to be asserted to indicate "valid".
* o_ready: output to previous stage indicating readiness to accept data
- * data_i : an input - MUST be added by the USER of this class
+ * i_data : an input - MUST be added by the USER of this class
"""
def __init__(self, i_width=1, stage_ctl=False, maskwid=0, offs=0):
self.stop_i = Signal(maskwid) # prev >>in self
self.i_valid = Signal(i_width, name="p_i_valid") # prev >>in self
self._o_ready = Signal(name="p_o_ready") # prev <<out self
- self.data_i = None # XXX MUST BE ADDED BY USER
+ self.i_data = None # XXX MUST BE ADDED BY USER
if stage_ctl:
self.s_o_ready = Signal(name="p_s_o_rdy") # prev <<out self
self.trigger = Signal(reset_less=True)
res.append(self.stop_i.eq(prev.stop_i))
if do_data is False:
return res
- data_i = fn(prev.data_i) if fn is not None else prev.data_i
- return res + [nmoperator.eq(self.data_i, data_i)]
+ i_data = fn(prev.i_data) if fn is not None else prev.i_data
+ return res + [nmoperator.eq(self.i_data, i_data)]
@property
def i_valid_test(self):
return m
def eq(self, i):
- res = [nmoperator.eq(self.data_i, i.data_i),
+ res = [nmoperator.eq(self.i_data, i.i_data),
self.o_ready.eq(i.o_ready),
self.i_valid.eq(i.i_valid)]
if self.maskwid:
if self.maskwid:
yield self.mask_i
yield self.stop_i
- if hasattr(self.data_i, "ports"):
- yield from self.data_i.ports()
- elif (isinstance(self.data_i, Sequence) or
- isinstance(self.data_i, Iterable)):
- yield from self.data_i
+ if hasattr(self.i_data, "ports"):
+ yield from self.i_data.ports()
+ elif (isinstance(self.i_data, Sequence) or
+ isinstance(self.i_data, Iterable)):
+ yield from self.i_data
else:
- yield self.data_i
+ yield self.i_data
def ports(self):
return list(self)
""" contains the signals that go *to* the next stage (both in and out)
* o_valid: output indicating to next stage that data is valid
* i_ready: input from next stage indicating that it can accept data
- * data_o : an output - MUST be added by the USER of this class
+ * o_data : an output - MUST be added by the USER of this class
"""
def __init__(self, stage_ctl=False, maskwid=0):
self.stage_ctl = stage_ctl
self.stop_o = Signal(maskwid) # self out>> next
self.o_valid = Signal(name="n_o_valid") # self out>> next
self.i_ready = Signal(name="n_i_ready") # self <<in next
- self.data_o = None # XXX MUST BE ADDED BY USER
+ self.o_data = None # XXX MUST BE ADDED BY USER
#if self.stage_ctl:
self.d_valid = Signal(reset=1) # INTERNAL (data valid)
self.trigger = Signal(reset_less=True)
if do_stop:
res.append(nxt.stop_i.eq(self.stop_o))
if do_data:
- res.append(nmoperator.eq(nxt.data_i, self.data_o))
- print ("connect to next", self, self.maskwid, nxt.data_i,
+ res.append(nmoperator.eq(nxt.i_data, self.o_data))
+ print ("connect to next", self, self.maskwid, nxt.i_data,
do_data, do_stop)
return res
res.append(nxt.stop_o.eq(self.stop_o))
if not do_data:
return res
- data_o = fn(nxt.data_o) if fn is not None else nxt.data_o
- return res + [nmoperator.eq(data_o, self.data_o)]
+ o_data = fn(nxt.o_data) if fn is not None else nxt.o_data
+ return res + [nmoperator.eq(o_data, self.o_data)]
def elaborate(self, platform):
m = Module()
if self.maskwid:
yield self.mask_o
yield self.stop_o
- if hasattr(self.data_o, "ports"):
- yield from self.data_o.ports()
- elif (isinstance(self.data_o, Sequence) or
- isinstance(self.data_o, Iterable)):
- yield from self.data_o
+ if hasattr(self.o_data, "ports"):
+ yield from self.o_data.ports()
+ elif (isinstance(self.o_data, Sequence) or
+ isinstance(self.o_data, Iterable)):
+ yield from self.o_data
else:
- yield self.data_o
+ yield self.o_data
def ports(self):
return list(self)
* n: contains ready/valid to the next stage
User must also:
- * add data_i members to PrevControl and
- * add data_o member to NextControl
+ * add i_data members to PrevControl and
+ * add o_data member to NextControl
"""
self.routemask = routemask
# set up input and output IO ACK (prev/next ready/valid)
def set_input(self, i, idx=0):
""" helper function to set the input data
"""
- return eq(self.p[idx].data_i, i)
+ return eq(self.p[idx].i_data, i)
def elaborate(self, platform):
m = Module()
* n: contains ready/valid to the next stages PLURAL
User must also:
- * add data_i member to PrevControl and
- * add data_o members to NextControl
+ * add i_data member to PrevControl and
+ * add o_data members to NextControl
"""
if routemask:
def set_input(self, i):
""" helper function to set the input data
"""
- return eq(self.p.data_i, i)
+ return eq(self.p.i_data, i)
def __iter__(self):
yield from self.p
Attributes:
-----------
- p.data_i : stage input data (non-array). shaped according to ispec
- n.data_o : stage output data array. shaped according to ospec
+ p.i_data : stage input data (non-array). shaped according to ispec
+ n.o_data : stage output data array. shaped according to ospec
"""
def __init__(self, stage, n_len, n_mux, maskwid=0, routemask=False):
self.n_mux = n_mux
# set up the input and output data
- self.p.data_i = _spec(stage.ispec, 'data_i') # input type
+ self.p.i_data = _spec(stage.ispec, 'i_data') # input type
for i in range(n_len):
- name = 'data_o_%d' % i
- self.n[i].data_o = _spec(stage.ospec, name) # output type
+ name = 'o_data_%d' % i
+ self.n[i].o_data = _spec(stage.ospec, name) # output type
def process(self, i):
if hasattr(self.stage, "process"):
# send data on
#with m.If(pv):
- m.d.comb += eq(r_data, self.p.data_i)
- m.d.comb += eq(self.n[muxid].data_o, self.process(r_data))
+ m.d.comb += eq(r_data, self.p.i_data)
+ m.d.comb += eq(self.n[muxid].o_data, self.process(r_data))
if self.maskwid:
if self.routemask: # straight "routing" mode - treat like data
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ n.o_data : StageOutput, shaped according to ospec
The pipeline output
r_data : input_shape according to ispec
A temporary (buffered) copy of a prior (valid) input.
# set up the input and output data
for i in range(p_len):
- name = 'data_i_%d' % i
- self.p[i].data_i = _spec(stage.ispec, name) # input type
- self.n.data_o = _spec(stage.ospec, 'data_o')
+ name = 'i_data_%d' % i
+ self.p[i].i_data = _spec(stage.ispec, name) # input type
+ self.n.o_data = _spec(stage.ospec, 'o_data')
def process(self, i):
if hasattr(self.stage, "process"):
#m.d.comb += vr.eq(p.i_valid & p.o_ready)
with m.If(vr):
m.d.comb += eq(self.n.mask_o, self.p[i].mask_i)
- m.d.comb += eq(r_data[i], self.p[i].data_i)
+ m.d.comb += eq(r_data[i], self.p[i].i_data)
else:
ml = [] # accumulate output masks
ms = [] # accumulate output stops
m.d.comb += maskedout.eq(1)
m.d.comb += vr.eq(maskedout.bool() & p.i_valid & p.o_ready)
with m.If(vr):
- m.d.comb += eq(r_data[i], self.p[i].data_i)
+ m.d.comb += eq(r_data[i], self.p[i].i_data)
if self.maskwid:
mlen = len(self.p[i].mask_i)
s = mlen*i
m.d.comb += self.n.mask_o.eq(Cat(*ml))
m.d.comb += self.n.stop_o.eq(Cat(*ms))
- m.d.comb += eq(self.n.data_o, self.process(r_data[mid]))
+ m.d.comb += eq(self.n.o_data, self.process(r_data[mid]))
return m
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ n.o_data : StageOutput, shaped according to ospec
The pipeline output
r_data : input_shape according to ispec
A temporary (buffered) copy of a prior (valid) input.
# set up the input and output data
for i in range(p_len):
- name = 'data_i_%d' % i
- self.p[i].data_i = _spec(stage.ispec, name) # input type
- self.n.data_o = _spec(stage.ospec, 'data_o')
+ name = 'i_data_%d' % i
+ self.p[i].i_data = _spec(stage.ispec, name) # input type
+ self.n.o_data = _spec(stage.ospec, 'o_data')
def process(self, i):
if hasattr(self.stage, "process"):
#m.d.comb += vr.eq(p.i_valid & p.o_ready)
with m.If(vr):
m.d.comb += eq(self.n.mask_o, self.p[i].mask_i)
- m.d.comb += eq(r_data[i], self.p[i].data_i)
+ m.d.comb += eq(r_data[i], self.p[i].i_data)
else:
ml = [] # accumulate output masks
ms = [] # accumulate output stops
m.d.comb += maskedout.eq(1)
m.d.comb += vr.eq(maskedout.bool() & p.i_valid & p.o_ready)
with m.If(vr):
- m.d.comb += eq(r_data[i], self.p[i].data_i)
+ m.d.comb += eq(r_data[i], self.p[i].i_data)
if self.maskwid:
mlen = len(self.p[i].mask_i)
s = mlen*i
m.d.comb += self.n.mask_o.eq(Cat(*ml))
m.d.comb += self.n.stop_o.eq(Cat(*ms))
- m.d.comb += eq(self.n.data_o, self.process(r_data[mid]))
+ m.d.comb += eq(self.n.o_data, self.process(r_data[mid]))
return m
routemask=routemask)
# HACK: n-mux is also the stage... so set the muxid equal to input muxid
- muxid = getattr(self.p.data_i, muxidname)
+ muxid = getattr(self.p.i_data, muxidname)
print ("combmuxout", muxidname, muxid)
stage.m_id = muxid
# deq is "dequeue" (data out, aka "next stage")
p_o_ready = self.w_rdy
p_i_valid = self.w_en
- enq_data = self.w_data # aka p_data_i
+ enq_data = self.w_data # aka p_i_data
n_o_valid = self.r_rdy
n_i_ready = self.r_en
- deq_data = self.r_data # aka n_data_o
+ deq_data = self.r_data # aka n_o_data
# intermediaries
ptr_width = bits_for(self.depth - 1) if self.depth > 1 else 0
* n: contains ready/valid to the next stage
Except when calling ControlBase.connect(), user must also:
- * add data_i member to PrevControl (p) and
- * add data_o member to NextControl (n)
+ * add i_data member to PrevControl (p) and
+ * add o_data member to NextControl (n)
Calling ControlBase._new_data is a good way to do that.
"""
print ("ControlBase", self, stage, in_multi, stage_ctl)
self._new_data("data")
def _new_data(self, name):
- """ allocates new data_i and data_o
+ """ allocates new i_data and o_data
"""
- self.p.data_i, self.n.data_o = self.new_specs(name)
+ self.p.i_data, self.n.o_data = self.new_specs(name)
@property
def data_r(self):
- return self.process(self.p.data_i)
+ return self.process(self.p.i_data)
def connect_to_next(self, nxt):
""" helper function to connect to the next stage data/valid/ready.
v | v | v |
out---in out--in out---in
- Also takes care of allocating data_i/data_o, by looking up
+ Also takes care of allocating i_data/o_data, by looking up
the data spec for each end of the pipechain, i.e. it is NOT
- necessary to allocate self.p.data_i or self.n.data_o manually:
+ necessary to allocate self.p.i_data or self.n.o_data manually:
this is handled AUTOMATICALLY, here.
Basically this function is the direct equivalent of StageChain,
def set_input(self, i):
""" helper function to set the input data (used in unit tests)
"""
- return nmoperator.eq(self.p.data_i, i)
+ return nmoperator.eq(self.p.i_data, i)
def __iter__(self):
yield from self.p # yields ready/valid/data (data also gets yielded)
m.submodules.p = self.p
m.submodules.n = self.n
- self.setup(m, self.p.data_i)
+ self.setup(m, self.p.i_data)
if not self.p.stage_ctl:
return m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
process --->----^
| |
+-- r_data ->-+
- input data p.data_i is read (only), is processed and goes into an
+ input data p.i_data is read (only), is processed and goes into an
intermediate result store [process()]. this is updated combinatorially.
in a non-stall condition, the intermediate result will go into the
# data pass-through conditions
with self.m.If(npnn):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
+ o_data = self._postprocess(result) # XXX TBD, does nothing right now
self.m.d.sync += [self.n.o_valid.eq(p_i_valid), # valid if p_valid
- nmoperator.eq(self.n.data_o, data_o), # update out
+ nmoperator.eq(self.n.o_data, o_data), # update out
]
# buffer flush conditions (NOTE: can override data passthru conditions)
with self.m.If(nir_por_n): # not stalled
# Flush the [already processed] buffer to the output port.
- data_o = self._postprocess(r_data) # XXX TBD, does nothing right now
+ o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
self.m.d.sync += [self.n.o_valid.eq(1), # reg empty
- nmoperator.eq(self.n.data_o, data_o), # flush
+ nmoperator.eq(self.n.o_data, o_data), # flush
]
# output ready conditions
self.m.d.sync += self.p._o_ready.eq(nir_novn | por_pivn)
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
"""
# XXX EXCEPTIONAL CIRCUMSTANCES: inspection of the data payload
# is NOT "normal" for the Stage API.
p_i_valid = Signal(reset_less=True)
- #print ("self.p.data_i", self.p.data_i)
+ #print ("self.p.i_data", self.p.i_data)
maskedout = Signal(len(self.p.mask_i), reset_less=True)
m.d.comb += maskedout.eq(self.p.mask_i & ~self.p.stop_i)
m.d.comb += p_i_valid.eq(maskedout.bool())
m.d.sync += self.n.o_valid.eq(p_i_valid)
m.d.sync += self.n.mask_o.eq(Mux(p_i_valid, maskedout, 0))
with m.If(p_i_valid):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
- m.d.sync += nmoperator.eq(self.n.data_o, data_o) # update output
+ o_data = self._postprocess(result) # XXX TBD, does nothing right now
+ m.d.sync += nmoperator.eq(self.n.o_data, o_data) # update output
# output valid if
# input always "ready"
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
"""
# establish if the data should be passed on. cancellation is
# a global signal.
p_i_valid = Signal(reset_less=True)
- #print ("self.p.data_i", self.p.data_i)
+ #print ("self.p.i_data", self.p.i_data)
maskedout = Signal(len(self.p.mask_i), reset_less=True)
m.d.comb += maskedout.eq(self.p.mask_i & ~self.p.stop_i)
m.d.sync += r_busy.eq(0) # ...so set output invalid
# output set combinatorially from latch
- m.d.comb += nmoperator.eq(self.n.data_o, r_latch)
+ m.d.comb += nmoperator.eq(self.n.o_data, r_latch)
m.d.comb += self.n.o_valid.eq(r_busy)
# if next is ready, so is previous
m.d.comb += self.p._o_ready.eq(self.n.i_ready_test)
m.d.comb += self.n.stop_o.eq(self.p.stop_i)
m.d.comb += self.n.mask_o.eq(self.p.mask_i)
- m.d.comb += nmoperator.eq(self.n.data_o, data_r)
+ m.d.comb += nmoperator.eq(self.n.o_data, data_r)
return self.m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
+--process->--^
Truth Table
------- - - - -
0 0 0 0 0 0 >0 0 reg
0 0 0 1 0 1 >1 0 reg
- 0 0 1 0 0 0 0 1 process(data_i)
- 0 0 1 1 0 0 0 1 process(data_i)
+ 0 0 1 0 0 0 0 1 process(i_data)
+ 0 0 1 1 0 0 0 1 process(i_data)
------- - - - -
0 1 0 0 0 0 >0 0 reg
0 1 0 1 0 1 >1 0 reg
- 0 1 1 0 0 0 0 1 process(data_i)
- 0 1 1 1 0 0 0 1 process(data_i)
+ 0 1 1 0 0 0 0 1 process(i_data)
+ 0 1 1 1 0 0 0 1 process(i_data)
------- - - - -
1 0 0 0 0 0 >0 0 reg
1 0 0 1 0 1 >1 0 reg
- 1 0 1 0 0 0 0 1 process(data_i)
- 1 0 1 1 0 0 0 1 process(data_i)
+ 1 0 1 0 0 0 0 1 process(i_data)
+ 1 0 1 1 0 0 0 1 process(i_data)
------- - - - -
- 1 1 0 0 1 0 1 0 process(data_i)
- 1 1 0 1 1 1 1 0 process(data_i)
- 1 1 1 0 1 0 1 1 process(data_i)
- 1 1 1 1 1 0 1 1 process(data_i)
+ 1 1 0 0 1 0 1 0 process(i_data)
+ 1 1 0 1 1 1 1 0 process(i_data)
+ 1 1 1 0 1 0 1 1 process(i_data)
+ 1 1 1 1 1 0 1 1 process(i_data)
------- - - - -
"""
# previous valid and ready
with m.If(p_i_valid_p_o_ready):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
+ o_data = self._postprocess(result) # XXX TBD, does nothing right now
m.d.sync += [r_busy.eq(1), # output valid
- nmoperator.eq(self.n.data_o, data_o), # update output
+ nmoperator.eq(self.n.o_data, o_data), # update output
]
# previous invalid or not ready, however next is accepting
with m.Elif(n_i_ready):
- data_o = self._postprocess(result) # XXX TBD, does nothing right now
- m.d.sync += [nmoperator.eq(self.n.data_o, data_o)]
+ o_data = self._postprocess(result) # XXX TBD, does nothing right now
+ m.d.sync += [nmoperator.eq(self.n.o_data, o_data)]
# TODO: could still send data here (if there was any)
#m.d.sync += self.n.o_valid.eq(0) # ...so set output invalid
m.d.sync += r_busy.eq(0) # ...so set output invalid
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| |
r_data result
| |
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ n.o_data : StageOutput, shaped according to ospec
The pipeline output
r_data : input_shape according to ispec
A temporary (buffered) copy of a prior (valid) input.
1 0 1 0 0 1 1 reg
1 0 1 1 0 1 1 reg
------- - - -
- 1 1 0 0 0 1 1 process(data_i)
- 1 1 0 1 1 1 0 process(data_i)
- 1 1 1 0 0 1 1 process(data_i)
- 1 1 1 1 0 1 1 process(data_i)
+ 1 1 0 0 0 1 1 process(i_data)
+ 1 1 0 1 1 1 0 process(i_data)
+ 1 1 1 0 0 1 1 process(i_data)
+ 1 1 1 1 0 1 1 process(i_data)
------- - - -
Note: PoR is *NOT* involved in the above decision-making.
with m.If(pv):
m.d.sync += nmoperator.eq(r_data, self.data_r)
- data_o = self._postprocess(r_data) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, data_o)
+ o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ m.d.comb += nmoperator.eq(self.n.o_data, o_data)
return self.m
stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
- stage-1 p.data_i >>in stage n.data_o out>> stage+1
+ stage-1 p.i_data >>in stage n.o_data out>> stage+1
| | |
+- process-> buf <-+
Attributes:
-----------
- p.data_i : StageInput, shaped according to ispec
+ p.i_data : StageInput, shaped according to ispec
The pipeline input
- p.data_o : StageOutput, shaped according to ospec
+ n.o_data : StageOutput, shaped according to ospec
The pipeline output
buf : output_shape according to ospec
A temporary (buffered) copy of a valid output
V R R V V R
------- - - -
- 0 0 0 0 0 0 1 process(data_i)
+ 0 0 0 0 0 0 1 process(i_data)
0 0 0 1 1 1 0 reg (odata, unchanged)
- 0 0 1 0 0 0 1 process(data_i)
- 0 0 1 1 0 0 1 process(data_i)
+ 0 0 1 0 0 0 1 process(i_data)
+ 0 0 1 1 0 0 1 process(i_data)
------- - - -
- 0 1 0 0 0 0 1 process(data_i)
+ 0 1 0 0 0 0 1 process(i_data)
0 1 0 1 1 1 0 reg (odata, unchanged)
- 0 1 1 0 0 0 1 process(data_i)
- 0 1 1 1 0 0 1 process(data_i)
+ 0 1 1 0 0 0 1 process(i_data)
+ 0 1 1 1 0 0 1 process(i_data)
------- - - -
- 1 0 0 0 0 1 1 process(data_i)
+ 1 0 0 0 0 1 1 process(i_data)
1 0 0 1 1 1 0 reg (odata, unchanged)
- 1 0 1 0 0 1 1 process(data_i)
- 1 0 1 1 0 1 1 process(data_i)
+ 1 0 1 0 0 1 1 process(i_data)
+ 1 0 1 1 0 1 1 process(i_data)
------- - - -
- 1 1 0 0 0 1 1 process(data_i)
+ 1 1 0 0 0 1 1 process(i_data)
1 1 0 1 1 1 0 reg (odata, unchanged)
- 1 1 1 0 0 1 1 process(data_i)
- 1 1 1 1 0 1 1 process(data_i)
+ 1 1 1 0 0 1 1 process(i_data)
+ 1 1 1 1 0 1 1 process(i_data)
------- - - -
Note: PoR is *NOT* involved in the above decision-making.
m.d.comb += self.p._o_ready.eq(~buf_full)
m.d.sync += buf_full.eq(~self.n.i_ready_test & self.n.o_valid)
- data_o = Mux(buf_full, buf, self.data_r)
- data_o = self._postprocess(data_o) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, data_o)
- m.d.sync += nmoperator.eq(buf, self.n.data_o)
+ o_data = Mux(buf_full, buf, self.data_r)
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ m.d.comb += nmoperator.eq(self.n.o_data, o_data)
+ m.d.sync += nmoperator.eq(buf, self.n.o_data)
return self.m
odata = Mux(pvr, self.data_r, r_data)
m.d.sync += nmoperator.eq(r_data, odata)
r_data = self._postprocess(r_data) # XXX TBD, does nothing right now
- m.d.comb += nmoperator.eq(self.n.data_o, r_data)
+ m.d.comb += nmoperator.eq(self.n.o_data, r_data)
return m
class RegisterPipeline(UnbufferedPipeline):
""" A pipeline stage that delays by one clock cycle, creating a
- sync'd latch out of data_o and o_valid as an indirect byproduct
+ sync'd latch out of o_data and o_valid as an indirect byproduct
of using PassThroughStage
"""
def __init__(self, iospecfn):
""" FIFO Control. Uses Queue to store data, coincidentally
happens to have same valid/ready signalling as Stage API.
- data_i -> fifo.din -> FIFO -> fifo.dout -> data_o
+ i_data -> fifo.din -> FIFO -> fifo.dout -> o_data
"""
def __init__(self, depth, stage, in_multi=None, stage_ctl=False,
fwft=True, pipe=False):
data is processed (and located) as follows:
self.p self.stage temp fn temp fn temp fp self.n
- data_i->process()->result->cat->din.FIFO.dout->cat(data_o)
+ i_data->process()->result->cat->din.FIFO.dout->cat(o_data)
yes, really: cat produces a Cat() which can be assigned to.
this is how the FIFO gets de-catted without needing a de-cat
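# Sketch of the cat/de-cat trick described above, reusing names that appear in
# elaborate() below (m, fifo, nmoperator); the two ospec() records stand in for
# whatever payload the stage actually defines:

    result  = self.stage.ospec()                         # processed payload (din side)
    rec_out = self.stage.ospec()                         # payload re-assembled on exit
    m.d.comb += fifo.w_data.eq(nmoperator.cat(result))   # cat: record -> flat din
    m.d.comb += nmoperator.cat(rec_out).eq(fifo.r_data)  # de-cat: flat dout -> record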
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- # make a FIFO with a signal of equal width to the data_o.
- (fwidth, _) = nmoperator.shape(self.n.data_o)
+ # make a FIFO with a signal of equal width to the o_data.
+ (fwidth, _) = nmoperator.shape(self.n.o_data)
fifo = Queue(fwidth, self.fdepth, fwft=self.fwft, pipe=self.pipe)
m.submodules.fifo = fifo
- def processfn(data_i):
+ def processfn(i_data):
# store result of processing in combinatorial temporary
result = _spec(self.stage.ospec, "r_temp")
- m.d.comb += nmoperator.eq(result, self.process(data_i))
+ m.d.comb += nmoperator.eq(result, self.process(i_data))
return nmoperator.cat(result)
## prev: make the FIFO (Queue object) "look" like a PrevControl...
m.submodules.fp = fp = PrevControl()
- fp.i_valid, fp._o_ready, fp.data_i = fifo.w_en, fifo.w_rdy, fifo.w_data
+ fp.i_valid, fp._o_ready, fp.i_data = fifo.w_en, fifo.w_rdy, fifo.w_data
m.d.comb += fp._connect_in(self.p, fn=processfn)
# next: make the FIFO (Queue object) "look" like a NextControl...
m.submodules.fn = fn = NextControl()
- fn.o_valid, fn.i_ready, fn.data_o = fifo.r_rdy, fifo.r_en, fifo.r_data
+ fn.o_valid, fn.i_ready, fn.o_data = fifo.r_rdy, fifo.r_en, fifo.r_data
connections = fn._connect_out(self.n, fn=nmoperator.cat)
- valid_eq, ready_eq, data_o = connections
+ valid_eq, ready_eq, o_data = connections
# ok ok so we can't just do the ready/valid eqs straight:
# first 2 from connections are the ready/valid, 3rd is data.
m.d.comb += [valid_eq, ready_eq] # combinatorial on next ready/valid
else:
m.d.sync += [valid_eq, ready_eq] # non-fwft mode needs sync
- data_o = self._postprocess(data_o) # XXX TBD, does nothing right now
- m.d.comb += data_o
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ m.d.comb += o_data
return m
* "Prev" port:
- * ``p_data_i``: value to be shifted
+ * ``p_i_data``: value to be shifted
* ``p_shift_i``: shift amount
* "Next" port:
- * ``n_data_o``: shifted value
+ * ``n_o_data``: shifted value
* ``n_o_valid`` and ``n_i_ready``: handshake
"""
def __init__(self, width):
self.width = width
"""data width"""
- self.p_data_i = Signal(width)
+ self.p_i_data = Signal(width)
self.p_shift_i = Signal(width)
self.op__sdir = Signal()
self.p_i_valid = Signal()
self.p_o_ready = Signal()
- self.n_data_o = Signal(width)
+ self.n_o_data = Signal(width)
self.n_o_valid = Signal()
self.n_i_ready = Signal()
# build the data flow
m.d.comb += [
# connect input and output
- shift_in.eq(self.p_data_i),
- self.n_data_o.eq(shift_reg),
+ shift_in.eq(self.p_i_data),
+ self.n_o_data.eq(shift_reg),
# generate shifted views of the register
shift_left_by_1.eq(Cat(0, shift_reg[:-1])),
shift_right_by_1.eq(Cat(shift_reg[1:], 0)),
def __iter__(self):
yield self.op__sdir
- yield self.p_data_i
+ yield self.p_i_data
yield self.p_shift_i
yield self.p_i_valid
yield self.p_o_ready
yield self.n_i_ready
yield self.n_o_valid
- yield self.n_data_o
+ yield self.n_o_data
def ports(self):
return list(self)
with gtkw.group("prev port"):
gtkw.trace(dut + "op__sdir", color=style_input)
# demonstrates using decimal base (default is hex)
- gtkw.trace(dut + "p_data_i[7:0]", color=style_input,
+ gtkw.trace(dut + "p_i_data[7:0]", color=style_input,
datafmt='dec')
gtkw.trace(dut + "p_shift_i[7:0]", color=style_input,
datafmt='dec')
gtkw.trace(dut + "count[3:0]")
gtkw.trace(dut + "shift_reg[7:0]", datafmt='dec')
with gtkw.group("next port"):
- gtkw.trace(dut + "n_data_o[7:0]", color=style_output,
+ gtkw.trace(dut + "n_o_data[7:0]", color=style_output,
datafmt='dec')
gtkw.trace(dut + "n_o_valid", color=style_output)
gtkw.trace(dut + "n_i_ready", color=style_input)
('prev port', [
# attach a class style for each signal
('op__sdir', 'in'),
- ('p_data_i[7:0]', 'in'),
+ ('p_i_data[7:0]', 'in'),
('p_shift_i[7:0]', 'in'),
('p_i_valid', 'in'),
('p_o_ready', 'out'),
'shift_reg[7:0]',
]),
('next port', [
- ('n_data_o[7:0]', 'out'),
+ ('n_o_data[7:0]', 'out'),
('n_o_valid', 'out'),
('n_i_ready', 'in'),
]),
def send(data, shift, direction):
# present input data and assert i_valid
- yield dut.p_data_i.eq(data)
+ yield dut.p_i_data.eq(data)
yield dut.p_shift_i.eq(shift)
yield dut.op__sdir.eq(direction)
yield dut.p_i_valid.eq(1)
yield msg.eq(1)
# clear input data and negate p.i_valid
yield dut.p_i_valid.eq(0)
- yield dut.p_data_i.eq(0)
+ yield dut.p_i_data.eq(0)
yield dut.p_shift_i.eq(0)
yield dut.op__sdir.eq(0)
while not (yield dut.n_o_valid):
yield
# read result
- result = yield dut.n_data_o
+ result = yield dut.n_o_data
# negate n.i_ready
yield dut.n_i_ready.eq(0)
# check result
yield
# yield dut.i_p_rst.eq(0)
yield dut.n.i_ready.eq(1)
- yield dut.p.data_i.eq(5)
+ yield dut.p.i_data.eq(5)
yield dut.p.i_valid.eq(1)
yield
- yield dut.p.data_i.eq(7)
+ yield dut.p.i_data.eq(7)
yield from check_o_n_valid(dut, 0) # effects of i_p_valid delayed
yield
yield from check_o_n_valid(dut, 1) # ok *now* i_p_valid effect is felt
- yield dut.p.data_i.eq(2)
+ yield dut.p.i_data.eq(2)
yield
# begin going into "stall" (next stage says ready)
yield dut.n.i_ready.eq(0)
- yield dut.p.data_i.eq(9)
+ yield dut.p.i_data.eq(9)
yield
yield dut.p.i_valid.eq(0)
- yield dut.p.data_i.eq(12)
+ yield dut.p.i_data.eq(12)
yield
- yield dut.p.data_i.eq(32)
+ yield dut.p.i_data.eq(32)
yield dut.n.i_ready.eq(1)
yield
yield from check_o_n_valid(dut, 1) # buffer still needs to output
yield
# yield dut.p.i_rst.eq(0)
yield dut.n.i_ready.eq(1)
- yield dut.p.data_i.eq(5)
+ yield dut.p.i_data.eq(5)
yield dut.p.i_valid.eq(1)
yield
- yield dut.p.data_i.eq(7)
+ yield dut.p.i_data.eq(7)
# effects of i_p_valid delayed 2 clocks
yield from check_o_n_valid2(dut, 0)
yield
# effects of i_p_valid delayed 2 clocks
yield from check_o_n_valid2(dut, 0)
- yield dut.p.data_i.eq(2)
+ yield dut.p.i_data.eq(2)
yield
yield from check_o_n_valid2(dut, 1) # ok *now* i_p_valid effect is felt
# begin going into "stall" (next stage says ready)
yield dut.n.i_ready.eq(0)
- yield dut.p.data_i.eq(9)
+ yield dut.p.i_data.eq(9)
yield
yield dut.p.i_valid.eq(0)
- yield dut.p.data_i.eq(12)
+ yield dut.p.i_data.eq(12)
yield
- yield dut.p.data_i.eq(32)
+ yield dut.p.i_data.eq(32)
yield dut.n.i_ready.eq(1)
yield
yield from check_o_n_valid2(dut, 1) # buffer still needs to output
continue
if send and self.i != len(self.data):
yield self.dut.p.i_valid.eq(1)
- yield self.dut.p.data_i.eq(self.data[self.i])
+ yield self.dut.p.i_data.eq(self.data[self.i])
self.i += 1
else:
yield self.dut.p.i_valid.eq(0)
i_n_ready = yield self.dut.n.i_ready_test
if not o_n_valid or not i_n_ready:
continue
- data_o = yield self.dut.n.data_o
- self.resultfn(data_o, self.data[self.o], self.i, self.o)
+ o_data = yield self.dut.n.o_data
+ self.resultfn(o_data, self.data[self.o], self.i, self.o)
self.o += 1
if self.o == len(self.data):
break
-def resultfn_3(data_o, expected, i, o):
- assert data_o == expected + 1, \
+def resultfn_3(o_data, expected, i, o):
+ assert o_data == expected + 1, \
"%d-%d data %x not match %x\n" \
- % (i, o, data_o, expected)
+ % (i, o, o_data, expected)
def data_placeholder():
i_n_ready = yield self.dut.n.i_ready_test
if not o_n_valid or not i_n_ready:
continue
- if isinstance(self.dut.n.data_o, Record):
- data_o = {}
- dod = self.dut.n.data_o
+ if isinstance(self.dut.n.o_data, Record):
+ o_data = {}
+ dod = self.dut.n.o_data
for k, v in dod.fields.items():
- data_o[k] = yield v
+ o_data[k] = yield v
else:
- data_o = yield self.dut.n.data_o
- self.resultfn(data_o, self.data[self.o], self.i, self.o)
+ o_data = yield self.dut.n.o_data
+ self.resultfn(o_data, self.data[self.o], self.i, self.o)
self.o += 1
if self.o == len(self.data):
break
i_n_ready = yield self.dut.n.i_ready_test
if not o_n_valid or not i_n_ready:
continue
- if isinstance(self.dut.n.data_o, Record):
- data_o = {}
- dod = self.dut.n.data_o
+ if isinstance(self.dut.n.o_data, Record):
+ o_data = {}
+ dod = self.dut.n.o_data
for k, v in dod.fields.items():
- data_o[k] = yield v
+ o_data[k] = yield v
else:
- data_o = yield self.dut.n.data_o
- print("recv", self.o, data_o)
- self.resultfn(data_o, self.data[self.o], self.i, self.o)
+ o_data = yield self.dut.n.o_data
+ print("recv", self.o, o_data)
+ self.resultfn(o_data, self.data[self.o], self.i, self.o)
self.o += 1
if self.o == len(self.data):
break
-def resultfn_5(data_o, expected, i, o):
+def resultfn_5(o_data, expected, i, o):
res = expected[0] + expected[1]
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d data %x not match %s\n" \
- % (i, o, data_o, repr(expected))
+ % (i, o, o_data, repr(expected))
def tbench4(dut):
if o_p_ready:
if send and i != len(data):
yield dut.p.i_valid.eq(1)
- yield dut.p.data_i.eq(data[i])
+ yield dut.p.i_data.eq(data[i])
i += 1
else:
yield dut.p.i_valid.eq(0)
o_n_valid = yield dut.n.o_valid
i_n_ready = yield dut.n.i_ready_test
if o_n_valid and i_n_ready:
- data_o = yield dut.n.data_o
- assert data_o == data[o] + 2, "%d-%d data %x not match %x\n" \
- % (i, o, data_o, data[o])
+ o_data = yield dut.n.o_data
+ assert o_data == data[o] + 2, "%d-%d data %x not match %x\n" \
+ % (i, o, o_data, data[o])
o += 1
if o == len(data):
break
return data
-def resultfn_9(data_o, expected, i, o):
+def resultfn_9(o_data, expected, i, o):
res = expected + 2
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d received data %x not match expected %x\n" \
- % (i, o, data_o, res)
+ % (i, o, o_data, res)
######################################################################
BufferedHandshake.__init__(self, stage)
-def resultfn_6(data_o, expected, i, o):
+def resultfn_6(o_data, expected, i, o):
res = 1 if expected[0] < expected[1] else 0
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d data %x not match %s\n" \
- % (i, o, data_o, repr(expected))
+ % (i, o, o_data, repr(expected))
######################################################################
UnbufferedPipeline.__init__(self, stage)
-def resultfn_7(data_o, expected, i, o):
+def resultfn_7(o_data, expected, i, o):
res = (expected['src1'] + 1, expected['src2'] + 1)
- assert data_o['src1'] == res[0] and data_o['src2'] == res[1], \
+ assert o_data['src1'] == res[0] and o_data['src2'] == res[1], \
"%d-%d data %s not match %s\n" \
- % (i, o, repr(data_o), repr(expected))
+ % (i, o, repr(o_data), repr(expected))
class ExampleAddRecordPlaceHolderPipe(UnbufferedPipeline):
UnbufferedPipeline.__init__(self, stage)
-def resultfn_11(data_o, expected, i, o):
+def resultfn_11(o_data, expected, i, o):
res1 = expected.src1 + 1
res2 = expected.src2 + 1
- assert data_o['src1'] == res1 and data_o['src2'] == res2, \
+ assert o_data['src1'] == res1 and o_data['src2'] == res2, \
"%d-%d data %s not match %s\n" \
- % (i, o, repr(data_o), repr(expected))
+ % (i, o, repr(o_data), repr(expected))
######################################################################
self.op2 = op2
-def resultfn_8(data_o, expected, i, o):
+def resultfn_8(o_data, expected, i, o):
res = expected.op1 + expected.op2 # these are a TestInputAdd instance
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d data %s res %x not match %s\n" \
- % (i, o, repr(data_o), res, repr(expected))
+ % (i, o, repr(o_data), res, repr(expected))
def data_2op():
return data
-def resultfn_12(data_o, expected, i, o):
+def resultfn_12(o_data, expected, i, o):
res = expected + 1
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d data %x not match %x\n" \
- % (i, o, data_o, res)
+ % (i, o, o_data, res)
######################################################################
PassThroughHandshake.__init__(self, stage)
-def resultfn_identical(data_o, expected, i, o):
+def resultfn_identical(o_data, expected, i, o):
res = expected
- assert data_o == res, \
+ assert o_data == res, \
"%d-%d data %x not match %x\n" \
- % (i, o, data_o, res)
+ % (i, o, o_data, res)
######################################################################
return data
-def resultfn_0(data_o, expected, i, o):
- assert data_o['src1'] == expected.src1 + 2, \
+def resultfn_0(o_data, expected, i, o):
+ assert o_data['src1'] == expected.src1 + 2, \
"src1 %x-%x received data no match\n" \
- % (data_o['src1'], expected.src1 + 2)
- assert data_o['src2'] == expected.src2 + 2, \
+ % (o_data['src1'], expected.src1 + 2)
+ assert o_data['src2'] == expected.src2 + 2, \
"src2 %x-%x received data no match\n" \
- % (data_o['src2'], expected.src2 + 2)
+ % (o_data['src2'], expected.src2 + 2)
######################################################################
dut = MaskCancellablePipe(maskwid)
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- dut.p.data_i.ports() + dut.n.data_o.ports()
+ dut.p.i_data.ports() + dut.n.o_data.ports()
vl = rtlil.convert(dut, ports=ports)
with open("test_maskchain0.il", "w") as f:
f.write(vl)
dut = MaskCancellableDynamic(maskwid=maskwid)
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] # + \
- #dut.p.data_i.ports() + dut.n.data_o.ports()
+ #dut.p.i_data.ports() + dut.n.o_data.ports()
vl = rtlil.convert(dut, ports=ports)
with open("test_maskchain0_dynamic.il", "w") as f:
f.write(vl)
run_simulation(dut, tbench2(dut), vcd_name="test_bufpipe2.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpipe2.il", "w") as f:
f.write(vl)
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- list(dut.p.data_i) + [dut.n.data_o]
+ list(dut.p.i_data) + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_ltcomb_pipe.il", "w") as f:
f.write(vl)
test = Test5(dut, resultfn_7, data=data)
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready,
- dut.p.data_i.src1, dut.p.data_i.src2,
- dut.n.data_o.src1, dut.n.data_o.src2]
+ dut.p.i_data.src1, dut.p.i_data.src2,
+ dut.n.o_data.src1, dut.n.o_data.src2]
vl = rtlil.convert(dut, ports=ports)
with open("test_recordcomb_pipe.il", "w") as f:
f.write(vl)
dut = ExampleBufPipeChain2()
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpipechain2.il", "w") as f:
f.write(vl)
vcd_name="test_bufpipe12.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpipe12.il", "w") as f:
f.write(vl)
vcd_name="test_unbufpipe13.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_unbufpipe13.il", "w") as f:
f.write(vl)
vcd_name="test_bufunbuf15.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufunbuf15.il", "w") as f:
f.write(vl)
vcd_name="test_bufunbuf16.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufunbuf16.il", "w") as f:
f.write(vl)
vcd_name="test_unbufpipe17.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_unbufpipe17.il", "w") as f:
f.write(vl)
vcd_name="test_passthru18.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_passthru18.il", "w") as f:
f.write(vl)
vcd_name="test_bufpass19.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpass19.il", "w") as f:
f.write(vl)
run_simulation(dut, [test.send(), test.rcv()], vcd_name="test_fifo20.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_fifo20.il", "w") as f:
f.write(vl)
vcd_name="test_fifopass21.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_fifopass21.il", "w") as f:
f.write(vl)
vcd_name="test_addrecord22.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i.op1, dut.p.data_i.op2] + \
- [dut.n.data_o]
+ [dut.p.i_data.op1, dut.p.i_data.op2] + \
+ [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_addrecord22.il", "w") as f:
f.write(vl)
vcd_name="test_addrecord23.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i.op1, dut.p.data_i.op2] + \
- [dut.n.data_o]
+ [dut.p.i_data.op1, dut.p.i_data.op2] + \
+ [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_addrecord23.il", "w") as f:
f.write(vl)
test = Test5(dut, resultfn_8, data=data)
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i.op1, dut.p.data_i.op2] + \
- [dut.n.data_o]
+ [dut.p.i_data.op1, dut.p.i_data.op2] + \
+ [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_addrecord24.il", "w") as f:
f.write(vl)
vcd_name="test_add2pipe25.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_add2pipe25.il", "w") as f:
f.write(vl)
vcd_name="test_bufpass997.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpass997.il", "w") as f:
f.write(vl)
vcd_name="test_bufpipe14.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufpipe14.il", "w") as f:
f.write(vl)
vcd_name="test_bufunbuf999.vcd")
ports = [dut.p.i_valid, dut.n.i_ready,
dut.n.o_valid, dut.p.o_ready] + \
- [dut.p.data_i] + [dut.n.data_o]
+ [dut.p.i_data] + [dut.n.o_data]
vl = rtlil.convert(dut, ports=ports)
with open("test_bufunbuf999.il", "w") as f:
f.write(vl)
op2 = self.di[muxid][i]
rs = self.dut.p[muxid]
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.idx.eq(i)
- yield rs.data_i.muxid.eq(muxid)
- yield rs.data_i.operator.eq(1)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.idx.eq(i)
+ yield rs.i_data.muxid.eq(muxid)
+ yield rs.i_data.operator.eq(1)
yield rs.mask_i.eq(1)
yield
o_p_ready = yield rs.o_ready
if not o_n_valid or not i_n_ready:
continue
- out_muxid = yield n.data_o.muxid
- out_i = yield n.data_o.idx
- out_v = yield n.data_o.data
+ out_muxid = yield n.o_data.muxid
+ out_i = yield n.o_data.idx
+ out_v = yield n.o_data.data
print("recv", out_muxid, out_i, hex(out_v), hex(out_v))
op2 = self.di[muxid][i]
rs = self.dut.p[muxid]
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.idx.eq(i)
- yield rs.data_i.muxid.eq(muxid)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.idx.eq(i)
+ yield rs.i_data.muxid.eq(muxid)
yield
o_p_ready = yield rs.o_ready
while not o_p_ready:
if not o_n_valid or not i_n_ready:
continue
- out_muxid = yield n.data_o.muxid
- out_i = yield n.data_o.idx
- out_v = yield n.data_o.data
+ out_muxid = yield n.o_data.muxid
+ out_i = yield n.o_data.idx
+ out_v = yield n.o_data.data
print ("recv", out_muxid, out_i, hex(out_v))
muxid = self.di[i][1]
rs = dut.p
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.muxid.eq(muxid)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.muxid.eq(muxid)
yield
o_p_ready = yield rs.o_ready
while not o_p_ready:
op2 = self.di[muxid][i]
rs = self.dut.p[muxid]
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.idx.eq(i)
- yield rs.data_i.muxid.eq(muxid)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.idx.eq(i)
+ yield rs.i_data.muxid.eq(muxid)
yield rs.mask_i.eq(1)
yield
o_p_ready = yield rs.o_ready
if not o_n_valid or not i_n_ready:
continue
- out_muxid = yield n.data_o.muxid
- out_i = yield n.data_o.idx
- out_v = yield n.data_o.data
+ out_muxid = yield n.o_data.muxid
+ out_i = yield n.o_data.idx
+ out_v = yield n.o_data.data
print ("recv", out_muxid, out_i, hex(out_v), out_v)
muxid = self.di[i][1]
rs = self.dut.p
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.muxid.eq(muxid)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.muxid.eq(muxid)
yield
o_p_ready = yield rs.o_ready
while not o_p_ready:
if not o_n_valid or not i_n_ready:
continue
- out_v = yield n.data_o
+ out_v = yield n.o_data
print ("recv", muxid, out_i, hex(out_v))
def ports(self):
res = [self.p.i_valid, self.p.o_ready] + \
- self.p.data_i.ports()
+ self.p.i_data.ports()
for i in range(len(self.n)):
res += [self.n[i].i_ready, self.n[i].o_valid] + \
- [self.n[i].data_o]
- #self.n[i].data_o.ports()
+ [self.n[i].o_data]
+ #self.n[i].o_data.ports()
return res
op2 = self.di[muxid][i]
rs = self.dut.p[muxid]
yield rs.i_valid.eq(1)
- yield rs.data_i.data.eq(op2)
- yield rs.data_i.idx.eq(i)
- yield rs.data_i.muxid.eq(muxid)
+ yield rs.i_data.data.eq(op2)
+ yield rs.i_data.idx.eq(i)
+ yield rs.i_data.muxid.eq(muxid)
yield
o_p_ready = yield rs.o_ready
step_limiter = StepLimiter(10000)
if not o_n_valid or not i_n_ready:
continue
- muxid = yield n.data_o.muxid
- out_i = yield n.data_o.idx
- out_v = yield n.data_o.data
+ muxid = yield n.o_data.muxid
+ out_i = yield n.o_data.idx
+ out_v = yield n.o_data.data
print("recv", muxid, out_i, hex(out_v))
"""treereduce: apply a map-reduce to a list.
examples: OR-reduction of one member of a list of Records down to a
single data point:
- treereduce(tree, operator.or_, lambda x: getattr(x, "data_o"))
+ treereduce(tree, operator.or_, lambda x: getattr(x, "o_data"))
"""
#print ("treereduce", tree)
if not isinstance(tree, list):