from nmigen import Signal, Cat
# TODO: turn this into a module?
+
+
def byte_reverse(m, name, data, length):
"""byte_reverse: unlike nmigen word_select this takes a dynamic length
# Switch statement needed: dynamic length had better be = 1,2,4 or 8
with m.Switch(length):
- for j in [1,2,4,8]:
+ for j in [1, 2, 4, 8]:
with m.Case(j):
rev = []
for i in range(j):
rev.append(data.word_select(j-1-i, 8))
comb += data_r.eq(Cat(*rev))
return data_r
-
"""
+
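A standalone, hedged sketch of the dynamic byte-reverse idea described above, written against plain nmigen; the ByteReverseSketch class and its signal names are illustrative assumptions, not part of this codebase.

from nmigen import Elaboratable, Module, Signal, Cat

class ByteReverseSketch(Elaboratable):
    def __init__(self, nbytes=8):
        self.length = Signal(4)       # dynamic length: must be 1, 2, 4 or 8
        self.i = Signal(nbytes * 8)   # input data
        self.o = Signal(nbytes * 8)   # byte-reversed output (low "length" bytes)

    def elaborate(self, platform):
        m = Module()
        with m.Switch(self.length):   # Switch needed because length is dynamic
            for j in [1, 2, 4, 8]:
                with m.Case(j):
                    rev = [self.i.word_select(j - 1 - i, 8) for i in range(j)]
                    m.d.comb += self.o.eq(Cat(*rev))
        return m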
class CLZ(Elaboratable):
def __init__(self, width):
self.width = width
comb += self.lz.eq(pairs[0][0])
return m
-
sandwiched in between the fan-in and fan-out. One ALU looks like
it is multiple concurrent ALUs
"""
+
def __init__(self, alu, p, n):
self.alu = alu
self.p = p
Fan-in and Fan-out are combinatorial.
"""
+
def __init__(self, num_rows, maskwid=0, feedback_width=None):
self.num_rows = nr = num_rows
self.feedback_width = feedback_width
self.inpipe = InMuxPipe(nr, self.i_specfn, maskwid) # fan-in
- self.outpipe = MuxOutPipe(nr, self.o_specfn, maskwid) # fan-out
+ self.outpipe = MuxOutPipe(nr, self.o_specfn, maskwid) # fan-out
self.p = self.inpipe.p # kinda annoying,
- self.n = self.outpipe.n # use pipe in/out as this class in/out
+ self.n = self.outpipe.n # use pipe in/out as this class in/out
self._ports = self.inpipe.ports() + self.outpipe.ports()
def setup_pseudoalus(self):
FAILING TO SET THE MUXID IS GUARANTEED TO RESULT IN CORRUPTED DATA.
"""
+
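Before the FSM below, a hedged, standalone illustration of the scheme the docstring describes, in plain nmigen rather than this class's API: several requesters share one ALU (here just an adder), a priority picker chooses whose operands go in, and the result carries a muxid so it can be routed back to the requester that issued it. All names here are illustrative assumptions.

from nmigen import Module, Signal, Array
from nmigen.lib.coding import PriorityEncoder

def shared_alu_sketch(nrows=4):
    m = Module()
    m.submodules.pe = pe = PriorityEncoder(nrows)
    reqs = Signal(nrows)                     # one "data waiting" bit per RS
    a = Array(Signal(8, name="a%d" % i) for i in range(nrows))
    b = Array(Signal(8, name="b%d" % i) for i in range(nrows))
    result = Signal(9)
    o_muxid = Signal(range(nrows))           # tags the result with its origin RS
    m.d.comb += pe.i.eq(reqs)                # picker selects lowest-numbered request
    with m.If(~pe.n):                        # at least one RS has data
        m.d.comb += [result.eq(a[pe.o] + b[pe.o]),
                     o_muxid.eq(pe.o)]       # without this tag the result could not
                                             # be routed back: hence the warning above
    return m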
def __init__(self, alu, num_rows, alu_name=None):
if alu_name is None:
alu_name = "alu"
self.p.append(p)
self.n.append(n)
- self.pipe = self # for Arbiter to select the incoming prevcontrols
+ self.pipe = self # for Arbiter to select the incoming prevcontrols
# set up pseudo-alus that look like a standard pipeline
self.pseudoalus = []
def elaborate(self, platform):
m = Module()
- pe = PriorityEncoder(self.num_rows) # input priority picker
+ pe = PriorityEncoder(self.num_rows) # input priority picker
m.submodules[self.alu_name] = self.alu
m.submodules.selector = pe
for i, (p, n) in enumerate(zip(self.p, self.n)):
self.m_id = Signal.like(pe.o)
# ReservationStation status information, progressively updated in FSM
- rsvd = Signal(self.num_rows) # indicates RS data in flight
- sent = Signal(self.num_rows) # sent indicates data in pipeline
- wait = Signal(self.num_rows) # the outputs are waiting for accept
+ rsvd = Signal(self.num_rows) # indicates RS data in flight
+ sent = Signal(self.num_rows) # sent indicates data in pipeline
+ wait = Signal(self.num_rows) # the outputs are waiting for accept
# pick first non-reserved ReservationStation with data not already
# sent into the ALU
# mux in and mux out ids. note that all data *must* have a muxid
mid = self.m_id # input mux selector
- o_muxid = self.alu.n.o_data.muxid # output mux selector
+ o_muxid = self.alu.n.o_data.muxid # output mux selector
# technically speaking this could be set permanently "HI".
# when all the ReservationStations outputs are waiting,
# first, establish input: select one input to pass data to (p_mux)
for i in range(self.num_rows):
- i_buf, o_buf = self.alu.new_specs("buf%d" % i) # buffers
+ i_buf, o_buf = self.alu.new_specs("buf%d" % i) # buffers
with m.FSM():
# indicate ready to accept data, and accept it if incoming
# BUT, if there is an opportunity to send on immediately
# to the ALU, take it early (combinatorial)
with m.State("ACCEPTING%d" % i):
- m.d.comb += self.p[i].o_ready.eq(1) # ready indicator
+ m.d.comb += self.p[i].o_ready.eq(1) # ready indicator
with m.If(self.p[i].i_valid): # valid data incoming
m.d.sync += rsvd[i].eq(1) # now reserved
# a unique opportunity: the ALU happens to be free
- with m.If(mid == i): # picker selected us
+ with m.If(mid == i): # picker selected us
with m.If(self.alu.p.o_ready): # ALU can accept
- m.d.comb += self.alu.p.i_valid.eq(1) # transfer
+ # transfer
+ m.d.comb += self.alu.p.i_valid.eq(1)
m.d.comb += nmoperator.eq(self.alu.p.i_data,
- self.p[i].i_data)
- m.d.sync += sent[i].eq(1) # now reserved
- m.next = "WAITOUT%d" % i # move to "wait output"
+ self.p[i].i_data)
+ m.d.sync += sent[i].eq(1) # now reserved
+ m.next = "WAITOUT%d" % i # move to "wait output"
with m.Else():
# nope. ALU wasn't free. try next cycle(s)
m.d.sync += nmoperator.eq(i_buf, self.p[i].i_data)
- m.next = "ACCEPTED%d" % i # move to "accepted"
+ m.next = "ACCEPTED%d" % i # move to "accepted"
# now try to deliver to the ALU, but only if we are "picked"
with m.State("ACCEPTED%d" % i):
- with m.If(mid == i): # picker selected us
+ with m.If(mid == i): # picker selected us
with m.If(self.alu.p.o_ready): # ALU can accept
- m.d.comb += self.alu.p.i_valid.eq(1) # transfer
+ m.d.comb += self.alu.p.i_valid.eq(1) # transfer
m.d.comb += nmoperator.eq(self.alu.p.i_data, i_buf)
- m.d.sync += sent[i].eq(1) # now reserved
- m.next = "WAITOUT%d" % i # move to "wait output"
+ m.d.sync += sent[i].eq(1) # now reserved
+ m.next = "WAITOUT%d" % i # move to "wait output"
# waiting for output to appear on the ALU, take a copy
# BUT, again, if there is an opportunity to send on
# immediately, take it (combinatorial)
with m.State("WAITOUT%d" % i):
- with m.If(o_muxid == i): # when ALU output matches our RS
+ with m.If(o_muxid == i): # when ALU output matches our RS
with m.If(self.alu.n.o_valid): # ALU can accept
# second unique opportunity: the RS is ready
- with m.If(self.n[i].i_ready): # ready to receive
- m.d.comb += self.n[i].o_valid.eq(1) # valid
+ with m.If(self.n[i].i_ready): # ready to receive
+ m.d.comb += self.n[i].o_valid.eq(1) # valid
m.d.comb += nmoperator.eq(self.n[i].o_data,
self.alu.n.o_data)
- m.d.sync += wait[i].eq(0) # clear waiting
- m.d.sync += sent[i].eq(0) # and sending
- m.d.sync += rsvd[i].eq(0) # and reserved
- m.next = "ACCEPTING%d" % i # back to "accepting"
+ m.d.sync += wait[i].eq(0) # clear waiting
+ m.d.sync += sent[i].eq(0) # and sending
+ m.d.sync += rsvd[i].eq(0) # and reserved
+ m.next = "ACCEPTING%d" % i # back to "accepting"
with m.Else():
# nope. RS wasn't ready. try next cycles
- m.d.sync += wait[i].eq(1) # now waiting
+ m.d.sync += wait[i].eq(1) # now waiting
m.d.sync += nmoperator.eq(o_buf,
self.alu.n.o_data)
- m.next = "SENDON%d" % i # move to "send data on"
+ m.next = "SENDON%d" % i # move to "send data on"
# waiting for "valid" indicator on RS output: deliver it
with m.State("SENDON%d" % i):
- with m.If(self.n[i].i_ready): # user is ready to receive
- m.d.comb += self.n[i].o_valid.eq(1) # indicate valid
+ with m.If(self.n[i].i_ready): # user is ready to receive
+ m.d.comb += self.n[i].o_valid.eq(1) # indicate valid
m.d.comb += nmoperator.eq(self.n[i].o_data, o_buf)
- m.d.sync += wait[i].eq(0) # clear waiting
- m.d.sync += sent[i].eq(0) # and sending
- m.d.sync += rsvd[i].eq(0) # and reserved
- m.next = "ACCEPTING%d" % i # and back to "accepting"
+ m.d.sync += wait[i].eq(0) # clear waiting
+ m.d.sync += sent[i].eq(0) # and sending
+ m.d.sync += rsvd[i].eq(0) # and reserved
+ m.next = "ACCEPTING%d" % i # and back to "accepting"
return m
-
# list post:
# http://lists.libre-riscv.org/pipermail/libre-riscv-dev/2019-July/002259.html
+
class Meta(ABCMeta):
registry = {}
recursing = threading.local()
if mcls.recursing.check:
return super().__call__(*args, **kw)
spec = args[0]
- base = spec.pipekls # pick up the dynamic class from PipelineSpec, HERE
+ base = spec.pipekls # pick up the dynamic class from PipelineSpec, HERE
if (cls, base) not in mcls.registry:
- print ("__call__", args, kw, cls, base,
- base.__bases__, cls.__bases__)
+ print("__call__", args, kw, cls, base,
+ base.__bases__, cls.__bases__)
mcls.registry[cls, base] = type(
cls.__name__,
(cls, base) + cls.__bases__[1:],
class DynamicPipe(metaclass=Meta):
def __init__(self, *args):
- print ("DynamicPipe init", super(), args)
+ print("DynamicPipe init", super(), args)
super().__init__(self, *args)
# could hypothetically be passed through the pspec.
class SimpleHandshakeRedir(SimpleHandshake):
def __init__(self, mod, *args):
- print ("redir", mod, args)
+ print("redir", mod, args)
stage = self
if args and args[0].stage:
stage = args[0].stage
maskwid = args[0].maskwid
if args[0].stage:
stage = args[0].stage
- print ("redir mask", mod, args, maskwid)
+ print("redir mask", mod, args, maskwid)
MaskCancellable.__init__(self, stage, maskwid)
-
sig_in = Signal.like(dut.sig_in)
count = Signal.like(dut.lz)
-
m.d.comb += [
sig_in.eq(AnyConst(width)),
dut.sig_in.eq(sig_in),
comb += result_sig.eq(result)
comb += Assert(result_sig == count)
-
+
# setup the inputs and outputs of the DUT as anyconst
return m
+
class CLZTestCase(FHDLTestCase):
def test_proof(self):
module = Driver()
self.assertFormal(module, mode="bmc", depth=4)
+
def test_ilang(self):
dut = Driver()
vl = rtlil.convert(dut, ports=[])
with open("clz.il", "w") as f:
f.write(vl)
+
if __name__ == '__main__':
unittest.main()
self.fields = OrderedDict()
def __setattr__(self, k, v):
- print ("kv", k, v)
+ print("kv", k, v)
if (k.startswith('_') or k in ["fields", "name", "src_loc"] or
- k in dir(Object) or "fields" not in self.__dict__):
+ k in dir(Object) or "fields" not in self.__dict__):
return object.__setattr__(self, k, v)
self.fields[k] = v
res = []
for (k, o) in self.fields.items():
i = getattr(inp, k)
- print ("eq", o, i)
+ print("eq", o, i)
rres = o.eq(i)
if isinstance(rres, Sequence):
res += rres
else:
res.append(rres)
- print (res)
+ print(res)
return res
- def ports(self): # being called "keys" would be much better
+ def ports(self): # being called "keys" would be much better
return list(self)
class RecordObject(Record):
def __init__(self, layout=None, name=None):
- #if name is None:
+ # if name is None:
# name = tracer.get_var_name(depth=2, default="$ro")
Record.__init__(self, layout=layout or [], name=name)
-
def __setattr__(self, k, v):
#print(f"RecordObject setattr({k}, {v})")
#print (dir(Record))
if (k.startswith('_') or k in ["fields", "name", "src_loc"] or
- k in dir(Record) or "fields" not in self.__dict__):
+ k in dir(Record) or "fields" not in self.__dict__):
return object.__setattr__(self, k, v)
if self.name is None:
self.layout.fields.update(newlayout)
def __iter__(self):
- for x in self.fields.values(): # remember: fields is an OrderedDict
+ for x in self.fields.values(): # remember: fields is an OrderedDict
if hasattr(x, 'ports'):
yield from x.ports()
elif isinstance(x, Record):
else:
yield x
- def ports(self): # would be better being called "keys"
+ def ports(self): # would be better being called "keys"
return list(self)
"""
def __init__(self, i_width=1, stage_ctl=False, maskwid=0, offs=0,
- name=None):
+ name=None):
if name is None:
name = ""
n_piv = "p_i_valid"+name
self.stop_i = Signal(maskwid) # prev >>in self
self.i_valid = Signal(i_width, name=n_piv) # prev >>in self
self._o_ready = Signal(name=n_por) # prev <<out self
- self.i_data = None # XXX MUST BE ADDED BY USER
+ self.i_data = None # XXX MUST BE ADDED BY USER
if stage_ctl:
self.s_o_ready = Signal(name="p_s_o_rdy") # prev <<out self
self.trigger = Signal(reset_less=True)
""" public-facing API: indicates (externally) that stage is ready
"""
if self.stage_ctl:
- return self.s_o_ready # set dynamically by stage
+ return self.s_o_ready # set dynamically by stage
return self._o_ready # return this when not under dynamic control
def _connect_in(self, prev, direct=False, fn=None,
def eq(self, i):
res = [nmoperator.eq(self.i_data, i.i_data),
- self.o_ready.eq(i.o_ready),
- self.i_valid.eq(i.i_valid)]
+ self.o_ready.eq(i.o_ready),
+ self.i_valid.eq(i.i_valid)]
if self.maskwid:
res.append(self.mask_i.eq(i.mask_i))
return res
* i_ready: input from next stage indicating that it can accept data
* o_data : an output - MUST be added by the USER of this class
"""
+
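A hedged, standalone restatement of the handshake these PrevControl/NextControl records implement (plain nmigen; the signal names mirror the fields above but nothing here depends on this class):

from nmigen import Module, Signal

def handshake_sketch():
    m = Module()
    i_valid = Signal()                   # previous stage: "data is valid"
    o_ready = Signal()                   # this stage: "I can accept it"
    i_data = Signal(8)                   # the payload (MUST be added by the user)
    captured = Signal(8)
    with m.If(i_valid & o_ready):        # transfer happens only when both are high
        m.d.sync += captured.eq(i_data)  # data counts as accepted on this clock edge
    return m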
def __init__(self, stage_ctl=False, maskwid=0, name=None):
if name is None:
name = ""
self.stage_ctl = stage_ctl
self.maskwid = maskwid
if maskwid:
- self.mask_o = Signal(maskwid) # self out>> next
- self.stop_o = Signal(maskwid) # self out>> next
- self.o_valid = Signal(name=n_nov) # self out>> next
- self.i_ready = Signal(name=n_nir) # self <<in next
- self.o_data = None # XXX MUST BE ADDED BY USER
- #if self.stage_ctl:
- self.d_valid = Signal(reset=1) # INTERNAL (data valid)
+ self.mask_o = Signal(maskwid) # self out>> next
+ self.stop_o = Signal(maskwid) # self out>> next
+ self.o_valid = Signal(name=n_nov) # self out>> next
+ self.i_ready = Signal(name=n_nir) # self <<in next
+ self.o_data = None # XXX MUST BE ADDED BY USER
+ # if self.stage_ctl:
+ self.d_valid = Signal(reset=1) # INTERNAL (data valid)
self.trigger = Signal(reset_less=True)
@property
res.append(nxt.stop_i.eq(self.stop_o))
if do_data:
res.append(nmoperator.eq(nxt.i_data, self.o_data))
- print ("connect to next", self, self.maskwid, nxt.i_data,
- do_data, do_stop)
+ print("connect to next", self, self.maskwid, nxt.i_data,
+ do_data, do_stop)
return res
def _connect_out(self, nxt, direct=False, fn=None,
def ports(self):
return list(self)
-
else:
reg = Signal.like(incoming, name=name)
m.d.comb += outgoing.eq(Mux(settrue, incoming, reg))
- with m.If(settrue): # pass in some kind of expression/condition here
+ with m.If(settrue): # pass in some kind of expression/condition here
m.d.sync += reg.eq(incoming) # latch input into register
return reg
qint = mkname("qint", name)
qlq_n = mkname("qlq", name)
self.s = Signal(llen, name=s_n, reset=0)
- self.r = Signal(llen, name=r_n, reset=(1<<llen)-1) # defaults to off
+ self.r = Signal(llen, name=r_n, reset=(1 << llen)-1) # defaults to off
self.q = Signal(llen, name=q_n, reset_less=True)
self.qn = Signal(llen, name=qn_n, reset_less=True)
self.qlq = Signal(llen, name=qlq_n, reset_less=True)
else:
m.d.comb += self.q.eq(next_o)
m.d.comb += self.qn.eq(~self.q)
- m.d.comb += self.qlq.eq(self.q | self.q_int) # useful output
+ m.d.comb += self.qlq.eq(self.q | self.q_int) # useful output
return m
yield
yield
+
def test_sr():
dut = SRLatch(llen=4)
vl = rtlil.convert(dut, ports=dut.ports())
run_simulation(dut, sr_sim(dut), vcd_name='test_srlatch_async.vcd')
+
if __name__ == '__main__':
test_sr()
class MultiInControlBase(Elaboratable):
""" Common functions for Pipeline API
"""
+
def __init__(self, in_multi=None, p_len=1, maskwid=0, routemask=False):
""" Multi-input Control class. Conforms to same API as ControlBase...
mostly. has additional indices to the *multiple* input stages
"""
self.routemask = routemask
# set up input and output IO ACK (prev/next ready/valid)
- print ("multi_in", self, maskwid, p_len, routemask)
+ print("multi_in", self, maskwid, p_len, routemask)
p = []
for i in range(p_len):
p.append(PrevControl(in_multi, maskwid=maskwid))
self.p = Array(p)
if routemask:
- nmaskwid = maskwid # straight route mask mode
+ nmaskwid = maskwid # straight route mask mode
else:
- nmaskwid = maskwid * p_len # fan-in mode
- self.n = NextControl(maskwid=nmaskwid) # masks fan in (Cat)
+ nmaskwid = maskwid * p_len # fan-in mode
+ self.n = NextControl(maskwid=nmaskwid) # masks fan in (Cat)
def connect_to_next(self, nxt, p_idx=0):
""" helper function to connect to the next stage data/valid/ready.
class MultiOutControlBase(Elaboratable):
""" Common functions for Pipeline API
"""
+
def __init__(self, n_len=1, in_multi=None, maskwid=0, routemask=False):
""" Multi-output Control class. Conforms to same API as ControlBase...
mostly. has additional indices to the multiple *output* stages
"""
if routemask:
- nmaskwid = maskwid # straight route mask mode
+ nmaskwid = maskwid # straight route mask mode
else:
- nmaskwid = maskwid * n_len # fan-out mode
+ nmaskwid = maskwid * n_len # fan-out mode
# set up input and output IO ACK (prev/next ready/valid)
self.p = PrevControl(in_multi, maskwid=nmaskwid)
def __init__(self, stage, n_len, n_mux, maskwid=0, routemask=False):
MultiOutControlBase.__init__(self, n_len=n_len, maskwid=maskwid,
- routemask=routemask)
+ routemask=routemask)
self.stage = stage
self.maskwid = maskwid
self.routemask = routemask
self.n_mux = n_mux
# set up the input and output data
- self.p.i_data = _spec(stage.ispec, 'i_data') # input type
+ self.p.i_data = _spec(stage.ispec, 'i_data') # input type
for i in range(n_len):
name = 'o_data_%d' % i
- self.n[i].o_data = _spec(stage.ospec, name) # output type
+ self.n[i].o_data = _spec(stage.ospec, name) # output type
def process(self, i):
if hasattr(self.stage, "process"):
def elaborate(self, platform):
m = MultiOutControlBase.elaborate(self, platform)
- if hasattr(self.n_mux, "elaborate"): # TODO: identify submodule?
+ if hasattr(self.n_mux, "elaborate"): # TODO: identify submodule?
m.submodules.n_mux = self.n_mux
# need buffer register conforming to *input* spec
- r_data = _spec(self.stage.ispec, 'r_data') # input type
+ r_data = _spec(self.stage.ispec, 'r_data') # input type
if hasattr(self.stage, "setup"):
self.stage.setup(m, r_data)
# multiplexer id taken from n_mux
muxid = self.n_mux.m_id
- print ("self.n_mux", self.n_mux)
- print ("self.n_mux.m_id", self.n_mux.m_id)
+ print("self.n_mux", self.n_mux)
+ print("self.n_mux.m_id", self.n_mux.m_id)
self.n_mux.m_id.name = "m_id"
p_i_valid = Signal(reset_less=True)
pv = Signal(reset_less=True)
m.d.comb += p_i_valid.eq(self.p.i_valid_test)
- #m.d.comb += pv.eq(self.p.i_valid) #& self.n[muxid].i_ready)
+ # m.d.comb += pv.eq(self.p.i_valid) #& self.n[muxid].i_ready)
m.d.comb += pv.eq(self.p.i_valid & self.p.o_ready)
# all outputs to next stages first initialised to zero (invalid)
for i in range(len(self.n)):
m.d.comb += self.n[i].o_valid.eq(0)
if self.routemask:
- #with m.If(pv):
+ # with m.If(pv):
m.d.comb += self.n[muxid].o_valid.eq(pv)
m.d.comb += self.p.o_ready.eq(self.n[muxid].i_ready)
else:
data_valid = self.n[muxid].o_valid
m.d.comb += self.p.o_ready.eq(self.n[muxid].i_ready)
- m.d.comb += data_valid.eq(p_i_valid | \
- (~self.n[muxid].i_ready & data_valid))
-
+ m.d.comb += data_valid.eq(p_i_valid |
+ (~self.n[muxid].i_ready & data_valid))
# send data on
- #with m.If(pv):
+ # with m.If(pv):
m.d.comb += eq(r_data, self.p.i_data)
#m.d.comb += eq(self.n[muxid].o_data, self.process(r_data))
for i in range(len(self.n)):
m.d.comb += eq(self.n[i].o_data, self.process(r_data))
if self.maskwid:
- if self.routemask: # straight "routing" mode - treat like data
+ if self.routemask: # straight "routing" mode - treat like data
m.d.comb += self.n[muxid].stop_o.eq(self.p.stop_i)
with m.If(pv):
m.d.comb += self.n[muxid].mask_o.eq(self.p.mask_i)
else:
- ml = [] # accumulate output masks
- ms = [] # accumulate output stops
+ ml = [] # accumulate output masks
+ ms = [] # accumulate output stops
# fan-out mode.
# conditionally fan-out mask bits, always fan-out stop bits
for i in range(len(self.n)):
def __init__(self, stage, p_len, p_mux, maskwid=0, routemask=False):
MultiInControlBase.__init__(self, p_len=p_len, maskwid=maskwid,
- routemask=routemask)
+ routemask=routemask)
self.stage = stage
self.maskwid = maskwid
self.p_mux = p_mux
# set up the input and output data
for i in range(p_len):
name = 'i_data_%d' % i
- self.p[i].i_data = _spec(stage.ispec, name) # input type
+ self.p[i].i_data = _spec(stage.ispec, name) # input type
self.n.o_data = _spec(stage.ospec, 'o_data')
def process(self, i):
p_len = len(self.p)
for i in range(p_len):
name = 'r_%d' % i
- r = _spec(self.stage.ispec, name) # input type
+ r = _spec(self.stage.ispec, name) # input type
r_data.append(r)
data_valid.append(Signal(name="data_valid", reset_less=True))
p_i_valid.append(Signal(name="p_i_valid", reset_less=True))
n_i_readyn.append(Signal(name="n_i_readyn", reset_less=True))
if hasattr(self.stage, "setup"):
- print ("setup", self, self.stage, r)
+ print("setup", self, self.stage, r)
self.stage.setup(m, r)
- if True: # len(r_data) > 1: # hmm always create an Array even of len 1
+ if True: # len(r_data) > 1: # hmm always create an Array even of len 1
p_i_valid = Array(p_i_valid)
n_i_readyn = Array(n_i_readyn)
data_valid = Array(data_valid)
nirn = Signal(reset_less=True)
m.d.comb += nirn.eq(~self.n.i_ready)
mid = self.p_mux.m_id
- print ("CombMuxIn mid", self, self.stage, self.routemask, mid, p_len)
+ print("CombMuxIn mid", self, self.stage, self.routemask, mid, p_len)
for i in range(p_len):
m.d.comb += data_valid[i].eq(0)
m.d.comb += n_i_readyn[i].eq(1)
av.append(data_valid[i])
anyvalid = Cat(*av)
m.d.comb += self.n.o_valid.eq(anyvalid.bool())
- m.d.comb += data_valid[mid].eq(p_i_valid[mid] | \
- (n_i_readyn[mid] ))
+ m.d.comb += data_valid[mid].eq(p_i_valid[mid] |
+ (n_i_readyn[mid]))
if self.routemask:
# XXX hack - fixes loop
with m.If(mid == i):
m.d.comb += eq(self.n.o_data, r_data[i])
else:
- ml = [] # accumulate output masks
- ms = [] # accumulate output stops
+ ml = [] # accumulate output masks
+ ms = [] # accumulate output stops
for i in range(p_len):
vr = Signal(reset_less=True)
p = self.p[i]
def __init__(self, stage, p_len, p_mux, maskwid=0, routemask=False):
MultiInControlBase.__init__(self, p_len=p_len, maskwid=maskwid,
- routemask=routemask)
+ routemask=routemask)
self.stage = stage
self.maskwid = maskwid
self.p_mux = p_mux
# set up the input and output data
for i in range(p_len):
name = 'i_data_%d' % i
- self.p[i].i_data = _spec(stage.ispec, name) # input type
+ self.p[i].i_data = _spec(stage.ispec, name) # input type
self.n.o_data = _spec(stage.ospec, 'o_data')
def process(self, i):
p_len = len(self.p)
for i in range(p_len):
name = 'r_%d' % i
- r = _spec(self.stage.ispec, name) # input type
+ r = _spec(self.stage.ispec, name) # input type
r_data.append(r)
r_busy.append(Signal(name="r_busy%d" % i, reset_less=True))
p_i_valid.append(Signal(name="p_i_valid%d" % i, reset_less=True))
if hasattr(self.stage, "setup"):
- print ("setup", self, self.stage, r)
+ print("setup", self, self.stage, r)
self.stage.setup(m, r)
if len(r_data) > 1:
r_data = Array(r_data)
nirn = Signal(reset_less=True)
m.d.comb += nirn.eq(~self.n.i_ready)
mid = self.p_mux.m_id
- print ("CombMuxIn mid", self, self.stage, self.routemask, mid, p_len)
+ print("CombMuxIn mid", self, self.stage, self.routemask, mid, p_len)
for i in range(p_len):
m.d.comb += r_busy[i].eq(0)
m.d.comb += n_i_readyn[i].eq(1)
av.append(data_valid[i])
anyvalid = Cat(*av)
m.d.comb += self.n.o_valid.eq(anyvalid.bool())
- m.d.comb += data_valid[mid].eq(p_i_valid[mid] | \
- (n_i_readyn[mid] ))
+ m.d.comb += data_valid[mid].eq(p_i_valid[mid] |
+ (n_i_readyn[mid]))
if self.routemask:
# XXX hack - fixes loop
m.d.comb += eq(self.n.mask_o, self.p[i].mask_i)
m.d.comb += eq(r_data[i], self.p[i].i_data)
else:
- ml = [] # accumulate output masks
- ms = [] # accumulate output stops
+ ml = [] # accumulate output masks
+ ms = [] # accumulate output stops
for i in range(p_len):
vr = Signal(reset_less=True)
p = self.p[i]
class CombMuxOutPipe(CombMultiOutPipeline):
def __init__(self, stage, n_len, maskwid=0, muxidname=None,
- routemask=False):
+ routemask=False):
muxidname = muxidname or "muxid"
# HACK: stage is also the n-way multiplexer
CombMultiOutPipeline.__init__(self, stage, n_len=n_len,
- n_mux=stage, maskwid=maskwid,
- routemask=routemask)
+ n_mux=stage, maskwid=maskwid,
+ routemask=routemask)
# HACK: n-mux is also the stage... so set the muxid equal to input muxid
muxid = getattr(self.p.i_data, muxidname)
- print ("combmuxout", muxidname, muxid)
+ print("combmuxout", muxidname, muxid)
stage.m_id = muxid
-
class InputPriorityArbiter(Elaboratable):
""" arbitration module for Input-Mux pipe, baed on PriorityEncoder
"""
+
def __init__(self, pipe, num_rows):
self.pipe = pipe
self.num_rows = num_rows
self.mmax = int(log(self.num_rows) / log(2))
- self.m_id = Signal(self.mmax, reset_less=True) # multiplex id
+ self.m_id = Signal(self.mmax, reset_less=True) # multiplex id
self.active = Signal(reset_less=True)
def elaborate(self, platform):
m = Module()
assert len(self.pipe.p) == self.num_rows, \
- "must declare input to be same size"
+ "must declare input to be same size"
pe = PriorityEncoder(self.num_rows)
m.submodules.selector = pe
else:
m.d.comb += p_i_valid.eq(self.pipe.p[i].i_valid_test)
in_ready.append(p_i_valid)
- m.d.comb += pe.i.eq(Cat(*in_ready)) # array of input "valids"
+ m.d.comb += pe.i.eq(Cat(*in_ready)) # array of input "valids"
m.d.comb += self.active.eq(~pe.n) # encoder active (one input valid)
m.d.comb += self.m_id.eq(pe.o) # output one active input
return [self.m_id, self.active]
-
class PriorityCombMuxInPipe(CombMultiInPipeline):
""" an example of how to use the combinatorial pipeline.
"""
python object, enumerate them, find out the list of Signals that way,
and assign them.
"""
+
def iterator2(self, o, i):
if isinstance(o, dict):
yield from self.dict_iter2(o, i)
val = ai.fields
else:
val = ai
- if hasattr(val, field_name): # check for attribute
+ if hasattr(val, field_name): # check for attribute
val = getattr(val, field_name)
else:
- val = val[field_name] # dictionary-style specification
+ val = val[field_name] # dictionary-style specification
yield from self.iterator2(ao.fields[field_name], val)
def record_iter2(self, ao, ai):
val = ai.fields
else:
val = ai
- if hasattr(val, field_name): # check for attribute
+ if hasattr(val, field_name): # check for attribute
val = getattr(val, field_name)
else:
- val = val[field_name] # dictionary-style specification
+ val = val[field_name] # dictionary-style specification
yield from self.iterator2(ao.fields[field_name], val)
def arrayproxy_iter2(self, ao, ai):
""" a helper class for iterating single-argument compound data structures.
similar to Visitor2.
"""
+
def iterate(self, i):
""" iterate a compound structure recursively using yield
"""
val = ai.fields
else:
val = ai
- if hasattr(val, field_name): # check for attribute
+ if hasattr(val, field_name): # check for attribute
val = getattr(val, field_name)
else:
- val = val[field_name] # dictionary-style specification
+ val = val[field_name] # dictionary-style specification
#print ("recidx", idx, field_name, field_shape, val)
yield from self.iterate(val)
""" flattens a compound structure recursively using Cat
"""
from nmigen._utils import flatten
- #res = list(flatten(i)) # works (as of nmigen commit f22106e5) HOWEVER...
- res = list(Visitor().iterate(i)) # needed because input may be a sequence
+ # res = list(flatten(i)) # works (as of nmigen commit f22106e5) HOWEVER...
+ res = list(Visitor().iterate(i)) # needed because input may be a sequence
return Cat(*res)
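A hedged illustration of what the flattening above amounts to for a simple two-field Record, assuming (as the record iterators above suggest) that fields are yielded in declaration order; the field names are made up.

from nmigen import Record, Cat

def cat_example():
    r = Record([("lo", 8), ("hi", 8)])
    return Cat(r.lo, r.hi)      # 16 bits, "lo" in the LSBs: what cat(r) produces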
-
-
-import inspect, types
+import inspect
+import types
############## preliminary: two utility functions #####################
+
def skip_redundant(iterable, skipset=None):
- "Redundant items are repeated items or items in the original skipset."
- if skipset is None: skipset = set()
- for item in iterable:
- if item not in skipset:
- skipset.add(item)
- yield item
+ "Redundant items are repeated items or items in the original skipset."
+ if skipset is None:
+ skipset = set()
+ for item in iterable:
+ if item not in skipset:
+ skipset.add(item)
+ yield item
def remove_redundant(metaclasses):
- skipset = set([type])
- for meta in metaclasses: # determines the metaclasses to be skipped
- skipset.update(inspect.getmro(meta)[1:])
- return tuple(skip_redundant(metaclasses, skipset))
+ skipset = set([type])
+ for meta in metaclasses: # determines the metaclasses to be skipped
+ skipset.update(inspect.getmro(meta)[1:])
+ return tuple(skip_redundant(metaclasses, skipset))
##################################################################
## now the core of the module: two mutually recursive functions ##
##################################################################
+
memoized_metaclasses_map = {}
+
def get_noconflict_metaclass(bases, left_metas, right_metas):
"""Not intended to be used outside of this module, unless you know
what you are doing."""
# return existing conflict-solving meta, if any
if needed_metas in memoized_metaclasses_map:
- return memoized_metaclasses_map[needed_metas]
+ return memoized_metaclasses_map[needed_metas]
# nope: compute, memoize and return needed conflict-solving meta
elif not needed_metas: # wee, a trivial case, happy us
meta = type
- elif len(needed_metas) == 1: # another trivial case
- meta = needed_metas[0]
+ elif len(needed_metas) == 1: # another trivial case
+ meta = needed_metas[0]
# check for recursion, can happen i.e. for Zope ExtensionClasses
- elif needed_metas == bases:
+ elif needed_metas == bases:
raise TypeError("Incompatible root metatypes", needed_metas)
- else: # gotta work ...
+ else: # gotta work ...
metaname = '_' + ''.join([m.__name__ for m in needed_metas])
meta = classmaker()(metaname, needed_metas, {})
memoized_metaclasses_map[needed_metas] = meta
return meta
+
def classmaker(left_metas=(), right_metas=()):
def make_class(name, bases, adict):
- print ("make_class", name)
+ print("make_class", name)
metaclass = get_noconflict_metaclass(bases, left_metas, right_metas)
return metaclass(name, bases, adict)
return make_class
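A hedged usage sketch of the recipe above, assuming the elided body of get_noconflict_metaclass follows the classic "noconflict" recipe this module derives from: two classes with unrelated metaclasses normally cannot be combined, and classmaker() builds a merged metaclass on the fly.

class MetaA(type):
    pass

class MetaB(type):
    pass

class A(metaclass=MetaA):
    pass

class B(metaclass=MetaB):
    pass

# plain "class C(A, B)" would raise "metaclass conflict"; classmaker() resolves it
class C(A, B, metaclass=classmaker()):
    pass

assert isinstance(C, MetaA) and isinstance(C, MetaB)   # merged _MetaAMetaB metaclass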
* reverse_i=True is for convenient reversal of the input bits
* reverse_o=True is for convenient reversal of the output bits
"""
+
def __init__(self, wid, lsb_mode=False, reverse_i=False, reverse_o=False):
self.wid = wid
# inputs
self.reverse_o = reverse_o
self.i = Signal(wid, reset_less=True)
self.o = Signal(wid, reset_less=True)
- self.en_o = Signal(reset_less=True) # true if any output is true
+ self.en_o = Signal(reset_less=True) # true if any output is true
def elaborate(self, platform):
m = Module()
# works by saying, "if all previous bits were zero, we get a chance"
res = []
- ni = Signal(self.wid, reset_less = True)
+ ni = Signal(self.wid, reset_less=True)
i = list(self.i)
if self.reverse_i:
i.reverse()
if self.lsb_mode:
prange.reverse()
for n in prange:
- t = Signal(name="t%d" % n, reset_less = True)
+ t = Signal(name="t%d" % n, reset_less=True)
res.append(t)
if n == 0:
m.d.comb += t.eq(i[n])
# we like Cat(*xxx). turn lists into concatenated bits
m.d.comb += self.o.eq(Cat(*res))
# useful "is any output enabled" signal
- m.d.comb += self.en_o.eq(self.o.bool()) # true if 1 input is true
+ m.d.comb += self.en_o.eq(self.o.bool()) # true if 1 input is true
return m
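With the default flags (lsb_mode, reverse_i and reverse_o all False) the picker above reduces to "lowest set bit wins". A hedged software model of that default behaviour:

def pick_lowest_set(x, width):
    # output bit n is set only if input bit n is set and all lower bits are clear
    for n in range(width):
        if (x >> n) & 1:
            return 1 << n       # one-hot result
    return 0                    # nothing set: en_o would be 0

assert pick_lowest_set(0b0110, 4) == 0b0010
assert pick_lowest_set(0b0000, 4) == 0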
Also outputted (optional): an index for each picked "thing".
"""
+
def __init__(self, wid, levels, indices=False, multiin=False):
self.levels = levels
self.wid = wid
self.indices = indices
self.multiin = multiin
-
if multiin:
# multiple inputs, multiple outputs.
- i_l = [] # array of picker outputs
+ i_l = [] # array of picker outputs
for j in range(self.levels):
i = Signal(self.wid, name="i_%d" % j, reset_less=True)
i_l.append(i)
self.i = Signal(self.wid, reset_less=True)
# create array of (single-bit) outputs (unary)
- o_l = [] # array of picker outputs
+ o_l = [] # array of picker outputs
for j in range(self.levels):
o = Signal(self.wid, name="o_%d" % j, reset_less=True)
o_l.append(o)
# add an array of indices
lidx = math.ceil(math.log2(self.levels))
- idx_o = [] # store the array of indices
+ idx_o = [] # store the array of indices
for j in range(self.levels):
i = Signal(lidx, name="idxo_%d" % j, reset_less=True)
idx_o.append(i)
p_mask = Const(0, self.wid)
else:
mask = Signal(self.wid, name="m_%d" % j, reset_less=True)
- comb += mask.eq(prev_pp.o | p_mask) # accumulate output bits
+ comb += mask.eq(prev_pp.o | p_mask) # accumulate output bits
comb += pp.i.eq(i & ~mask) # mask out input
p_mask = mask
- i = pp.i # for input to next round
+ i = pp.i # for input to next round
prev_pp = pp
# accumulate the enables
class PipeModBase(Elaboratable):
"""PipeModBase: common code between nearly every pipeline module
"""
+
def __init__(self, pspec, modname):
- self.modname = modname # use this to give a name to this module
+ self.modname = modname # use this to give a name to this module
self.pspec = pspec
self.i = self.ispec()
self.o = self.ospec()
and uses pspec.pipekls to dynamically select the pipeline type
Also conforms to the Pipeline Stage API
"""
+
def __init__(self, pspec):
self.pspec = pspec
self.chain = self.get_chain()
return self.chain[-1].ospec()
def process(self, i):
- return self.o # ... returned here (see setup comment below)
+ return self.o # ... returned here (see setup comment below)
def setup(self, m, i):
""" links module to inputs and outputs
"""
- StageChain(self.chain).setup(m, i) # input linked here, through chain
- self.o = self.chain[-1].o # output is the last thing in the chain...
+ StageChain(self.chain).setup(m, i) # input linked here, through chain
+ self.o = self.chain[-1].o # output is the last thing in the chain...
shift = LOG_TLB - lvl
new_idx = Const(~((i >> (shift-1)) & 1), 1)
plru_idx = idx_base + (i >> shift)
- #print("plru", i, lvl, hex(idx_base),
+ # print("plru", i, lvl, hex(idx_base),
# plru_idx, shift, new_idx)
m.d.sync += plru_tree[plru_idx].eq(new_idx)
new_idx = (i >> (shift-1)) & 1
plru_idx = idx_base + (i >> shift)
plru = Signal(reset_less=True,
- name="plru-%d-%d-%d-%d" % \
- (i, lvl, plru_idx, new_idx))
+ name="plru-%d-%d-%d-%d" %
+ (i, lvl, plru_idx, new_idx))
m.d.comb += plru.eq(plru_tree[plru_idx])
if new_idx:
en.append(~plru) # yes inverted (using bool() below)
comb += te.n.eq(~self.valid)
comb += te.i.eq(self.index)
- out = Array(Signal(self.n_bits, name="plru_out%d" % x) \
- for x in range(self.n_plrus))
+ out = Array(Signal(self.n_bits, name="plru_out%d" % x)
+ for x in range(self.n_plrus))
for i in range(self.n_plrus):
# PLRU interface
with open("test_plru.il", "w") as f:
f.write(vl)
-
dut = PLRUs(4, 2)
vl = rtlil.convert(dut, ports=dut.ports())
with open("test_plrus.il", "w") as f:
f.write(vl)
-
-
# deq is "dequeue" (data out, aka "next stage")
p_o_ready = self.w_rdy
p_i_valid = self.w_en
- enq_data = self.w_data # aka p_i_data
+ enq_data = self.w_data # aka p_i_data
n_o_valid = self.r_rdy
n_i_ready = self.r_en
- deq_data = self.r_data # aka n_o_data
+ deq_data = self.r_data # aka n_o_data
# intermediaries
ptr_width = bits_for(self.depth - 1) if self.depth > 1 else 0
- enq_ptr = Signal(ptr_width) # cyclic pointer to "insert" point (wrport)
- deq_ptr = Signal(ptr_width) # cyclic pointer to "remove" point (rdport)
- maybe_full = Signal() # not reset_less (set by sync)
+ # cyclic pointer to "insert" point (wrport)
+ enq_ptr = Signal(ptr_width)
+ # cyclic pointer to "remove" point (rdport)
+ deq_ptr = Signal(ptr_width)
+ maybe_full = Signal() # not reset_less (set by sync)
# temporaries
do_enq = Signal(reset_less=True)
enq_max = Signal(reset_less=True)
deq_max = Signal(reset_less=True)
- m.d.comb += [ptr_match.eq(enq_ptr == deq_ptr), # read-ptr = write-ptr
+ m.d.comb += [ptr_match.eq(enq_ptr == deq_ptr), # read-ptr = write-ptr
ptr_diff.eq(enq_ptr - deq_ptr),
enq_max.eq(enq_ptr == self.depth - 1),
deq_max.eq(deq_ptr == self.depth - 1),
empty.eq(ptr_match & ~maybe_full),
full.eq(ptr_match & maybe_full),
- do_enq.eq(p_o_ready & p_i_valid), # write conditions ok
- do_deq.eq(n_i_ready & n_o_valid), # read conditions ok
+ do_enq.eq(p_o_ready & p_i_valid), # write conditions ok
+ do_deq.eq(n_i_ready & n_o_valid), # read conditions ok
# set r_rdy and w_rdy (NOTE: see pipe mode below)
- n_o_valid.eq(~empty), # cannot read if empty!
+ n_o_valid.eq(~empty), # cannot read if empty!
p_o_ready.eq(~full), # cannot write if full!
# set up memory and connect to input and output
ram_write.data.eq(enq_data),
ram_write.en.eq(do_enq),
ram_read.addr.eq(deq_ptr),
- deq_data.eq(ram_read.data) # NOTE: overridden in fwft mode
- ]
+ # NOTE: overridden in fwft mode
+ deq_data.eq(ram_read.data)
+ ]
# under write conditions, SRAM write-pointer moves on next clock
with m.If(do_enq):
results_in => 0 0 1 0 1 0 0 1
output => 1 1 1 0 0 1 1 1
"""
+
def __init__(self, width, start_lsb=True):
self.width = width
self.start_lsb = start_lsb
width = self.width
results_in = list(self.results_in)
- if not self.start_lsb: results_in = reversed(results_in)
+ if not self.start_lsb:
+ results_in = list(reversed(results_in))
l = [results_in[0]]
for i in range(width-1):
l.append(Mux(self.gates[i], results_in[i+1], self.output[i]))
- if not self.start_lsb: l = reversed(l)
+ if not self.start_lsb:
+ l = reversed(l)
comb += self.output.eq(Cat(*l))
return m
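A hedged software model of the start_lsb=True case above (the gates, elided from this hunk, are assumed to be one bit narrower than the data and to mark where a new partition begins):

def ripple_lsb(results_in, gates):
    out = [results_in[0]]                # LSB of the first partition passes through
    for i in range(len(results_in) - 1):
        # gate set: bit i+1 starts a new partition, so take the incoming bit;
        # gate clear: keep rippling (copying) the previous output bit upwards
        out.append(results_in[i + 1] if gates[i] else out[i])
    return out

assert ripple_lsb([1, 0, 0, 0], [0, 0, 0]) == [1, 1, 1, 1]   # single partition
assert ripple_lsb([1, 0, 0, 1], [0, 1, 0]) == [1, 1, 0, 0]   # new partition at bit 2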
based on a partition mask, the LSB is "rippled" (duplicated)
up to the beginning of the next partition.
"""
+
def __init__(self, width):
Ripple.__init__(self, width, start_lsb=True)
based on a partition mask, the MSB is "rippled" (duplicated)
down to the beginning of the next partition.
"""
+
def __init__(self, width):
Ripple.__init__(self, width, start_lsb=False)
into its own useful module), then ANDs the (new) LSB with the
partition mask to isolate it.
"""
+
def __init__(self, width):
self.width = width
self.results_in = Signal(width, reset_less=True)
intermed = Signal(width, reset_less=True)
# first propagate MSB down until the nearest partition gate
- comb += intermed[-1].eq(self.results_in[-1]) # start at MSB
+ comb += intermed[-1].eq(self.results_in[-1]) # start at MSB
for i in range(width-2, -1, -1):
cur = Mux(self.gates[i], self.results_in[i], intermed[i+1])
comb += intermed[i].eq(cur)
# now only select those bits where the mask starts
- out = [intermed[0]] # LSB of first part always set
- for i in range(width-1): # length of partition gates
+ out = [intermed[0]] # LSB of first part always set
+ for i in range(width-1): # length of partition gates
out.append(self.gates[i] & intermed[i+1])
comb += self.output.eq(Cat(*out))
# then check with yosys "read_ilang ripple.il; show top"
alu = MoveMSBDown(width=4)
main(alu, ports=[alu.results_in, alu.gates, alu.output])
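A hedged software model of MoveMSBDown above, with the same assumptions about the gates as for Ripple: each partition's MSB is propagated down to that partition's lowest bit position, and every other output bit is cleared.

def move_msb_down(results_in, gates):
    width = len(results_in)
    intermed = [0] * width
    intermed[-1] = results_in[-1]                  # start at the overall MSB
    for i in range(width - 2, -1, -1):
        # a set gate means bit i is the top of its partition, so keep its own value;
        # otherwise take the value propagating down from above
        intermed[i] = results_in[i] if gates[i] else intermed[i + 1]
    out = [intermed[0]]                            # LSB of the first partition
    for i in range(width - 1):
        out.append(gates[i] & intermed[i + 1])     # only where a partition starts
    return out

assert move_msb_down([0, 0, 0, 1], [0, 0, 0]) == [1, 0, 0, 0]
assert move_msb_down([0, 1, 0, 1], [0, 1, 0]) == [1, 0, 1, 0]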
-
Delay, Settle, Tick, Passive)
nmigen_sim_environ_variable = os.environ.get("NMIGEN_SIM_MODE") \
- or "pysim"
+ or "pysim"
"""Detected run-time engine from environment"""
honestly it's a lot easier just to create a direct Records-based
class (see ExampleAddRecordStage)
"""
+
def __init__(self, in_shape, out_shape, processfn, setupfn=None):
self.in_shape = in_shape
self.out_shape = out_shape
self.__process = processfn
self.__setup = setupfn
+
def ispec(self): return Record(self.in_shape)
def ospec(self): return Record(self.out_shape)
def process(self, i): return self.__process(i)
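A hedged sketch of the "direct Records-based class" the docstring recommends, along the lines of the ExampleAddRecordStage it refers to (the field names and the +1 operation are illustrative assumptions); the returned dict is eq'd onto the ospec Record by the pipeline, using the dictionary-style assignment supported by the visitors above.

from nmigen import Record

class ExampleRecordStage:
    record_spec = [("src1", 16), ("src2", 16)]
    def ispec(self):
        return Record(self.record_spec)
    def ospec(self):
        return Record(self.record_spec)
    def process(self, i):
        # dict keys name the output Record fields
        return {"src1": i.src1 + 1, "src2": i.src2 + 1}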
(many APIs would potentially use a static "wrap" method in e.g.
StageCls to achieve a similar effect)
"""
+
def __init__(self, iospecfn): self.iospecfn = iospecfn
def ispec(self): return self.iospecfn()
def ospec(self): return self.iospecfn()
*BYPASSES* a ControlBase instance ready/valid signalling, which
clearly should not be done without a really, really good reason.
"""
+
def __init__(self, stage=None, in_multi=None, stage_ctl=False, maskwid=0):
""" Base class containing ready/valid/data to previous and next stages
* add o_data member to NextControl (n)
Calling ControlBase._new_data is a good way to do that.
"""
- print ("ControlBase", self, stage, in_multi, stage_ctl)
+ print("ControlBase", self, stage, in_multi, stage_ctl)
StageHelper.__init__(self, stage)
# set up input and output IO ACK (prev/next ready/valid)
"""
assert len(pipechain) > 0, "pipechain must be non-zero length"
assert self.stage is None, "do not use connect with a stage"
- eqs = [] # collated list of assignment statements
+ eqs = [] # collated list of assignment statements
# connect inter-chain
for i in range(len(pipechain)-1):
pipe1 = pipechain[i] # earlier
pipe2 = pipechain[i+1] # later (by 1)
- eqs += pipe1.connect_to_next(pipe2) # earlier n to later p
+ eqs += pipe1.connect_to_next(pipe2) # earlier n to later p
# connect front and back of chain to ourselves
front = pipechain[0] # first in chain
end = pipechain[-1] # last in chain
- self.set_specs(front, end) # sets up ispec/ospec functions
- self._new_data("chain") # NOTE: REPLACES existing data
+ self.set_specs(front, end) # sets up ispec/ospec functions
+ self._new_data("chain") # NOTE: REPLACES existing data
eqs += front._connect_in(self) # front p to our p
eqs += end._connect_out(self) # end n to our n
return nmoperator.eq(self.p.i_data, i)
def __iter__(self):
- yield from self.p # yields ready/valid/data (data also gets yielded)
- yield from self.n # ditto
+ yield from self.p # yields ready/valid/data (data also gets yielded)
+ yield from self.n # ditto
def ports(self):
return list(self)
por_pivn = Signal(reset_less=True)
npnn = Signal(reset_less=True)
self.m.d.comb += [p_i_valid.eq(self.p.i_valid_test),
- o_n_validn.eq(~self.n.o_valid),
- n_i_ready.eq(self.n.i_ready_test),
- nir_por.eq(n_i_ready & self.p._o_ready),
- nir_por_n.eq(n_i_ready & ~self.p._o_ready),
- nir_novn.eq(n_i_ready | o_n_validn),
- nirn_novn.eq(~n_i_ready & o_n_validn),
- npnn.eq(nir_por | nirn_novn),
- por_pivn.eq(self.p._o_ready & ~p_i_valid)
- ]
+ o_n_validn.eq(~self.n.o_valid),
+ n_i_ready.eq(self.n.i_ready_test),
+ nir_por.eq(n_i_ready & self.p._o_ready),
+ nir_por_n.eq(n_i_ready & ~self.p._o_ready),
+ nir_novn.eq(n_i_ready | o_n_validn),
+ nirn_novn.eq(~n_i_ready & o_n_validn),
+ npnn.eq(nir_por | nirn_novn),
+ por_pivn.eq(self.p._o_ready & ~p_i_valid)
+ ]
# store result of processing in combinatorial temporary
self.m.d.comb += nmoperator.eq(result, self.data_r)
# if not in stall condition, update the temporary register
- with self.m.If(self.p.o_ready): # not stalled
- self.m.d.sync += nmoperator.eq(r_data, result) # update buffer
+ with self.m.If(self.p.o_ready): # not stalled
+ self.m.d.sync += nmoperator.eq(r_data, result) # update buffer
# data pass-through conditions
with self.m.If(npnn):
- o_data = self._postprocess(result) # XXX TBD, does nothing right now
- self.m.d.sync += [self.n.o_valid.eq(p_i_valid), # valid if p_valid
- nmoperator.eq(self.n.o_data, o_data), # update out
- ]
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
+ self.m.d.sync += [self.n.o_valid.eq(p_i_valid), # valid if p_valid
+ # update out
+ nmoperator.eq(self.n.o_data, o_data),
+ ]
# buffer flush conditions (NOTE: can override data passthru conditions)
- with self.m.If(nir_por_n): # not stalled
+ with self.m.If(nir_por_n): # not stalled
# Flush the [already processed] buffer to the output port.
- o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(r_data)
self.m.d.sync += [self.n.o_valid.eq(1), # reg empty
- nmoperator.eq(self.n.o_data, o_data), # flush
- ]
+ nmoperator.eq(self.n.o_data, o_data), # flush
+ ]
# output ready conditions
self.m.d.sync += self.p._o_ready.eq(nir_novn | por_pivn)
| |
+--process->--^
"""
+
def __init__(self, stage, maskwid, in_multi=None, stage_ctl=False):
ControlBase.__init__(self, stage, in_multi, stage_ctl, maskwid)
m.d.sync += self.n.o_valid.eq(p_i_valid)
m.d.sync += self.n.mask_o.eq(Mux(p_i_valid, maskedout, 0))
with m.If(p_i_valid):
- o_data = self._postprocess(result) # XXX TBD, does nothing right now
- m.d.sync += nmoperator.eq(self.n.o_data, o_data) # update output
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
+ m.d.sync += nmoperator.eq(self.n.o_data, o_data) # update output
# output valid if
# input always "ready"
| |
+--process->--^
"""
+
def __init__(self, stage, maskwid, in_multi=None, stage_ctl=False,
- dynamic=False):
+ dynamic=False):
ControlBase.__init__(self, stage, in_multi, stage_ctl, maskwid)
self.dynamic = dynamic
if dynamic:
m.d.comb += [p_i_valid.eq(self.p.i_valid_test & maskedout.bool()),
n_i_ready.eq(self.n.i_ready_test),
p_i_valid_p_o_ready.eq(p_i_valid & self.p.o_ready),
- ]
+ ]
# if idmask nonzero, mask gets passed on (and register set).
# register is left as-is if idmask is zero, but out-mask is set to
m.d.sync += r_busy.eq(1) # output valid
# previous invalid or not ready, however next is accepting
with m.Elif(n_i_ready):
- m.d.sync += r_busy.eq(0) # ...so set output invalid
+ m.d.sync += r_busy.eq(0) # ...so set output invalid
# output set combinatorially from latch
m.d.comb += nmoperator.eq(self.n.o_data, r_latch)
m.d.comb += [p_i_valid.eq(self.p.i_valid_test),
n_i_ready.eq(self.n.i_ready_test),
p_i_valid_p_o_ready.eq(p_i_valid & self.p.o_ready),
- ]
+ ]
# store result of processing in combinatorial temporary
m.d.comb += nmoperator.eq(result, self.data_r)
# previous valid and ready
with m.If(p_i_valid_p_o_ready):
- o_data = self._postprocess(result) # XXX TBD, does nothing right now
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
m.d.sync += [r_busy.eq(1), # output valid
- nmoperator.eq(self.n.o_data, o_data), # update output
- ]
+ nmoperator.eq(self.n.o_data, o_data), # update output
+ ]
# previous invalid or not ready, however next is accepting
with m.Elif(n_i_ready):
- o_data = self._postprocess(result) # XXX TBD, does nothing right now
+ # XXX TBD, does nothing right now
+ o_data = self._postprocess(result)
m.d.sync += [nmoperator.eq(self.n.o_data, o_data)]
# TODO: could still send data here (if there was any)
- #m.d.sync += self.n.o_valid.eq(0) # ...so set output invalid
- m.d.sync += r_busy.eq(0) # ...so set output invalid
+ # m.d.sync += self.n.o_valid.eq(0) # ...so set output invalid
+ m.d.sync += r_busy.eq(0) # ...so set output invalid
m.d.comb += self.n.o_valid.eq(r_busy)
# if next is ready, so is previous
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- data_valid = Signal() # is data valid or not
- r_data = _spec(self.stage.ospec, "r_tmp") # output type
+ data_valid = Signal() # is data valid or not
+ r_data = _spec(self.stage.ospec, "r_tmp") # output type
# some temporaries
p_i_valid = Signal(reset_less=True)
with m.If(pv):
m.d.sync += nmoperator.eq(r_data, self.data_r)
- o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ o_data = self._postprocess(r_data) # XXX TBD, does nothing right now
m.d.comb += nmoperator.eq(self.n.o_data, o_data)
return self.m
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- buf_full = Signal() # is data valid or not
- buf = _spec(self.stage.ospec, "r_tmp") # output type
+ buf_full = Signal() # is data valid or not
+ buf = _spec(self.stage.ospec, "r_tmp") # output type
# some temporaries
p_i_valid = Signal(reset_less=True)
m.d.sync += buf_full.eq(~self.n.i_ready_test & self.n.o_valid)
o_data = Mux(buf_full, buf, self.data_r)
- o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
m.d.comb += nmoperator.eq(self.n.o_data, o_data)
m.d.sync += nmoperator.eq(buf, self.n.o_data)
def elaborate(self, platform):
self.m = m = ControlBase.elaborate(self, platform)
- r_data = _spec(self.stage.ospec, "r_tmp") # output type
+ r_data = _spec(self.stage.ospec, "r_tmp") # output type
# temporaries
p_i_valid = Signal(reset_less=True)
m.d.comb += p_i_valid.eq(self.p.i_valid_test)
m.d.comb += pvr.eq(p_i_valid & self.p.o_ready)
- m.d.comb += self.p.o_ready.eq(~self.n.o_valid | self.n.i_ready_test)
- m.d.sync += self.n.o_valid.eq(p_i_valid | ~self.p.o_ready)
+ m.d.comb += self.p.o_ready.eq(~self.n.o_valid | self.n.i_ready_test)
+ m.d.sync += self.n.o_valid.eq(p_i_valid | ~self.p.o_ready)
odata = Mux(pvr, self.data_r, r_data)
m.d.sync += nmoperator.eq(r_data, odata)
- r_data = self._postprocess(r_data) # XXX TBD, does nothing right now
+ r_data = self._postprocess(r_data) # XXX TBD, does nothing right now
m.d.comb += nmoperator.eq(self.n.o_data, r_data)
return m
sync'd latch out of o_data and o_valid as an indirect byproduct
of using PassThroughStage
"""
+
def __init__(self, iospecfn):
UnbufferedPipeline.__init__(self, PassThroughStage(iospecfn))
i_data -> fifo.din -> FIFO -> fifo.dout -> o_data
"""
+
def __init__(self, depth, stage, in_multi=None, stage_ctl=False,
- fwft=True, pipe=False):
+ fwft=True, pipe=False):
""" FIFO Control
* :depth: number of entries in the FIFO
m.d.comb += nmoperator.eq(result, self.process(i_data))
return nmoperator.cat(result)
- ## prev: make the FIFO (Queue object) "look" like a PrevControl...
+ # prev: make the FIFO (Queue object) "look" like a PrevControl...
m.submodules.fp = fp = PrevControl()
fp.i_valid, fp._o_ready, fp.i_data = fifo.w_en, fifo.w_rdy, fifo.w_data
m.d.comb += fp._connect_in(self.p, fn=processfn)
# next: make the FIFO (Queue object) "look" like a NextControl...
m.submodules.fn = fn = NextControl()
- fn.o_valid, fn.i_ready, fn.o_data = fifo.r_rdy, fifo.r_en, fifo.r_data
+ fn.o_valid, fn.i_ready, fn.o_data = fifo.r_rdy, fifo.r_en, fifo.r_data
connections = fn._connect_out(self.n, fn=nmoperator.cat)
valid_eq, ready_eq, o_data = connections
# ok ok so we can't just do the ready/valid eqs straight:
# first 2 from connections are the ready/valid, 3rd is data.
if self.fwft:
- m.d.comb += [valid_eq, ready_eq] # combinatorial on next ready/valid
+ # combinatorial on next ready/valid
+ m.d.comb += [valid_eq, ready_eq]
else:
- m.d.sync += [valid_eq, ready_eq] # non-fwft mode needs sync
- o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
+ m.d.sync += [valid_eq, ready_eq] # non-fwft mode needs sync
+ o_data = self._postprocess(o_data) # XXX TBD, does nothing right now
m.d.comb += o_data
return m
class UnbufferedPipeline(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 1, stage, in_multi, stage_ctl,
- fwft=True, pipe=False)
+ fwft=True, pipe=False)
# aka "BreakReadyStage" XXX had to set fwft=True to get it to work
+
+
class PassThroughHandshake(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 1, stage, in_multi, stage_ctl,
- fwft=True, pipe=True)
+ fwft=True, pipe=True)
# this is *probably* BufferedHandshake, although test #997 now succeeds.
+
+
class BufferedHandshake(FIFOControl):
def __init__(self, stage, in_multi=None, stage_ctl=False):
FIFOControl.__init__(self, 2, stage, in_multi, stage_ctl,
- fwft=True, pipe=False)
+ fwft=True, pipe=False)
"""
def ispec(self): pass # REQUIRED
@abstractmethod
def ospec(self): pass # REQUIRED
- #@abstractmethod
- #def setup(self, m, i): pass # OPTIONAL
- #@abstractmethod
- #def process(self, i): pass # OPTIONAL
+ # @abstractmethod
+ # def setup(self, m, i): pass # OPTIONAL
+ # @abstractmethod
+ # def process(self, i): pass # OPTIONAL
class Stage(metaclass=ABCMeta):
@abstractmethod
def ospec(): pass
- #@staticmethod
- #@abstractmethod
+ # @staticmethod
+ # @abstractmethod
#def setup(m, i): pass
- #@staticmethod
- #@abstractmethod
+ # @staticmethod
+ # @abstractmethod
#def process(i): pass
it differs from the stage that it wraps in that all the "optional"
functions are provided (hence the designation "convenience wrapper")
"""
+
def __init__(self, stage):
self.stage = stage
self._ispecfn = None
if self.stage is not None and hasattr(self.stage, "setup"):
self.stage.setup(m, i)
- def _postprocess(self, i): # XXX DISABLED
- return i # RETURNS INPUT
+ def _postprocess(self, i): # XXX DISABLED
+ return i # RETURNS INPUT
if hasattr(self.stage, "postprocess"):
return self.stage.postprocess(i)
return i
side-effects (state-based / clock-based input) or conditional
(inter-chain) dependencies, unless you really know what you are doing.
"""
+
def __init__(self, chain, specallocate=False):
assert len(chain) > 0, "stage chain must be non-zero length"
self.chain = chain
o = _spec(ofn, cname)
if isinstance(o, Elaboratable):
setattr(m.submodules, cname, o)
- m.d.comb += nmoperator.eq(o, c.process(i)) # process input into "o"
+ # process input into "o"
+ m.d.comb += nmoperator.eq(o, c.process(i))
if idx == len(self.chain)-1:
break
ifn = self.chain[idx+1].ispec # new input on next loop
i = _spec(ifn, 'chainin%d' % (idx+1))
- m.d.comb += nmoperator.eq(i, o) # assign to next input
+ m.d.comb += nmoperator.eq(i, o) # assign to next input
self.o = o
return self.o # last loop is the output
return self.o # last loop is the output
def process(self, i):
- return self.o # conform to Stage API: return last-loop output
-
-
+ return self.o # conform to Stage API: return last-loop output
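A hedged usage sketch of StageChain, relying only on the behaviour visible above (setup() wires the stages together and process() hands back the last stage's output); the AddOne stage is an illustrative assumption.

from nmigen import Module, Signal
from nmutil.singlepipe import StageChain

class AddOne:
    # minimal Stage API: ispec/ospec/process (setup is optional)
    def ispec(self): return Signal(16, name="addone_i")
    def ospec(self): return Signal(16, name="addone_o")
    def process(self, i): return i + 1

def chain_sketch():
    m = Module()
    i_sig = Signal(16)
    chain = StageChain([AddOne(), AddOne()])   # the chain itself is a Stage
    chain.setup(m, i_sig)                      # output of one feeds the next
    o_sig = chain.process(None)                # last-loop output: i_sig + 2
    return m, o_sig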
from nmutil.nmoperator import eq
from nmutil.iocontrol import (PrevControl, NextControl)
from nmutil.singlepipe import (PrevControl, NextControl, ControlBase,
- StageCls, Stage, StageChain,
- BufferedHandshake, UnbufferedPipeline)
+ StageCls, Stage, StageChain,
+ BufferedHandshake, UnbufferedPipeline)
from nmigen import Signal, Module
from nmigen.cli import verilog, rtlil
* ``n_o_valid`` and ``n_i_ready``: handshake
"""
+
def __init__(self, width):
self.width = width
"""data width"""
sig_in = Signal.like(dut.sig_in)
count = Signal.like(dut.lz)
-
m.d.comb += [
dut.sig_in.eq(sig_in),
count.eq(dut.lz)]
self.data = Signal(16, reset_less=True)
-
class PassThroughStage:
def ispec(self):
return PassData()
+
def ospec(self):
- return self.ispec() # same as ospec
+ return self.ispec() # same as ospec
def process(self, i):
- return i # pass-through
-
+ return i # pass-through
class PassThroughPipe(SimpleHandshake):
self.di[muxid] = {}
self.do[muxid] = {}
for i in range(self.tlen):
- self.di[muxid][i] = randint(0, 255) + (muxid<<8)
+ self.di[muxid][i] = randint(0, 255) + (muxid << 8)
self.do[muxid][i] = self.di[muxid][i]
def send(self, muxid):
yield
o_p_ready = yield rs.o_ready
- print ("send", muxid, i, hex(op2))
+ print("send", muxid, i, hex(op2))
yield rs.i_valid.eq(0)
# wait random period of time before queueing another value
yield rs.i_valid.eq(0)
yield
- print ("send ended", muxid)
+ print("send ended", muxid)
- ## wait random period of time before queueing another value
- #for i in range(randint(0, 3)):
+ # wait random period of time before queueing another value
+ # for i in range(randint(0, 3)):
# yield
#send_range = randint(0, 3)
- #if send_range == 0:
+ # if send_range == 0:
# send = True
- #else:
+ # else:
# send = randint(0, send_range) != 0
def rcv(self, muxid):
while True:
#stall_range = randint(0, 3)
- #for j in range(randint(1,10)):
+ # for j in range(randint(1,10)):
# stall = randint(0, stall_range) != 0
# yield self.dut.n[0].i_ready.eq(stall)
# yield
out_i = yield n.o_data.idx
out_v = yield n.o_data.data
- print ("recv", out_muxid, out_i, hex(out_v))
+ print("recv", out_muxid, out_i, hex(out_v))
# see if this output has occurred already, delete it if it has
assert muxid == out_muxid, \
- "out_muxid %d not correct %d" % (out_muxid, muxid)
+ "out_muxid %d not correct %d" % (out_muxid, muxid)
assert out_i in self.do[muxid], "out_i %d not in array %s" % \
- (out_i, repr(self.do[muxid]))
- assert self.do[muxid][out_i] == out_v # pass-through data
+ (out_i, repr(self.do[muxid]))
+ assert self.do[muxid][out_i] == out_v # pass-through data
del self.do[muxid][out_i]
# check if there's any more outputs
if len(self.do[muxid]) == 0:
break
- print ("recv ended", muxid)
+ print("recv ended", muxid)
class TestPriorityMuxPipe(PriorityCombMuxInPipe):
muxid = i
else:
muxid = randint(0, dut.num_rows-1)
- data = randint(0, 255) + (muxid<<8)
+ data = randint(0, 255) + (muxid << 8)
def send(self):
for i in range(self.tlen * dut.num_rows):
yield
o_p_ready = yield rs.o_ready
- print ("send", muxid, i, hex(op2))
+ print("send", muxid, i, hex(op2))
yield rs.i_valid.eq(0)
# wait random period of time before queueing another value
class TestInOutPipe(Elaboratable):
def __init__(self, num_rows=4):
self.num_rows = num_rows
- self.inpipe = TestPriorityMuxPipe(num_rows) # fan-in (combinatorial)
+ self.inpipe = TestPriorityMuxPipe(num_rows) # fan-in (combinatorial)
self.pipe1 = PassThroughPipe() # stage 1 (clock-sync)
self.pipe2 = PassThroughPipe() # stage 2 (clock-sync)
self.outpipe = TestMuxOutPipe(num_rows) # fan-out (combinatorial)
self.p = self.inpipe.p # kinda annoying,
- self.n = self.outpipe.n # use pipe in/out as this class in/out
+ self.n = self.outpipe.n # use pipe in/out as this class in/out
self._ports = self.inpipe.ports() + self.outpipe.ports()
def elaborate(self, platform):
test.rcv(3), test.rcv(2),
test.send(0), test.send(1),
test.send(3), test.send(2),
- ],
+ ],
vcd_name="test_inoutmux_pipe.vcd")
+
if __name__ == '__main__':
test1()
self.data = Signal(16, reset_less=True)
-
class PassThroughStage:
def ispec(self):
return PassData()
+
def ospec(self):
- return self.ispec() # same as ospec
+ return self.ispec() # same as ospec
def process(self, i):
- return i # pass-through
-
+ return i # pass-through
class PassThroughPipe(MaskCancellable):
self.do[muxid] = {}
self.sent[muxid] = []
for i in range(self.tlen):
- self.di[muxid][i] = randint(0, 255) + (muxid<<8)
+ self.di[muxid][i] = randint(0, 255) + (muxid << 8)
self.do[muxid][i] = self.di[muxid][i]
def send(self, muxid):
yield
o_p_ready = yield rs.o_ready
- print ("send", muxid, i, hex(op2), op2)
+ print("send", muxid, i, hex(op2), op2)
self.sent[muxid].append(i)
yield rs.i_valid.eq(0)
yield rs.i_valid.eq(0)
yield
- print ("send ended", muxid)
+ print("send ended", muxid)
- ## wait random period of time before queueing another value
- #for i in range(randint(0, 3)):
+ # wait random period of time before queueing another value
+ # for i in range(randint(0, 3)):
# yield
#send_range = randint(0, 3)
- #if send_range == 0:
+ # if send_range == 0:
# send = True
- #else:
+ # else:
# send = randint(0, send_range) != 0
def rcv(self, muxid):
# check cancellation
if self.sent[muxid] and randint(0, 2) == 0:
todel = self.sent[muxid].pop()
- print ("to delete", muxid, self.sent[muxid], todel)
+ print("to delete", muxid, self.sent[muxid], todel)
if todel in self.do[muxid]:
del self.do[muxid][todel]
yield rs.stop_i.eq(1)
- print ("left", muxid, self.do[muxid])
+ print("left", muxid, self.do[muxid])
if len(self.do[muxid]) == 0:
break
stall_range = randint(0, 3)
- for j in range(randint(1,10)):
+ for j in range(randint(1, 10)):
stall = randint(0, stall_range) != 0
yield self.dut.n[0].i_ready.eq(stall)
yield
n = self.dut.n[muxid]
yield n.i_ready.eq(1)
yield
- yield rs.stop_i.eq(0) # resets cancel mask
+ yield rs.stop_i.eq(0) # resets cancel mask
o_n_valid = yield n.o_valid
i_n_ready = yield n.i_ready
if not o_n_valid or not i_n_ready:
out_i = yield n.o_data.idx
out_v = yield n.o_data.data
- print ("recv", out_muxid, out_i, hex(out_v), out_v)
+ print("recv", out_muxid, out_i, hex(out_v), out_v)
# see if this output has occurred already, delete it if it has
assert muxid == out_muxid, \
- "out_muxid %d not correct %d" % (out_muxid, muxid)
+ "out_muxid %d not correct %d" % (out_muxid, muxid)
if out_i not in self.sent[muxid]:
- print ("cancelled/recv", muxid, out_i)
+ print("cancelled/recv", muxid, out_i)
continue
assert out_i in self.do[muxid], "out_i %d not in array %s" % \
- (out_i, repr(self.do[muxid]))
- assert self.do[muxid][out_i] == out_v # pass-through data
+ (out_i, repr(self.do[muxid]))
+ assert self.do[muxid][out_i] == out_v # pass-through data
del self.do[muxid][out_i]
todel = self.sent[muxid].index(out_i)
del self.sent[muxid][todel]
if len(self.do[muxid]) == 0:
break
- print ("recv ended", muxid)
+ print("recv ended", muxid)
class TestPriorityMuxPipe(PriorityCombMuxInPipe):
class TestInOutPipe(Elaboratable):
def __init__(self, num_rows=4):
self.num_rows = nr = num_rows
- self.inpipe = TestPriorityMuxPipe(nr) # fan-in (combinatorial)
+ self.inpipe = TestPriorityMuxPipe(nr) # fan-in (combinatorial)
self.pipe1 = PassThroughPipe(nr) # stage 1 (clock-sync)
self.pipe2 = PassThroughPipe(nr) # stage 2 (clock-sync)
self.pipe3 = PassThroughPipe(nr) # stage 3 (clock-sync)
self.outpipe = TestMuxOutPipe(nr) # fan-out (combinatorial)
self.p = self.inpipe.p # kinda annoying,
- self.n = self.outpipe.n # use pipe in/out as this class in/out
+ self.n = self.outpipe.n # use pipe in/out as this class in/out
self._ports = self.inpipe.ports() + self.outpipe.ports()
def elaborate(self, platform):
test.rcv(3), test.rcv(2),
test.send(0), test.send(1),
test.send(3), test.send(2),
- ],
+ ],
vcd_name="test_inoutmux_unarycancel_pipe.vcd")
+
if __name__ == '__main__':
test1()
def ospec(self, name):
return Signal(16, name="%s_dout" % name, reset_less=True)
-
+
def process(self, i):
return i.data
class PassThroughDataStage:
def ispec(self):
return PassInData()
+
def ospec(self):
- return self.ispec() # same as ospec
+ return self.ispec() # same as ospec
def process(self, i):
- return i # pass-through
-
+ return i # pass-through
class PassThroughPipe(PassThroughHandshake):
muxid = i
else:
muxid = randint(0, dut.num_rows-1)
- data = randint(0, 255) + (muxid<<8)
+ data = randint(0, 255) + (muxid << 8)
if muxid not in self.do:
self.do[muxid] = []
self.di.append((data, muxid))
yield
o_p_ready = yield rs.o_ready
- print ("send", muxid, i, hex(op2))
+ print("send", muxid, i, hex(op2))
yield rs.i_valid.eq(0)
# wait random period of time before queueing another value
out_v = yield n.o_data
- print ("recv", muxid, out_i, hex(out_v))
+ print("recv", muxid, out_i, hex(out_v))
- assert self.do[muxid][out_i] == out_v # pass-through data
+ assert self.do[muxid][out_i] == out_v # pass-through data
out_i += 1
def ports(self):
res = [self.p.i_valid, self.p.o_ready] + \
- self.p.i_data.ports()
+ self.p.i_data.ports()
for i in range(len(self.n)):
res += [self.n[i].i_ready, self.n[i].o_valid] + \
- [self.n[i].o_data]
- #self.n[i].o_data.ports()
+ [self.n[i].o_data]
+ # self.n[i].o_data.ports()
return res
test.send()],
vcd_name="test_outmux_pipe.vcd")
+
if __name__ == '__main__':
test1()
self.data = Signal(16, name="data"+name, reset_less=True)
-
class PassThroughStage:
def ispec(self, name=None):
return PassData(name=name)
+
def ospec(self, name=None):
- return self.ispec(name) # same as ospec
+ return self.ispec(name) # same as ospec
def process(self, i):
- return i # pass-through
-
+ return i # pass-through
class PassThroughPipe(SimpleHandshake):
self.di[muxid] = {}
self.do[muxid] = {}
for i in range(self.tlen):
- self.di[muxid][i] = randint(0, 255) + (muxid<<8)
+ self.di[muxid][i] = randint(0, 255) + (muxid << 8)
self.do[muxid][i] = self.di[muxid][i]
def send(self, muxid):
yield
o_p_ready = yield rs.o_ready
- print ("send", muxid, i, hex(op2))
+ print("send", muxid, i, hex(op2))
yield rs.i_valid.eq(0)
# wait random period of time before queueing another value
yield rs.i_valid.eq(0)
yield
- print ("send ended", muxid)
+ print("send ended", muxid)
- ## wait random period of time before queueing another value
- #for i in range(randint(0, 3)):
+ # wait random period of time before queueing another value
+ # for i in range(randint(0, 3)):
# yield
#send_range = randint(0, 3)
- #if send_range == 0:
+ # if send_range == 0:
# send = True
- #else:
+ # else:
# send = randint(0, send_range) != 0
def rcv(self, muxid):
while True:
#stall_range = randint(0, 3)
- #for j in range(randint(1,10)):
+ # for j in range(randint(1,10)):
# stall = randint(0, stall_range) != 0
# yield self.dut.n[0].i_ready.eq(stall)
# yield
out_i = yield n.o_data.idx
out_v = yield n.o_data.data
- print ("recv", out_muxid, out_i, hex(out_v))
+ print("recv", out_muxid, out_i, hex(out_v))
# see if this output has occurred already, delete it if it has
assert muxid == out_muxid, \
- "out_muxid %d not correct %d" % (out_muxid, muxid)
+ "out_muxid %d not correct %d" % (out_muxid, muxid)
assert out_i in self.do[muxid], "out_i %d not in array %s" % \
- (out_i, repr(self.do[muxid]))
- assert self.do[muxid][out_i] == out_v # pass-through data
+ (out_i, repr(self.do[muxid]))
+ assert self.do[muxid][out_i] == out_v # pass-through data
del self.do[muxid][out_i]
# check if there's any more outputs
if len(self.do[muxid]) == 0:
break
- print ("recv ended", muxid)
+ print("recv ended", muxid)
class TestALU(Elaboratable):
test.rcv(3), test.rcv(2),
test.send(0), test.send(1),
test.send(3), test.send(2),
- ],
+ ],
vcd_name="test_reservation_stations.vcd")
+
if __name__ == '__main__':
test1()
treereduce(tree, operator.or_, lambda x: getattr(x, "o_data"))
"""
if fn is None:
- fn = lambda x: x
+ def fn(x): return x
if not isinstance(tree, list):
return tree
if len(tree) == 1:
return fn(tree[0])
if len(tree) == 2:
return op(fn(tree[0]), fn(tree[1]))
- s = len(tree) // 2 # splitpoint
+ s = len(tree) // 2 # splitpoint
return op(treereduce(tree[:s], op, fn),
treereduce(tree[s:], op, fn))
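A possible usage sketch for treereduce, mirroring the docstring example (the
stages list is hypothetical): OR-reduce the o_data of several pipeline stages
as a balanced binary tree rather than a long linear chain of | operations.

    import operator

    # stages is assumed to be a list of objects each carrying an o_data Signal;
    # the lambda selects the field to reduce, operator.or_ combines the pairs
    combined = treereduce(stages, operator.or_, lambda x: x.o_data)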
# chooses assignment of 32 bit or full 64 bit depending on is_32bit
+
+
def eq32(is_32bit, dest, src):
return [dest[0:32].eq(src[0:32]),
dest[32:64].eq(Mux(is_32bit, 0, src[32:64]))]
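A hedged usage sketch for eq32 (the signal names and the Module m are invented
for illustration): because it returns a list of assignments, the result can be
added straight to a combinatorial domain.

    is_32bit = Signal()
    src = Signal(64)
    dest = Signal(64)
    # lower 32 bits are always copied; upper 32 bits are zeroed in 32-bit mode
    m.d.comb += eq32(is_32bit, dest, src)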
rising = Signal.like(sig)
delay.name = "%s_dly" % sig.name
rising.name = "%s_rise" % sig.name
- m.d.sync += delay.eq(sig) # 1 clock delay
- m.d.comb += rising.eq(sig & ~delay) # sig is hi but delay-sig is lo
+ m.d.sync += delay.eq(sig) # 1 clock delay
+ m.d.comb += rising.eq(sig & ~delay) # sig is hi but delay-sig is lo
return rising
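Usage sketch for the rising-edge detector above (only the function body appears
in this hunk, so the enclosing helper is assumed here to be called
rising_edge(m, sig)): the returned signal is high for exactly one clock when
sig goes from low to high.

    start = Signal()
    count = Signal(8)
    start_pulse = rising_edge(m, start)  # assumed helper name
    with m.If(start_pulse):
        m.d.sync += count.eq(count + 1)  # count each 0 -> 1 transition of start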