from soc.config.test.test_pi2ls import pi_ld, pi_st, pi_ldst
import unittest
+# NOTE(review): this hunk's Python indentation has been collapsed to a single
+# space after the diff marker - restore real indentation before applying.
+class L0CacheBuffer2(Elaboratable):
+ """L0CacheBuffer2"""
+ def __init__(self, n_units=8, regwid=64, addrwid=48):
+ # n_units: number of LD/ST PortInterfaces served by this buffer
+ # regwid/addrwid: data and address widths (bits)
+ self.n_units = n_units
+ self.regwid = regwid
+ self.addrwid = addrwid
+ ul = []
+ for i in range(self.n_units):
+ ul += [PortInterface()]
+ # Array wrapper allows the ports to be indexed by a Signal at runtime
+ self.dports = Array(ul)
-class DualPortSplitter(Elaboratable):
-    """DualPortSplitter
-
-    * one incoming PortInterface
-    * two *OUTGOING* PortInterfaces
-    * uses LDSTSplitter to do it
-
-    (actually, thinking about it LDSTSplitter could simply be
-    modified to conform to PortInterface: one in, two out)
-
-    once that is done each pair of ports may be wired directly
-    to the dual ports of L0CacheBuffer
+ def elaborate(self, platform):
+ m = Module()
+ comb, sync = m.d.comb, m.d.sync
-    The split is carried out so that, regardless of alignment or
-    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
-    of the address, whilst outgoing PortInterface[1] takes
-    bit 4 == 1.
+ # connect the ports as modules
-    PortInterface *may* need to be changed so that the length is
-    a binary number (accepting values 1-16).
-    """
-    def __init__(self):
-        self.outp = [PortInterface(name="outp_0"),
-                     PortInterface(name="outp_1")]
-        self.inp = PortInterface(name="inp")
-        print(self.outp)
+ # one LDSTSplitter submodule per incoming PortInterface;
+ # widths (64, 48, 4) presumably match regwid/addrwid defaults - TODO confirm
+ for i in range(self.n_units):
+ d = LDSTSplitter(64, 48, 4, self.dports[i])
+ setattr(m.submodules, "ldst_splitter%d" % i, d)
-    def elaborate(self, platform):
-        m = Module()
-        comb = m.d.comb
-        m.submodules.splitter = splitter = LDSTSplitter(64, 48, 4)
-        comb += splitter.addr_i.eq(self.inp.addr) #XXX
-        #comb += splitter.len_i.eq()
-        #comb += splitter.valid_i.eq()
-        comb += splitter.is_ld_i.eq(self.inp.is_ld_i)
-        comb += splitter.is_st_i.eq(self.inp.is_st_i)
-        #comb += splitter.st_data_i.eq()
-        #comb += splitter.sld_valid_i.eq()
-        #comb += splitter.sld_data_i.eq()
-        #comb += splitter.sst_valid_i.eq()
+ # state-machine latches TODO
return m
-
class DataMergerRecord(Record):
"""
{data: 128 bit, byte_enable: 16 bit}
self.data.reset_less = True
self.en.reset_less = True
+class CacheRecord(Record):
+ # Record layout: {addr: 37, a_even: 7, bytemask_even: 16, data_even: 128,
+ #                 a_odd: 7, bytemask_odd: 16, data_odd: 128}.
+ # Presumably one request split across an even and an odd cache bank,
+ # each with its own sub-address, byte-enable mask and 128-bit data
+ # - TODO confirm against DataMerger/TstDataMerger2 usage.
+ def __init__(self, name=None):
+ layout = (('addr', 37),
+ ('a_even', 7),
+ ('bytemask_even', 16),
+ ('data_even', 128),
+ ('a_odd', 7),
+ ('bytemask_odd', 16),
+ ('data_odd', 128))
+ Record.__init__(self, Layout(layout), name=name)
+
+ # reset_less: these fields carry no state across reset, so no
+ # reset logic needs to be generated for them
+ self.addr.reset_less = True
+ self.a_even.reset_less = True
+ self.bytemask_even.reset_less = True
+ self.data_even.reset_less = True
+ self.a_odd.reset_less = True
+ self.bytemask_odd.reset_less = True
+ self.data_odd.reset_less = True
+
+
+
# TODO: formal verification
class DataMerger(Elaboratable):
def elaborate(self, platform):
m = Module()
comb = m.d.comb
- #(1) pick a row
+ # (1) pick a row
m.submodules.pick = pick = PriorityEncoder(self.array_size)
for j in range(self.array_size):
comb += pick.i[j].eq(self.addr_array_i[j].bool())
valid = ~pick.n
idx = pick.o
- #(2) merge
+ # (2) merge
with m.If(valid):
l = []
for j in range(self.array_size):
with m.If(select):
comb += r.eq(self.data_i[j])
l.append(r)
- comb += self.data_o.data.eq(ortreereduce(l,"data"))
- comb += self.data_o.en.eq(ortreereduce(l,"en"))
+ comb += self.data_o.data.eq(ortreereduce(l, "data"))
+ comb += self.data_o.en.eq(ortreereduce(l, "en"))
return m
+class TstDataMerger2(Elaboratable):
+ # Test harness: feeds n_units CacheRecord inputs through two DataMerger
+ # instances (one for the even half, one for the odd half) and exposes
+ # the merged 128-bit results on data_even / data_odd.
+ def __init__(self):
+ self.data_odd = Signal(128,reset_less=True)
+ self.data_even = Signal(128,reset_less=True)
+ self.n_units = 8
+ ul = []
+ for i in range(self.n_units):
+ ul.append(CacheRecord())
+ self.input_array = Array(ul)
+
+ def addr_match(self,j,addr):
+ # returns an n_units-wide Cat of (addr[j] == addr[k]) comparisons:
+ # bit k is set when unit k requests the same address as unit j
+ ret = []
+ for k in range(self.n_units):
+ ret += [(addr[j] == addr[k])]
+ return Cat(*ret)
+
+ def elaborate(self, platform):
+ m = Module()
+ m.submodules.dm_odd = dm_odd = DataMerger(self.n_units)
+ m.submodules.dm_even = dm_even = DataMerger(self.n_units)
+
+ # full per-bank addresses: shared high bits (addr) plus the
+ # bank-local sub-address (a_even / a_odd)
+ addr_even = []
+ addr_odd = []
+ for j in range(self.n_units):
+ inp = self.input_array[j]
+ addr_even += [Cat(inp.addr,inp.a_even)]
+ addr_odd += [Cat(inp.addr,inp.a_odd)]
+
+ # wire each unit's byte-enables, data and address-match vector
+ # into the corresponding DataMerger inputs
+ for j in range(self.n_units):
+ inp = self.input_array[j]
+ m.d.comb += dm_even.data_i[j].en.eq(inp.bytemask_even)
+ m.d.comb += dm_odd.data_i[j].en.eq(inp.bytemask_odd)
+ m.d.comb += dm_even.data_i[j].data.eq(inp.data_even)
+ m.d.comb += dm_odd.data_i[j].data.eq(inp.data_odd)
+ m.d.comb += dm_even.addr_array_i[j].eq(self.addr_match(j,addr_even))
+ m.d.comb += dm_odd.addr_array_i[j].eq(self.addr_match(j,addr_odd))
+
+ m.d.comb += self.data_odd.eq(dm_odd.data_o.data)
+ m.d.comb += self.data_even.eq(dm_even.data_o.data)
+ return m
+
class L0CacheBuffer(Elaboratable):
"""L0 Cache / Buffer
comb, sync = m.d.comb, m.d.sync
# connect the ports as modules
- #for i in range(self.n_units):
+ # for i in range(self.n_units):
# setattr(m.submodules, "port%d" % i, self.dports[i])
# state-machine latches
ldsti = []
for i in range(self.n_units):
pi = self.dports[i]
- busy = (pi.is_ld_i | pi.is_st_i)# & pi.busy_o
- ldsti.append(busy) # accumulate ld/st-req
+ busy = (pi.is_ld_i | pi.is_st_i) # & pi.busy_o
+ ldsti.append(busy) # accumulate ld/st-req
# put the requests into the priority-picker
comb += pick.i.eq(Cat(*ldsti))
with m.If(idx_l.q):
comb += self.pimem.connect_port(port)
with m.If(~self.pimem.pi.busy_o):
- comb += reset_l.s.eq(1) # reset when no longer busy
+ comb += reset_l.s.eq(1) # reset when no longer busy
# ugly hack, due to simultaneous addr req-go acknowledge
reset_delay = Signal(reset_less=True)
return m
- def ports(self):
+ def __iter__(self):
for p in self.dports:
yield from p.ports()
+ def ports(self):
+ return list(self)
+
class TstL0CacheBuffer(Elaboratable):
def __init__(self, pspec, n_units=3):
addrwid = pspec.addr_wid
self.cmpi = ConfigMemoryPortInterface(pspec)
self.pimem = self.cmpi.pi
- self.l0 = L0CacheBuffer(n_units, self.pimem, regwid, addrwid<<1)
+ self.l0 = L0CacheBuffer(n_units, self.pimem, regwid, addrwid << 1)
def elaborate(self, platform):
m = Module()
m.submodules.pimem = self.pimem
m.submodules.l0 = self.l0
- if hasattr(self.cmpi, 'lsmem'): # hmmm not happy about this
+ if hasattr(self.cmpi, 'lsmem'): # hmmm not happy about this
m.submodules.lsmem = self.cmpi.lsmem.lsi
return m
def data_merger_merge(dut):
- print("data_merger")
- #starting with all inputs zero
+ # starting with all inputs zero
yield Settle()
en = yield dut.data_o.en
data = yield dut.data_o.data
assert en == 0xff
yield
+def data_merger_test2(dut):
+ # TODO(review): placeholder simulation process - settles combinational
+ # logic and steps the clock twice with all inputs at their reset values;
+ # it makes no assertions yet.
+ # starting with all inputs zero
+ yield Settle()
+ yield
+ yield
+
class TestL0Cache(unittest.TestCase):
mask_wid=8,
reg_wid=64)
dut = TstL0CacheBuffer(pspec)
- vl = rtlil.convert(dut, ports=[])# TODOdut.ports())
+ vl = rtlil.convert(dut, ports=[]) # TODOdut.ports())
with open("test_basic_l0_cache_bare_wb.il", "w") as f:
f.write(vl)
mask_wid=8,
reg_wid=64)
dut = TstL0CacheBuffer(pspec)
- vl = rtlil.convert(dut, ports=[])# TODOdut.ports())
+ vl = rtlil.convert(dut, ports=[]) # TODOdut.ports())
with open("test_basic_l0_cache.il", "w") as f:
f.write(vl)
def test_data_merger(self):
- dut = DataMerger(8)
+ dut = TstDataMerger2()
#vl = rtlil.convert(dut, ports=dut.ports())
- #with open("test_data_merger.il", "w") as f:
+ # with open("test_data_merger.il", "w") as f:
# f.write(vl)
- run_simulation(dut, data_merger_merge(dut),
+ run_simulation(dut, data_merger_test2(dut),
vcd_name='test_data_merger.vcd')
+
class TestDualPortSplitter(unittest.TestCase):
def test_dual_port_splitter(self):
dut = DualPortSplitter()
#vl = rtlil.convert(dut, ports=dut.ports())
- #with open("test_data_merger.il", "w") as f:
+ # with open("test_data_merger.il", "w") as f:
# f.write(vl)
- #run_simulation(dut, data_merger_merge(dut),
+ # run_simulation(dut, data_merger_merge(dut),
# vcd_name='test_dual_port_splitter.vcd')
if __name__ == '__main__':
- unittest.main(exit=False)
-
+ unittest.main()