"""L0 Cache/Buffer

This first version is intended for prototyping and test purposes:
it has "direct" access to Memory.

The intention is that this version remains an integral part of the
test infrastructure, and, just as with minerva's memory arrangement,
a dynamic runtime config *selects* alternative memory arrangements
rather than *replaces and discards* this code.

Links:

* https://bugs.libre-soc.org/show_bug.cgi?id=216
* https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
"""
from nmigen.compat.sim import run_simulation
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmigen.utils import log2_int
from nmigen.lib.coding import PriorityEncoder

from nmutil.iocontrol import RecordObject
from nmutil.latch import SRLatch, latchregister
#from nmutil.picker import PriorityPicker

from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.experiment.compldst import CompLDSTOpSubset

# for testing purposes
from soc.experiment.testmem import TestMemory
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        # record widths for later reference by users of the interface
        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)      # do not use if busy
        self.go_die_i = Signal(reset_less=True)    # back to reset
        self.addr = Data(addrwid, "addr_i")        # addr/addr-ok
        self.addr_ok_o = Signal(reset_less=True)   # addr is valid (TLB, L1 etc.)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST data
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    # NOTE(review): implementation not present in this view - placeholder
    # so the class definition is syntactically valid.
    pass
class LDSTPort(Elaboratable):
    """Wraps one PortInterface and tracks its "busy" state.

    The L0 Cache/Buffer sets the actual LD/ST outputs; this module
    only monitors the request/acknowledge handshake and drives the
    busy latch according to the PortInterface FSM rules.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the externally-visible API (named per port index)
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # busy latch: set on request, reset on completion/exception.
        # cyc latch (sync-mode) delays busy-release by one cycle for ST.
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # NOTE(review): this guard was dropped in extraction and is
        # restored here - one cycle after st/ld "ok", release both
        # the cycle latch and busy.  confirm against upstream l0_cache.py.
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # enumerate all signals of the PortInterface, for simulation/convert
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        yield from self.__iter__()
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        # one LDSTPort per unit, wrapped in an Array so that it may be
        # indexed by a Signal (the picked port index)
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
            comb += adrok_l.r.eq(1)    # address not yet "ok'd"
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
            comb += adrok_l.r.eq(1)    # address not yet "ok'd"

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)       # deactivate port-index selector
            comb += ld_active.r.eq(1)   # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)   # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)     # clear reset

        return m

    def __iter__(self):
        for p in self.dports:
            yield from p.pi.ports()

    def ports(self):
        yield from self.__iter__()
class TstL0CacheBuffer(Elaboratable):
    """Test harness: an L0CacheBuffer wired to a TestMemory."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # all L0 port signals, plus the raw memory ports for inspection
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
def wait_busy(port, no=False):
    """Step the simulation until port.pi.busy_o == no.

    wait_busy(port, no=False) therefore waits until the port is NOT busy.
    """
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield
def wait_addr(port):
    """Step the simulation until the port acknowledges the address.

    NOTE(review): the loop header and break condition were dropped in
    extraction; reconstructed from the "wait until addr ok" call sites.
    Confirm against upstream l0_cache.py.
    """
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if addr_ok:
            break
        yield
def wait_ldok(port):
    """Step the simulation until LD data is signalled valid (ld.ok).

    NOTE(review): the loop header and break condition were dropped in
    extraction; reconstructed from the "wait until ld ok" call site.
    Confirm against upstream l0_cache.py.
    """
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield
def l0_cache_st(dut, addr, data):
    """Drive one ST transaction on port 1, following the PortInterface API."""
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    #yield from wait_busy(port1, False)    # wait until not busy
def l0_cache_ld(dut, addr, expected):
    """Drive one LD transaction on port 1; return the loaded data."""
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup: deassert the request
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    #yield from wait_busy(port1, no=False) # wait until not busy

    return data
def l0_cache_ldst(dut):
    """ST a value, LD it back, and check the round-trip through the buffer.

    NOTE(review): addr/data assignments were dropped in extraction;
    values reconstructed (addrwid=4, regwid=16 in TstL0CacheBuffer, so
    0x2/0xbeef fit) - confirm against upstream l0_cache.py.
    """
    yield
    addr = 0x2
    data = 0xbeef
    yield from l0_cache_st(dut, addr, data)
    result = yield from l0_cache_ld(dut, addr, data)
    yield
    assert data == result, "data %x != %x" % (result, data)
def test_l0_cache():
    """Convert the test harness to RTLIL and run the LD/ST simulation."""
    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
if __name__ == '__main__':
    test_l0_cache()