3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
18 from nmigen
.compat
.sim
import run_simulation
19 from nmigen
.cli
import verilog
, rtlil
20 from nmigen
import Module
, Signal
, Mux
, Elaboratable
, Array
, Cat
21 from nmutil
.iocontrol
import RecordObject
22 from nmigen
.utils
import log2_int
23 from nmigen
.hdl
.rec
import Record
, Layout
25 from nmutil
.latch
import SRLatch
, latchregister
26 from soc
.decoder
.power_decoder2
import Data
27 from soc
.decoder
.power_enums
import InternalOp
29 from soc
.experiment
.compldst
import CompLDSTOpSubset
30 from soc
.decoder
.power_decoder2
import Data
31 #from nmutil.picker import PriorityPicker
32 from nmigen
.lib
.coding
import PriorityEncoder
34 # for testing purposes
35 from soc
.experiment
.testmem
import TestMemory
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        """Create the LD/ST port record.

        :param name: record name, passed through to RecordObject
        :param regwid: width in bits of the LD and ST data paths
        :param addrwid: width in bits of the address
        """
        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST data paths (Data = value + "ok" qualifier)
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
     modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    # placeholder: implementation not yet written
    pass
class DataMergerRecord(Record):
    """A 128-bit cache-line record with byte-enables:
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        # NOTE(review): the second layout element was lost in the
        # mangled source; 16-bit 'en' matches the docstring above.
        layout = (('data', 128),
                  ('en', 16))

        Record.__init__(self, Layout(layout), name=name)
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set,
                       to indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size

        # NxN address-match matrix: one N-bit row per requester
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        # N input records, one merged output record
        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) pick a row: any row whose address-match vector is non-zero
        # is a candidate; the PriorityEncoder selects the lowest index.
        # (original code used non-Python "||" / "&&" operators, iterated
        # range(self.addr) and referenced self.addr_match_i - fixed here
        # to match the attributes declared in __init__.)
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = Signal(reset_less=True)
        comb += valid.eq(~pick.n)  # pick.n: "no input bit asserted"
        idx = pick.o               # index of the picked row

        # (2) merge: OR together every input whose address matches the
        # picked row (addr_array_i[idx][j] set).  the OR expression is
        # built in python so that each output field gets exactly one
        # combinatorial assignment (avoids multiple-driver conflicts).
        with m.If(valid):
            data_expr = 0
            en_expr = 0
            for j in range(self.array_size):
                sel = self.addr_array_i[idx][j]
                data_expr = data_expr | Mux(sel, self.data_i[j].data, 0)
                en_expr = en_expr | Mux(sel, self.data_i[j].en, 0)
            comb += self.data_o.data.eq(data_expr)
            comb += self.data_o.en.eq(en_expr)

        return m
class LDSTPort(Elaboratable):
    """A single LD/ST port.  Wraps a PortInterface and maintains the
    "busy" state for it.  The L0 Cache/Buffer drives the data and
    acknowledge outputs: this module only monitors and latches state.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the public API record, named after the port index
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # busy latch, plus a "one cycle delay" latch for the ST case
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # NOTE(review): the guard on this pair was lost in the mangled
        # source; cyc_l.q (one cycle after st.ok/ld.ok, per the comment
        # above) is the reconstruction - confirm against repo history.
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o
        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 1).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :param n_units: number of LD/ST ports to create
        :param mem: the memory (TestMemory-compatible: rdport/wrport)
        :param regwid: data width per port
        :param addrwid: address width per port
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        # flatten every port's signals for simulation / conversion
        for p in self.dports:
            yield from p.ports()
class TstL0CacheBuffer(Elaboratable):
    """Convenience wrapper for tests: an L0CacheBuffer wired directly
    to a TestMemory.
    """

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # the LD/ST ports, plus the raw memory ports (for VCD visibility)
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
def wait_busy(port, no=False):
    """Step the simulation until port.pi.busy_o equals the level ``no``.

    with no=False this waits until the port is no longer busy; with
    no=True it waits until the port becomes busy.
    """
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield
def wait_addr(port):
    """Step the simulation until the port acknowledges the address
    (addr_ok_o asserted for one cycle, per the PortInterface API).

    NOTE(review): the ``def`` header and loop of this function were lost
    in the mangled source; the break-on-ok polarity is reconstructed from
    the callers' "wait until addr ok" comments - confirm against history.
    """
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if addr_ok:
            break
        yield
def wait_ldok(port):
    """Step the simulation until LD data is valid (ld.ok asserted)."""
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield
def l0_cache_st(dut, addr, data):
    """Perform one ST transaction on port 1 of the L0 cache/buffer,
    following the PortInterface API rules (addr then one-cycle st.ok).
    """
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False) # wait until not busy
def l0_cache_ld(dut, addr, expected):
    """Perform one LD transaction on port 1 of the L0 cache/buffer
    and return the data read back.
    """
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok

    yield from wait_ldok(port1)        # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup: drop the request
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data
def l0_cache_ldst(dut):
    """ST two values at adjacent addresses, LD them back, and check
    the round-trip.
    """
    yield
    # NOTE(review): the test-value definitions were lost in the mangled
    # source; any two distinct values that fit regwid work here.
    data = 0xbeef
    data2 = 0xf00f

    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
def test_l0_cache():
    """Convert the test L0 cache/buffer to ILang and run the
    LD/ST round-trip simulation (writes .il and .vcd files).
    """
    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
if __name__ == '__main__':
    test_l0_cache()