3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
18 from nmigen
.compat
.sim
import run_simulation
, Settle
19 from nmigen
.cli
import verilog
, rtlil
20 from nmigen
import Module
, Signal
, Mux
, Elaboratable
, Array
, Cat
21 from nmutil
.iocontrol
import RecordObject
22 from nmigen
.utils
import log2_int
23 from nmigen
.hdl
.rec
import Record
, Layout
25 from nmutil
.latch
import SRLatch
, latchregister
26 from soc
.decoder
.power_decoder2
import Data
27 from soc
.decoder
.power_enums
import InternalOp
28 from soc
.regfile
.regfile
import ortreereduce
29 from nmutil
.util
import treereduce
31 from soc
.experiment
.compldst
import CompLDSTOpSubset
32 from soc
.decoder
.power_decoder2
import Data
33 #from nmutil.picker import PriorityPicker
34 from nmigen
.lib
.coding
import PriorityEncoder
36 # for testing purposes
37 from soc
.experiment
.testmem
import TestMemory
class PortInterface(RecordObject):
    """PortInterface: the API by which an LDSTCompUnit talks to memory.

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        # keep the address width around for owners of this interface
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common request/completion signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST data paths
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

    # TODO: elaborate function
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """

    def __init__(self):
        # NOTE(review): the initialisation of self.outp prior to the
        # index-assignments was not visible in this chunk - confirm
        # against the original file.
        self.outp = [None, None]
        self.outp[0] = PortInterface(name="outp_0")
        self.outp[1] = PortInterface(name="outp_1")
        self.inp = PortInterface(name="inp")

    def elaborate(self, platform):
        m = Module()
        # NOTE(review): LDSTSplitter is not imported in the visible
        # import block, and the wiring of inp/outp to the splitter is
        # not visible in this chunk - confirm against the original.
        splitter = LDSTSplitter(64, 48, 4)
        return m
class DataMergerRecord(Record):
    """A record of {data: 128 bit, byte_enable: 16 bit}.

    Used both as the per-row input and as the merged output of DataMerger.
    """

    def __init__(self, name=None):
        # 128-bit data lane plus a 16-bit byte-enable ("en") mask;
        # the "en" field is grounded by data_o.en usage elsewhere in
        # this file
        layout = (('data', 128),
                  ('en', 16))
        Record.__init__(self, Layout(layout), name=name)
        # FIXME: make resetless
165 # TODO: formal verification
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size

        # one address-match bit-vector per row
        matches = []
        for i in range(array_size):
            matches.append(Signal(array_size,
                                  reset_less=True,
                                  name="addr_match_%d" % i))
        self.addr_array_i = Array(matches)

        # one {data, en} record per row, plus the merged output
        rows = []
        for i in range(array_size):
            rows.append(DataMergerRecord())
        self.data_i = Array(rows)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) pick a row: any row with a match bit set will do
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o

        # (2) merge: OR together every row whose address matches the
        # picked row, via a balanced reduction tree
        with m.If(valid):
            rows = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                r = DataMergerRecord()
                with m.If(select):
                    comb += r.eq(self.data_i[j])
                rows.append(r)
            comb += self.data_o.data.eq(ortreereduce(rows, "data"))
            comb += self.data_o.en.eq(ortreereduce(rows, "en"))

        return m
class LDSTPort(Elaboratable):
    """Wraps one PortInterface and maintains its busy_o state.

    This is a little weird: the L0Cache/Buffer is allowed to set the
    PortInterface outputs directly; this module just monitors "state"
    and drives busy_o from an SR latch.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # busy latch, plus a one-cycle-delay latch for ST completion
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # NOTE(review): the guard around the latch-clear below was not
        # visible in this chunk; cyc_l.q (one cycle after st.ok/ld.ok)
        # is assumed - confirm against the original file.
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        # NOTE(review): busy_o in this list was not visible in the
        # chunk - confirm against the original file.
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o
        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer (demo/test version).

    Multiplexes several LDSTPort request interfaces onto a single
    memory read port and a single memory write port, one LD (or ST)
    per cycle, chosen by priority-pickers.

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :n_units: number of LDSTPort request interfaces to create
        :mem:     memory instance providing rdport/wrport
        :regwid:  data width of each port
        :addrwid: address width of each port
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class
        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        # NOTE: two unused Signals ("lds"/"sts") previously declared here
        # were dead locals and have been removed.

        # accumulate per-port ld-req / st-req signals
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.
        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        # aggregate every sub-port's signals
        for p in self.dports:
            yield from p.ports()
class TstL0CacheBuffer(Elaboratable):
    """Test harness: a TestMemory wired into an L0CacheBuffer."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # expose the L0 port signals plus the raw memory port signals
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
def wait_busy(port, no=False):
    """Step the simulation until port.pi.busy_o equals `no`.

    With no=False this waits until the port is no longer busy.
    """
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if no == busy:
            break
        yield
def wait_addr(port):
    """Step the simulation until the port's addr_ok_o handshake completes.

    NOTE(review): the loop-exit condition was not visible in this chunk;
    breaking when addr_ok_o deasserts matches the original file's
    behaviour - confirm against the original source.
    """
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield
def wait_ldok(port):
    """Step the simulation until the port's ld.ok is asserted."""
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield
def l0_cache_st(dut, addr, data):
    """Drive a single ST transaction through port 1 of the L0 cache."""
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST
    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)   # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)    # wait until not busy
def l0_cache_ld(dut, addr, expected):
    """Drive a single LD through port 1; returns the data read.

    NOTE(review): `expected` is accepted but not checked here - the
    caller performs the comparison against the returned value.
    """
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD
    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)   # wait until addr ok

    yield from wait_ldok(port1)   # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)  # wait until not busy

    return data
def l0_cache_ldst(dut):
    """Store two values then load them back and check the round-trip."""
    yield
    # NOTE(review): the constant test values were not visible in this
    # chunk - 0xbeef/0xf00f assumed; confirm against the original file.
    data = 0xbeef
    data2 = 0xf00f
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
def data_merger_merge(dut):
    """Exercise DataMerger: all-zero inputs, then a full merge on row 0."""
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    # row 0 matches every row; give each row a distinct en bit and a
    # 0xFF byte in its own 16-bit lane, so the merged output is the OR
    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
def test_l0_cache():
    """Run the LD/ST round-trip simulation against TstL0CacheBuffer."""
    dut = TstL0CacheBuffer(regwid=64)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_basic_l0_cache.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
def test_data_merger():
    """Run the merge check against a DataMerger instance."""
    # NOTE(review): the DUT construction was not visible in this chunk;
    # an 8-entry DataMerger matches the 0xFF masks used by the check -
    # confirm against the original file.
    dut = DataMerger(8)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_data_merger.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')
if __name__ == '__main__':
    # NOTE(review): the guard body was not visible in this chunk;
    # invoking both visible test entry points is assumed.
    test_l0_cache()
    test_data_merger()