1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
from nmigen.compat.sim import run_simulation
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce

from soc.experiment.compldst import CompLDSTOpSubset
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder

# for testing purposes
from soc.experiment.testmem import TestMemory

class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to. note that this is NOT a "fire-and-forget" interface. the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when the request is 100% guaranteed to complete
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress. further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted. busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted. this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for. these will
      be asserted for one cycle and one cycle *only*.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem. the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time. st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery. no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

# TODO:


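# Illustrative sketch only: how a testbench process might drive a single
# LD through a PortInterface, following the FSM rules in the docstring
# above (the error path via addr_exc_o is ignored for brevity).  The
# helper name pi_example_ld is hypothetical; the real drivers used by
# the tests are l0_cache_ld/l0_cache_st at the bottom of this file.
def pi_example_ld(pi, addr):
    yield pi.is_ld_i.eq(1)            # request a LD: busy_o will assert
    yield pi.addr.data.eq(addr)       # present the address...
    yield pi.addr.ok.eq(1)            # ...and mark it valid (hold until done)
    while not (yield pi.addr_ok_o):   # addr_ok_o pulses for one cycle only
        yield
    while not (yield pi.ld.ok):       # ld.ok also pulses for one cycle only
        yield
    data = yield pi.ld.data           # ld.data must be captured on that cycle
    yield pi.is_ld_i.eq(0)            # done: drop the request and addr.ok
    yield pi.addr.ok.eq(0)
    return data

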
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    pass


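# Hypothetical shape-only sketch of the splitter described above: one
# incoming PortInterface, two outgoing ones.  The actual routing (via
# LDSTSplitter, or by modifying LDSTSplitter to conform to PortInterface)
# is deliberately left out because, as the docstring notes, that decision
# is still open; the class name and port names here are illustrative
# assumptions, not part of the real API.
class DualPortSplitterSketch(Elaboratable):
    def __init__(self, regwid=64, addrwid=48):
        self.inp = PortInterface("dps_i", regwid, addrwid)  # from LDSTCompUnit
        self.out = [PortInterface("dps_o%d" % i, regwid, addrwid)
                    for i in range(2)]                      # to L0CacheBuffer

    def elaborate(self, platform):
        m = Module()
        # TODO: instantiate LDSTSplitter here and wire inp to out[0]/out[1]
        return m

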
class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16)
                  )

        Record.__init__(self, Layout(layout), name=name)

# TODO: unit test

class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict. The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_array_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating address
                       match. bits across the diagonal (addr_array_i[x][x])
                       will always be set, to indicate "active".
        :data_i: an N-element Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) pick a row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o

        # (2) merge
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                l.append(Mux(select, self.data_i[j], 0))
            comb += self.data_o.eq(ortreereduce(l))

        return m

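
# Worked example (sketch only, not wired into the tests at the bottom of
# this file): with rows 0 and 2 address-matching each other, DataMerger
# should OR their data and byte-enable fields onto data_o.  The process
# follows the same compat-sim conventions as data_merger_merge() below
# and could be passed to run_simulation() in its place.  The name
# data_merger_example and the chosen values are illustrative assumptions.
def data_merger_example(dut):
    # rows 0 and 2 are active and match each other (bits 0 and 2 set);
    # all other rows are inactive (all-zero match vectors)
    yield dut.addr_array_i[0].eq(0b0101)
    yield dut.addr_array_i[2].eq(0b0101)
    yield dut.data_i[0].data.eq(0x11)
    yield dut.data_i[0].en.eq(0x1)
    yield dut.data_i[2].data.eq(0x44)
    yield dut.data_i[2].en.eq(0x4)
    yield  # allow the purely-combinatorial merge to settle
    data = yield dut.data_o.data
    en = yield dut.data_o.en
    print("merged data %x en %x" % (data, en))
    # expected: data == 0x11 | 0x44, en == 0x1 | 0x4

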
class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this. However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it. only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)            # acknowledge addr ok
                sync += adrok_l.s.eq(1)                   # and pull "ack" latch

        # if now in "ST" mode: likewise send the address out
        # to memory, acknowledge address, and accept the ST data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)        # acknowledge addr ok
                    sync += adrok_l.s.eq(1)               # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port. address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)  # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port. address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok

    yield from wait_ldok(port1)        # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)  # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)


def data_merger_merge(dut):
    print("TODO")
    yield


def test_l0_cache():

    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


def test_data_merger():

    dut = DataMerger(8)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_data_merger.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')


if __name__ == '__main__':
    test_l0_cache()
    #test_data_merger()