1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
from nmigen.compat.sim import run_simulation
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce

from soc.experiment.compldst import CompLDSTOpSubset
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder

# for testing purposes
from soc.experiment.testmem import TestMemory


class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when successful completion is guaranteed
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request can be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hmm, insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

# TODO:

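# an illustrative walk-through of the LD handshake, derived from the
# docstring rules above (cycle-exact latency is deliberately not part of
# the API and depends on the underlying memory):
#
#   1. requester asserts is_ld_i: busy_o goes high and stays high
#   2. requester asserts addr.ok with addr.data valid, and holds both
#   3. L0 asserts addr_ok_o (or addr_exc_o) for exactly one cycle
#   4. at some later point L0 asserts ld.ok for one cycle with ld.data
#      valid; the requester must latch ld.data on that cycle
#   5. busy_o drops on the same cycle as ld.ok; the requester may then
#      deassert is_ld_i and addr.ok
#
# the l0_cache_ld and l0_cache_st test processes at the bottom of this
# file step through these sequences in simulation.

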
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    pass

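# a possible skeleton for DualPortSplitter, sketched purely from the notes
# in its docstring above (names and parameters here are assumptions, not
# part of the design yet):
#
#   def __init__(self, regwid=64, addrwid=48):
#       self.inp = PortInterface("dps_i", regwid, addrwid)
#       self.out = [PortInterface("dps_o%d" % i, regwid, addrwid)
#                   for i in range(2)]
#
# elaborate() would then instantiate LDSTSplitter (once it conforms to
# PortInterface) to route self.inp to the two outgoing ports.

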
class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16))

        Record.__init__(self, Layout(layout), name=name)


# TODO: unit test

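# a minimal sketch of the unit test noted above (an assumption about what
# such a test would check): the record must carry a 128-bit data field and
# a 16-bit byte-enable ("en") field.
def test_data_merger_record():
    rec = DataMergerRecord()
    assert len(rec.data) == 128
    assert len(rec.en) == 16

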
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data
    has already been determined not to conflict.  The last step before
    actually submitting the request to the Memory Subsystem is to work out
    which requests, on the same 128-bit cache line, can be "merged" due to
    them being: (A) at the same address (bits 4 and above) (B) having
    byte-enable lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_array_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of the same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) pick a row: any requester with at least one address-match bit
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o

        # (2) merge: OR together every row that matches the picked row
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                l.append(Mux(select, self.data_i[j], 0))
            comb += self.data_o.eq(ortreereduce(l))

        return m


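# a pure-Python reference model of the merge performed by DataMerger above
# (an illustrative sketch only; it is not used by the hardware).  given the
# NxN address-match matrix as a list of integer bitmasks, plus per-row data
# and byte-enable values, it picks the first active row and ORs together
# every row whose bit is set in that row's match vector.
def data_merger_model(addr_match, data_rows, en_rows):
    for idx, row in enumerate(addr_match):
        if row:  # (1) pick: first row with any match bit set
            merged_data, merged_en = 0, 0
            for j in range(len(addr_match)):
                if (row >> j) & 1:  # (2) row j shares the picked address
                    merged_data |= data_rows[j]
                    merged_en |= en_rows[j]
            return merged_data, merged_en
    return 0, 0  # no active rows: output stays at zero

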
class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this.  However it is only
    a "demo" / "test" class, and it has one important property: it
    responds combinatorially, whereas an nmigen FSM's state changes only
    activate on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request; capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) is de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge the address, and send out the LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise send the address to the memory
        # write port and acknowledge it
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both cases below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False) # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok

    yield from wait_ldok(port1)        # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)


def data_merger_merge(dut):
    print("TODO")
    yield

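# until the simulation above is filled in, a minimal sketch exercising the
# pure-Python data_merger_model() reference defined earlier (this checks
# the assumed merge behaviour, not the hardware): rows 0 and 2 share an
# address, row 1 does not, so the picked row (0) merges with row 2 only.
def test_data_merger_model():
    addr_match = [0b101, 0b010, 0b101]  # NxN match matrix, N=3
    data_rows = [0x000000ff, 0x12340000, 0x00ff0000]
    en_rows = [0x1, 0x8, 0x4]
    data, en = data_merger_model(addr_match, data_rows, en_rows)
    assert data == 0x00ff00ff, hex(data)
    assert en == 0x5, hex(en)

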
def test_l0_cache():

    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


def test_data_merger():

    dut = DataMerger(8)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_data_merger.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')


if __name__ == '__main__':
    test_l0_cache()
    #test_data_merger()