# fixes for l0_cache.py
# [soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28
29 from soc.experiment.compldst import CompLDSTOpSubset
30 from soc.decoder.power_decoder2 import Data
31 #from nmutil.picker import PriorityPicker
32 from nmigen.lib.coding import PriorityEncoder
33
34 # for testing purposes
35 from soc.experiment.testmem import TestMemory
36
37
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        """Create the PortInterface record.

        :param name:    record name (passed through to RecordObject)
        :param regwid:  data width in bits of the ld/st data fields
        :param addrwid: width in bits of the address field

        NOTE: attribute creation order below defines the record layout -
        do not reorder.
        """

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST data paths (Data = value + "ok" qualifier)
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

# TODO:
117
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    # placeholder: not yet implemented - see docstring for the plan
    pass
132
133
class DataMergerRecord(Record):
    """A 128-bit data word paired with its 16 per-byte enable lines.

    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        fields = [
            ("data", 128),
            ("en", 16),
        ]
        Record.__init__(self, Layout(fields), name=name)

    # TODO:
147
148
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_array_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :param array_size: number of ports to merge (assumed >= 1)

        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) pick a row: any port whose address-match row has at least
        # one bit set is a candidate; PriorityEncoder selects the lowest.
        # bugfix: was referencing non-existent "addr_match_i", used the
        # invalid operator "||", and never added the statements to comb.
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n  # encoder asserts "n" when no input bit is set
        idx = pick.o

        # (2) merge: OR together data/en of every row sharing the picked
        # row's address (addr_array_i[idx][j] set), gated by "valid" so
        # the output is zero when nothing was picked.
        data_l = []
        en_l = []
        for j in range(self.array_size):
            select = self.addr_array_i[idx][j] & valid
            data_l.append(Mux(select, self.data_i[j].data, 0))
            en_l.append(Mux(select, self.data_i[j].en, 0))

        # fold the per-row contributions into a single OR expression
        # (replaces the undefined "ortreereduce" helper)
        data_or = data_l[0]
        en_or = en_l[0]
        for d, e in zip(data_l[1:], en_l[1:]):
            data_or = data_or | d
            en_or = en_or | e
        comb += self.data_o.data.eq(data_or)
        comb += self.data_o.en.eq(en_or)

        # bugfix: original elaborate never returned the Module
        return m
211
212
class LDSTPort(Elaboratable):
    """LDSTPort: wraps one PortInterface and tracks its busy state.

    This module does not drive the LD/ST data paths itself: the
    L0Cache/Buffer sets those outputs.  LDSTPort only monitors the
    handshake signals and maintains busy_o via a pair of SRLatches.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the externally-visible API: see PortInterface docstring
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches: busy_l tracks request-in-progress; cyc_l (sync-mode
        # SRLatch) provides the one-cycle delay needed before dropping busy
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)  # defaults, overridden conditionally below
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # cyc_l.q goes high the cycle after st.ok/ld.ok: clear both latches
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # enumerate every signal of the PortInterface, for ports()
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)
266
267
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :param n_units: number of LDSTPorts to create
        :param mem:     memory with rdport/wrport (e.g. TestMemory)
        :param regwid:  data width in bits
        :param addrwid: address width in bits
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)  # default: no new address-ack this cycle
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out ST data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)  # indicate data valid
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)  # enable write
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)  # clear reset
            comb += adrok_l.r.eq(1)  # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()
405
406
class TstL0CacheBuffer(Elaboratable):
    """Test harness: an L0CacheBuffer wired straight to a TestMemory."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # all L0 port signals, then the raw memory-port signals
        yield from self.l0.ports()
        for mport in (self.mem.rdport, self.mem.wrport):
            yield mport.addr
            yield mport.data
        # TODO: mem ports
426
427
def wait_busy(port, no=False):
    """Step the simulation until port.pi.busy_o equals ``no``.

    With no=False (the default) this waits until the port is no longer
    busy; with no=True it waits until the port *becomes* busy.
    """
    busy = yield port.pi.busy_o
    print("busy", no, busy)
    while bool(busy) != no:
        yield
        busy = yield port.pi.busy_o
        print("busy", no, busy)
435
436
def wait_addr(port):
    """Step the simulation until port.pi.addr_ok_o is de-asserted.

    NOTE(review): this returns when addr_ok_o goes *low*, not high.
    addr_ok_o is documented as a single-cycle pulse, so callers rely on
    catching the cycle after the pulse - confirm against the callers
    before "fixing" the polarity.
    """
    addr_ok = yield port.pi.addr_ok_o
    print("addrok", addr_ok)
    while addr_ok:
        yield
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
444
445
def wait_ldok(port):
    """Step the simulation until port.pi.ld.ok is asserted (LD complete)."""
    ldok = yield port.pi.ld.ok
    print("ldok", ldok)
    while not ldok:
        yield
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
453
454
def l0_cache_st(dut, addr, data):
    """Simulation process: perform one ST of ``data`` at ``addr`` on port 1.

    Follows the PortInterface protocol: wait until the port is not busy,
    assert is_st_i, present the address, wait for the address to be
    acknowledged, then pulse st.ok with the data for exactly one cycle.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)    # wait until not busy
482
483
def l0_cache_ld(dut, addr, expected):
    """Simulation process: perform one LD from ``addr`` on port 1.

    Returns the loaded value.  NOTE: ``expected`` is not checked here -
    the caller compares it against the returned value.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data
510
511
def l0_cache_ldst(dut):
    """Simulation process: store two values, load them back, check both.

    Fix: the original defined ``addr`` but then hard-coded 0x2/0x3 in the
    calls; the addresses are now named once and used consistently.
    """
    yield
    addr = 0x2
    addr2 = 0x3
    data = 0xbeef
    data2 = 0xf00f
    yield from l0_cache_st(dut, addr, data)
    yield from l0_cache_st(dut, addr2, data2)
    result = yield from l0_cache_ld(dut, addr, data)
    result2 = yield from l0_cache_ld(dut, addr2, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
525
526
def test_l0_cache():
    """Emit RTLIL for the test L0 cache, then run the LD/ST simulation."""
    dut = TstL0CacheBuffer()

    rtl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as sink:
        sink.write(rtl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
536
537
# run the standalone LD/ST test when executed directly
if __name__ == '__main__':
    test_l0_cache()