add in LenExpander to L0CacheBuffer, not used yet
[soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation, Settle
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28 from soc.regfile.regfile import ortreereduce
29 from nmutil.util import treereduce
30
31 from soc.experiment.compldst import CompLDSTOpSubset
32 from soc.decoder.power_decoder2 import Data
33 #from nmutil.picker import PriorityPicker
34 from nmigen.lib.coding import PriorityEncoder
35 from soc.scoreboard.addr_split import LDSTSplitter
36 from soc.scoreboard.addr_match import LenExpand
37
38 # for testing purposes
39 from soc.experiment.testmem import TestMemory
40
41
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        """
        :param name:    record name, passed through to RecordObject
        :param regwid:  width in bits of the LD/ST data paths
        :param addrwid: width in bits of the address
        """
        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset() # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True) # do not use if busy
        self.go_die_i = Signal(reset_less=True) # back to reset
        self.addr = Data(addrwid, "addr_i") # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True) # TODO, "type" of exception

        # LD/ST data paths
        self.ld = Data(regwid, "ld_data_o") # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i") # ok to be set by CompUnit

    # TODO: elaborate function
120
121
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """

    def __init__(self):
        # two outgoing ports plus the single incoming one.
        # (fix: removed a stray debug print of self.outp that ran as a
        # constructor side-effect)
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        self.inp = PortInterface(name="inp")

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        m.submodules.splitter = splitter = LDSTSplitter(64, 48, 4)
        comb += splitter.addr_i.eq(self.inp.addr) #XXX
        # TODO: remaining splitter connections still to be wired up:
        #comb += splitter.len_i.eq()
        #comb += splitter.valid_i.eq()
        comb += splitter.is_ld_i.eq(self.inp.is_ld_i)
        comb += splitter.is_st_i.eq(self.inp.is_st_i)
        #comb += splitter.st_data_i.eq()
        #comb += splitter.sld_valid_i.eq()
        #comb += splitter.sld_data_i.eq()
        #comb += splitter.sst_valid_i.eq()
        return m
163
class DataMergerRecord(Record):
    """A 128-bit data value paired with a 16-bit byte-enable mask:
    {data: 128 bit, en: 16 bit}
    """

    def __init__(self, name=None):
        super().__init__(Layout((('data', 128),
                                 ('en', 16))), name=name)
        # both fields are driven combinatorially: no reset value wanted
        for field in (self.data, self.en):
            field.reset_less = True

    # TODO: formal verification
178
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size

        # one row of the address-match matrix per requester
        matrix = [Signal(array_size,
                         reset_less=True,
                         name="addr_match_%d" % i)
                  for i in range(array_size)]
        self.addr_array_i = Array(matrix)

        # N incoming data/enable rows, one merged output row
        self.data_i = Array([DataMergerRecord() for _ in range(array_size)])
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        asz = self.array_size

        # (1) pick a row: any requester whose match-row is non-zero
        m.submodules.pick = pick = PriorityEncoder(asz)
        for j in range(asz):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o

        # (2) merge: OR together every row whose address matches row idx.
        # unselected rows stay at their (zero) default, so the OR-reduce
        # only picks up the matching ones.
        with m.If(valid):
            rows = []
            for j in range(asz):
                row = DataMergerRecord()
                with m.If(self.addr_array_i[idx][j]):
                    comb += row.eq(self.data_i[j])
                rows.append(row)
            comb += self.data_o.data.eq(ortreereduce(rows, "data"))
            comb += self.data_o.en.eq(ortreereduce(rows, "en"))

        return m
245
246
class LDSTPort(Elaboratable):
    """Wraps one PortInterface and maintains its busy_o latch.

    Deliberately passive: the L0 Cache/Buffer drives the port's outputs;
    this module only monitors request/completion "state" so that busy_o
    is asserted and released at the protocol-correct moments.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the API record that an LDSTCompUnit connects to
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches: busy_l tracks an in-flight request; cyc_l provides the
        # one-cycle delay before busy may drop after st.ok/ld.ok
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        # defaults; may be overridden below (nmigen: last assignment wins)
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # one cycle later: clear the cycle latch and drop busy
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # enumerate every signal of the interface (used by ports())
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        # flat list of all interface signals (for simulation/conversion)
        return list(self)
300
301
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :param n_units: number of LDSTPort interfaces to create
        :param mem:     memory (TestMemory-compatible: rdport/wrport)
        :param regwid:  data width in bits
        :param addrwid: address width in bits
        """
        self.n_units = n_units
        self.mem = mem
        self.regwid = regwid
        self.addrwid = addrwid
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    @property
    def addrbits(self):
        # number of address bits covered by one memory word
        return log2_int(self.mem.regwid//8)

    def splitaddr(self, addr):
        """split the address into top and bottom bits of the memory granularity
        """
        return addr[:self.addrbits], addr[self.addrbits:]

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)
        # LenExpand instantiated but its output is not yet consumed
        # (see note at top of file: "not used yet")
        m.submodules.lenexp = lenexp = LenExpand(self.regwid//8, 8)

        # NOTE(review): lds/sts appear unused below - candidates for removal
        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o) # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o) # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        # defaults (overridden below; nmigen: last assignment wins)
        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1) # activate LD mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1) # activate ST mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                # split into within-word and word-select address bits
                lsbaddr, msbaddr = self.splitaddr(ldport.addr.data)
                comb += rdport.addr.eq(msbaddr) # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1) # acknowledge addr ok
                sync += adrok_l.s.eq(1)       # and pull "ack" latch
                # set up LenExpander with the LD len and lower bits of addr
                comb += lenexp.len_i.eq(ldport.op.data_len)
                comb += lenexp.addr_i.eq(lsbaddr)

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                lsbaddr, msbaddr = self.splitaddr(stport.addr.data)
                comb += wrport.addr.eq(msbaddr) # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1) # acknowledge addr ok
                    sync += adrok_l.s.eq(1)       # and pull "ack" latch
                # set up LenExpander with the ST len and lower bits of addr
                comb += lenexp.len_i.eq(stport.op.data_len)
                comb += lenexp.addr_i.eq(lsbaddr)

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data) # put data out
            comb += ldport.ld.ok.eq(1)             # indicate data valid
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data) # write st to mem
            comb += wrport.en.eq(1)                # enable write
            comb += reset_l.s.eq(1)   # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)   # deactivate LD mode
            comb += st_active.r.eq(1)   # deactivate ST mode
            comb += reset_l.r.eq(1)     # clear reset
            comb += adrok_l.r.eq(1) # address reset

        return m

    def ports(self):
        # flatten out all the per-unit port signals
        for p in self.dports:
            yield from p.ports()
459
460
class TstL0CacheBuffer(Elaboratable):
    """Test harness: a TestMemory wired to an L0CacheBuffer."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        yield from self.l0.ports()
        # expose the raw memory read/write port signals as well
        for mport in (self.mem.rdport, self.mem.wrport):
            yield mport.addr
            yield mport.data
        # TODO: mem ports
480
481
def wait_busy(port, no=False):
    """Sim process: spin until bool(port.pi.busy_o) equals `no`.

    no=False (the default) therefore waits until the port is NOT busy;
    no=True waits until the port reports busy.
    """
    while True:
        state = yield port.pi.busy_o
        print("busy", no, state)
        if bool(state) == no:
            return
        yield
489
490
def wait_addr(port):
    """Sim process: spin while port.pi.addr_ok_o reads as asserted.

    NOTE(review): the exit condition fires when addr_ok_o reads as ZERO,
    which looks inverted relative to the helper's name and to the call
    sites' "wait until addr ok" comments - confirm intent.
    """
    while True:
        ok = yield port.pi.addr_ok_o
        print("addrok", ok)
        if not ok:
            return
        yield
498
499
def wait_ldok(port):
    """Sim process: spin until port.pi.ld.ok is asserted (LD data valid)."""
    while True:
        ok = yield port.pi.ld.ok
        print("ldok", ok)
        if ok:
            return
        yield
507
508
def l0_cache_st(dut, addr, data, datalen):
    """Sim process: one ST through port 1 of the L0 buffer.

    Follows the PortInterface protocol: wait for not-busy, raise is_st_i
    with the length, present the address, wait for the address phase,
    then pulse st.ok alongside the data for exactly one cycle.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST
    yield port1.pi.op.data_len.eq(datalen)  # ST length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)    # wait until not busy
537
538
def l0_cache_ld(dut, addr, datalen, expected):
    """Sim process: one LD through port 1 of the L0 buffer.

    Waits for not-busy, raises is_ld_i with the length, presents the
    address, waits through the address phase and for ld.ok, then samples
    and returns ld.data.  `expected` is unused here - the caller asserts
    against the returned value.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD
    yield port1.pi.op.data_len.eq(datalen)  # LD length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)    # wait until not busy

    return data
566
567
def l0_cache_ldst(dut):
    """Sim process: two STs to adjacent addresses, then two LDs reading
    them back, asserting that the round-tripped data matches.

    (fix: the local `addr` was assigned but never used - the addresses
    are now named once and passed through, instead of repeating the
    magic constants 0x2/0x3 at each call site.)
    """
    yield
    addr = 0x2
    addr2 = 0x3
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, addr, data, 2)
    yield from l0_cache_st(dut, addr2, data2, 2)
    result = yield from l0_cache_ld(dut, addr, 2, data)
    result2 = yield from l0_cache_ld(dut, addr2, 2, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
581
def data_merger_merge(dut):
    """Sim process for DataMerger: with all inputs at zero the output
    must be zero; with row 0 matching every requester, the output must
    be the OR of all data/en rows.
    """
    print("data_merger")
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    # row 0 matches all 8 requesters; give each row a distinct en bit
    # and a distinct 0xFF byte-lane so the merged result is recognisable
    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield
603
def test_l0_cache():
    """Run the LD/ST round-trip simulation on TstL0CacheBuffer."""
    dut = TstL0CacheBuffer(regwid=64)
    # to emit ilang for inspection:
    #   vl = rtlil.convert(dut, ports=dut.ports())
    #   with open("test_basic_l0_cache.il", "w") as f:
    #       f.write(vl)
    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
613
def test_data_merger():
    """Run the merge-check simulation on an 8-way DataMerger."""
    dut = DataMerger(8)
    # to emit ilang for inspection:
    #   vl = rtlil.convert(dut, ports=dut.ports())
    #   with open("test_data_merger.il", "w") as f:
    #       f.write(vl)
    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')
623
def test_dual_port_splitter():
    """Instantiate DualPortSplitter (simulation still to be written)."""
    dut = DualPortSplitter()
    # to emit ilang for inspection:
    #   vl = rtlil.convert(dut, ports=dut.ports())
    #   with open("test_data_merger.il", "w") as f:
    #       f.write(vl)

    #run_simulation(dut, data_merger_merge(dut),
    #               vcd_name='test_dual_port_splitter.vcd')
633
if __name__ == '__main__':
    test_l0_cache()
    test_data_merger()
    #test_dual_port_splitter()  # TODO: enable once DualPortSplitter is wired