fixes for DualPortSplitter
[soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation, Settle
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28 from soc.regfile.regfile import ortreereduce
29 from nmutil.util import treereduce
30
31 from soc.experiment.compldst import CompLDSTOpSubset
32 from soc.decoder.power_decoder2 import Data
33 #from nmutil.picker import PriorityPicker
34 from nmigen.lib.coding import PriorityEncoder
35
36 # for testing purposes
37 from soc.experiment.testmem import TestMemory
38
39
40 class PortInterface(RecordObject):
41 """PortInterface
42
43 defines the interface - the API - that the LDSTCompUnit connects
44 to. note that this is NOT a "fire-and-forget" interface. the
45 LDSTCompUnit *must* be kept appraised that the request is in
46 progress, and only when it has a 100% successful completion rate
47 can the notification be given (busy dropped).
48
49 The interface FSM rules are as follows:
50
51 * if busy_o is asserted, a LD/ST is in progress. further
52 requests may not be made until busy_o is deasserted.
53
54 * only one of is_ld_i or is_st_i may be asserted. busy_o
55 will immediately be asserted and remain asserted.
56
57 * addr.ok is to be asserted when the LD/ST address is known.
58 addr.data is to be valid on the same cycle.
59
60 addr.ok and addr.data must REMAIN asserted until busy_o
61 is de-asserted. this ensures that there is no need
62 for the L0 Cache/Buffer to have an additional address latch
63 (because the LDSTCompUnit already has it)
64
65 * addr_ok_o (or addr_exc_o) must be waited for. these will
66 be asserted *only* for one cycle and one cycle only.
67
68 * addr_exc_o will be asserted if there is no chance that the
69 memory request may be fulfilled.
70
71 busy_o is deasserted on the same cycle as addr_exc_o is asserted.
72
73 * conversely: addr_ok_o must *ONLY* be asserted if there is a
74 HUNDRED PERCENT guarantee that the memory request will be
75 fulfilled.
76
77 * for a LD, ld.ok will be asserted - for only one clock cycle -
78 at any point in the future that is acceptable to the underlying
79 Memory subsystem. the recipient MUST latch ld.data on that cycle.
80
81 busy_o is deasserted on the same cycle as ld.ok is asserted.
82
83 * for a ST, st.ok may be asserted only after addr_ok_o had been
84 asserted, alongside valid st.data at the same time. st.ok
85 must only be asserted for one cycle.
86
87 the underlying Memory is REQUIRED to pick up that data and
88 guarantee its delivery. no back-acknowledgement is required.
89
90 busy_o is deasserted on the cycle AFTER st.ok is asserted.
91 """
92
93 def __init__(self, name=None, regwid=64, addrwid=48):
94
95 self._regwid = regwid
96 self._addrwid = addrwid
97
98 RecordObject.__init__(self, name=name)
99
100 # distinguish op type (ld/st)
101 self.is_ld_i = Signal(reset_less=True)
102 self.is_st_i = Signal(reset_less=True)
103 self.op = CompLDSTOpSubset() # hm insn_type ld/st duplicates here
104
105 # common signals
106 self.busy_o = Signal(reset_less=True) # do not use if busy
107 self.go_die_i = Signal(reset_less=True) # back to reset
108 self.addr = Data(addrwid, "addr_i") # addr/addr-ok
109 # addr is valid (TLB, L1 etc.)
110 self.addr_ok_o = Signal(reset_less=True)
111 self.addr_exc_o = Signal(reset_less=True) # TODO, "type" of exception
112
113 # LD/ST
114 self.ld = Data(regwid, "ld_data_o") # ok to be set by L0 Cache/Buf
115 self.st = Data(regwid, "st_data_i") # ok to be set by CompUnit
116
117 # TODO: elaborate function
118
119
120 class DualPortSplitter(Elaboratable):
121 """DualPortSplitter
122
123 * one incoming PortInterface
124 * two *OUTGOING* PortInterfaces
125 * uses LDSTSplitter to do it
126
127 (actually, thinking about it LDSTSplitter could simply be
128 modified to conform to PortInterface: one in, two out)
129
130 once that is done each pair of ports may be wired directly
131 to the dual ports of L0CacheBuffer
132
133 The split is carried out so that, regardless of alignment or
134 mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
135 of the address, whilst outgoing PortInterface[1] takes
136 bit 4 == 1.
137
138 PortInterface *may* need to be changed so that the length is
139 a binary number (accepting values 1-16).
140 """
141 def __init__(self):
142 self.outp = [PortInterface(name="outp_0"),
143 PortInterface(name="outp_1")]
144 self.inp = PortInterface(name="inp")
145 print(self.outp)
146
147 def elaborate(self, platform):
148 m = Module()
149 comb = m.d.comb
150 #TODO splitter = LDSTSplitter(64, 48, 4)
151 return m
152
153 class DataMergerRecord(Record):
154 """
155 {data: 128 bit, byte_enable: 16 bit}
156 """
157
158 def __init__(self, name=None):
159 layout = (('data', 128),
160 ('en', 16)
161 )
162
163 Record.__init__(self, Layout(layout), name=name)
164
165 #FIXME: make resetless
166
167 # TODO: formal verification
168
169 class DataMerger(Elaboratable):
170 """DataMerger
171
172 Merges data based on an address-match matrix.
173 Identifies (picks) one (any) row, then uses that row,
174 based on matching address bits, to merge (OR) all data
175 rows into the output.
176
177 Basically, by the time DataMerger is used, all of its incoming data is
178 determined not to conflict. The last step before actually submitting
179 the request to the Memory Subsystem is to work out which requests,
180 on the same 128-bit cache line, can be "merged" due to them being:
181 (A) on the same address (bits 4 and above) (B) having byte-enable
182 lines that (as previously mentioned) do not conflict.
183
184 Therefore, put simply, this module will:
185 (1) pick a row (any row) and identify it by an index labelled "idx"
186 (2) merge all byte-enable lines which are on that same address, as
187 indicated by addr_match_i[idx], onto the output
188 """
189
190 def __init__(self, array_size):
191 """
192 :addr_array_i: an NxN Array of Signals with bits set indicating address
193 match. bits across the diagonal (addr_array_i[x][x])
194 will always be set, to indicate "active".
195 :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
196 :data_o: an Output Record of same type
197 {data: 128 bit, byte_enable: 16 bit}
198 """
199 self.array_size = array_size
200 ul = []
201 for i in range(array_size):
202 ul.append(Signal(array_size,
203 reset_less=True,
204 name="addr_match_%d" % i))
205 self.addr_array_i = Array(ul)
206
207 ul = []
208 for i in range(array_size):
209 ul.append(DataMergerRecord())
210 self.data_i = Array(ul)
211 self.data_o = DataMergerRecord()
212
213 def elaborate(self, platform):
214 m = Module()
215 comb = m.d.comb
216 #(1) pick a row
217 m.submodules.pick = pick = PriorityEncoder(self.array_size)
218 for j in range(self.array_size):
219 comb += pick.i[j].eq(self.addr_array_i[j].bool())
220 valid = ~pick.n
221 idx = pick.o
222 #(2) merge
223 with m.If(valid):
224 l = []
225 for j in range(self.array_size):
226 select = self.addr_array_i[idx][j]
227 r = DataMergerRecord()
228 with m.If(select):
229 comb += r.eq(self.data_i[j])
230 l.append(r)
231 comb += self.data_o.data.eq(ortreereduce(l,"data"))
232 comb += self.data_o.en.eq(ortreereduce(l,"en"))
233
234 return m
235
236
237 class LDSTPort(Elaboratable):
238 def __init__(self, idx, regwid=64, addrwid=48):
239 self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)
240
241 def elaborate(self, platform):
242 m = Module()
243 comb, sync = m.d.comb, m.d.sync
244
245 # latches
246 m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
247 m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
248 comb += cyc_l.s.eq(0)
249 comb += cyc_l.r.eq(0)
250
251 # this is a little weird: we let the L0Cache/Buffer set
252 # the outputs: this module just monitors "state".
253
254 # LD/ST requested activates "busy"
255 with m.If(self.pi.is_ld_i | self.pi.is_st_i):
256 comb += busy_l.s.eq(1)
257
258 # monitor for an exception or the completion of LD.
259 with m.If(self.pi.addr_exc_o):
260 comb += busy_l.r.eq(1)
261
262 # however ST needs one cycle before busy is reset
263 with m.If(self.pi.st.ok | self.pi.ld.ok):
264 comb += cyc_l.s.eq(1)
265
266 with m.If(cyc_l.q):
267 comb += cyc_l.r.eq(1)
268 comb += busy_l.r.eq(1)
269
270 # busy latch outputs to interface
271 comb += self.pi.busy_o.eq(busy_l.q)
272
273 return m
274
275 def __iter__(self):
276 yield self.pi.is_ld_i
277 yield self.pi.is_st_i
278 yield from self.pi.op.ports()
279 yield self.pi.busy_o
280 yield self.pi.go_die_i
281 yield from self.pi.addr.ports()
282 yield self.pi.addr_ok_o
283 yield self.pi.addr_exc_o
284
285 yield from self.pi.ld.ports()
286 yield from self.pi.st.ports()
287
288 def ports(self):
289 return list(self)
290
291
292 class L0CacheBuffer(Elaboratable):
293 """L0 Cache / Buffer
294
295 Note that the final version will have *two* interfaces per LDSTCompUnit,
296 to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
297 interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 1).
298
299 This version is to be used for test purposes (and actively maintained
300 for such, rather than "replaced")
301
302 There are much better ways to implement this. However it's only
303 a "demo" / "test" class, and one important aspect: it responds
304 combinatorially, where a nmigen FSM's state-changes only activate
305 on clock-sync boundaries.
306 """
307
308 def __init__(self, n_units, mem, regwid=64, addrwid=48):
309 self.n_units = n_units
310 self.mem = mem
311 ul = []
312 for i in range(n_units):
313 ul.append(LDSTPort(i, regwid, addrwid))
314 self.dports = Array(ul)
315
316 def elaborate(self, platform):
317 m = Module()
318 comb, sync = m.d.comb, m.d.sync
319
320 # connect the ports as modules
321 for i in range(self.n_units):
322 setattr(m.submodules, "port%d" % i, self.dports[i])
323
324 # state-machine latches
325 m.submodules.st_active = st_active = SRLatch(False, name="st_active")
326 m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
327 m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
328 m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
329 m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")
330
331 # find one LD (or ST) and do it. only one per cycle.
332 # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
333 # LD/STs using mask-expansion - see LenExpand class
334
335 m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
336 m.submodules.stpick = stpick = PriorityEncoder(self.n_units)
337
338 lds = Signal(self.n_units, reset_less=True)
339 sts = Signal(self.n_units, reset_less=True)
340 ldi = []
341 sti = []
342 for i in range(self.n_units):
343 pi = self.dports[i].pi
344 ldi.append(pi.is_ld_i & pi.busy_o) # accumulate ld-req signals
345 sti.append(pi.is_st_i & pi.busy_o) # accumulate st-req signals
346 # put the requests into the priority-pickers
347 comb += ldpick.i.eq(Cat(*ldi))
348 comb += stpick.i.eq(Cat(*sti))
349
350 # hmm, have to select (record) the right port index
351 nbits = log2_int(self.n_units, False)
352 ld_idx = Signal(nbits, reset_less=False)
353 st_idx = Signal(nbits, reset_less=False)
354 # use these because of the sync-and-comb pass-through capability
355 latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
356 latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")
357
358 # convenience variables to reference the "picked" port
359 ldport = self.dports[ld_idx].pi
360 stport = self.dports[st_idx].pi
361 # and the memory ports
362 rdport = self.mem.rdport
363 wrport = self.mem.wrport
364
365 # Priority-Pickers pick one and only one request, capture its index.
366 # from that point on this code *only* "listens" to that port.
367
368 sync += adrok_l.s.eq(0)
369 comb += adrok_l.r.eq(0)
370 with m.If(~ldpick.n):
371 comb += ld_active.s.eq(1) # activate LD mode
372 comb += idx_l.r.eq(1) # pick (and capture) the port index
373 with m.Elif(~stpick.n):
374 comb += st_active.s.eq(1) # activate ST mode
375 comb += idx_l.r.eq(1) # pick (and capture) the port index
376
377 # from this point onwards, with the port "picked", it stays picked
378 # until ld_active (or st_active) are de-asserted.
379
380 # if now in "LD" mode: wait for addr_ok, then send the address out
381 # to memory, acknowledge address, and send out LD data
382 with m.If(ld_active.q):
383 with m.If(ldport.addr.ok & adrok_l.qn):
384 comb += rdport.addr.eq(ldport.addr.data) # addr ok, send thru
385 comb += ldport.addr_ok_o.eq(1) # acknowledge addr ok
386 sync += adrok_l.s.eq(1) # and pull "ack" latch
387
388 # if now in "ST" mode: likewise do the same but with "ST"
389 # to memory, acknowledge address, and send out LD data
390 with m.If(st_active.q):
391 with m.If(stport.addr.ok):
392 comb += wrport.addr.eq(stport.addr.data) # addr ok, send thru
393 with m.If(adrok_l.qn):
394 comb += stport.addr_ok_o.eq(1) # acknowledge addr ok
395 sync += adrok_l.s.eq(1) # and pull "ack" latch
396
397 # NOTE: in both these, below, the port itself takes care
398 # of de-asserting its "busy_o" signal, based on either ld.ok going
399 # high (by us, here) or by st.ok going high (by the LDSTCompUnit).
400
401 # for LD mode, when addr has been "ok'd", assume that (because this
402 # is a "Memory" test-class) the memory read data is valid.
403 comb += reset_l.s.eq(0)
404 comb += reset_l.r.eq(0)
405 with m.If(ld_active.q & adrok_l.q):
406 comb += ldport.ld.data.eq(rdport.data) # put data out
407 comb += ldport.ld.ok.eq(1) # indicate data valid
408 comb += reset_l.s.eq(1) # reset mode after 1 cycle
409
410 # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
411 with m.If(st_active.q & stport.st.ok):
412 comb += wrport.data.eq(stport.st.data) # write st to mem
413 comb += wrport.en.eq(1) # enable write
414 comb += reset_l.s.eq(1) # reset mode after 1 cycle
415
416 # after waiting one cycle (reset_l is "sync" mode), reset the port
417 with m.If(reset_l.q):
418 comb += idx_l.s.eq(1) # deactivate port-index selector
419 comb += ld_active.r.eq(1) # leave the ST active for 1 cycle
420 comb += st_active.r.eq(1) # leave the ST active for 1 cycle
421 comb += reset_l.r.eq(1) # clear reset
422 comb += adrok_l.r.eq(1) # address reset
423
424 return m
425
426 def ports(self):
427 for p in self.dports:
428 yield from p.ports()
429
430
431 class TstL0CacheBuffer(Elaboratable):
432 def __init__(self, n_units=3, regwid=16, addrwid=4):
433 self.mem = TestMemory(regwid, addrwid)
434 self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)
435
436 def elaborate(self, platform):
437 m = Module()
438 m.submodules.mem = self.mem
439 m.submodules.l0 = self.l0
440
441 return m
442
443 def ports(self):
444 yield from self.l0.ports()
445 yield self.mem.rdport.addr
446 yield self.mem.rdport.data
447 yield self.mem.wrport.addr
448 yield self.mem.wrport.data
449 # TODO: mem ports
450
451
452 def wait_busy(port, no=False):
453 while True:
454 busy = yield port.pi.busy_o
455 print("busy", no, busy)
456 if bool(busy) == no:
457 break
458 yield
459
460
461 def wait_addr(port):
462 while True:
463 addr_ok = yield port.pi.addr_ok_o
464 print("addrok", addr_ok)
465 if not addr_ok:
466 break
467 yield
468
469
470 def wait_ldok(port):
471 while True:
472 ldok = yield port.pi.ld.ok
473 print("ldok", ldok)
474 if ldok:
475 break
476 yield
477
478
479 def l0_cache_st(dut, addr, data):
480 l0 = dut.l0
481 mem = dut.mem
482 port0 = l0.dports[0]
483 port1 = l0.dports[1]
484
485 # have to wait until not busy
486 yield from wait_busy(port1, no=False) # wait until not busy
487
488 # set up a ST on the port. address first:
489 yield port1.pi.is_st_i.eq(1) # indicate LD
490
491 yield port1.pi.addr.data.eq(addr) # set address
492 yield port1.pi.addr.ok.eq(1) # set ok
493 yield from wait_addr(port1) # wait until addr ok
494 # yield # not needed, just for checking
495 # yield # not needed, just for checking
496 # assert "ST" for one cycle (required by the API)
497 yield port1.pi.st.data.eq(data)
498 yield port1.pi.st.ok.eq(1)
499 yield
500 yield port1.pi.st.ok.eq(0)
501
502 # can go straight to reset.
503 yield port1.pi.is_st_i.eq(0) # end
504 yield port1.pi.addr.ok.eq(0) # set !ok
505 # yield from wait_busy(port1, False) # wait until not busy
506
507
508 def l0_cache_ld(dut, addr, expected):
509
510 l0 = dut.l0
511 mem = dut.mem
512 port0 = l0.dports[0]
513 port1 = l0.dports[1]
514
515 # have to wait until not busy
516 yield from wait_busy(port1, no=False) # wait until not busy
517
518 # set up a LD on the port. address first:
519 yield port1.pi.is_ld_i.eq(1) # indicate LD
520
521 yield port1.pi.addr.data.eq(addr) # set address
522 yield port1.pi.addr.ok.eq(1) # set ok
523 yield from wait_addr(port1) # wait until addr ok
524
525 yield from wait_ldok(port1) # wait until ld ok
526 data = yield port1.pi.ld.data
527
528 # cleanup
529 yield port1.pi.is_ld_i.eq(0) # end
530 yield port1.pi.addr.ok.eq(0) # set !ok
531 # yield from wait_busy(port1, no=False) # wait until not busy
532
533 return data
534
535
536 def l0_cache_ldst(dut):
537 yield
538 addr = 0x2
539 data = 0xbeef
540 data2 = 0xf00f
541 #data = 0x4
542 yield from l0_cache_st(dut, 0x2, data)
543 yield from l0_cache_st(dut, 0x3, data2)
544 result = yield from l0_cache_ld(dut, 0x2, data)
545 result2 = yield from l0_cache_ld(dut, 0x3, data2)
546 yield
547 assert data == result, "data %x != %x" % (result, data)
548 assert data2 == result2, "data2 %x != %x" % (result2, data2)
549
550 def data_merger_merge(dut):
551 print("data_merger")
552 #starting with all inputs zero
553 yield Settle()
554 en = yield dut.data_o.en
555 data = yield dut.data_o.data
556 assert en == 0, "en must be zero"
557 assert data == 0, "data must be zero"
558 yield
559
560 yield dut.addr_array_i[0].eq(0xFF)
561 for j in range(dut.array_size):
562 yield dut.data_i[j].en.eq(1 << j)
563 yield dut.data_i[j].data.eq(0xFF << (16*j))
564 yield Settle()
565
566 en = yield dut.data_o.en
567 data = yield dut.data_o.data
568 assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
569 assert en == 0xff
570 yield
571
572 def test_l0_cache():
573
574 dut = TstL0CacheBuffer(regwid=64)
575 #vl = rtlil.convert(dut, ports=dut.ports())
576 #with open("test_basic_l0_cache.il", "w") as f:
577 # f.write(vl)
578
579 run_simulation(dut, l0_cache_ldst(dut),
580 vcd_name='test_l0_cache_basic.vcd')
581
582 def test_data_merger():
583
584 dut = DataMerger(8)
585 #vl = rtlil.convert(dut, ports=dut.ports())
586 #with open("test_data_merger.il", "w") as f:
587 # f.write(vl)
588
589 run_simulation(dut, data_merger_merge(dut),
590 vcd_name='test_data_merger.vcd')
591
592 def test_dual_port_splitter():
593
594 dut = DualPortSplitter()
595 #vl = rtlil.convert(dut, ports=dut.ports())
596 #with open("test_data_merger.il", "w") as f:
597 # f.write(vl)
598
599 run_simulation(dut, data_merger_merge(dut),
600 vcd_name='test_dual_port_splitter.vcd')
601
602 if __name__ == '__main__':
603 #test_l0_cache()
604 #test_data_merger()
605 test_dual_port_splitter()