elaborate function for DualPortSplitter
[soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation, Settle
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28 from soc.regfile.regfile import ortreereduce
29 from nmutil.util import treereduce
30
31 from soc.experiment.compldst import CompLDSTOpSubset
32 from soc.decoder.power_decoder2 import Data
33 #from nmutil.picker import PriorityPicker
34 from nmigen.lib.coding import PriorityEncoder
35 from soc.scoreboard.addr_split import LDSTSplitter
36
37 # for testing purposes
38 from soc.experiment.testmem import TestMemory
39
40
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to. note that this is NOT a "fire-and-forget" interface. the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress. further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted. busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted. this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for. these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        """Construct the LD/ST port record.

        :param name: optional name passed through to RecordObject
        :param regwid: width in bits of the LD and ST data fields
        :param addrwid: width in bits of the address field

        NOTE: field declaration order defines the record layout -
        do not reorder.
        """
        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)  # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")  # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

    # TODO: elaborate function
120
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """

    def __init__(self):
        # two outgoing ports: [0] for addr bit 4 == 0, [1] for bit 4 == 1
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        # the single incoming port
        self.inp = PortInterface(name="inp")
        # (removed: leftover debug print of self.outp)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        m.submodules.splitter = splitter = LDSTSplitter(64, 48, 4)
        # wire the incoming PortInterface into the splitter.
        # XXX incomplete: length/valid/data hookups below are still TODO,
        # as is connecting splitter outputs to self.outp[0] and self.outp[1]
        comb += splitter.addr_i.eq(self.inp.addr)  # XXX
        #comb += splitter.len_i.eq()
        #comb += splitter.valid_i.eq()
        comb += splitter.is_ld_i.eq(self.inp.is_ld_i)
        comb += splitter.is_st_i.eq(self.inp.is_st_i)
        #comb += splitter.st_data_i.eq()
        #comb += splitter.sld_valid_i.eq()
        #comb += splitter.sld_data_i.eq()
        #comb += splitter.sst_valid_i.eq()
        return m
162
class DataMergerRecord(Record):
    """A 128-bit data word paired with its 16 byte-enable lines.

    Layout: {data: 128 bit, en: 16 bit}
    """

    # FIXME: make resetless
    # TODO: formal verification

    def __init__(self, name=None):
        fields = [('data', 128),
                  ('en', 16)]
        super().__init__(Layout(fields), name=name)
178
class DataMerger(Elaboratable):
    """DataMerger

    Merges data rows according to an address-match matrix: a single
    active row is (priority-) picked, and every row whose address
    matches the picked one (per addr_array_i) is ORed onto the output.

    By the time DataMerger is used, all incoming data has already been
    determined not to conflict.  The last step before submitting the
    request to the Memory Subsystem is to work out which requests, on
    the same 128-bit cache line, can be "merged" because they are:
    (A) on the same address (bits 4 and above) (B) have byte-enable
    lines that (as previously mentioned) do not conflict.

    In short, this module will:
    (1) pick a row (any row), index "idx"
    (2) merge all byte-enable/data lines on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        # one N-bit match vector per row
        matches = [Signal(array_size, reset_less=True,
                          name="addr_match_%d" % i)
                   for i in range(array_size)]
        self.addr_array_i = Array(matches)

        # one data/enable record per row, plus the merged output
        self.data_i = Array([DataMergerRecord() for _ in range(array_size)])
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # (1) priority-pick any one active row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for i in range(self.array_size):
            comb += pick.i[i].eq(self.addr_array_i[i].bool())
        valid = ~pick.n
        idx = pick.o

        # (2) OR together every row whose address matches the picked row
        with m.If(valid):
            rows = []
            for i in range(self.array_size):
                rec = DataMergerRecord()
                with m.If(self.addr_array_i[idx][i]):
                    comb += rec.eq(self.data_i[i])
                rows.append(rec)
            comb += self.data_o.data.eq(ortreereduce(rows, "data"))
            comb += self.data_o.en.eq(ortreereduce(rows, "en"))

        return m
245
246
class LDSTPort(Elaboratable):
    """One LD/ST port: owns a PortInterface and tracks its "busy" state.

    This module does not drive the LD/ST data paths itself: the
    L0 Cache/Buffer sets those outputs.  LDSTPort only monitors the
    request/completion signals and maintains the busy latch that the
    PortInterface contract (see PortInterface docstring) requires.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the PortInterface presented to an LDSTCompUnit, named by index
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        """Build the busy/cycle latch logic around self.pi."""
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches: busy_l is combinatorial, cyc_l is sync (one-cycle delay)
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        # default: leave cyc_l alone unless set/reset below
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # cyc_l.q goes high one cycle after st.ok/ld.ok: drop busy then
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # yield every signal of the PortInterface, for simulation/conversion
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        """Return all PortInterface signals as a list."""
        return list(self)
300
301
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :param n_units: number of LDSTPorts to create
        :param mem: memory with rdport/wrport attributes (e.g. TestMemory)
        :param regwid: data width in bits of each port
        :param addrwid: address width in bits of each port
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        """Build the one-request-at-a-time LD/ST arbitration FSM."""
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)  # indicate data valid
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)  # enable write
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)  # clear reset
            comb += adrok_l.r.eq(1)  # address reset

        return m

    def ports(self):
        """Yield every signal of every LDSTPort."""
        for p in self.dports:
            yield from p.ports()
439
440
class TstL0CacheBuffer(Elaboratable):
    """Test harness: a TestMemory wired to an L0CacheBuffer."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        yield from self.l0.ports()
        for memport in (self.mem.rdport, self.mem.wrport):
            yield memport.addr
            yield memport.data
        # TODO: mem ports
460
461
def wait_busy(port, no=False):
    """Yield (wait) until port.pi.busy_o compares equal to *no*.

    With no=False (the default) this returns once busy_o is low;
    with no=True, once busy_o is high.
    """
    while True:
        state = yield port.pi.busy_o
        print("busy", no, state)
        if bool(state) == no:
            return
        yield
469
470
def wait_addr(port):
    """Yield (wait) while port.pi.addr_ok_o is high; return once it is low.

    NOTE(review): callers comment this as "wait until addr ok", but the
    loop actually exits when addr_ok_o is LOW - confirm intent.
    """
    while True:
        ok = yield port.pi.addr_ok_o
        print("addrok", ok)
        if not ok:
            return
        yield
478
479
def wait_ldok(port):
    """Yield (wait) until port.pi.ld.ok goes high."""
    while True:
        ok = yield port.pi.ld.ok
        print("ldok", ok)
        if ok:
            return
        yield
487
488
def l0_cache_st(dut, addr, data):
    """Drive a single ST of *data* to *addr* through port 1 of dut.l0.

    Follows the PortInterface contract: wait for not-busy, assert
    is_st_i and the address, wait for the address acknowledge, then
    assert st.data/st.ok for exactly one cycle.
    """
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False) # wait until not busy
516
517
def l0_cache_ld(dut, addr, expected):
    """Drive a single LD from *addr* through port 1 of dut.l0.

    Follows the PortInterface contract: wait for not-busy, assert
    is_ld_i and the address, wait for addr-ack and ld.ok, then read
    and return ld.data.  *expected* is unused here; callers compare.
    """
    l0 = dut.l0
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data
544
545
def l0_cache_ldst(dut):
    """Store two values then load them back, checking both round-trips."""
    yield
    # use named addresses instead of repeating literals (the original
    # assigned addr = 0x2 and then never used it)
    addr = 0x2
    addr2 = 0x3
    data = 0xbeef
    data2 = 0xf00f
    yield from l0_cache_st(dut, addr, data)
    yield from l0_cache_st(dut, addr2, data2)
    result = yield from l0_cache_ld(dut, addr, data)
    result2 = yield from l0_cache_ld(dut, addr2, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
559
def data_merger_merge(dut):
    """Exercise DataMerger: idle output must be zero, then a full
    address-match on row 0 must OR all rows onto the output."""
    print("data_merger")
    # starting with all inputs zero: output must be zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    # row 0 matches all 8 rows: every row merges onto the output
    yield dut.addr_array_i[0].eq(0xFF)
    for row in range(dut.array_size):
        yield dut.data_i[row].en.eq(1 << row)
        yield dut.data_i[row].data.eq(0xFF << (16 * row))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield
581
def test_l0_cache():
    """Run the LD/ST round-trip testbench against TstL0CacheBuffer."""
    dut = TstL0CacheBuffer(regwid=64)
    # uncomment to dump RTLIL for inspection:
    # vl = rtlil.convert(dut, ports=dut.ports())
    # with open("test_basic_l0_cache.il", "w") as f:
    #     f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
591
def test_data_merger():
    """Run the merge testbench against an 8-row DataMerger."""
    dut = DataMerger(8)
    # uncomment to dump RTLIL for inspection:
    # vl = rtlil.convert(dut, ports=dut.ports())
    # with open("test_data_merger.il", "w") as f:
    #     f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')
601
def test_dual_port_splitter():
    """Instantiate DualPortSplitter (simulation hookup still TODO)."""
    dut = DualPortSplitter()
    # uncomment to dump RTLIL for inspection:
    # vl = rtlil.convert(dut, ports=dut.ports())
    # with open("test_data_merger.il", "w") as f:
    #     f.write(vl)

    # run_simulation(dut, data_merger_merge(dut),
    #                vcd_name='test_dual_port_splitter.vcd')
611
# run the testbenches when invoked as a script
if __name__ == '__main__':
    test_l0_cache()
    test_data_merger()
    #test_dual_port_splitter()  # TODO: enable once DualPortSplitter works