make DataMerger record reset_less
[soc.git] / src / soc / experiment / l0_cache.py
"""L0 Cache/Buffer

This first version is intended for prototyping and test purposes:
it has "direct" access to Memory.

The intention is that this version remains an integral part of the
test infrastructure, and, just as with minerva's memory arrangement,
a dynamic runtime config *selects* alternative memory arrangements
rather than *replaces and discards* this code.

Links:

* https://bugs.libre-soc.org/show_bug.cgi?id=216
* https://libre-soc.org/3d_gpu/architecture/memory_and_cache/

"""

from nmigen.compat.sim import run_simulation, Settle
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce
from nmutil.util import treereduce

from soc.experiment.compldst import CompLDSTOpSubset
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder

# for testing purposes
from soc.experiment.testmem import TestMemory


class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to. note that this is NOT a "fire-and-forget" interface. the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when completion is 100% successful can the
    notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress. further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted. busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted. this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for. these will
      be asserted for one cycle, and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem. the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time. st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery. no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """
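
    # Informal illustration of the rules above (an assumption-based sketch,
    # not a normative addition to the API - see l0_cache_ld() further down
    # for a working simulation example of the same sequence):
    #
    #   cycle 0 : requester raises is_ld_i; busy_o goes high immediately
    #   cycle 0+: requester drives addr.data and addr.ok, holding them
    #             until busy_o drops
    #   cycle n : L0 asserts addr_ok_o for exactly one cycle
    #   cycle m : L0 asserts ld.ok with valid ld.data for exactly one cycle;
    #             busy_o drops on that same cycle and the requester must
    #             latch ld.data then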

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)  # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")  # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

    # TODO: elaborate function


class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """
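
    # Illustrative example (an assumption added for clarity, not part of
    # the spec above): address bit 4 selects the 16-byte half-line, so an
    # access at address 0x08 has addr[4] == 0 and is presented on outp[0],
    # whereas an access at address 0x18 has addr[4] == 1 and is presented
    # on outp[1]; a mis-aligned access covering bytes 0x0e..0x11 would be
    # split across both outgoing ports.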
    def __init__(self):
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        self.inp = PortInterface(name="inp")

    def elaborate(self, platform):
        # TODO: incomplete - LDSTSplitter is not yet imported in this file,
        # and it still needs to be wired up to inp/outp inside a Module
        # which is then returned.
        splitter = LDSTSplitter(64, 48, 4)


class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16)
                  )

        Record.__init__(self, Layout(layout), name=name)

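        # mark the fields reset_less: nmigen then omits synchronous reset
        # logic for them (presumably the intent here, given that DataMerger
        # below only ever drives these records combinatorially).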
        self.data.reset_less = True
        self.en.reset_less = True


# TODO: formal verification
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict. The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """
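
    # Worked example (illustrative figures only, chosen for this comment):
    # with array_size == 2, suppose
    #     addr_array_i[0] = 0b11   (row 0 address-matches rows 0 and 1,
    #     addr_array_i[1] = 0b11    and symmetrically for row 1)
    #     data_i[0]       = {data: 0x00FF, en: 0x0003}
    #     data_i[1]       = {data: 0xFF00, en: 0x000C}
    # the priority-picker selects idx == 0; addr_array_i[0] selects both
    # rows, so the output is the OR of the two:
    #     data_o          = {data: 0xFFFF, en: 0x000F}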

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match. bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # (1) pick a row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o
        # (2) merge
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                r = DataMergerRecord()
                with m.If(select):
                    comb += r.eq(self.data_i[j])
                l.append(r)
            comb += self.data_o.data.eq(ortreereduce(l, "data"))
            comb += self.data_o.en.eq(ortreereduce(l, "en"))

        return m


class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this. However, as it is only
    a "demo" / "test" class, it has one important advantage: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """
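
    # Illustrative note (an assumption for clarity, not something this test
    # version implements): the *two* interfaces per LDSTCompUnit are needed
    # because e.g. an 8-byte LD at address 0x0c spans bytes 0x0c-0x13 and
    # therefore crosses a 16-byte (128-bit) boundary, requiring both the
    # even (addr[4] == 0) and the odd (addr[4] == 1) L1 interfaces.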

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        self.regwid = regwid
        self.addrwid = addrwid
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def truncaddr(self, addr):
        """truncates the address to the top bits of the memory granularity
        """
        nbits = log2_int(self.mem.regwid)
        return addr[nbits:]

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it. only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # if now in "ST" mode: likewise send the address out to memory and
        # acknowledge it (the ST data itself is written later, on st.ok)
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)  # indicate data valid
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)  # enable write
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)  # deactivate LD mode
            comb += st_active.r.eq(1)  # deactivate ST mode
            comb += reset_l.r.eq(1)  # clear reset
            comb += adrok_l.r.eq(1)  # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port. address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)  # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port. address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)  # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)


def data_merger_merge(dut):
    print("data_merger")
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield


def test_l0_cache():

    dut = TstL0CacheBuffer(regwid=64)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_basic_l0_cache.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


def test_data_merger():

    dut = DataMerger(8)
    #vl = rtlil.convert(dut, ports=dut.ports())
    #with open("test_data_merger.il", "w") as f:
    #    f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')


if __name__ == '__main__':
    test_l0_cache()
    test_data_merger()