# src/soc/experiment/l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """

from nmigen.compat.sim import run_simulation, Settle
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce
from nmutil.util import treereduce

from soc.experiment.compldst import CompLDSTOpSubset
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder

# for testing purposes
from soc.experiment.testmem import TestMemory

class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to. note that this is NOT a "fire-and-forget" interface. the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when the request is guaranteed to complete
    successfully may the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress. further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted. busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted. this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it).

    * addr_ok_o (or addr_exc_o) must be waited for. these will
      be asserted for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request can be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem. the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time. st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery. no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.

    an example driver sketch follows this class; see also l0_cache_ld
    and l0_cache_st at the end of this file.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)  # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")  # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

    # TODO: elaborate function


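# Illustrative sketch only (an assumption for documentation, not part of the
# real API): how a requester is expected to drive PortInterface for a LD,
# following the FSM rules in the docstring above. it uses the same
# yield-style simulation idiom as the l0_cache_ld / l0_cache_st tests at the
# end of this file, which are the authoritative examples.

def example_ld_handshake(pi, addr):
    yield pi.is_ld_i.eq(1)           # request a LD: busy_o will assert
    yield pi.addr.data.eq(addr)      # present the address...
    yield pi.addr.ok.eq(1)           # ...and hold it valid until not busy
    while not (yield pi.addr_ok_o):  # wait for the one-cycle address ack
        yield
    while not (yield pi.ld.ok):      # wait for ld.ok (one cycle only)
        yield
    data = yield pi.ld.data          # MUST capture ld.data on this cycle
    yield pi.is_ld_i.eq(0)           # drop the request...
    yield pi.addr.ok.eq(0)           # ...and release the address
    return data

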
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.  (see the routing sketch after this class)

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """
    def __init__(self):
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        self.inp = PortInterface(name="inp")

    def elaborate(self, platform):
        m = Module()
        # TODO: wire self.inp through LDSTSplitter(64, 48, 4) to self.outp
        # (LDSTSplitter is not yet imported in this file)
        return m


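# Illustrative, software-only sketch (an assumption for clarity, not part of
# the hardware) of the bit-4 routing rule described in the DualPortSplitter
# docstring: bytes whose address has bit 4 clear belong to outgoing port 0,
# bytes with bit 4 set belong to outgoing port 1, so a mis-aligned access
# that crosses a 16-byte boundary touches both ports.

def example_split_ports(addr, length):
    """return the set of outgoing port numbers touched by a byte access"""
    return {(a >> 4) & 1 for a in range(addr, addr + length)}

# e.g. example_split_ports(0x0e, 4) == {0, 1}   (crosses the 16-byte line)
# and  example_split_ports(0x20, 8) == {0}      (entirely within port 0)
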
class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16))

        Record.__init__(self, Layout(layout), name=name)

        # FIXME: make resetless

# TODO: formal verification

class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict. The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_array_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match. bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of the same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # (1) pick a row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o
        # (2) merge
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                r = DataMergerRecord()
                with m.If(select):
                    comb += r.eq(self.data_i[j])
                l.append(r)
            comb += self.data_o.data.eq(ortreereduce(l, "data"))
            comb += self.data_o.en.eq(ortreereduce(l, "en"))

        return m


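# Software-only reference model (an illustrative assumption, not used by the
# hardware above): pick the first row with any address-match bit set, then
# OR together the data and byte-enables of every row that matches it. this
# mirrors the PriorityEncoder plus OR-tree structure in DataMerger.elaborate.

def example_data_merge(addr_match, rows):
    """addr_match: list of N bitmask ints; rows: list of (data, en) tuples"""
    for mask in addr_match:
        if mask:                      # (1) priority-pick the first active row
            data, en = 0, 0
            for j, (d, e) in enumerate(rows):
                if (mask >> j) & 1:   # (2) merge rows on the same address
                    data |= d
                    en |= e
            return data, en
    return 0, 0                       # no active row: output stays zero
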
class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this. However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it. only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) is de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge the address, and send out the LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # if now in "ST" mode: likewise send the address out to memory,
        # acknowledge the address, and accept the incoming ST data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # NOTE: in both of the cases below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or on st.ok going high (by the LDSTCompUnit).

        # for LD mode, when the addr has been "ok'd", assume that (because
        # this is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)  # indicate data valid
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # for ST mode, when the addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)  # enable write
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)  # clear reset
            comb += adrok_l.r.eq(1)  # clear the address-ack latch

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


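# Sketch (an illustrative assumption: this is NOT the LenExpand class that
# the TODO inside L0CacheBuffer.elaborate refers to) of the mask-expansion a
# production L0 Cache/Buffer would use to merge multiple LD/STs: an access
# of "length" bytes at byte-offset "offset" within a 16-byte line becomes a
# 16-bit byte-enable mask, suitable for DataMergerRecord.en.

def example_len_expand(offset, length):
    return ((1 << length) - 1) << offset

# e.g. example_len_expand(2, 4) == 0b0000000000111100
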
class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port. address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)  # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port. address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)  # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    # data = 0x4
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)

def data_merger_merge(dut):
    print("data_merger")
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield

def test_l0_cache():

    dut = TstL0CacheBuffer()
    # vl = rtlil.convert(dut, ports=dut.ports())
    # with open("test_basic_l0_cache.il", "w") as f:
    #     f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


def test_data_merger():

    dut = DataMerger(8)
    # vl = rtlil.convert(dut, ports=dut.ports())
    # with open("test_data_merger.il", "w") as f:
    #     f.write(vl)

    run_simulation(dut, data_merger_merge(dut),
                   vcd_name='test_data_merger.vcd')


if __name__ == '__main__':
    test_l0_cache()
    test_data_merger()