1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28
29 from soc.experiment.compldst import CompLDSTOpSubset
30 from soc.decoder.power_decoder2 import Data
31 #from nmutil.picker import PriorityPicker
32 from nmigen.lib.coding import PriorityEncoder
33
34 # for testing purposes
35 from soc.experiment.testmem import TestMemory
36
37
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when the request has 100% successfully completed
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

# TODO:
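
# NOTE: illustrative summary of the PortInterface handshake documented in
# the class docstring above (no new behaviour is being defined here).
# For a LD, the requester:
#   1. asserts is_ld_i (busy_o goes high and stays high)
#   2. asserts addr.ok with addr.data valid, and holds both
#   3. waits for the one-cycle addr_ok_o (or addr_exc_o) pulse
#   4. latches ld.data on the single cycle that ld.ok is asserted
#   5. drops is_ld_i and addr.ok once busy_o has been deasserted
# A ST is the same except that st.data/st.ok are driven (for one cycle)
# by the requester after addr_ok_o, and busy_o drops one cycle later.
# The generator functions l0_cache_ld and l0_cache_st, below, walk
# through exactly this sequence in simulation.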


class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    pass
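
    # NOTE: hypothetical skeleton only, kept commented-out because the
    # class is still a stub: it merely declares the one incoming and two
    # outgoing PortInterfaces described in the docstring.  The names
    # (inp, out) are assumptions for illustration, not the final design,
    # and the LDSTSplitter wiring is deliberately left out.
    #
    # def __init__(self, regwid=64, addrwid=48):
    #     self.inp = PortInterface("dps_i", regwid, addrwid)
    #     self.out = [PortInterface("dps_o%d" % i, regwid, addrwid)
    #                 for i in range(2)]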


class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16)
                  )

        Record.__init__(self, Layout(layout), name=name)

# TODO:

class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match.  bits across the diagonal
                       (addr_array_i[x][x]) will always be set, to
                       indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()
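
    # NOTE: a minimal elaborate() sketch, added purely for illustration:
    # it implements the two steps described in the class docstring, using
    # the PriorityEncoder imported above as the row-picker.  It is an
    # assumption / sketch, not the production implementation.
    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        asz = self.array_size

        # (1) pick a row: a row is "active" if any of its address-match
        # bits are set (the diagonal guarantees this for live requests)
        m.submodules.pick = pick = PriorityEncoder(asz)
        comb += pick.i.eq(Cat(*[self.addr_array_i[i].any()
                                for i in range(asz)]))

        # capture the picked row's address-match vector
        addr_match = Signal(asz, reset_less=True)
        comb += addr_match.eq(self.addr_array_i[pick.o])

        # (2) merge (OR) the data and byte-enables of every row whose
        # address matches the picked row, masking out non-matching rows
        with m.If(~pick.n):
            data_or, en_or = 0, 0
            for j in range(asz):
                data_or = data_or | Mux(addr_match[j],
                                        self.data_i[j].data, 0)
                en_or = en_or | Mux(addr_match[j], self.data_i[j].en, 0)
            comb += self.data_o.data.eq(data_or)
            comb += self.data_o.en.eq(en_or)

        return m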


class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and it has one important property: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise wait for addr_ok, send the address
        # out to memory, acknowledge the address, and accept the ST data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)  # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)       # set ok
    yield from wait_addr(port1)        # wait until addr ok

    yield from wait_ldok(port1)        # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False)  # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data)
    yield from l0_cache_st(dut, 0x3, data2)
    result = yield from l0_cache_ld(dut, 0x2, data)
    result2 = yield from l0_cache_ld(dut, 0x3, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)


def test_l0_cache():

    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


if __name__ == '__main__':
    test_l0_cache()