1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23
24 from nmutil.latch import SRLatch, latchregister
25 from soc.decoder.power_decoder2 import Data
26 from soc.decoder.power_enums import InternalOp
27
28 from soc.experiment.compldst import CompLDSTOpSubset
29 from soc.decoder.power_decoder2 import Data
30 #from nmutil.picker import PriorityPicker
31 from nmigen.lib.coding import PriorityEncoder
32
33 # for testing purposes
34 from soc.experiment.testmem import TestMemory
35
36
37 class PortInterface(RecordObject):
38 """PortInterface
39
40 defines the interface - the API - that the LDSTCompUnit connects
41 to. note that this is NOT a "fire-and-forget" interface. the
42 LDSTCompUnit *must* be kept appraised that the request is in
43 progress, and only when it has a 100% successful completion rate
44 can the notification be given (busy dropped).
45
46 The interface FSM rules are as follows:
47
48 * if busy_o is asserted, a LD/ST is in progress. further
49 requests may not be made until busy_o is deasserted.
50
51 * only one of is_ld_i or is_st_i may be asserted. busy_o
52 will immediately be asserted and remain asserted.
53
54 * addr.ok is to be asserted when the LD/ST address is known.
55 addr.data is to be valid on the same cycle.
56
57 addr.ok and addr.data must REMAIN asserted until busy_o
58 is de-asserted. this ensures that there is no need
59 for the L0 Cache/Buffer to have an additional address latch
60 (because the LDSTCompUnit already has it)
61
62 * addr_ok_o (or addr_exc_o) must be waited for. these will
63 be asserted *only* for one cycle and one cycle only.
64
65 * addr_exc_o will be asserted if there is no chance that the
66 memory request may be fulfilled.
67
68 busy_o is deasserted on the same cycle as addr_exc_o is asserted.
69
70 * conversely: addr_ok_o must *ONLY* be asserted if there is a
71 HUNDRED PERCENT guarantee that the memory request will be
72 fulfilled.
73
74 * for a LD, ld.ok will be asserted - for only one clock cycle -
75 at any point in the future that is acceptable to the underlying
76 Memory subsystem. the recipient MUST latch ld.data on that cycle.
77
78 busy_o is deasserted on the same cycle as ld.ok is asserted.
79
80 * for a ST, st.ok may be asserted only after addr_ok_o had been
81 asserted, alongside valid st.data at the same time. st.ok
82 must only be asserted for one cycle.
83
84 the underlying Memory is REQUIRED to pick up that data and
85 guarantee its delivery. no back-acknowledgement is required.
86
87 busy_o is deasserted on the cycle AFTER st.ok is asserted.
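
    As an illustration only (a sketch, not normative - it simply mirrors
    what the simulation helpers l0_cache_ld / l0_cache_st at the bottom
    of this file do), a LD request looks roughly like this from the
    LDSTCompUnit side:

        yield pi.is_ld_i.eq(1)        # request a LD: busy_o goes high
        yield pi.addr.data.eq(addr)   # present the address...
        yield pi.addr.ok.eq(1)        # ...and mark it valid
        # wait for the single-cycle addr_ok_o (or addr_exc_o) pulse
        # wait for the single-cycle ld.ok pulse, latching ld.data then
        yield pi.is_ld_i.eq(0)        # done: drop the request...
        yield pi.addr.ok.eq(0)        # ...and the address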
88 """
89
90 def __init__(self, name=None, regwid=64, addrwid=48):
91
92 self._regwid = regwid
93 self._addrwid = addrwid
94
95 RecordObject.__init__(self, name=name)
96
97 # distinguish op type (ld/st)
98 self.is_ld_i = Signal(reset_less=True)
99 self.is_st_i = Signal(reset_less=True)
100 self.op = CompLDSTOpSubset() # hm insn_type ld/st duplicates here
101
102 # common signals
103 self.busy_o = Signal(reset_less=True) # do not use if busy
104 self.go_die_i = Signal(reset_less=True) # back to reset
105 self.addr = Data(addrwid, "addr_i") # addr/addr-ok
106 self.addr_ok_o = Signal(reset_less=True) # addr is valid (TLB, L1 etc.)
107 self.addr_exc_o = Signal(reset_less=True) # TODO, "type" of exception
108
109 # LD/ST
110 self.ld = Data(regwid, "ld_data_o") # ok to be set by L0 Cache/Buf
111 self.st = Data(regwid, "st_data_i") # ok to be set by CompUnit
112
113 # TODO:
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done, each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    pass


# TODO:
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix

    """
    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating
                       address match
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of the same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        pass


class LDSTPort(Elaboratable):
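    """LDSTPort

    wraps a single PortInterface and maintains its "busy_o" latch.
    note (see the comment in elaborate, below) that the L0 Cache/Buffer
    drives the other outputs: this module merely monitors the
    request/response handshake.
    """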
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this. However it's only
    a "demo" / "test" class, and it has one important aspect: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """
    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")
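        # latch roles, for reference:
        #   ld_active / st_active - a LD (resp. ST) request has been picked
        #   idx_l                 - gates capture of the picked port index
        #   adrok_l               - the address has been acked to the port
        #   reset_l               - delays the end-of-request reset (sync mode)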

        # find one LD (or ST) and do it. only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o) # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o) # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1) # activate LD mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1) # activate ST mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data) # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1) # acknowledge addr ok
                sync += adrok_l.s.eq(1) # and pull "ack" latch

        # if now in "ST" mode: likewise wait for addr_ok and send the
        # address out to memory, acknowledging it (the ST data itself is
        # picked up further below)
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data) # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1) # acknowledge addr ok
                    sync += adrok_l.s.eq(1) # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data) # put data out
            comb += ldport.ld.ok.eq(1) # indicate data valid
            comb += reset_l.s.eq(1) # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data) # write st to mem
            comb += wrport.en.eq(1) # enable write
            comb += reset_l.s.eq(1) # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1) # deactivate port-index selector
            comb += ld_active.r.eq(1) # leave the LD active for 1 cycle
            comb += st_active.r.eq(1) # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1) # clear reset
            comb += adrok_l.r.eq(1) # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
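    """TstL0CacheBuffer

    connects a TestMemory directly to an L0CacheBuffer: a convenience
    wrapper for simulation / unit-test purposes.
    """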
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
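    """wait (one clock per check) until bool(busy_o) == no.
    with no=False this resumes once the port is no longer busy."""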
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
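    """poll addr_ok_o once per clock, resuming when it reads as deasserted
    (addr_ok_o is a single-cycle pulse)."""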
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
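    """wait (one clock per check) until ld.ok is asserted: ld.data is
    valid on that same cycle."""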
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False) # wait until not busy

    # set up a ST on the port. address first:
    yield port1.pi.is_st_i.eq(1) # indicate ST

    yield port1.pi.addr.data.eq(addr) # set address
    yield port1.pi.addr.ok.eq(1) # set ok
    yield from wait_addr(port1) # wait until addr ok
    #yield # not needed, just for checking
    #yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0) # end
    yield port1.pi.addr.ok.eq(0) # set !ok
    #yield from wait_busy(port1, False) # wait until not busy


def l0_cache_ld(dut, addr, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False) # wait until not busy

    # set up a LD on the port. address first:
    yield port1.pi.is_ld_i.eq(1) # indicate LD

    yield port1.pi.addr.data.eq(addr) # set address
    yield port1.pi.addr.ok.eq(1) # set ok
    yield from wait_addr(port1) # wait until addr ok

    yield from wait_ldok(port1) # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0) # end
    yield port1.pi.addr.ok.eq(0) # set !ok
    #yield from wait_busy(port1, no=False) # wait until not busy

    return data


def l0_cache_ldst(dut):
    yield
    addr = 0x2
    addr2 = 0x3
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, addr, data)
    yield from l0_cache_st(dut, addr2, data2)
    result = yield from l0_cache_ld(dut, addr, data)
    result2 = yield from l0_cache_ld(dut, addr2, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)


def test_l0_cache():

    dut = TstL0CacheBuffer()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')


if __name__ == '__main__':
    test_l0_cache()