# implement DataMerger interface
# [soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23 from nmigen.hdl.rec import Record, Layout
24
25 from nmutil.latch import SRLatch, latchregister
26 from soc.decoder.power_decoder2 import Data
27 from soc.decoder.power_enums import InternalOp
28
29 from soc.experiment.compldst import CompLDSTOpSubset
30 from soc.decoder.power_decoder2 import Data
31 #from nmutil.picker import PriorityPicker
32 from nmigen.lib.coding import PriorityEncoder
33
34 # for testing purposes
35 from soc.experiment.testmem import TestMemory
36
37
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to. note that this is NOT a "fire-and-forget" interface. the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress. further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted. busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted. this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for. these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem. the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time. st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery. no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        # stash the widths for users of the port (not part of the Record)
        self._regwid = regwid    # LD/ST data width (register width)
        self._addrwid = addrwid  # address bus width

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)  # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")  # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit
114
115 # TODO:
116
117
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer
    """
    # placeholder: interface and implementation are still TODO
    pass
132
133
class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}

    One 128-bit data lane plus one enable bit per byte (16 bytes).
    """

    def __init__(self, name=None):
        fields = [
            ('data', 128),
            ('byte_enable', 16),
        ]
        Record.__init__(self, Layout(fields), name=name)
145
146 # TODO:
147
148
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix

    """

    def __init__(self, array_size):
        """
        :param array_size: number of ports N participating in the merge

        :addr_array_i: an NxN Array of Signals with bits set indicating address match
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size

        # NxN matrix of single-bit match flags: entry [i][j] set means
        # ports i and j address the same line (inner lists kept plain,
        # only the outer container is an nmigen Array, as before)
        self.addr_array_i = Array([[Signal() for _ in range(array_size)]
                                   for _ in range(array_size)])

        # one incoming data record per port, plus the merged output
        self.data_i = Array([DataMergerRecord() for _ in range(array_size)])
        self.data_o = DataMergerRecord()

    # NOTE(review): elaborate() is not yet implemented - this class so far
    # only declares its interface (the merge logic itself is still TODO).
176
177
class LDSTPort(Elaboratable):
    """LDSTPort: busy-state monitor for one PortInterface.

    Wraps a single PortInterface and manages only its busy_o signal
    via two SRLatches; the L0Cache/Buffer drives the data/ack outputs.
    """

    def __init__(self, idx, regwid=64, addrwid=48):
        # the actual LD/ST API record; idx makes the name unique per port
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        # NOTE(review): cyc_l is constructed with SRLatch(True), which
        # appears to make it a sync latch, providing that one-cycle
        # delay - confirm against nmutil.latch.SRLatch
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        # cyc_l.q high: drop busy and clear the cycle latch again
        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # enumerate every signal of the PortInterface, in declaration order
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        # flat list of all port signals (for simulation / conversion)
        return list(self)
231
232
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 1).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this. However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """
        :param n_units: number of LDSTPorts to create
        :param mem: memory to connect to (must provide rdport/wrport)
        :param regwid: LD/ST data width
        :param addrwid: address width
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    def elaborate(self, platform):
        """Build the one-LD-or-ST-at-a-time arbitration logic."""
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it. only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        # NOTE(review): lds and sts are declared but never assigned or
        # read below - they appear to be leftovers
        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        # ~pick.n means "at least one request present": LD takes priority
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)  # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # if now in "ST" mode: likewise do the same but with "ST"
        # to memory, acknowledge address, and send out LD data
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)  # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)  # indicate data valid
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)  # enable write
            comb += reset_l.s.eq(1)  # reset mode after 1 cycle

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)  # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)  # clear reset
            comb += adrok_l.r.eq(1)  # address reset

        return m

    def ports(self):
        # all signals of all LD/ST ports, for simulation / conversion
        for p in self.dports:
            yield from p.ports()
370
371
class TstL0CacheBuffer(Elaboratable):
    """Test harness: a TestMemory wired up to an L0CacheBuffer."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        # register both the memory and the cache/buffer as submodules
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # all L0 port signals, followed by the raw memory port signals
        yield from self.l0.ports()
        for memport in (self.mem.rdport, self.mem.wrport):
            yield memport.addr
            yield memport.data
        # TODO: mem ports
391
392
def wait_busy(port, no=False):
    """Simulation helper: yield until port.pi.busy_o equals `no`.

    With no=False this waits for busy to drop; with no=True it waits
    for busy to rise.
    """
    busy = yield port.pi.busy_o
    print("busy", no, busy)
    while bool(busy) != no:
        yield
        busy = yield port.pi.busy_o
        print("busy", no, busy)
400
401
def wait_addr(port):
    """Simulation helper: yield while port.pi.addr_ok_o is asserted.

    NOTE(review): this returns when addr_ok_o is LOW (addr_ok_o is a
    single-cycle pulse) - confirm this is the intended handshake.
    """
    addr_ok = yield port.pi.addr_ok_o
    print("addrok", addr_ok)
    while addr_ok:
        yield
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
409
410
def wait_ldok(port):
    """Simulation helper: yield until port.pi.ld.ok is asserted."""
    ldok = yield port.pi.ld.ok
    print("ldok", ldok)
    while not ldok:
        yield
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
418
419
def l0_cache_st(dut, addr, data):
    """Drive a single ST transaction on port 1 of the DUT.

    :param dut: TstL0CacheBuffer (or equivalent) device under test
    :param addr: address to store to
    :param data: data to store
    """
    port1 = dut.l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a ST on the port. address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
447
448
def l0_cache_ld(dut, addr, expected):
    """Drive a single LD transaction on port 1 and return the loaded data.

    :param dut: TstL0CacheBuffer (or equivalent) device under test
    :param addr: address to load from
    :param expected: expected value (kept for interface compatibility;
                     the caller performs the comparison, not this helper)
    :return: the data read from port 1
    """
    port1 = dut.l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)  # wait until not busy

    # set up a LD on the port. address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok

    return data
475
476
def l0_cache_ldst(dut):
    """ST two values to adjacent addresses, LD them back and check them."""
    yield
    addr1 = 0x2
    addr2 = 0x3
    data = 0xbeef
    data2 = 0xf00f
    yield from l0_cache_st(dut, addr1, data)
    yield from l0_cache_st(dut, addr2, data2)
    result = yield from l0_cache_ld(dut, addr1, data)
    result2 = yield from l0_cache_ld(dut, addr2, data2)
    yield
    assert data == result, "data %x != %x" % (result, data)
    assert data2 == result2, "data2 %x != %x" % (result2, data2)
490
491
def test_l0_cache():
    """Convert the test L0 cache to RTLIL, then simulate a LD/ST sequence."""
    dut = TstL0CacheBuffer()

    # emit the design as RTLIL for inspection
    il_text = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(il_text)

    # run the LD/ST handshake process against the DUT
    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
501
502
if __name__ == '__main__':
    # run the RTLIL conversion and basic LD/ST simulation directly
    test_l0_cache()