Bit of a mess, but functional: the unit test passes on the "basic" L0CacheBuffer.
[soc.git] / src / soc / experiment / l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
18 from nmigen.compat.sim import run_simulation
19 from nmigen.cli import verilog, rtlil
20 from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
21 from nmutil.iocontrol import RecordObject
22 from nmigen.utils import log2_int
23
24 from nmutil.latch import SRLatch, latchregister
25 from soc.decoder.power_decoder2 import Data
26 from soc.decoder.power_enums import InternalOp
27
28 from soc.experiment.compldst import CompLDSTOpSubset
29 from soc.decoder.power_decoder2 import Data
30 #from nmutil.picker import PriorityPicker
31 from nmigen.lib.coding import PriorityEncoder
32
33 # for testing purposes
34 from soc.experiment.testmem import TestMemory
35
36
class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept appraised that the request is in
    progress, and only when it has a 100% successful completion rate
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o had been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):
        # NOTE: RecordObject collects fields in declaration order, so the
        # attribute assignments below also define the record layout order.

        self._regwid = regwid    # LD/ST data width, in bits
        self._addrwid = addrwid  # address width, in bits

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st) - only one may be asserted at a time
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset() # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True) # do not use if busy
        self.go_die_i = Signal(reset_less=True) # back to reset
        self.addr = Data(addrwid, "addr_i") # addr/addr-ok
        self.addr_ok_o = Signal(reset_less=True) # addr is valid (TLB, L1 etc.)
        self.addr_exc_o = Signal(reset_less=True) # TODO, "type" of exception

        # LD/ST data paths (each a Data record: .data plus .ok strobe)
        self.ld = Data(regwid, "ld_data_o") # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i") # ok to be set by CompUnit
112
113
class LDSTPort(Elaboratable):
    """LDSTPort: wraps one PortInterface and monitors its "busy" state.

    This module deliberately does NOT drive the LD/ST data/ack outputs -
    the L0 Cache/Buffer sets those.  Here, busy is raised when a LD or
    ST request starts, and cleared either on an address exception or
    (via the cyc latch, one cycle later) once ld.ok / st.ok is seen.
    """
    def __init__(self, idx, regwid=64, addrwid=48):
        # idx only affects the record's name (ldst_port0, ldst_port1, ...)
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches: busy_l is combinatorial (sync=False); cyc_l is
        # synchronous (sync=True) so its q appears one clock later
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)  # defaults, overridden conditionally below
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # an address exception clears busy straight away
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset: ld.ok/st.ok
        # set the (synchronous) cyc latch, which fires the reset below
        # on the following clock
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)   # self-clear the cycle latch...
            comb += busy_l.r.eq(1)  # ...and drop busy

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        # enumerate every signal of the PortInterface (for ports()/tracing)
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)
167
168
class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, where a nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """
    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        """n_units: number of LDSTPorts to create
        mem:     memory with TestMemory-style rdport/wrport members
        regwid:  LD/ST data width in bits
        addrwid: address width in bits
        """
        self.n_units = n_units
        self.mem = mem
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        # Array: allows the ports to be indexed by a runtime Signal
        self.dports = Array(ul)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)

        # accumulate per-port request signals (request = op bit AND busy)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability:
        # while idx_l.qn is high the encoder output flows straight through,
        # and is captured when the index latch closes
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        # NOTE: adrok_l.s is given its default in the *sync* domain, so
        # the "ack" set below takes effect one clock later; adrok_l.r
        # remains combinatorial.
        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):           # n clear: at least one LD requested
            comb += ld_active.s.eq(1)   # activate LD mode
            comb += idx_l.r.eq(1)       # pick (and capture) the port index
            comb += adrok_l.r.eq(1)     # address not yet "ok'd"
        with m.Elif(~stpick.n):         # otherwise check for a ST request
            comb += st_active.s.eq(1)   # activate ST mode
            comb += idx_l.r.eq(1)       # pick (and capture) the port index
            comb += adrok_l.r.eq(1)     # address not yet "ok'd"

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            with m.If(ldport.addr.ok):
                comb += rdport.addr.eq(ldport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += ldport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # if now in "ST" mode: likewise send the address out to memory
        # and acknowledge it (the data arrives later, with st.ok)
        with m.If(st_active.q):
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(stport.addr.data)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            comb += ldport.ld.data.eq(rdport.data)  # put data out
            comb += ldport.ld.ok.eq(1)              # indicate data valid
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            comb += wrport.data.eq(stport.st.data)  # write st to mem
            comb += wrport.en.eq(1)                 # enable write
            comb += reset_l.s.eq(1)                 # reset mode after 1 cycle

        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset

        return m

    def ports(self):
        # flatten the signal lists of all LDSTPorts
        for p in self.dports:
            yield from p.ports()
306
307
class TstL0CacheBuffer(Elaboratable):
    """Test harness: an L0CacheBuffer wired directly to a TestMemory."""

    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        # attach the memory and the cache/buffer as submodules
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0
        return m

    def ports(self):
        # every port-interface signal, followed by the raw memory ports
        yield from self.l0.ports()
        for mport in (self.mem.rdport, self.mem.wrport):
            yield mport.addr
            yield mport.data
        # TODO: remaining mem ports
327
def wait_busy(port, no=False):
    """Simulation coroutine: sample busy_o each cycle, returning once
    bool(busy_o) equals `no`.  (no=False thus waits for not-busy;
    no=True waits for busy.)
    """
    busy = yield port.pi.busy_o
    print ("busy", no, busy)
    while bool(busy) != no:
        yield
        busy = yield port.pi.busy_o
        print ("busy", no, busy)
335
336
def wait_addr(port):
    """Simulation coroutine: sample addr_ok_o each cycle, returning
    once it reads as deasserted (it is a one-cycle pulse)."""
    addr_ok = yield port.pi.addr_ok_o
    print ("addrok", addr_ok)
    while addr_ok:
        yield
        addr_ok = yield port.pi.addr_ok_o
        print ("addrok", addr_ok)
344
def wait_ldok(port):
    """Simulation coroutine: sample ld.ok each cycle, returning once it
    is asserted (the cycle on which ld.data must be captured)."""
    ldok = yield port.pi.ld.ok
    print ("ldok", ldok)
    while not ldok:
        yield
        ldok = yield port.pi.ld.ok
        print ("ldok", ldok)
352
def l0_cache_st(dut, addr, data):
    """Simulation process: issue one ST of `data` to `addr` via port 1.

    Follows the PortInterface FSM: wait for not-busy, raise is_st_i,
    present the address, wait out the addr_ok_o pulse, then pulse
    st.ok (with st.data valid) for exactly one cycle.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)  # wait until addr ok

    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0) #end
    yield port1.pi.addr.ok.eq(0) # set !ok
    # NOTE(review): no=True makes wait_busy exit when busy_o is *asserted*;
    # busy_o is expected still to be high at this point (it drops the cycle
    # after st.ok), so this returns promptly - confirm intent vs. the
    # original "wait until not busy" comment.
    yield from wait_busy(port1, True)
379
380
def l0_cache_ld(dut, addr, expected):
    """Simulation process: issue one LD from `addr` via port 1 and
    return the value read.

    `expected` is not checked here (the caller compares the returned
    data); the parameter is kept for signature compatibility.
    """
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    # NOTE(review): wait_addr exits when addr_ok_o reads as *low*; since
    # addr_ok_o is a one-cycle pulse this waits for the pulse to pass
    # (or returns immediately if sampled before it) - confirm.
    yield from wait_addr(port1)

    yield from wait_ldok(port1)  # wait until ld ok
    data = yield port1.pi.ld.data  # capture data on the ld.ok cycle

    # cleanup
    yield port1.pi.is_ld_i.eq(0) #end
    yield port1.pi.addr.ok.eq(0) # set !ok
    yield from wait_busy(port1, no=True)  # exits when busy_o == True

    return data
407
408
def l0_cache_ldst(dut):
    """Simulation process: ST a value into memory, LD it back from the
    same address, and assert the round-trip result matches."""
    yield
    addr = 0x2
    data = 0xbeef
    #data = 0x4
    yield from l0_cache_st(dut, addr, data)
    result = yield from l0_cache_ld(dut, addr, data)
    yield
    yield
    yield
    # fix: message previously interpolated (result, data), printing the
    # values in the opposite order to what "data %x != %x" implies
    assert data == result, "data %x != %x" % (data, result)
420
421
def test_l0_cache():
    """Convert the test harness to RTLIL (sanity check / inspection),
    then run the LD/ST round-trip simulation against it."""
    dut = TstL0CacheBuffer()

    # emit RTLIL so the design can be inspected (e.g. with yosys)
    il_text = rtlil.convert(dut, ports=dut.ports())
    with open("test_basic_l0_cache.il", "w") as f:
        f.write(il_text)

    # drive the ST-then-LD process, recording a VCD trace
    run_simulation(dut, l0_cache_ldst(dut),
                   vcd_name='test_l0_cache_basic.vcd')
431
432
if __name__ == '__main__':
    # run the basic L0 cache/buffer unit test directly
    test_l0_cache()