1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """

from nmigen.compat.sim import run_simulation, Settle
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce
from nmutil.util import treereduce

from soc.fu.ldst.ldst_input_record import CompLDSTOpSubset
from soc.decoder.power_decoder2 import Data
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder
from soc.scoreboard.addr_split import LDSTSplitter
from soc.scoreboard.addr_match import LenExpand

# for testing purposes
from soc.experiment.testmem import TestMemory # TODO: replace with TMLSUI
# TODO: from soc.experiment.testmem import TestMemoryLoadStoreUnit

import unittest


class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when the request is 100% guaranteed to complete
    successfully may the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted for one cycle, and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request can be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset() # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True) # do not use if busy
        self.go_die_i = Signal(reset_less=True) # back to reset
        self.addr = Data(addrwid, "addr_i") # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True) # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o") # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i") # ok to be set by CompUnit

    # TODO: elaborate function


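# A minimal illustrative sketch (not part of the module, and simplified
# relative to the l0_cache_ld / l0_cache_st helpers further down): how a
# requester such as LDSTCompUnit would drive a LD through PortInterface,
# following the FSM rules documented above.  "pi" is assumed to be a
# PortInterface already connected to an L0CacheBuffer port.
def example_pi_ld_sequence(pi, addr):
    yield pi.is_ld_i.eq(1)       # request a LD: busy_o will be asserted
    yield pi.addr.data.eq(addr)  # present the address...
    yield pi.addr.ok.eq(1)       # ...and mark it valid (hold until done)
    while not (yield pi.addr_ok_o):  # wait for the one-cycle address ack
        yield
    while not (yield pi.ld.ok):      # wait for the one-cycle ld.ok pulse
        yield
    data = yield pi.ld.data      # ld.data must be captured on this cycle
    yield pi.is_ld_i.eq(0)       # request complete: release everything
    yield pi.addr.ok.eq(0)
    return data

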
class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it, LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """
    def __init__(self):
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        self.inp = PortInterface(name="inp")
        print(self.outp)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        m.submodules.splitter = splitter = LDSTSplitter(64, 48, 4)
        comb += splitter.addr_i.eq(self.inp.addr) #XXX
        #comb += splitter.len_i.eq()
        #comb += splitter.valid_i.eq()
        comb += splitter.is_ld_i.eq(self.inp.is_ld_i)
        comb += splitter.is_st_i.eq(self.inp.is_st_i)
        #comb += splitter.st_data_i.eq()
        #comb += splitter.sld_valid_i.eq()
        #comb += splitter.sld_data_i.eq()
        #comb += splitter.sst_valid_i.eq()
        return m


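# Illustration only (not used by DualPortSplitter above): a plain-Python
# sketch of the bit-4 routing rule described in the docstring, assuming
# lengths of 1-16 bytes so that a transfer touches at most two 16-byte
# lines.  Returns which outgoing PortInterface indices would be involved.
def example_split_ports(addr, length):
    first = (addr >> 4) & 1                # bit 4 of the first byte's line
    last = ((addr + length - 1) >> 4) & 1  # bit 4 of the last byte's line
    return sorted({first, last})           # e.g. (0x1e, 4) -> [0, 1]

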
class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16))
        Record.__init__(self, Layout(layout), name=name)

        self.data.reset_less = True
        self.en.reset_less = True


# TODO: formal verification
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating address
                       match.  bits across the diagonal (addr_array_i[x][x])
                       will always be set, to indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # (1) pick a row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o
        # (2) merge
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                r = DataMergerRecord()
                with m.If(select):
                    comb += r.eq(self.data_i[j])
                l.append(r)
            comb += self.data_o.data.eq(ortreereduce(l, "data"))
            comb += self.data_o.en.eq(ortreereduce(l, "en"))

        return m


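# Illustrative reference model of the merge performed by DataMerger above
# (not used by the hardware): "rows" is a list of (data, en) tuples and
# "addr_match" the per-row match vectors as plain integers.  The first
# row with a non-zero match vector is picked (mirroring PriorityEncoder)
# and every row selected by that vector is ORed into the output.
def example_data_merge(addr_match, rows):
    for mask in addr_match:
        if mask:                       # first "active" row wins
            data, en = 0, 0
            for j, (d, e) in enumerate(rows):
                if (mask >> j) & 1:    # row j is on the same cache line
                    data |= d
                    en |= e
            return data, en
    return 0, 0                        # no active rows: outputs stay zero

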
class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception: it resets "busy" immediately
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however LD/ST completion needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced")

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and it has one important property: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.

    Note: the data byte-order is *not* expected to be normalised (LE/BE)
    by this class.  That task is taken care of by LDSTCompUnit.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem # TODO: remove, replace with lsui
        # TODO: self.lsui = LoadStoreUnitInterface(addr_wid=addrwid....)
        self.regwid = regwid
        self.addrwid = addrwid
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    @property
    def addrbits(self):
        return log2_int(self.mem.regwid//8)

    def splitaddr(self, addr):
        """split the address into bottom (byte-within-word) and top
        (word-select) bits, at the granularity of the underlying memory
        """
        return addr[:self.addrbits], addr[self.addrbits:]

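    # worked example for splitaddr() above (illustrative): with the 64-bit
    # memory used by the test at the bottom of this file (regwid=64),
    # addrbits == 3, so an address of 0b1010110 conceptually splits into
    # (0b110, 0b1010): the low three bits select bytes within the 64-bit
    # word, and the upper bits address the word in the TestMemory.
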
    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)
        m.submodules.lenexp = lenexp = LenExpand(4, 8)
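        # note (illustration, inferred from how lenexp is used below):
        # len_i is the access length in bytes and addr_i the low bits of
        # the address; lexp_o is the byte-level enable mask and rexp_o
        # the same mask expanded to one bit per data bit.  a 2-byte
        # access with addr_i == 2 would therefore be expected to give
        # lexp_o == 0b00001100 and rexp_o == 0xffff0000.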

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o) # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o) # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1) # activate LD mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1) # activate ST mode
            comb += idx_l.r.eq(1) # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

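        # rough cycle-by-cycle outline of a LD through this logic (the ST
        # path is the same shape, using wrport and the incoming st.data):
        #  1. a port asserts is_ld_i: busy_o goes high, ldpick selects it,
        #     ld_active is set and idx_l captures the port index.
        #  2. when addr.ok arrives, rdport.addr is driven, addr_ok_o is
        #     pulsed for one cycle and adrok_l is set (sync).
        #  3. the following cycle, the masked and shifted read data is
        #     placed on ld.data together with ld.ok, and reset_l is set.
        #  4. reset_l (one cycle later) clears idx_l, ld/st_active and
        #     adrok_l, and the port itself drops busy_o, ready for the
        #     next request.
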
        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge the address, and send out the LD data
        with m.If(ld_active.q):
            # set up LenExpander with the LD len and lower bits of addr
            lsbaddr, msbaddr = self.splitaddr(ldport.addr.data)
            comb += lenexp.len_i.eq(ldport.op.data_len)
            comb += lenexp.addr_i.eq(lsbaddr)
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(msbaddr) # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1) # acknowledge addr ok
                sync += adrok_l.s.eq(1) # and pull "ack" latch

        # if now in "ST" mode: likewise acknowledge the address, and
        # (below, when st.ok arrives) write the ST data to memory
        with m.If(st_active.q):
            # set up LenExpander with the ST len and lower bits of addr
            lsbaddr, msbaddr = self.splitaddr(stport.addr.data)
            comb += lenexp.len_i.eq(stport.op.data_len)
            comb += lenexp.addr_i.eq(lsbaddr)
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(msbaddr) # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1) # acknowledge addr ok
                    sync += adrok_l.s.eq(1) # and pull "ack" latch

        # NOTE: in both cases below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (set by us, here) or st.ok going high (set by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            # shift data down before pushing out.  requires masking
            # from the *byte*-expanded version of LenExpand output
            lddata = Signal(self.regwid, reset_less=True)
            # TODO: replace rdport.data with LoadStoreUnitInterface.x_load_data
            # and also handle the ready/stall/busy protocol
            comb += lddata.eq((rdport.data & lenexp.rexp_o) >>
                              (lenexp.addr_i*8))
            comb += ldport.ld.data.eq(lddata) # put data out
            comb += ldport.ld.ok.eq(1) # indicate data valid
            comb += reset_l.s.eq(1) # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            # shift data up before storing.  lenexp's *byte*-level mask is
            # passed straight through as the "write-enable" lines.
            stdata = Signal(self.regwid, reset_less=True)
            comb += stdata.eq(stport.st.data << (lenexp.addr_i*8))
            # TODO: replace with link to LoadStoreUnitInterface.x_store_data
            # and also handle the ready/stall/busy protocol
            comb += wrport.data.eq(stdata) # write st to mem
            comb += wrport.en.eq(lenexp.lexp_o) # enable writes
            comb += reset_l.s.eq(1) # reset mode after 1 cycle

        # ugly hack, due to simultaneous addr req-go acknowledge
        reset_delay = Signal(reset_less=True)
        sync += reset_delay.eq(reset_l.q)
        with m.If(reset_delay):
            comb += adrok_l.r.eq(1) # address reset

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1) # deactivate port-index selector
            comb += ld_active.r.eq(1) # leave the LD active for 1 cycle
            comb += st_active.r.eq(1) # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1) # clear reset
            comb += adrok_l.r.eq(1) # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()


class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        # TODO: replace with TestMemoryLoadStoreUnit
        self.mem = TestMemory(regwid, addrwid, granularity=regwid//8)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid<<1)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data, datalen):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False) # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1) # indicate ST
    yield port1.pi.op.data_len.eq(datalen) # ST length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr) # set address
    yield port1.pi.addr.ok.eq(1) # set ok
    yield from wait_addr(port1) # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0) # end
    yield port1.pi.addr.ok.eq(0) # set !ok
    # yield from wait_busy(port1, False) # wait until not busy


def l0_cache_ld(dut, addr, datalen, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False) # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1) # indicate LD
    yield port1.pi.op.data_len.eq(datalen) # LD length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr) # set address
    yield port1.pi.addr.ok.eq(1) # set ok
    yield from wait_addr(port1) # wait until addr ok

    yield from wait_ldok(port1) # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0) # end
    yield port1.pi.addr.ok.eq(0) # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data


def l0_cache_ldst(arg, dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data, 2)
    yield from l0_cache_st(dut, 0x4, data2, 2)
    result = yield from l0_cache_ld(dut, 0x2, 2, data)
    result2 = yield from l0_cache_ld(dut, 0x4, 2, data2)
    yield
    arg.assertEqual(data, result, "data %x != %x" % (result, data))
    arg.assertEqual(data2, result2, "data2 %x != %x" % (result2, data2))


def data_merger_merge(dut):
    print("data_merger")
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

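    # row 0's match vector (0xFF) selects all eight rows, so every row is
    # ORed into the output: row j contributed en = 1 << j and data = 0xFF
    # at byte offset 2*j, giving en == 0xff and a 0xFF in every
    # even-numbered byte of the 128-bit merged data.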
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield


class TestL0Cache(unittest.TestCase):

    def test_l0_cache(self):

        dut = TstL0CacheBuffer(regwid=64)
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_basic_l0_cache.il", "w") as f:
        #    f.write(vl)

        run_simulation(dut, l0_cache_ldst(self, dut),
                       vcd_name='test_l0_cache_basic.vcd')


class TestDataMerger(unittest.TestCase):

    def test_data_merger(self):

        dut = DataMerger(8)
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_data_merger.il", "w") as f:
        #    f.write(vl)

        run_simulation(dut, data_merger_merge(dut),
                       vcd_name='test_data_merger.vcd')


class TestDualPortSplitter(unittest.TestCase):

    def test_dual_port_splitter(self):

        dut = DualPortSplitter()
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_dual_port_splitter.il", "w") as f:
        #    f.write(vl)

        #run_simulation(dut, data_merger_merge(dut),
        #               vcd_name='test_dual_port_splitter.vcd')


if __name__ == '__main__':
    unittest.main(exit=False)