add byte-reversal on LD and ST in L0CacheBuffer
soc.git: src/soc/experiment/l0_cache.py
1 """L0 Cache/Buffer
2
3 This first version is intended for prototyping and test purposes:
4 it has "direct" access to Memory.
5
6 The intention is that this version remains an integral part of the
7 test infrastructure, and, just as with minerva's memory arrangement,
8 a dynamic runtime config *selects* alternative memory arrangements
9 rather than *replaces and discards* this code.
10
11 Links:
12
13 * https://bugs.libre-soc.org/show_bug.cgi?id=216
14 * https://libre-soc.org/3d_gpu/architecture/memory_and_cache/
15
16 """
17
from nmigen.compat.sim import run_simulation, Settle
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Array, Cat
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.hdl.rec import Record, Layout

from nmutil.latch import SRLatch, latchregister
from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.regfile.regfile import ortreereduce
from nmutil.util import treereduce

from soc.fu.ldst.ldst_input_record import CompLDSTOpSubset
#from nmutil.picker import PriorityPicker
from nmigen.lib.coding import PriorityEncoder
from soc.scoreboard.addr_split import LDSTSplitter
from soc.scoreboard.addr_match import LenExpand

# for testing purposes
from soc.experiment.testmem import TestMemory

import unittest


class PortInterface(RecordObject):
    """PortInterface

    defines the interface - the API - that the LDSTCompUnit connects
    to.  note that this is NOT a "fire-and-forget" interface.  the
    LDSTCompUnit *must* be kept apprised that the request is in
    progress, and only when the request is 100% successfully complete
    can the notification be given (busy dropped).

    The interface FSM rules are as follows:

    * if busy_o is asserted, a LD/ST is in progress.  further
      requests may not be made until busy_o is deasserted.

    * only one of is_ld_i or is_st_i may be asserted.  busy_o
      will immediately be asserted and remain asserted.

    * addr.ok is to be asserted when the LD/ST address is known.
      addr.data is to be valid on the same cycle.

      addr.ok and addr.data must REMAIN asserted until busy_o
      is de-asserted.  this ensures that there is no need
      for the L0 Cache/Buffer to have an additional address latch
      (because the LDSTCompUnit already has it)

    * addr_ok_o (or addr_exc_o) must be waited for.  these will
      be asserted *only* for one cycle and one cycle only.

    * addr_exc_o will be asserted if there is no chance that the
      memory request may be fulfilled.

      busy_o is deasserted on the same cycle as addr_exc_o is asserted.

    * conversely: addr_ok_o must *ONLY* be asserted if there is a
      HUNDRED PERCENT guarantee that the memory request will be
      fulfilled.

    * for a LD, ld.ok will be asserted - for only one clock cycle -
      at any point in the future that is acceptable to the underlying
      Memory subsystem.  the recipient MUST latch ld.data on that cycle.

      busy_o is deasserted on the same cycle as ld.ok is asserted.

    * for a ST, st.ok may be asserted only after addr_ok_o has been
      asserted, alongside valid st.data at the same time.  st.ok
      must only be asserted for one cycle.

      the underlying Memory is REQUIRED to pick up that data and
      guarantee its delivery.  no back-acknowledgement is required.

      busy_o is deasserted on the cycle AFTER st.ok is asserted.

    A cycle-by-cycle sketch of the LD and ST handshakes is given in the
    comment block immediately after this class.
    """

    def __init__(self, name=None, regwid=64, addrwid=48):

        self._regwid = regwid
        self._addrwid = addrwid

        RecordObject.__init__(self, name=name)

        # distinguish op type (ld/st)
        self.is_ld_i = Signal(reset_less=True)
        self.is_st_i = Signal(reset_less=True)
        self.op = CompLDSTOpSubset()  # hm insn_type ld/st duplicates here

        # common signals
        self.busy_o = Signal(reset_less=True)    # do not use if busy
        self.go_die_i = Signal(reset_less=True)  # back to reset
        self.addr = Data(addrwid, "addr_i")      # addr/addr-ok
        # addr is valid (TLB, L1 etc.)
        self.addr_ok_o = Signal(reset_less=True)
        self.addr_exc_o = Signal(reset_less=True)  # TODO, "type" of exception

        # LD/ST
        self.ld = Data(regwid, "ld_data_o")  # ok to be set by L0 Cache/Buf
        self.st = Data(regwid, "st_data_i")  # ok to be set by CompUnit

    # TODO: elaborate function

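# The LD and ST handshakes described in PortInterface's docstring, at a
# glance.  This is an illustrative timeline only (see l0_cache_ld() and
# l0_cache_st() in the test section below for executable versions); cycle
# numbers are relative, not fixed:
#
#   LD:  cycle 0:  is_ld_i=1, addr.ok=1, addr.data=A   -> busy_o rises
#        cycle n:  addr_ok_o pulses for exactly one cycle
#                  (or addr_exc_o pulses and busy_o drops: request failed)
#        cycle m:  ld.ok pulses for one cycle, ld.data must be latched now;
#                  busy_o drops on this same cycle.
#
#   ST:  cycle 0:  is_st_i=1, addr.ok=1, addr.data=A   -> busy_o rises
#        cycle n:  addr_ok_o pulses for exactly one cycle
#        cycle p:  st.ok=1 with valid st.data, for one cycle only;
#                  busy_o drops on the cycle *after* st.ok.
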

class DualPortSplitter(Elaboratable):
    """DualPortSplitter

    * one incoming PortInterface
    * two *OUTGOING* PortInterfaces
    * uses LDSTSplitter to do it

    (actually, thinking about it LDSTSplitter could simply be
    modified to conform to PortInterface: one in, two out)

    once that is done each pair of ports may be wired directly
    to the dual ports of L0CacheBuffer

    The split is carried out so that, regardless of alignment or
    mis-alignment, outgoing PortInterface[0] takes bit 4 == 0
    of the address, whilst outgoing PortInterface[1] takes
    bit 4 == 1.

    PortInterface *may* need to be changed so that the length is
    a binary number (accepting values 1-16).
    """
    def __init__(self):
        self.outp = [PortInterface(name="outp_0"),
                     PortInterface(name="outp_1")]
        self.inp = PortInterface(name="inp")
        print(self.outp)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        m.submodules.splitter = splitter = LDSTSplitter(64, 48, 4)
        comb += splitter.addr_i.eq(self.inp.addr)  # XXX
        #comb += splitter.len_i.eq()
        #comb += splitter.valid_i.eq()
        comb += splitter.is_ld_i.eq(self.inp.is_ld_i)
        comb += splitter.is_st_i.eq(self.inp.is_st_i)
        #comb += splitter.st_data_i.eq()
        #comb += splitter.sld_valid_i.eq()
        #comb += splitter.sld_data_i.eq()
        #comb += splitter.sst_valid_i.eq()
        return m

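# Worked example of the DualPortSplitter address split (illustrative comment
# only): an 8-byte access at address 0x1A covers bytes 0x1A-0x21, crossing a
# 16-byte boundary.  Bytes 0x1A-0x1F have address bit 4 == 1 and so go to
# outgoing PortInterface[1]; bytes 0x20-0x21 have address bit 4 == 0 and go
# to outgoing PortInterface[0].
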

class DataMergerRecord(Record):
    """
    {data: 128 bit, byte_enable: 16 bit}
    """

    def __init__(self, name=None):
        layout = (('data', 128),
                  ('en', 16))
        Record.__init__(self, Layout(layout), name=name)

        self.data.reset_less = True
        self.en.reset_less = True


# TODO: formal verification
class DataMerger(Elaboratable):
    """DataMerger

    Merges data based on an address-match matrix.
    Identifies (picks) one (any) row, then uses that row,
    based on matching address bits, to merge (OR) all data
    rows into the output.

    Basically, by the time DataMerger is used, all of its incoming data is
    determined not to conflict.  The last step before actually submitting
    the request to the Memory Subsystem is to work out which requests,
    on the same 128-bit cache line, can be "merged" due to them being:
    (A) on the same address (bits 4 and above) (B) having byte-enable
    lines that (as previously mentioned) do not conflict.

    Therefore, put simply, this module will:
    (1) pick a row (any row) and identify it by an index labelled "idx"
    (2) merge all byte-enable lines which are on that same address, as
        indicated by addr_match_i[idx], onto the output
    """

    def __init__(self, array_size):
        """
        :addr_array_i: an NxN Array of Signals with bits set indicating address
                       match.  bits across the diagonal (addr_array_i[x][x])
                       will always be set, to indicate "active".
        :data_i: an Nx Array of Records {data: 128 bit, byte_enable: 16 bit}
        :data_o: an Output Record of same type
                 {data: 128 bit, byte_enable: 16 bit}
        """
        self.array_size = array_size
        ul = []
        for i in range(array_size):
            ul.append(Signal(array_size,
                             reset_less=True,
                             name="addr_match_%d" % i))
        self.addr_array_i = Array(ul)

        ul = []
        for i in range(array_size):
            ul.append(DataMergerRecord())
        self.data_i = Array(ul)
        self.data_o = DataMergerRecord()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # (1) pick a row
        m.submodules.pick = pick = PriorityEncoder(self.array_size)
        for j in range(self.array_size):
            comb += pick.i[j].eq(self.addr_array_i[j].bool())
        valid = ~pick.n
        idx = pick.o
        # (2) merge
        with m.If(valid):
            l = []
            for j in range(self.array_size):
                select = self.addr_array_i[idx][j]
                r = DataMergerRecord()
                with m.If(select):
                    comb += r.eq(self.data_i[j])
                l.append(r)
            comb += self.data_o.data.eq(ortreereduce(l, "data"))
            comb += self.data_o.en.eq(ortreereduce(l, "en"))

        return m

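# Software reference model of the pick-and-merge rule implemented by
# DataMerger above, intended for testbench cross-checking.  This is a
# sketch only: merge_model() is a hypothetical helper, not part of the
# module's API.
def merge_model(addr_match, data, en):
    """addr_match: list of N integer bitmasks (one per row);
    data: list of N 128-bit integers; en: list of N 16-bit byte-enables.
    Picks the first row with any match bit set (as PriorityEncoder does),
    then ORs together every row j for which that row's bitmask has bit j set.
    """
    for idx, row in enumerate(addr_match):
        if row:          # first non-zero row wins
            break
    else:
        return 0, 0      # no valid rows: outputs stay at zero
    d_o = en_o = 0
    for j in range(len(data)):
        if (addr_match[idx] >> j) & 1:
            d_o |= data[j]
            en_o |= en[j]
    return d_o, en_o

# e.g. with addr_match[0] = 0xFF, data[j] = 0xFF << (16*j), en[j] = 1 << j
# (the stimulus used by data_merger_merge() below), merge_model() returns
# (0x00ff00ff00ff00ff00ff00ff00ff00ff, 0xff).
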

class LDSTPort(Elaboratable):
    def __init__(self, idx, regwid=64, addrwid=48):
        self.pi = PortInterface("ldst_port%d" % idx, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # latches
        m.submodules.busy_l = busy_l = SRLatch(False, name="busy")
        m.submodules.cyc_l = cyc_l = SRLatch(True, name="cyc")
        comb += cyc_l.s.eq(0)
        comb += cyc_l.r.eq(0)

        # this is a little weird: we let the L0Cache/Buffer set
        # the outputs: this module just monitors "state".

        # LD/ST requested activates "busy"
        with m.If(self.pi.is_ld_i | self.pi.is_st_i):
            comb += busy_l.s.eq(1)

        # monitor for an exception or the completion of LD.
        with m.If(self.pi.addr_exc_o):
            comb += busy_l.r.eq(1)

        # however ST needs one cycle before busy is reset
        with m.If(self.pi.st.ok | self.pi.ld.ok):
            comb += cyc_l.s.eq(1)

        with m.If(cyc_l.q):
            comb += cyc_l.r.eq(1)
            comb += busy_l.r.eq(1)

        # busy latch outputs to interface
        comb += self.pi.busy_o.eq(busy_l.q)

        return m

    def __iter__(self):
        yield self.pi.is_ld_i
        yield self.pi.is_st_i
        yield from self.pi.op.ports()
        yield self.pi.busy_o
        yield self.pi.go_die_i
        yield from self.pi.addr.ports()
        yield self.pi.addr_ok_o
        yield self.pi.addr_exc_o

        yield from self.pi.ld.ports()
        yield from self.pi.st.ports()

    def ports(self):
        return list(self)


# TODO: turn this into a module
def byte_reverse(m, data, length):
    """byte-reverse the bottom `length` bytes of `data` (length in 1/2/4/8),
    returning a new Signal holding the reversed value"""
    comb = m.d.comb
    name = "%s_r" % (data.name)
    data_r = Signal.like(data, name=name)
    with m.Switch(length):
        for j in [1, 2, 4, 8]:
            with m.Case(j):
                for i in range(j):
                    dest = data_r.word_select(i, 8)
                    src = data.word_select(j-1-i, 8)
                    comb += dest.eq(src)
    return data_r

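
# Software equivalent of byte_reverse() above, for use as a testbench
# reference.  A minimal sketch: pyrev() is a hypothetical helper, not part
# of the module's API.
def pyrev(value, length):
    """reverse the bottom `length` bytes of `value` (length in 1/2/4/8)"""
    value &= (1 << (8 * length)) - 1
    return int.from_bytes(value.to_bytes(length, "little"), "big")

# e.g. pyrev(0xbeef, 2) == 0xefbe, matching what byte_reverse() produces
# for a 2-byte operand.
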

class L0CacheBuffer(Elaboratable):
    """L0 Cache / Buffer

    Note that the final version will have *two* interfaces per LDSTCompUnit,
    to cover mis-aligned requests, as well as *two* 128-bit L1 Cache
    interfaces: one for odd (addr[4] == 1) and one for even (addr[4] == 0).

    This version is to be used for test purposes (and actively maintained
    for such, rather than "replaced").

    There are much better ways to implement this.  However it's only
    a "demo" / "test" class, and one important aspect: it responds
    combinatorially, whereas an nmigen FSM's state-changes only activate
    on clock-sync boundaries.
    """

    def __init__(self, n_units, mem, regwid=64, addrwid=48):
        self.n_units = n_units
        self.mem = mem
        self.regwid = regwid
        self.addrwid = addrwid
        ul = []
        for i in range(n_units):
            ul.append(LDSTPort(i, regwid, addrwid))
        self.dports = Array(ul)

    @property
    def addrbits(self):
        return log2_int(self.mem.regwid//8)

    def splitaddr(self, addr):
        """split the address into top and bottom bits of the memory granularity
        """
        return addr[:self.addrbits], addr[self.addrbits:]

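    # Illustration of the split (assuming a 64-bit-wide memory, i.e.
    # mem.regwid == 64, so addrbits == 3): an address of 0x1234 splits
    # into lsbs 0x4 (the byte offset within the 64-bit row) and msbs
    # 0x246 (the row address driven onto the memory rdport/wrport).
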
    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync

        # connect the ports as modules
        for i in range(self.n_units):
            setattr(m.submodules, "port%d" % i, self.dports[i])

        # state-machine latches
        m.submodules.st_active = st_active = SRLatch(False, name="st_active")
        m.submodules.ld_active = ld_active = SRLatch(False, name="ld_active")
        m.submodules.reset_l = reset_l = SRLatch(True, name="reset")
        m.submodules.idx_l = idx_l = SRLatch(False, name="idx_l")
        m.submodules.adrok_l = adrok_l = SRLatch(False, name="addr_acked")

        # find one LD (or ST) and do it.  only one per cycle.
        # TODO: in the "live" (production) L0Cache/Buffer, merge multiple
        # LD/STs using mask-expansion - see LenExpand class

        m.submodules.ldpick = ldpick = PriorityEncoder(self.n_units)
        m.submodules.stpick = stpick = PriorityEncoder(self.n_units)
        m.submodules.lenexp = lenexp = LenExpand(4, 8)

        lds = Signal(self.n_units, reset_less=True)
        sts = Signal(self.n_units, reset_less=True)
        ldi = []
        sti = []
        for i in range(self.n_units):
            pi = self.dports[i].pi
            ldi.append(pi.is_ld_i & pi.busy_o)  # accumulate ld-req signals
            sti.append(pi.is_st_i & pi.busy_o)  # accumulate st-req signals
        # put the requests into the priority-pickers
        comb += ldpick.i.eq(Cat(*ldi))
        comb += stpick.i.eq(Cat(*sti))

        # hmm, have to select (record) the right port index
        nbits = log2_int(self.n_units, False)
        ld_idx = Signal(nbits, reset_less=False)
        st_idx = Signal(nbits, reset_less=False)
        # use these because of the sync-and-comb pass-through capability
        latchregister(m, ldpick.o, ld_idx, idx_l.qn, name="ld_idx_l")
        latchregister(m, stpick.o, st_idx, idx_l.qn, name="st_idx_l")

        # convenience variables to reference the "picked" port
        ldport = self.dports[ld_idx].pi
        stport = self.dports[st_idx].pi
        # and the memory ports
        rdport = self.mem.rdport
        wrport = self.mem.wrport

        # Priority-Pickers pick one and only one request, capture its index.
        # from that point on this code *only* "listens" to that port.

        sync += adrok_l.s.eq(0)
        comb += adrok_l.r.eq(0)
        with m.If(~ldpick.n):
            comb += ld_active.s.eq(1)  # activate LD mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index
        with m.Elif(~stpick.n):
            comb += st_active.s.eq(1)  # activate ST mode
            comb += idx_l.r.eq(1)      # pick (and capture) the port index

        # from this point onwards, with the port "picked", it stays picked
        # until ld_active (or st_active) are de-asserted.

        # if now in "LD" mode: wait for addr_ok, then send the address out
        # to memory, acknowledge address, and send out LD data
        with m.If(ld_active.q):
            # set up LenExpander with the LD len and lower bits of addr
            lsbaddr, msbaddr = self.splitaddr(ldport.addr.data)
            comb += lenexp.len_i.eq(ldport.op.data_len)
            comb += lenexp.addr_i.eq(lsbaddr)
            with m.If(ldport.addr.ok & adrok_l.qn):
                comb += rdport.addr.eq(msbaddr)  # addr ok, send thru
                comb += ldport.addr_ok_o.eq(1)   # acknowledge addr ok
                sync += adrok_l.s.eq(1)          # and pull "ack" latch

        # if now in "ST" mode: likewise wait for addr_ok and send the address
        # to memory, acknowledge the address; the ST data is accepted below
        with m.If(st_active.q):
            # set up LenExpander with the ST len and lower bits of addr
            lsbaddr, msbaddr = self.splitaddr(stport.addr.data)
            comb += lenexp.len_i.eq(stport.op.data_len)
            comb += lenexp.addr_i.eq(lsbaddr)
            with m.If(stport.addr.ok):
                comb += wrport.addr.eq(msbaddr)  # addr ok, send thru
                with m.If(adrok_l.qn):
                    comb += stport.addr_ok_o.eq(1)  # acknowledge addr ok
                    sync += adrok_l.s.eq(1)         # and pull "ack" latch

        # NOTE: in both these, below, the port itself takes care
        # of de-asserting its "busy_o" signal, based on either ld.ok going
        # high (by us, here) or by st.ok going high (by the LDSTCompUnit).

        # for LD mode, when addr has been "ok'd", assume that (because this
        # is a "Memory" test-class) the memory read data is valid.
        comb += reset_l.s.eq(0)
        comb += reset_l.r.eq(0)
        with m.If(ld_active.q & adrok_l.q):
            # shift data down before pushing out.  requires masking
            # from the *byte*-expanded version of LenExpand output
            lddata = Signal(self.regwid, reset_less=True)
            comb += lddata.eq((rdport.data & lenexp.rexp_o) >>
                              (lenexp.addr_i*8))
            # byte-reverse the data based on width
            lddata_r = byte_reverse(m, lddata, lenexp.len_i)
            comb += ldport.ld.data.eq(lddata_r)  # put data out
            comb += ldport.ld.ok.eq(1)           # indicate data valid
            comb += reset_l.s.eq(1)              # reset mode after 1 cycle

        # for ST mode, when addr has been "ok'd", wait for incoming "ST ok"
        with m.If(st_active.q & stport.st.ok):
            # shift data up before storing.  lenexp *bit* version of mask is
            # passed straight through as byte-level "write-enable" lines.
            # byte-reverse the data based on width
            stdata_r = byte_reverse(m, stport.st.data, lenexp.len_i)
            stdata = Signal(self.regwid, reset_less=True)
            comb += stdata.eq(stdata_r << (lenexp.addr_i*8))
            comb += wrport.data.eq(stdata)        # write st to mem
            comb += wrport.en.eq(lenexp.lexp_o)   # enable writes
            comb += reset_l.s.eq(1)               # reset mode after 1 cycle

        # ugly hack, due to simultaneous addr req-go acknowledge
        reset_delay = Signal(reset_less=True)
        sync += reset_delay.eq(reset_l.q)
        with m.If(reset_delay):
            comb += adrok_l.r.eq(1)  # address reset

        # after waiting one cycle (reset_l is "sync" mode), reset the port
        with m.If(reset_l.q):
            comb += idx_l.s.eq(1)      # deactivate port-index selector
            comb += ld_active.r.eq(1)  # leave the LD active for 1 cycle
            comb += st_active.r.eq(1)  # leave the ST active for 1 cycle
            comb += reset_l.r.eq(1)    # clear reset
            comb += adrok_l.r.eq(1)    # address reset

        return m

    def ports(self):
        for p in self.dports:
            yield from p.ports()

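
# Software reference models of the LD and ST datapaths wired up in
# L0CacheBuffer.elaborate() above (mask with the byte-expanded LenExpand
# output, shift, and byte-reverse).  These are sketches for testbench
# cross-checking only: ld_extract_model() and st_insert_model() are
# hypothetical helpers, not part of the module's API, and they assume a
# 64-bit memory row (regwid == 64).

def ld_extract_model(mem_row, lsbaddr, length):
    """model of the LD path: mask the selected bytes of the memory row
    (lenexp.rexp_o), shift them down to bit 0, then byte-reverse."""
    mask = ((1 << (8 * length)) - 1) << (8 * lsbaddr)     # lenexp.rexp_o
    lddata = (mem_row & mask) >> (8 * lsbaddr)
    return int.from_bytes(lddata.to_bytes(length, "little"), "big")


def st_insert_model(st_data, lsbaddr, length):
    """model of the ST path: byte-reverse the incoming data, shift it up
    to the target byte lane, and produce the byte-enable (lenexp.lexp_o)."""
    st_data &= (1 << (8 * length)) - 1
    rev = int.from_bytes(st_data.to_bytes(length, "little"), "big")
    wrdata = rev << (8 * lsbaddr)              # drives wrport.data
    wren = ((1 << length) - 1) << lsbaddr      # drives wrport.en
    return wrdata, wren
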

class TstL0CacheBuffer(Elaboratable):
    def __init__(self, n_units=3, regwid=16, addrwid=4):
        self.mem = TestMemory(regwid, addrwid, granularity=regwid//8)
        self.l0 = L0CacheBuffer(n_units, self.mem, regwid, addrwid)

    def elaborate(self, platform):
        m = Module()
        m.submodules.mem = self.mem
        m.submodules.l0 = self.l0

        return m

    def ports(self):
        yield from self.l0.ports()
        yield self.mem.rdport.addr
        yield self.mem.rdport.data
        yield self.mem.wrport.addr
        yield self.mem.wrport.data
        # TODO: mem ports


def wait_busy(port, no=False):
    while True:
        busy = yield port.pi.busy_o
        print("busy", no, busy)
        if bool(busy) == no:
            break
        yield


def wait_addr(port):
    while True:
        addr_ok = yield port.pi.addr_ok_o
        print("addrok", addr_ok)
        if not addr_ok:
            break
        yield


def wait_ldok(port):
    while True:
        ldok = yield port.pi.ld.ok
        print("ldok", ldok)
        if ldok:
            break
        yield


def l0_cache_st(dut, addr, data, datalen):
    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a ST on the port.  address first:
    yield port1.pi.is_st_i.eq(1)  # indicate ST
    yield port1.pi.op.data_len.eq(datalen)  # ST length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)             # wait until addr ok
    # yield # not needed, just for checking
    # yield # not needed, just for checking
    # assert "ST" for one cycle (required by the API)
    yield port1.pi.st.data.eq(data)
    yield port1.pi.st.ok.eq(1)
    yield
    yield port1.pi.st.ok.eq(0)

    # can go straight to reset.
    yield port1.pi.is_st_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, False)    # wait until not busy


def l0_cache_ld(dut, addr, datalen, expected):

    l0 = dut.l0
    mem = dut.mem
    port0 = l0.dports[0]
    port1 = l0.dports[1]

    # have to wait until not busy
    yield from wait_busy(port1, no=False)    # wait until not busy

    # set up a LD on the port.  address first:
    yield port1.pi.is_ld_i.eq(1)  # indicate LD
    yield port1.pi.op.data_len.eq(datalen)  # LD length (1/2/4/8)

    yield port1.pi.addr.data.eq(addr)  # set address
    yield port1.pi.addr.ok.eq(1)  # set ok
    yield from wait_addr(port1)             # wait until addr ok

    yield from wait_ldok(port1)             # wait until ld ok
    data = yield port1.pi.ld.data

    # cleanup
    yield port1.pi.is_ld_i.eq(0)  # end
    yield port1.pi.addr.ok.eq(0)  # set !ok
    # yield from wait_busy(port1, no=False) # wait until not busy

    return data


def l0_cache_ldst(arg, dut):
    yield
    addr = 0x2
    data = 0xbeef
    data2 = 0xf00f
    #data = 0x4
    yield from l0_cache_st(dut, 0x2, data, 2)
    yield from l0_cache_st(dut, 0x4, data2, 2)
    result = yield from l0_cache_ld(dut, 0x2, 2, data)
    result2 = yield from l0_cache_ld(dut, 0x4, 2, data2)
    yield
    arg.assertEqual(data, result, "data %x != %x" % (result, data))
    arg.assertEqual(data2, result2, "data2 %x != %x" % (result2, data2))


def data_merger_merge(dut):
    print("data_merger")
    # starting with all inputs zero
    yield Settle()
    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert en == 0, "en must be zero"
    assert data == 0, "data must be zero"
    yield

    yield dut.addr_array_i[0].eq(0xFF)
    for j in range(dut.array_size):
        yield dut.data_i[j].en.eq(1 << j)
        yield dut.data_i[j].data.eq(0xFF << (16*j))
    yield Settle()

    en = yield dut.data_o.en
    data = yield dut.data_o.data
    assert data == 0xff00ff00ff00ff00ff00ff00ff00ff
    assert en == 0xff
    yield


class TestL0Cache(unittest.TestCase):

    def test_l0_cache(self):

        dut = TstL0CacheBuffer(regwid=64)
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_basic_l0_cache.il", "w") as f:
        #    f.write(vl)

        run_simulation(dut, l0_cache_ldst(self, dut),
                       vcd_name='test_l0_cache_basic.vcd')


class TestDataMerger(unittest.TestCase):

    def test_data_merger(self):

        dut = DataMerger(8)
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_data_merger.il", "w") as f:
        #    f.write(vl)

        run_simulation(dut, data_merger_merge(dut),
                       vcd_name='test_data_merger.vcd')


class TestDualPortSplitter(unittest.TestCase):

    def test_dual_port_splitter(self):

        dut = DualPortSplitter()
        #vl = rtlil.convert(dut, ports=dut.ports())
        #with open("test_data_merger.il", "w") as f:
        #    f.write(vl)

        #run_simulation(dut, data_merger_merge(dut),
        #               vcd_name='test_dual_port_splitter.vcd')


if __name__ == '__main__':
    unittest.main(exit=False)
