sort out misaligned store in LoadStore1
[soc.git] / src / soc / fu / ldst / loadstore.py
1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
8
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
11 the second task (if virtual memory is enabled) is to ask the MMU
12 to perform a TLB look-up, then to go *back* to the cache and ask again.
13
14 Links:
15
16 * https://bugs.libre-soc.org/show_bug.cgi?id=465
17
18 """
19
20 from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
21 Record, Memory,
22 Const)
23 from nmutil.iocontrol import RecordObject
24 from nmutil.util import rising_edge, Display
25 from enum import Enum, unique
26
27 from soc.experiment.dcache import DCache
28 from soc.experiment.icache import ICache
29 from soc.experiment.pimem import PortInterfaceBase
30 from soc.experiment.mem_types import LoadStore1ToMMUType
31 from soc.experiment.mem_types import MMUToLoadStore1Type
32
33 from soc.minerva.wishbone import make_wb_layout
34 from soc.bus.sram import SRAM
35 from nmutil.util import Display
36
37
@unique
class State(Enum):
    """Main LoadStore1 FSM state (see LoadStore1.elaborate for transitions)."""
    IDLE = 0        # ready to accept a new instruction
    ACK_WAIT = 1    # dcache request outstanding, waiting for its ack
    MMU_LOOKUP = 2  # waiting for MMU to look up a translation
    #SECOND_REQ = 3 # second request for unaligned transfer
44
@unique
class Misalign(Enum):
    """Progress of a transfer that may need one or two dword accesses."""
    ONEWORD = 0     # only one word needed, all good
    NEED2WORDS = 1  # need to send/receive two words
    WAITFIRST = 2   # waiting for the first word
    WAITSECOND = 3  # waiting for the second word
51
52
# captures the LDSTRequest from the PortInterface, which "blips" most
# of this at us (pipeline-style).
class LDSTRequest(RecordObject):
    # NOTE: this is a RecordObject - the declaration order of the fields
    # below defines the record layout, so do NOT re-order them.
    def __init__(self, name=None):
        RecordObject.__init__(self, name=name)

        self.load = Signal()               # 1 = load, 0 = store
        self.dcbz = Signal()               # dcbz (cache-block-zero) request
        self.raddr = Signal(64)            # request address
        # self.store_data = Signal(64) # this is already sync (on a delay)
        self.byte_sel = Signal(16)         # byte enables; 16 for misaligned
        self.nc = Signal()                 # non-cacheable access
        self.virt_mode = Signal()          # from MSR.DR (DR ==> virt)
        self.priv_mode = Signal()          # from ~MSR.PR (not-problem ==> priv)
        self.mode_32bit = Signal()         # XXX UNUSED AT PRESENT
        self.alignstate = Signal(Misalign) # progress of alignment request
        self.align_intr = Signal()         # request an alignment exception
        # atomic (LR/SC reservation)
        self.reserve = Signal()
        self.atomic = Signal()
        self.atomic_last = Signal()
74
75
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    """LoadStore1: glue between PortInterface, D-Cache, I-Cache and MMU.

    FSM overview (see elaborate):
    * IDLE accepts a one-clock "blip" request and latches it into ldst_r
    * ACK_WAIT waits for the dcache; for a misaligned (two-dword) transfer
      it issues a second dcache request before completing
    * MMU_LOOKUP asks the MMU to install a TLB entry after a dcache TLB
      miss, then re-tries the dcache request (or raises an exception)
    """

    def __init__(self, pspec):
        self.pspec = pspec
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache(pspec)
        self.icache = ICache(pspec)
        # these names are from the perspective of here (LoadStore1)
        self.d_out = self.dcache.d_in    # in to dcache is out for LoadStore
        self.d_in = self.dcache.d_out    # out from dcache is in for LoadStore
        self.i_out = self.icache.i_in    # in to icache is out for LoadStore
        self.i_in = self.icache.i_out    # out from icache is in for LoadStore
        self.m_out = LoadStore1ToMMUType("m_out")  # out *to* MMU
        self.m_in = MMUToLoadStore1Type("m_in")    # in *from* MMU
        self.req = LDSTRequest(name="ldst_req")

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))
        self.ibus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # state info for LD/ST
        self.done = Signal()
        self.done_delay = Signal()
        # latch most of the input request
        self.load = Signal()
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.raddr = Signal(64)
        self.maddr = Signal(64)
        self.store_data = Signal(64)        # first half (aligned)
        self.store_data2 = Signal(64)       # second half (misaligned)
        self.load_data = Signal(128)        # 128 to cope with misalignment
        self.load_data_delay = Signal(128)  # perform 2 LD/STs
        self.byte_sel = Signal(16)          # also for misaligned, 16-bit
        self.alignstate = Signal(Misalign)  # progress of alignment request
        #self.xerc : xer_common_t;
        #self.rc = Signal()
        self.nc = Signal()             # non-cacheable access
        self.mode_32bit = Signal()     # XXX UNUSED AT PRESENT
        self.state = Signal(State)
        self.instr_fault = Signal()    # indicator to request i-cache MMU lookup
        self.r_instr_fault = Signal()  # accessed in external_busy
        self.priv_mode = Signal()      # only for instruction fetch (not LDST)
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        self.lrsc_misalign = Signal()
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)
        # use these to set the dsisr or dar respectively
        self.mmu_set_spr = Signal()
        self.mmu_set_dsisr = Signal()
        self.mmu_set_dar = Signal()
        self.sprval_in = Signal(64)

        # ONLY access these read-only, do NOT attempt to change
        self.dsisr = Signal(32)
        self.dar = Signal(64)

    # when external_busy set, do not allow PortInterface to proceed
    def external_busy(self, m):
        return self.instr_fault | self.r_instr_fault

    def set_wr_addr(self, m, addr, mask, misalign, msr, is_dcbz):
        """set up a store request to the dcache, copied into self.req"""
        m.d.comb += self.req.load.eq(0) # store operation
        m.d.comb += self.req.byte_sel.eq(mask)
        m.d.comb += self.req.raddr.eq(addr)
        m.d.comb += self.req.priv_mode.eq(~msr.pr) # not-problem ==> priv
        m.d.comb += self.req.virt_mode.eq(msr.dr) # DR ==> virt
        m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
        m.d.comb += self.req.dcbz.eq(is_dcbz)
        # misaligned transfer needs a second dword
        with m.If(misalign):
            m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)

        # m.d.comb += Display("set_wr_addr %i dcbz %i",addr,is_dcbz)

        # option to disable the cache entirely for write
        if self.disable_cache:
            m.d.comb += self.req.nc.eq(1)

        # dcbz cannot do no-cache
        with m.If(is_dcbz & self.req.nc):
            m.d.comb += self.req.align_intr.eq(1)

        # hmm, rather than add yet another argument to set_wr_addr
        # read direct from PortInterface
        m.d.comb += self.req.reserve.eq(self.pi.reserve) # atomic request
        m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
        m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)

        return None

    def set_rd_addr(self, m, addr, mask, misalign, msr):
        """set up a load request to the dcache, copied into self.req"""
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.req.load.eq(1) # load operation
        m.d.comb += self.req.byte_sel.eq(mask)
        m.d.comb += self.req.raddr.eq(addr)
        m.d.comb += self.req.priv_mode.eq(~msr.pr) # not-problem ==> priv
        m.d.comb += self.req.virt_mode.eq(msr.dr) # DR ==> virt
        m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
        # BAD HACK! disable cacheing on LD when address is 0xCxxx_xxxx
        # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
        with m.If(addr[28:] == Const(0xc, 4)):
            m.d.comb += self.req.nc.eq(1)
        # option to disable the cache entirely for read
        if self.disable_cache:
            m.d.comb += self.req.nc.eq(1)
        # misaligned transfer needs a second dword
        with m.If(misalign):
            m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)

        # hmm, rather than add yet another argument to set_rd_addr
        # read direct from PortInterface
        m.d.comb += self.req.reserve.eq(self.pi.reserve) # atomic request
        m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
        m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)

        return None #FIXME return value

    def set_wr_data(self, m, data, wen):
        """present store data to the dcache; second (misaligned) half is
        latched into store_data2 and sent from the ACK_WAIT FSM state"""
        # do the "blip" on write data
        m.d.comb += self.d_valid.eq(1)
        # put data into comb which is picked up in main elaborate()
        m.d.comb += self.d_w_valid.eq(1)
        m.d.comb += self.store_data.eq(data)
        m.d.sync += self.store_data2.eq(data[64:128])
        st_ok = self.done # TODO indicates write data is valid
        m.d.comb += self.pi.store_done.data.eq(self.d_in.store_done)
        m.d.comb += self.pi.store_done.ok.eq(1)
        return st_ok

    def get_rd_data(self, m):
        """return the (up to 128-bit) read data and a "valid" flag"""
        ld_ok = self.done_delay # indicates read data is valid
        data = self.load_data_delay # actual read data
        return data, ld_ok

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb, sync = m.d.comb, m.d.sync

        # microwatt takes one more cycle before next operation can be issued
        sync += self.done_delay.eq(self.done)
        #sync += self.load_data_delay[0:64].eq(self.load_data[0:64])

        # create dcache and icache module
        m.submodules.dcache = dcache = self.dcache
        m.submodules.icache = icache = self.icache

        # temp vars
        d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
        i_out, i_in, ibus = self.i_out, self.i_in, self.ibus
        m_out, m_in = self.m_out, self.m_in
        exc = self.pi.exc_o
        exception = exc.happened
        mmureq = Signal()

        # copy of address, but gets over-ridden for instr_fault
        maddr = Signal(64)
        m.d.comb += maddr.eq(self.raddr)

        # check for LR/SC misalignment, used in set_rd/wr_addr above
        comb += self.lrsc_misalign.eq(((self.pi.data_len[0:3]-1) &
                                       self.req.raddr[0:3]).bool())
        with m.If(self.lrsc_misalign & self.req.reserve):
            m.d.comb += self.req.align_intr.eq(1)

        # create a blip (single pulse) on valid read/write request
        # this can be over-ridden in the FSM to get dcache to re-run
        # a request when MMU_LOOKUP completes.
        m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
        ldst_r = LDSTRequest("ldst_r")
        sync += Display("MMUTEST: LoadStore1 d_in.error=%i",d_in.error)

        # fsm skeleton
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                sync += self.load_data_delay.eq(0) # clear out
                with m.If((self.d_validblip | self.instr_fault) &
                          ~exc.happened):
                    comb += self.busy.eq(1)
                    sync += self.state.eq(State.ACK_WAIT)
                    sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
                    # sync += Display("validblip self.req.virt_mode=%i",
                    # self.req.virt_mode)
                    with m.If(self.instr_fault):
                        comb += mmureq.eq(1)
                        sync += self.r_instr_fault.eq(1)
                        comb += maddr.eq(self.maddr)
                        sync += self.state.eq(State.MMU_LOOKUP)
                    with m.Else():
                        sync += self.r_instr_fault.eq(0)
                    # if the LD/ST requires two dwords, move to waiting
                    # for first word
                    with m.If(self.req.alignstate == Misalign.NEED2WORDS):
                        sync += ldst_r.alignstate.eq(Misalign.WAITFIRST)
                with m.Else():
                    sync += ldst_r.eq(0)

            # waiting for completion
            with m.Case(State.ACK_WAIT):
                sync += Display("MMUTEST: ACK_WAIT")
                comb += self.busy.eq(~exc.happened)

                with m.If(d_in.error):
                    # cache error is not necessarily "final", it could
                    # be that it was just a TLB miss
                    with m.If(d_in.cache_paradox):
                        comb += exception.eq(1)
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        sync += Display("cache error -> update dsisr")
                        sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                        # XXX there is no architected bit for this
                        # (probably should be a machine check in fact)
                        sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)
                        sync += self.r_instr_fault.eq(0)

                    with m.Else():
                        # Look up the translation for TLB miss
                        # and also for permission error and RC error
                        # in case the PTE has been updated.
                        comb += mmureq.eq(1)
                        sync += self.state.eq(State.MMU_LOOKUP)
                with m.If(d_in.valid):
                    with m.If(self.done):
                        sync += Display("ACK_WAIT, done %x", self.raddr)
                    with m.If(ldst_r.alignstate == Misalign.ONEWORD):
                        # done if there is only one dcache operation
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        with m.If(ldst_r.load):
                            m.d.comb += self.load_data.eq(d_in.data)
                            sync += self.load_data_delay[0:64].eq(d_in.data)
                        m.d.comb += self.done.eq(~mmureq) # done if not MMU
                    with m.Elif(ldst_r.alignstate == Misalign.WAITFIRST):
                        # first LD done: load data, initiate 2nd request.
                        # leave in ACK_WAIT state
                        with m.If(ldst_r.load):
                            # bugfix: slice here was [0:63], which silently
                            # dropped bit 63 of the first dword of a
                            # misaligned load; must be the full 64 bits
                            # to match load_data_delay below
                            m.d.comb += self.load_data[0:64].eq(d_in.data)
                            sync += self.load_data_delay[0:64].eq(d_in.data)
                        with m.Else():
                            m.d.sync += d_out.data.eq(self.store_data2)
                        # mmm kinda cheating, make a 2nd blip.
                        # use an aligned version of the address
                        addr_aligned, z3 = Signal(64), Const(0, 3)
                        comb += addr_aligned.eq(Cat(z3, ldst_r.raddr[3:]+1))
                        m.d.comb += self.d_validblip.eq(1)
                        comb += self.req.eq(ldst_r) # from copy of request
                        comb += self.req.raddr.eq(addr_aligned)
                        comb += self.req.byte_sel.eq(ldst_r.byte_sel[8:])
                        comb += self.req.alignstate.eq(Misalign.WAITSECOND)
                        sync += ldst_r.raddr.eq(addr_aligned)
                        sync += ldst_r.byte_sel.eq(ldst_r.byte_sel[8:])
                        sync += ldst_r.alignstate.eq(Misalign.WAITSECOND)
                        sync += Display(" second req %x", self.req.raddr)
                    with m.Elif(ldst_r.alignstate == Misalign.WAITSECOND):
                        sync += Display(" done second %x", d_in.data)
                        # done second load
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        with m.If(ldst_r.load):
                            m.d.comb += self.load_data[64:128].eq(d_in.data)
                            sync += self.load_data_delay[64:128].eq(d_in.data)
                        m.d.comb += self.done.eq(~mmureq) # done if not MMU

            # waiting here for the MMU TLB lookup to complete.
            # either re-try the dcache lookup or throw MMU exception
            with m.Case(State.MMU_LOOKUP):
                comb += self.busy.eq(~exception)
                with m.If(m_in.done):
                    with m.If(~self.r_instr_fault):
                        sync += Display("MMU_LOOKUP, done %x -> %x",
                                        self.raddr, d_out.addr)
                        # retry the request now that the MMU has
                        # installed a TLB entry, if not exception raised
                        m.d.comb += self.d_out.valid.eq(~exception)
                        sync += self.state.eq(State.ACK_WAIT)
                    with m.Else():
                        sync += self.state.eq(State.IDLE)
                        sync += self.r_instr_fault.eq(0)
                        comb += self.done.eq(1)

                with m.If(m_in.err):
                    # MMU RADIX exception thrown. XXX
                    # TODO: critical that the write here has to
                    # notify the MMU FSM of the change to dsisr
                    comb += exception.eq(1)
                    comb += self.done.eq(1)
                    sync += Display("MMU RADIX exception thrown")
                    sync += self.dsisr[63 - 33].eq(m_in.invalid)
                    sync += self.dsisr[63 - 36].eq(m_in.perm_error) # noexec
                    sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                    sync += self.dsisr[63 - 44].eq(m_in.badtree)
                    sync += self.dsisr[63 - 45].eq(m_in.rc_error)
                    sync += self.state.eq(State.IDLE)
                    # exception thrown, clear out instruction fault state
                    sync += self.r_instr_fault.eq(0)

        # MMU FSM communicating a request to update DSISR or DAR (OP_MTSPR)
        with m.If(self.mmu_set_spr):
            with m.If(self.mmu_set_dsisr):
                sync += self.dsisr.eq(self.sprval_in)
            with m.If(self.mmu_set_dar):
                sync += self.dar.eq(self.sprval_in)

        # hmmm, alignment occurs in set_rd_addr/set_wr_addr, note exception
        with m.If(self.align_intr):
            comb += exc.happened.eq(1)
        # check for updating DAR
        with m.If(exception):
            sync += Display("exception %x", self.raddr)
            # alignment error: store address in DAR
            with m.If(self.align_intr):
                sync += Display("alignment error: addr in DAR %x", self.raddr)
                sync += self.dar.eq(self.raddr)
            with m.Elif(~self.r_instr_fault):
                sync += Display("not instr fault, addr in DAR %x", self.raddr)
                sync += self.dar.eq(self.raddr)

        # when done or exception, return to idle state
        with m.If(self.done | exception):
            sync += self.state.eq(State.IDLE)
            comb += self.busy.eq(0)

        # happened, alignment, instr_fault, invalid.
        # note that all of these flow through - eventually to the TRAP
        # pipeline, via PowerDecoder2.
        comb += self.align_intr.eq(self.req.align_intr)
        comb += exc.invalid.eq(m_in.invalid)
        comb += exc.alignment.eq(self.align_intr)
        comb += exc.instr_fault.eq(self.r_instr_fault)
        # badtree, perm_error, rc_error, segment_fault
        comb += exc.badtree.eq(m_in.badtree)
        comb += exc.perm_error.eq(m_in.perm_error)
        comb += exc.rc_error.eq(m_in.rc_error)
        comb += exc.segment_fault.eq(m_in.segerr)
        # conditions for 0x400 trap need these in SRR1
        with m.If(exception & ~exc.alignment & exc.instr_fault):
            comb += exc.srr1[14].eq(exc.invalid) # 47-33
            comb += exc.srr1[12].eq(exc.perm_error) # 47-35
            comb += exc.srr1[3].eq(exc.badtree) # 47-44
            comb += exc.srr1[2].eq(exc.rc_error) # 47-45

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        comb += dbus.adr.eq(dcache.bus.adr)
        comb += dbus.dat_w.eq(dcache.bus.dat_w)
        comb += dbus.sel.eq(dcache.bus.sel)
        comb += dbus.cyc.eq(dcache.bus.cyc)
        comb += dbus.stb.eq(dcache.bus.stb)
        comb += dbus.we.eq(dcache.bus.we)

        comb += dcache.bus.dat_r.eq(dbus.dat_r)
        comb += dcache.bus.ack.eq(dbus.ack)
        if hasattr(dbus, "stall"):
            comb += dcache.bus.stall.eq(dbus.stall)

        # update out d data when flag set, for first half (second done in FSM)
        with m.If(self.d_w_valid):
            m.d.sync += d_out.data.eq(self.store_data)
        #with m.Else():
        #    m.d.sync += d_out.data.eq(0)
        # unit test passes with that change

        # this must move into the FSM, conditionally noticing that
        # the "blip" comes from self.d_validblip.
        # task 1: look up in dcache
        # task 2: if dcache fails, look up in MMU.
        # do **NOT** confuse the two.
        with m.If(self.d_validblip):
            m.d.comb += self.d_out.valid.eq(~exc.happened)
            m.d.comb += d_out.load.eq(self.req.load)
            m.d.comb += d_out.byte_sel.eq(self.req.byte_sel)
            m.d.comb += self.raddr.eq(self.req.raddr)
            m.d.comb += d_out.nc.eq(self.req.nc)
            m.d.comb += d_out.priv_mode.eq(self.req.priv_mode)
            m.d.comb += d_out.virt_mode.eq(self.req.virt_mode)
            m.d.comb += d_out.reserve.eq(self.req.reserve)
            m.d.comb += d_out.atomic.eq(self.req.atomic)
            m.d.comb += d_out.atomic_last.eq(self.req.atomic_last)
            #m.d.comb += Display("validblip dcbz=%i addr=%x",
            #self.req.dcbz,self.req.addr)
            m.d.comb += d_out.dcbz.eq(self.req.dcbz)
        with m.Else():
            m.d.comb += d_out.load.eq(ldst_r.load)
            m.d.comb += d_out.byte_sel.eq(ldst_r.byte_sel)
            m.d.comb += self.raddr.eq(ldst_r.raddr)
            m.d.comb += d_out.nc.eq(ldst_r.nc)
            m.d.comb += d_out.priv_mode.eq(ldst_r.priv_mode)
            m.d.comb += d_out.virt_mode.eq(ldst_r.virt_mode)
            m.d.comb += d_out.reserve.eq(ldst_r.reserve)
            m.d.comb += d_out.atomic.eq(ldst_r.atomic)
            m.d.comb += d_out.atomic_last.eq(ldst_r.atomic_last)
            #m.d.comb += Display("no_validblip dcbz=%i addr=%x",
            #ldst_r.dcbz,ldst_r.addr)
            m.d.comb += d_out.dcbz.eq(ldst_r.dcbz)
        m.d.comb += d_out.addr.eq(self.raddr)

        # Update outputs to MMU
        m.d.comb += m_out.valid.eq(mmureq)
        m.d.comb += m_out.iside.eq(self.instr_fault)
        m.d.comb += m_out.load.eq(ldst_r.load)
        with m.If(self.instr_fault):
            m.d.comb += m_out.priv.eq(self.priv_mode)
        with m.Else():
            m.d.comb += m_out.priv.eq(ldst_r.priv_mode)
        m.d.comb += m_out.tlbie.eq(self.tlbie)
        # m_out.mtspr <= mmu_mtspr; # TODO
        # m_out.sprn <= sprn; # TODO
        m.d.comb += m_out.addr.eq(maddr)
        # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
        # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate

        return m

    def ports(self):
        yield from super().ports()
        # TODO: memory ports
504
505
class TestSRAMLoadStore1(LoadStore1):
    """LoadStore1 with its data wishbone bus wired to a small internal
    SRAM, for unit testing without an external memory system."""

    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small 32-entry Memory
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        print("TestSRAMBareLoadStoreUnit depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb = m.d.comb
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})
        dbus = self.dbus

        # directly connect the wishbone bus of LoadStoreUnitInterface to SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            # bugfix: this connection was previously emitted twice
            # (duplicated statement); once is sufficient
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        for fanin in fanins:
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
        # connect address
        comb += sram.bus.adr.eq(dbus.adr)

        return m
542