3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
the second task (if virtual memory is enabled) is to ask the MMU
to perform a TLB lookup, then to go *back* to the cache and ask again.
16 * https://bugs.libre-soc.org/show_bug.cgi?id=465
20 from nmigen
import (Elaboratable
, Module
, Signal
, Shape
, unsigned
, Cat
, Mux
,
23 from nmutil
.iocontrol
import RecordObject
24 from nmutil
.util
import rising_edge
, Display
25 from enum
import Enum
, unique
27 from soc
.experiment
.dcache
import DCache
28 from soc
.experiment
.icache
import ICache
29 from soc
.experiment
.pimem
import PortInterfaceBase
30 from soc
.experiment
.mem_types
import LoadStore1ToMMUType
31 from soc
.experiment
.mem_types
import MMUToLoadStore1Type
33 from soc
.minerva
.wishbone
import make_wb_layout
34 from soc
.bus
.sram
import SRAM
35 from nmutil
.util
import Display
# NOTE(review): the enclosing "class State(Enum):" header appears to be
# missing from this copy of the file — verify against upstream.
# These are the states of the main load/store FSM in LoadStore1.elaborate().
IDLE       = 0  # ready for instruction
ACK_WAIT   = 1  # waiting for ack from dcache
MMU_LOOKUP = 2  # waiting for MMU to look up translation
#SECOND_REQ = 3 # second request for unaligned transfer
# NOTE(review): the enclosing "class Misalign(Enum):" header appears to be
# missing from this copy of the file — verify against upstream.
# Tracks progress of a (possibly) misaligned access that needs two dwords.
ONEWORD    = 0  # only one word needed, all good
NEED2WORDS = 1  # need to send/receive two words
WAITFIRST  = 2  # waiting for the first word
WAITSECOND = 3  # waiting for the second word
# captures the LDSTRequest from the PortInterface, which "blips" most
# of this at us (pipeline-style).
class LDSTRequest(RecordObject):
    # Latched copy of a single load/store request as presented by the
    # PortInterface, so the FSM can hold on to it after the one-cycle "blip".
    # NOTE(review): some fields referenced elsewhere in this file
    # (e.g. "load" and "dcbz") appear to be on lines missing from this
    # copy — verify against upstream.
    def __init__(self, name=None):
        RecordObject.__init__(self, name=name)

        self.raddr = Signal(64)       # real address of the access
        # self.store_data = Signal(64) # this is already sync (on a delay)
        self.byte_sel = Signal(16)    # byte-enables (16 bits: covers 2 dwords)
        self.nc = Signal()            # non-cacheable access
        self.virt_mode = Signal()     # MSR.DR: address translation enabled
        self.priv_mode = Signal()     # ~MSR.PR: privileged access
        self.mode_32bit = Signal()    # XXX UNUSED AT PRESENT
        self.alignstate = Signal(Misalign)  # progress of alignment request
        self.align_intr = Signal()    # raise alignment interrupt
        # atomic (LR/SC reservation)
        self.reserve = Signal()
        self.atomic = Signal()
        self.atomic_last = Signal()
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    # Conforms to PortInterface; owns the DCache/ICache instances and the
    # MMU request/response records, and runs the dcache-then-MMU retry FSM.
    def __init__(self, pspec):
        # pspec: per-core parameter spec (register width, address width,
        # optional cache-disable and test-memory-depth flags)
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache(pspec)
        self.icache = ICache(pspec)
        # these names are from the perspective of here (LoadStore1)
        self.d_out = self.dcache.d_in   # in to dcache is out for LoadStore
        self.d_in = self.dcache.d_out   # out from dcache is in for LoadStore
        self.i_out = self.icache.i_in   # in to icache is out for LoadStore
        self.i_in = self.icache.i_out   # out from icache is in for LoadStore
        self.m_out = LoadStore1ToMMUType("m_out")  # out *to* MMU
        self.m_in = MMUToLoadStore1Type("m_in")    # in *from* MMU
        self.req = LDSTRequest(name="ldst_req")

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))
        self.ibus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # state info for LD/ST
        # NOTE(review): some attributes used later (e.g. "done", "load",
        # "dcbz") appear to be on lines missing from this copy — verify
        # against upstream.
        self.done_delay = Signal()
        # latch most of the input request
        self.tlbie = Signal()
        self.raddr = Signal(64)
        self.maddr = Signal(64)
        self.store_data = Signal(64)        # first half (aligned)
        self.store_data2 = Signal(64)       # second half (misaligned)
        self.load_data = Signal(128)        # 128 to cope with misalignment
        self.load_data_delay = Signal(128)  # perform 2 LD/STs
        self.byte_sel = Signal(16)          # also for misaligned, 16-bit
        self.alignstate = Signal(Misalign)  # progress of alignment request
        self.next_addr = Signal(64)         # 2nd (aligned) read/write addr
        #self.xerc : xer_common_t;
        self.nc = Signal()                  # non-cacheable access
        self.mode_32bit = Signal()          # XXX UNUSED AT PRESENT
        self.state = Signal(State)          # main FSM state
        self.instr_fault = Signal()   # indicator to request i-cache MMU lookup
        self.r_instr_fault = Signal() # accessed in external_busy
        self.priv_mode = Signal()     # only for instruction fetch (not LDST)
        self.align_intr = Signal()

        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        self.lrsc_misalign = Signal()
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)
        # use these to set the dsisr or dar respectively
        self.mmu_set_spr = Signal()
        self.mmu_set_dsisr = Signal()
        self.mmu_set_dar = Signal()
        self.sprval_in = Signal(64)

        # ONLY access these read-only, do NOT attempt to change
        self.dsisr = Signal(32)
        self.dar = Signal(64)
# when external_busy set, do not allow PortInterface to proceed
def external_busy(self, m):
    """Busy indication back to PortInterface.

    True while an instruction-side MMU lookup is either being requested
    (instr_fault) or still latched (r_instr_fault).
    """
    pending = self.instr_fault
    latched = self.r_instr_fault
    return pending | latched
def set_wr_addr(self, m, addr, mask, misalign, msr, is_dcbz, is_nc):
    # Latch a *store* request into self.req (combinatorial "blip").
    # addr/mask come from PortInterface; msr supplies PR/DR/SF mode bits.
    m.d.comb += self.req.nc.eq(is_nc)
    m.d.comb += self.req.load.eq(0)  # store operation
    m.d.comb += self.req.byte_sel.eq(mask)
    m.d.comb += self.req.raddr.eq(addr)
    m.d.comb += self.req.priv_mode.eq(~msr.pr)   # not-problem ==> priv
    m.d.comb += self.req.virt_mode.eq(msr.dr)    # DR ==> virt
    m.d.comb += self.req.mode_32bit.eq(~msr.sf)  # not-sixty-four ==> 32bit
    m.d.comb += self.req.dcbz.eq(is_dcbz)

    # NOTE(review): upstream appears to guard the next two lines with
    # "with m.If(misalign):" — that line is missing from this copy.
    m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
    m.d.sync += self.next_addr.eq(Cat(C(0, 3), addr[3:]+1))

    # m.d.comb += Display("set_wr_addr %i dcbz %i",addr,is_dcbz)

    # option to disable the cache entirely for write
    if self.disable_cache:
        m.d.comb += self.req.nc.eq(1)

    # dcbz cannot do no-cache
    with m.If(is_dcbz & self.req.nc):
        m.d.comb += self.req.align_intr.eq(1)

    # hmm, rather than add yet another argument to set_wr_addr
    # read direct from PortInterface
    m.d.comb += self.req.reserve.eq(self.pi.reserve)  # atomic request
    m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
    m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)
def set_rd_addr(self, m, addr, mask, misalign, msr, is_nc):
    # Latch a *load* request into self.req and pulse d_valid for the blip.
    m.d.comb += self.d_valid.eq(1)
    m.d.comb += self.req.load.eq(1)  # load operation
    m.d.comb += self.req.byte_sel.eq(mask)
    m.d.comb += self.req.raddr.eq(addr)
    m.d.comb += self.req.priv_mode.eq(~msr.pr)   # not-problem ==> priv
    m.d.comb += self.req.virt_mode.eq(msr.dr)    # DR ==> virt
    m.d.comb += self.req.mode_32bit.eq(~msr.sf)  # not-sixty-four ==> 32bit
    m.d.comb += self.req.nc.eq(is_nc)
    # BAD HACK! disable cacheing on LD when address is 0xCxxx_xxxx
    # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
    with m.If(addr[28:] == Const(0xc, 4)):
        m.d.comb += self.req.nc.eq(1)
    # option to disable the cache entirely for read
    if self.disable_cache:
        m.d.comb += self.req.nc.eq(1)

    # need two reads: prepare next address in advance
    # NOTE(review): upstream appears to guard the next two lines with
    # "with m.If(misalign):" — that line is missing from this copy.
    m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
    m.d.sync += self.next_addr.eq(Cat(C(0, 3), addr[3:]+1))

    # hmm, rather than add yet another argument to set_rd_addr
    # read direct from PortInterface
    m.d.comb += self.req.reserve.eq(self.pi.reserve)  # atomic request
    m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
    m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)

    return None #FIXME return value
def set_wr_data(self, m, data, wen):
    # Present store data to the dcache. First 64 bits go out combinatorially
    # (picked up in elaborate via d_w_valid); the upper half is latched for
    # the second transfer of a misaligned store.
    # do the "blip" on write data
    m.d.comb += self.d_valid.eq(1)
    # put data into comb which is picked up in main elaborate()
    m.d.comb += self.d_w_valid.eq(1)
    m.d.comb += self.store_data.eq(data)
    m.d.sync += self.store_data2.eq(data[64:128])
    st_ok = self.done  # TODO indicates write data is valid
    m.d.comb += self.pi.store_done.data.eq(self.d_in.store_done)
    m.d.comb += self.pi.store_done.ok.eq(1)
    # NOTE(review): upstream returns st_ok here — the return statement
    # appears to be missing from this copy.
def get_rd_data(self, m):
    # Hand back the delayed load data and the delayed "done" flag.
    ld_ok = self.done_delay      # indicates read data is valid
    data = self.load_data_delay  # actual read data
    # NOTE(review): upstream returns (data, ld_ok) here — the return
    # statement appears to be missing from this copy.
def elaborate(self, platform):
    # Wire up dcache/icache, drive the load/store FSM (IDLE -> ACK_WAIT ->
    # MMU_LOOKUP), forward exception info to PortInterface, and bridge the
    # dcache's wishbone records to self.dbus.
    #
    # NOTE(review): this copy of the file is missing a number of source
    # lines — among them the local definitions of "exc", "maddr" and
    # "mmureq", several "with m.Else():" wrappers, and the final
    # "return m". Each detected gap is flagged inline below; verify the
    # exact structure against upstream before relying on it.
    m = super().elaborate(platform)
    comb, sync = m.d.comb, m.d.sync

    # microwatt takes one more cycle before next operation can be issued
    sync += self.done_delay.eq(self.done)
    #sync += self.load_data_delay[0:64].eq(self.load_data[0:64])

    # create dcache and icache module
    m.submodules.dcache = dcache = self.dcache
    m.submodules.icache = icache = self.icache

    d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
    i_out, i_in, ibus = self.i_out, self.i_in, self.ibus
    m_out, m_in = self.m_out, self.m_in
    # NOTE(review): definition of "exc" (presumably the PortInterface
    # exception record) is missing from this copy
    exception = exc.happened

    # copy of address, but gets over-ridden for instr_fault
    # NOTE(review): definition of "maddr" is missing from this copy
    m.d.comb += maddr.eq(self.raddr)

    # check for LR/SC misalignment, used in set_rd/wr_addr above
    comb += self.lrsc_misalign.eq(((self.pi.data_len[0:3]-1) &
                                   self.req.raddr[0:3]).bool())
    with m.If(self.lrsc_misalign & self.req.reserve):
        m.d.comb += self.req.align_intr.eq(1)

    # create a blip (single pulse) on valid read/write request
    # this can be over-ridden in the FSM to get dcache to re-run
    # a request when MMU_LOOKUP completes.
    m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
    ldst_r = LDSTRequest("ldst_r")
    sync += Display("MMUTEST: LoadStore1 d_in.error=%i",d_in.error)

    # main load/store FSM
    with m.Switch(self.state):
        with m.Case(State.IDLE):
            sync += self.load_data_delay.eq(0) # clear out
            # NOTE(review): the continuation of this condition (upstream
            # ends it with "~exc.happened):") is missing from this copy
            with m.If((self.d_validblip | self.instr_fault) &
                comb += self.busy.eq(1)
                sync += self.state.eq(State.ACK_WAIT)
                sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
                # sync += Display("validblip self.req.virt_mode=%i",
                # self.req.virt_mode)
                with m.If(self.instr_fault):
                    sync += self.r_instr_fault.eq(1)
                    comb += maddr.eq(self.maddr)
                    sync += self.state.eq(State.MMU_LOOKUP)
                # NOTE(review): a "with m.Else():" wrapper appears to be
                # missing here
                    sync += self.r_instr_fault.eq(0)
                    # if the LD/ST requires two dwords, move to waiting
                    with m.If(self.req.alignstate == Misalign.NEED2WORDS):
                        sync += ldst_r.alignstate.eq(Misalign.WAITFIRST)

        # waiting for completion
        with m.Case(State.ACK_WAIT):
            sync += Display("MMUTEST: ACK_WAIT")
            comb += self.busy.eq(~exc.happened)

            with m.If(d_in.error):
                # cache error is not necessarily "final", it could
                # be that it was just a TLB miss
                with m.If(d_in.cache_paradox):
                    comb += exception.eq(1)
                    sync += self.state.eq(State.IDLE)
                    sync += Display("cache error -> update dsisr")
                    sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                    # XXX there is no architected bit for this
                    # (probably should be a machine check in fact)
                    sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)
                    sync += self.r_instr_fault.eq(0)
                # NOTE(review): a "with m.Else():" wrapper appears to be
                # missing here
                    # Look up the translation for TLB miss
                    # and also for permission error and RC error
                    # in case the PTE has been updated.
                    sync += self.state.eq(State.MMU_LOOKUP)
            with m.If(d_in.valid):
                with m.If(self.done):
                    sync += Display("ACK_WAIT, done %x", self.raddr)
                with m.If(ldst_r.alignstate == Misalign.ONEWORD):
                    # done if there is only one dcache operation
                    sync += self.state.eq(State.IDLE)
                    with m.If(ldst_r.load):
                        m.d.comb += self.load_data.eq(d_in.data)
                        sync += self.load_data_delay[0:64].eq(d_in.data)
                    m.d.comb += self.done.eq(~mmureq) # done if not MMU
                with m.Elif(ldst_r.alignstate == Misalign.WAITFIRST):
                    # first LD done: load data, initiate 2nd request.
                    # leave in ACK_WAIT state
                    with m.If(ldst_r.load):
                        m.d.comb += self.load_data[0:63].eq(d_in.data)
                        sync += self.load_data_delay[0:64].eq(d_in.data)
                    # NOTE(review): a "with m.Else():" wrapper appears to
                    # be missing here (store path: send second dword)
                        m.d.sync += d_out.data.eq(self.store_data2)
                    # mmm kinda cheating, make a 2nd blip.
                    # use an aligned version of the address
                    m.d.comb += self.d_validblip.eq(1)
                    comb += self.req.eq(ldst_r) # from copy of request
                    comb += self.req.raddr.eq(self.next_addr)
                    comb += self.req.byte_sel.eq(ldst_r.byte_sel[8:])
                    comb += self.req.alignstate.eq(Misalign.WAITSECOND)
                    sync += ldst_r.raddr.eq(self.next_addr)
                    sync += ldst_r.byte_sel.eq(ldst_r.byte_sel[8:])
                    sync += ldst_r.alignstate.eq(Misalign.WAITSECOND)
                    sync += Display(" second req %x", self.req.raddr)
                with m.Elif(ldst_r.alignstate == Misalign.WAITSECOND):
                    sync += Display(" done second %x", d_in.data)
                    sync += self.state.eq(State.IDLE)
                    with m.If(ldst_r.load):
                        m.d.comb += self.load_data[64:128].eq(d_in.data)
                        sync += self.load_data_delay[64:128].eq(d_in.data)
                    m.d.comb += self.done.eq(~mmureq) # done if not MMU

        # waiting here for the MMU TLB lookup to complete.
        # either re-try the dcache lookup or throw MMU exception
        with m.Case(State.MMU_LOOKUP):
            comb += self.busy.eq(~exception)
            with m.If(m_in.done):
                with m.If(~self.r_instr_fault):
                    sync += Display("MMU_LOOKUP, done %x -> %x",
                                    self.raddr, d_out.addr)
                    # retry the request now that the MMU has
                    # installed a TLB entry, if not exception raised
                    m.d.comb += self.d_out.valid.eq(~exception)
                    sync += self.state.eq(State.ACK_WAIT)
                # NOTE(review): a "with m.Else():" wrapper appears to be
                # missing here (instruction-fault completion path)
                    sync += self.state.eq(State.IDLE)
                    sync += self.r_instr_fault.eq(0)
                    comb += self.done.eq(1)

            # NOTE(review): upstream wraps the following block in
            # "with m.If(m_in.err):" — that line is missing from this copy
                # MMU RADIX exception thrown. XXX
                # TODO: critical that the write here has to
                # notify the MMU FSM of the change to dsisr
                comb += exception.eq(1)
                comb += self.done.eq(1)
                sync += Display("MMU RADIX exception thrown")
                sync += self.dsisr[63 - 33].eq(m_in.invalid)
                sync += self.dsisr[63 - 36].eq(m_in.perm_error) # noexec
                sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                sync += self.dsisr[63 - 44].eq(m_in.badtree)
                sync += self.dsisr[63 - 45].eq(m_in.rc_error)
                sync += self.state.eq(State.IDLE)
                # exception thrown, clear out instruction fault state
                sync += self.r_instr_fault.eq(0)

    # MMU FSM communicating a request to update DSISR or DAR (OP_MTSPR)
    with m.If(self.mmu_set_spr):
        with m.If(self.mmu_set_dsisr):
            sync += self.dsisr.eq(self.sprval_in)
        with m.If(self.mmu_set_dar):
            sync += self.dar.eq(self.sprval_in)

    # hmmm, alignment occurs in set_rd_addr/set_wr_addr, note exception
    with m.If(self.align_intr):
        comb += exc.happened.eq(1)
    # check for updating DAR
    with m.If(exception):
        sync += Display("exception %x", self.raddr)
        # alignment error: store address in DAR
        with m.If(self.align_intr):
            sync += Display("alignment error: addr in DAR %x", self.raddr)
            sync += self.dar.eq(self.raddr)
        with m.Elif(~self.r_instr_fault):
            sync += Display("not instr fault, addr in DAR %x", self.raddr)
            sync += self.dar.eq(self.raddr)

    # when done or exception, return to idle state
    with m.If(self.done | exception):
        sync += self.state.eq(State.IDLE)
        comb += self.busy.eq(0)

    # happened, alignment, instr_fault, invalid.
    # note that all of these flow through - eventually to the TRAP
    # pipeline, via PowerDecoder2.
    comb += self.align_intr.eq(self.req.align_intr)
    comb += exc.invalid.eq(m_in.invalid)
    comb += exc.alignment.eq(self.align_intr)
    comb += exc.instr_fault.eq(self.r_instr_fault)
    # badtree, perm_error, rc_error, segment_fault
    comb += exc.badtree.eq(m_in.badtree)
    comb += exc.perm_error.eq(m_in.perm_error)
    comb += exc.rc_error.eq(m_in.rc_error)
    comb += exc.segment_fault.eq(m_in.segerr)
    # conditions for 0x400 trap need these in SRR1
    with m.If(exception & ~exc.alignment & exc.instr_fault):
        comb += exc.srr1[14].eq(exc.invalid)     # 47-33
        comb += exc.srr1[12].eq(exc.perm_error)  # 47-35
        comb += exc.srr1[3].eq(exc.badtree)      # 47-44
        comb += exc.srr1[2].eq(exc.rc_error)     # 47-45

    # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
    comb += dbus.adr.eq(dcache.bus.adr)
    comb += dbus.dat_w.eq(dcache.bus.dat_w)
    comb += dbus.sel.eq(dcache.bus.sel)
    comb += dbus.cyc.eq(dcache.bus.cyc)
    comb += dbus.stb.eq(dcache.bus.stb)
    comb += dbus.we.eq(dcache.bus.we)

    comb += dcache.bus.dat_r.eq(dbus.dat_r)
    comb += dcache.bus.ack.eq(dbus.ack)
    if hasattr(dbus, "stall"):
        comb += dcache.bus.stall.eq(dbus.stall)

    # update out d data when flag set, for first half (second done in FSM)
    with m.If(self.d_w_valid):
        m.d.sync += d_out.data.eq(self.store_data)
    # m.d.sync += d_out.data.eq(0)
    # unit test passes with that change

    # this must move into the FSM, conditionally noticing that
    # the "blip" comes from self.d_validblip.
    # task 1: look up in dcache
    # task 2: if dcache fails, look up in MMU.
    # do **NOT** confuse the two.
    with m.If(self.d_validblip):
        m.d.comb += self.d_out.valid.eq(~exc.happened)
        m.d.comb += d_out.load.eq(self.req.load)
        m.d.comb += d_out.byte_sel.eq(self.req.byte_sel)
        m.d.comb += self.raddr.eq(self.req.raddr)
        m.d.comb += d_out.nc.eq(self.req.nc)
        m.d.comb += d_out.priv_mode.eq(self.req.priv_mode)
        m.d.comb += d_out.virt_mode.eq(self.req.virt_mode)
        m.d.comb += d_out.reserve.eq(self.req.reserve)
        m.d.comb += d_out.atomic.eq(self.req.atomic)
        m.d.comb += d_out.atomic_last.eq(self.req.atomic_last)
        #m.d.comb += Display("validblip dcbz=%i addr=%x",
        #self.req.dcbz,self.req.addr)
        m.d.comb += d_out.dcbz.eq(self.req.dcbz)
    # NOTE(review): a "with m.Else():" wrapper appears to be missing here
    # (non-blip path drives d_out from the latched ldst_r instead)
        m.d.comb += d_out.load.eq(ldst_r.load)
        m.d.comb += d_out.byte_sel.eq(ldst_r.byte_sel)
        m.d.comb += self.raddr.eq(ldst_r.raddr)
        m.d.comb += d_out.nc.eq(ldst_r.nc)
        m.d.comb += d_out.priv_mode.eq(ldst_r.priv_mode)
        m.d.comb += d_out.virt_mode.eq(ldst_r.virt_mode)
        m.d.comb += d_out.reserve.eq(ldst_r.reserve)
        m.d.comb += d_out.atomic.eq(ldst_r.atomic)
        m.d.comb += d_out.atomic_last.eq(ldst_r.atomic_last)
        #m.d.comb += Display("no_validblip dcbz=%i addr=%x",
        #ldst_r.dcbz,ldst_r.addr)
        m.d.comb += d_out.dcbz.eq(ldst_r.dcbz)
    m.d.comb += d_out.addr.eq(self.raddr)

    # Update outputs to MMU
    # NOTE(review): definition of "mmureq" is missing from this copy
    m.d.comb += m_out.valid.eq(mmureq)
    m.d.comb += m_out.iside.eq(self.instr_fault)
    m.d.comb += m_out.load.eq(ldst_r.load)
    with m.If(self.instr_fault):
        m.d.comb += m_out.priv.eq(self.priv_mode)
    # NOTE(review): a "with m.Else():" wrapper appears to be missing here
        m.d.comb += m_out.priv.eq(ldst_r.priv_mode)
    m.d.comb += m_out.tlbie.eq(self.tlbie)
    # m_out.mtspr <= mmu_mtspr; # TODO
    # m_out.sprn <= sprn; # TODO
    m.d.comb += m_out.addr.eq(maddr)
    # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
    # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate
    # NOTE(review): "return m" appears to be missing from this copy
    # NOTE(review): the enclosing "def ports(self):" header appears to be
    # missing from this copy — this line is the body of LoadStore1.ports()
    yield from super().ports()
class TestSRAMLoadStore1(LoadStore1):
    # LoadStore1 variant whose data wishbone bus is wired to a small
    # internal SRAM, for unit testing without a real memory subsystem.
    def __init__(self, pspec):
        super().__init__(pspec)
        # small 32-entry Memory
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        # NOTE(review): the "else:" fallback that sets a default depth
        # appears to be missing from this copy
        print("TestSRAMBareLoadStoreUnit depth", depth)
        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        # Connect self.dbus (initiator) to an SRAM target.
        m = super().elaborate(platform)
        # NOTE(review): the definitions of the "comb" and "dbus" locals
        # (upstream: "comb = m.d.comb", "dbus = self.dbus") appear to be
        # missing from this copy
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})

        # directly connect the wishbone bus of LoadStoreUnitInterface to SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
            # NOTE(review): the next line duplicates the previous
            # assignment — harmless for nmigen comb, but likely a
            # copy/paste slip; verify against upstream
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        # NOTE(review): the "for fanin in fanins:" loop header appears to
        # be missing from this copy
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))

        comb += sram.bus.adr.eq(dbus.adr)