move LoadStore1 d_validblip setting, and get MMU_LOOKUP to re-run
[soc.git] / src / soc / fu / ldst / loadstore.py
1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
8
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
11 the second task (if virtual memory is enabled) is to ask the MMU
12 to perform a TLB, then to go *back* to the cache and ask again.
13
14 Links:
15
16 * https://bugs.libre-soc.org/show_bug.cgi?id=465
17
18 """
19
from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
                    Record, Memory,
                    Const)
from nmutil.util import rising_edge
from enum import Enum, unique

from soc.experiment.dcache import DCache
from soc.experiment.pimem import PortInterfaceBase
from soc.experiment.mem_types import LoadStore1ToMMUType
from soc.experiment.mem_types import MMUToLoadStore1Type

from soc.minerva.wishbone import make_wb_layout
from soc.bus.sram import SRAM


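# --------------------------------------------------------------------------
# Illustrative sketch (not used by LoadStore1 itself): the "single clock
# blip" technique relied on below.  LoadStore1 holds d_valid high for the
# whole duration of a request, but DCache must only see a one-cycle pulse
# on d_in.valid, otherwise a new request would start every cycle.  the real
# code uses nmutil.util.rising_edge() for this; blip_example() is a
# hypothetical helper showing the general idea, under the assumption that a
# one-cycle-delayed copy of the level signal is compared against it.
def blip_example(m, level):
    """return an expression that is high for exactly one cycle when level
    rises.  purely illustrative: not called anywhere in this module.
    """
    delayed = Signal.like(level, name="blip_delay")
    m.d.sync += delayed.eq(level)   # one-cycle-delayed copy of the level
    return level & ~delayed         # high only on the rising edge
# --------------------------------------------------------------------------
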
@unique
class State(Enum):
    IDLE = 0        # ready for instruction
    ACK_WAIT = 1    # waiting for ack from dcache
    MMU_LOOKUP = 2  # waiting for MMU to look up translation
    TLBIE_WAIT = 3  # waiting for MMU to finish doing a tlbie
    COMPLETE = 4    # extra cycle to complete an operation


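# typical state sequences (see the FSM skeleton in LoadStore1.elaborate):
#
#   dcache hit:         IDLE -> ACK_WAIT -> IDLE
#   dcache TLB miss:    IDLE -> ACK_WAIT -> MMU_LOOKUP -> ACK_WAIT -> IDLE
#   translation error:  IDLE -> ACK_WAIT -> MMU_LOOKUP (DSISR bits set,
#                       exception reported via pi.exc_o)
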
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    def __init__(self, pspec):
        self.pspec = pspec
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache()
        self.d_in = self.dcache.d_in
        self.d_out = self.dcache.d_out
        self.l_in = LoadStore1ToMMUType()
        self.l_out = MMUToLoadStore1Type()
        # TODO microwatt
        self.mmureq = Signal()
        self.derror = Signal()

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # DSISR and DAR cached values. note that the MMU FSM is where
        # these are accessed by OP_MTSPR/OP_MFSPR, on behalf of LoadStore1.
        # by contrast microwatt has the spr set/get done *in* loadstore1.vhdl
        self.dsisr = Signal(64)
        self.dar = Signal(64)

        # state info for LD/ST
        self.done = Signal()
        # latch most of the input request
        self.load = Signal()
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.addr = Signal(64)
        self.store_data = Signal(64)
        self.load_data = Signal(64)
        self.byte_sel = Signal(8)
        self.update = Signal()
        #self.xerc : xer_common_t;
        #self.reserve = Signal()
        #self.atomic = Signal()
        #self.atomic_last = Signal()
        #self.rc = Signal()
        self.nc = Signal() # non-cacheable access
        self.virt_mode = Signal()
        self.priv_mode = Signal()
        self.state = Signal(State)
        self.instr_fault = Signal()
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        #self.mode_32bit = Signal()
        self.wr_sel = Signal(2)
        self.interrupt = Signal()
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)

    def set_wr_addr(self, m, addr, mask, misalign):
        """hook called by PortInterfaceBase: latch address/mask for a store.
        the dcache "valid" blip itself is issued later, in set_wr_data.
        """
        m.d.comb += self.load.eq(0) # store operation

        m.d.comb += self.d_in.load.eq(0)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.addr.eq(addr)
        m.d.comb += self.align_intr.eq(misalign)
        # option to disable the cache entirely for write
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None

    def set_rd_addr(self, m, addr, mask, misalign):
        """hook called by PortInterfaceBase: set up a load request,
        issuing the single-cycle "blip" to dcache via d_validblip.
        """
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_in.valid.eq(self.d_validblip)
        m.d.comb += self.load.eq(1) # load operation
        m.d.comb += self.d_in.load.eq(1)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.align_intr.eq(misalign)
        m.d.comb += self.addr.eq(addr)
        # BAD HACK! disable caching on LD when address is 0xCxxx_xxxx
        # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
        with m.If(addr[28:] == Const(0xc, 4)):
            m.d.comb += self.nc.eq(1)
        # option to disable the cache entirely for read
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None #FIXME return value

    def set_wr_data(self, m, data, wen):
        """hook called by PortInterfaceBase: present store data to dcache.
        also issues the single-cycle "blip" via d_validblip, so the dcache
        request only starts once the write data is actually available.
        """
        # do the "blip" on write data
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_in.valid.eq(self.d_validblip)
        # put data into comb which is picked up in main elaborate()
        m.d.comb += self.d_w_valid.eq(1)
        m.d.comb += self.store_data.eq(data)
        #m.d.sync += self.d_in.byte_sel.eq(wen) # this might not be needed
        st_ok = self.done # TODO indicates write data is valid
        return st_ok

    def get_rd_data(self, m):
        """hook called by PortInterfaceBase: return load result and "ok"."""
        ld_ok = self.done        # indicates read data is valid
        data = self.load_data    # actual read data
        return data, ld_ok

154 """
155 if d_in.error = '1' then
156 if d_in.cache_paradox = '1' then
157 -- signal an interrupt straight away
158 exception := '1';
159 dsisr(63 - 38) := not r2.req.load;
160 -- XXX there is no architected bit for this
161 -- (probably should be a machine check in fact)
162 dsisr(63 - 35) := d_in.cache_paradox;
163 else
164 -- Look up the translation for TLB miss
165 -- and also for permission error and RC error
166 -- in case the PTE has been updated.
167 mmureq := '1';
168 v.state := MMU_LOOKUP;
169 v.stage1_en := '0';
170 end if;
171 end if;
172 """
173
    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb, sync = m.d.comb, m.d.sync

        # create dcache module
        m.submodules.dcache = dcache = self.dcache

        # temp vars
        d_in, d_out, l_out, dbus = self.d_in, self.d_out, self.l_out, self.dbus

        # create a blip (single pulse) on valid read/write request
        # this can be overridden in the FSM to get dcache to re-run
        # a request when MMU_LOOKUP completes
        m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))

        # fsm skeleton
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                with m.If(self.d_validblip):
                    sync += self.state.eq(State.ACK_WAIT)

            with m.Case(State.ACK_WAIT): # waiting for completion
                with m.If(d_out.error):
                    with m.If(d_out.cache_paradox):
                        sync += self.derror.eq(1)
                        sync += self.state.eq(State.IDLE)
                        sync += self.dsisr[63 - 38].eq(~self.load)
                        # XXX there is no architected bit for this
                        # (probably should be a machine check in fact)
                        sync += self.dsisr[63 - 35].eq(d_out.cache_paradox)

                    with m.Else():
                        # Look up the translation for TLB miss
                        # and also for permission error and RC error
                        # in case the PTE has been updated.
                        sync += self.mmureq.eq(1)
                        sync += self.state.eq(State.MMU_LOOKUP)
                with m.If(d_out.valid):
                    m.d.comb += self.done.eq(1)
                    sync += self.state.eq(State.IDLE)
                    with m.If(self.load):
                        m.d.comb += self.load_data.eq(d_out.data)

            with m.Case(State.MMU_LOOKUP):
                with m.If(l_out.done):
                    with m.If(~self.instr_fault):
                        # retry the request now that the MMU has
                        # installed a TLB entry
                        m.d.comb += self.d_validblip.eq(1) # re-run dcache req
                        sync += self.state.eq(State.ACK_WAIT)
                with m.If(l_out.err):
                    sync += self.dsisr[63 - 33].eq(l_out.invalid)
                    sync += self.dsisr[63 - 36].eq(l_out.perm_error)
                    sync += self.dsisr[63 - 38].eq(self.load)
                    sync += self.dsisr[63 - 44].eq(l_out.badtree)
                    sync += self.dsisr[63 - 45].eq(l_out.rc_error)

                # for reference, the equivalent in microwatt loadstore1.vhdl:
                '''
                if m_in.done = '1' then     # actually l_out.done
                    if r.instr_fault = '0' then
                        # retry the request now that the MMU has
                        # installed a TLB entry
                        v.state := ACK_WAIT;
                    end if;
                end if;
                if m_in.err = '1' then      # actually l_out.err
                    dsisr(63 - 33) := m_in.invalid;
                    dsisr(63 - 36) := m_in.perm_error;
                    dsisr(63 - 38) := not r.load;
                    dsisr(63 - 44) := m_in.badtree;
                    dsisr(63 - 45) := m_in.rc_error;
                end if;
                '''

            with m.Case(State.TLBIE_WAIT):
                pass
            with m.Case(State.COMPLETE):
                pass

        # exception record (pi.exc_o): happened, alignment, instr_fault,
        # invalid.  note that all of these flow through - eventually to
        # the TRAP pipeline, via PowerDecoder2.
        exc = self.pi.exc_o
        comb += exc.happened.eq(d_out.error | l_out.err | self.align_intr)
        comb += exc.invalid.eq(l_out.invalid)
        comb += exc.alignment.eq(self.align_intr)

        # badtree, perm_error, rc_error, segment_fault
        comb += exc.badtree.eq(l_out.badtree)
        comb += exc.perm_error.eq(l_out.perm_error)
        comb += exc.rc_error.eq(l_out.rc_error)
        comb += exc.segment_fault.eq(l_out.segerr)

        # TODO some exceptions set SPRs

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        comb += dbus.adr.eq(dcache.wb_out.adr)
        comb += dbus.dat_w.eq(dcache.wb_out.dat)
        comb += dbus.sel.eq(dcache.wb_out.sel)
        comb += dbus.cyc.eq(dcache.wb_out.cyc)
        comb += dbus.stb.eq(dcache.wb_out.stb)
        comb += dbus.we.eq(dcache.wb_out.we)

        comb += dcache.wb_in.dat.eq(dbus.dat_r)
        comb += dcache.wb_in.ack.eq(dbus.ack)
        if hasattr(dbus, "stall"):
            comb += dcache.wb_in.stall.eq(dbus.stall)

        # write out d data only when flag set
        with m.If(self.d_w_valid):
            m.d.sync += d_in.data.eq(self.store_data)
        with m.Else():
            m.d.sync += d_in.data.eq(0)

        # this must move into the FSM, conditionally noticing that
        # the "blip" comes from self.d_validblip.
        # task 1: look up in dcache
        # task 2: if dcache fails, look up in MMU.
        # do **NOT** confuse the two.
        m.d.comb += d_in.load.eq(self.load)
        m.d.comb += d_in.byte_sel.eq(self.byte_sel)
        m.d.comb += d_in.addr.eq(self.addr)
        m.d.comb += d_in.nc.eq(self.nc)

        # XXX these should be possible to remove but for some reason
        # cannot be... yet. TODO, investigate
        m.d.comb += self.done.eq(d_out.valid)
        m.d.comb += self.load_data.eq(d_out.data)

        return m

    def ports(self):
        yield from super().ports()
        # TODO: memory ports


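# --------------------------------------------------------------------------
# Documentation-only sketch (not instantiated anywhere): a stripped-down
# model of the retry flow that LoadStore1.elaborate() builds, i.e. "ask
# dcache; on a recoverable error ask the MMU; then re-issue the dcache
# request".  All signal names here (req, dc_valid, dc_error, dc_paradox,
# mmu_done, rerun) are hypothetical stand-ins for the real d_in/d_out and
# l_in/l_out interfaces, and the error handling is deliberately simplified.
class LDSTRetrySketch(Elaboratable):
    def __init__(self):
        self.req = Signal()         # request "blip" from the outside world
        self.dc_valid = Signal()    # dcache says "data is ready"
        self.dc_error = Signal()    # dcache says "something went wrong"
        self.dc_paradox = Signal()  # unrecoverable: report straight away
        self.mmu_done = Signal()    # MMU has installed the missing TLB entry
        self.rerun = Signal()       # re-issue the dcache request
        self.state = Signal(State)

    def elaborate(self, platform):
        m = Module()
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                with m.If(self.req):
                    m.d.sync += self.state.eq(State.ACK_WAIT)
            with m.Case(State.ACK_WAIT):
                with m.If(self.dc_error & ~self.dc_paradox):
                    # recoverable: ask the MMU to walk the page table
                    m.d.sync += self.state.eq(State.MMU_LOOKUP)
                with m.If(self.dc_valid | (self.dc_error & self.dc_paradox)):
                    # either completed or reported: back to idle
                    m.d.sync += self.state.eq(State.IDLE)
            with m.Case(State.MMU_LOOKUP):
                with m.If(self.mmu_done):
                    # retry the dcache request with the new TLB entry
                    m.d.comb += self.rerun.eq(1)
                    m.d.sync += self.state.eq(State.ACK_WAIT)
        return m
# --------------------------------------------------------------------------
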
class TestSRAMLoadStore1(LoadStore1):
    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small test Memory (default: 32 entries)
        if (hasattr(pspec, "dmem_test_depth") and
            isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        print("TestSRAMLoadStore1 depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb = m.d.comb
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})
        dbus = self.dbus

        # directly connect the wishbone bus (self.dbus) to the test SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        for fanin in fanins:
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
        # connect address
        comb += sram.bus.adr.eq(dbus.adr)

        return m

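
# --------------------------------------------------------------------------
# Hypothetical usage sketch.  The attribute names shown (reg_wid, addr_wid,
# dmem_test_depth, disable_cache) are the ones this file actually reads from
# pspec; the real pspec object is constructed elsewhere in the SoC and
# carries more fields (DCache and make_wb_layout have requirements of their
# own), so this helper is illustrative only and is never called here.
def _example_pspec():
    """build a minimal stand-in pspec for experimenting with LoadStore1."""
    from types import SimpleNamespace
    return SimpleNamespace(reg_wid=64,           # data width
                           addr_wid=64,          # address width
                           dmem_test_depth=32,   # test SRAM depth
                           disable_cache=False)  # leave the dcache enabled
# --------------------------------------------------------------------------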