move skeleton to elaborate
[soc.git] / src / soc / fu / ldst / loadstore.py
1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl
4
5 Links:
6
7 * https://bugs.libre-soc.org/show_bug.cgi?id=465
8
9 """
10
11 from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
12 Record, Memory,
13 Const)
14 from nmutil.util import rising_edge
15 from enum import Enum, unique
16
17 from soc.experiment.dcache import DCache
18 from soc.experiment.pimem import PortInterfaceBase
19 from soc.experiment.mem_types import LoadStore1ToMMUType
20 from soc.experiment.mem_types import MMUToLoadStore1Type
21
22 from soc.minerva.wishbone import make_wb_layout
23 from soc.bus.sram import SRAM
24
25
@unique
class State(Enum):
    """LoadStore1 FSM state, mirroring microwatt loadstore1.vhdl."""
    # ready for instruction
    IDLE = 0
    # send 2nd request of unaligned xfer
    SECOND_REQ = 1
    # waiting for ack from dcache
    ACK_WAIT = 2
    # waiting for MMU to look up translation
    MMU_LOOKUP = 3
    # waiting for MMU to finish doing a tlbie
    TLBIE_WAIT = 4
    # write back converted SP data for lfs*
    FINISH_LFS = 5
    # extra cycle to complete an operation
    COMPLETE = 6
36
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    """PortInterface front-end for the microwatt-style DCache and MMU.

    Based on microwatt loadstore1.vhdl.  Translates PortInterfaceBase
    ld/st requests into DCache request/response (d_in/d_out) signalling.
    NOTE: the main FSM is currently only a skeleton (empty Switch cases
    in elaborate()); only the direct dcache hookup is functional so far.
    """
    def __init__(self, pspec):
        self.pspec = pspec
        # option (from pspec) to force every access non-cacheable
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache()
        self.d_in = self.dcache.d_in    # request into dcache
        self.d_out = self.dcache.d_out  # response from dcache
        self.l_in = LoadStore1ToMMUType()   # request to MMU
        self.l_out = MMUToLoadStore1Type()  # response from MMU
        # TODO microwatt
        self.mmureq = Signal()
        self.derror = Signal()

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # DSISR and DAR cached values. note that the MMU FSM is where
        # these are accessed by OP_MTSPR/OP_MFSPR, on behalf of LoadStore1.
        # by contrast microwatt has the spr set/get done *in* loadstore1.vhdl
        self.dsisr = Signal(64)
        self.dar = Signal(64)

        # state info for LD/ST
        self.done = Signal()
        # latch most of the input request
        self.load = Signal()            # 1 = load, 0 = store
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.addr = Signal(64)
        self.store_data = Signal(64)
        self.load_data = Signal(64)
        self.byte_sel = Signal(8)
        self.update = Signal()
        #self.xerc : xer_common_t;
        #self.reserve = Signal()
        #self.atomic = Signal()
        #self.atomic_last = Signal()
        #self.rc = Signal()
        self.nc = Signal() # non-cacheable access
        self.virt_mode = Signal()
        self.priv_mode = Signal()
        self.state = Signal(State)      # FSM state (skeleton only, so far)
        self.instr_fault = Signal()
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        #self.mode_32bit = Signal()
        self.wr_sel = Signal(2)
        self.interrupt = Signal()
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)

    def set_wr_addr(self, m, addr, mask, misalign):
        """Latch a store request: address, byte mask, misalignment flag."""
        m.d.comb += self.load.eq(0) # store operation

        m.d.comb += self.d_in.load.eq(0)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.addr.eq(addr)
        m.d.comb += self.align_intr.eq(misalign)
        # option to disable the cache entirely for write
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None

    def set_rd_addr(self, m, addr, mask, misalign):
        """Latch a load request and pulse the dcache valid via the blip."""
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_in.valid.eq(self.d_validblip)
        m.d.comb += self.load.eq(1) # load operation
        m.d.comb += self.d_in.load.eq(1)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.align_intr.eq(misalign)
        m.d.comb += self.addr.eq(addr)
        # BAD HACK! disable cacheing on LD when address is 0xCxxx_xxxx
        # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
        with m.If(addr[28:] == Const(0xc, 4)):
            m.d.comb += self.nc.eq(1)
        # option to disable the cache entirely for read
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None #FIXME return value

    def set_wr_data(self, m, data, wen):
        """Present store data; returns the "store done" signal."""
        # do the "blip" on write data
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_in.valid.eq(self.d_validblip)
        # put data into comb which is picked up in main elaborate()
        m.d.comb += self.d_w_valid.eq(1)
        m.d.comb += self.store_data.eq(data)
        #m.d.sync += self.d_in.byte_sel.eq(wen) # this might not be needed
        st_ok = self.done # TODO indicates write data is valid
        return st_ok

    def get_rd_data(self, m):
        """Return (load data, "load done") pair from the dcache response."""
        ld_ok = self.done # indicates read data is valid
        data = self.load_data # actual read data
        return data, ld_ok

    """
    if d_in.error = '1' then
        if d_in.cache_paradox = '1' then
            -- signal an interrupt straight away
            exception := '1';
            dsisr(63 - 38) := not r2.req.load;
            -- XXX there is no architected bit for this
            -- (probably should be a machine check in fact)
            dsisr(63 - 35) := d_in.cache_paradox;
        else
            -- Look up the translation for TLB miss
            -- and also for permission error and RC error
            -- in case the PTE has been updated.
            mmureq := '1';
            v.state := MMU_LOOKUP;
            v.stage1_en := '0';
        end if;
    end if;
    """

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb, sync = m.d.comb, m.d.sync

        # create dcache module
        m.submodules.dcache = dcache = self.dcache

        # temp vars
        d_in, d_out, l_out, dbus = self.d_in, self.d_out, self.l_out, self.dbus

        # fsm skeleton: transitions not yet implemented (all cases empty)
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                pass
            with m.Case(State.SECOND_REQ):
                # req.eq(1);
                # v.state.eq(ACK_WAIT)
                # v.last_dword.eq(0);
                pass
            with m.Case(State.ACK_WAIT):
                pass
            with m.Case(State.MMU_LOOKUP):
                pass
            with m.Case(State.TLBIE_WAIT):
                pass
            with m.Case(State.FINISH_LFS):
                pass
            with m.Case(State.COMPLETE):
                pass

        # dcache error handling, transliterated from the VHDL quoted above.
        # bit indices use 63-n, presumably PowerISA MSB0 DSISR numbering
        # - TODO confirm against the spec
        with m.If(d_out.error):
            with m.If(d_out.cache_paradox):
                sync += self.derror.eq(1)
                sync += self.dsisr[63 - 38].eq(~self.load)
                # XXX there is no architected bit for this
                # (probably should be a machine check in fact)
                sync += self.dsisr[63 - 35].eq(d_out.cache_paradox)

            with m.Else():
                # Look up the translation for TLB miss
                # and also for permission error and RC error
                # in case the PTE has been updated.
                sync += self.mmureq.eq(1)
                sync += self.state.eq(State.MMU_LOOKUP)

        exc = self.pi.exc_o

        # happened, alignment, instr_fault, invalid.
        # note that all of these flow through - eventually to the TRAP
        # pipeline, via PowerDecoder2.
        comb += exc.happened.eq(d_out.error | l_out.err | self.align_intr)
        comb += exc.invalid.eq(l_out.invalid)
        comb += exc.alignment.eq(self.align_intr)

        # badtree, perm_error, rc_error, segment_fault
        comb += exc.badtree.eq(l_out.badtree)
        comb += exc.perm_error.eq(l_out.perm_error)
        comb += exc.rc_error.eq(l_out.rc_error)
        comb += exc.segment_fault.eq(l_out.segerr)

        # TODO some exceptions set SPRs

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        comb += dbus.adr.eq(dcache.wb_out.adr)
        comb += dbus.dat_w.eq(dcache.wb_out.dat)
        comb += dbus.sel.eq(dcache.wb_out.sel)
        comb += dbus.cyc.eq(dcache.wb_out.cyc)
        comb += dbus.stb.eq(dcache.wb_out.stb)
        comb += dbus.we.eq(dcache.wb_out.we)

        comb += dcache.wb_in.dat.eq(dbus.dat_r)
        comb += dcache.wb_in.ack.eq(dbus.ack)
        # stall is optional in the wishbone layout, so connect conditionally
        if hasattr(dbus, "stall"):
            comb += dcache.wb_in.stall.eq(dbus.stall)

        # create a blip (single pulse) on valid read/write request
        m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))

        # write out d data only when flag set
        with m.If(self.d_w_valid):
            m.d.sync += d_in.data.eq(self.store_data)
        with m.Else():
            m.d.sync += d_in.data.eq(0)

        # hook the latched request fields directly up to the dcache
        m.d.comb += d_in.load.eq(self.load)
        m.d.comb += d_in.byte_sel.eq(self.byte_sel)
        m.d.comb += d_in.addr.eq(self.addr)
        m.d.comb += d_in.nc.eq(self.nc)
        m.d.comb += self.done.eq(d_out.valid)
        m.d.comb += self.load_data.eq(d_out.data)

        return m

    def ports(self):
        yield from super().ports()
        # TODO: memory ports
263
264
class TestSRAMLoadStore1(LoadStore1):
    """LoadStore1 variant whose wishbone dbus is wired to a small test SRAM.

    Used for unit testing: instead of real memory, the dcache wishbone
    master talks to an nmigen-soc SRAM built from a small Memory.
    """
    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small 32-entry Memory, unless overridden via pspec.dmem_test_depth
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        print("TestSRAMBareLoadStoreUnit depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb = m.d.comb
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})
        dbus = self.dbus

        # directly connect the wishbone bus of LoadStoreUnitInterface to SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            # bugfix: this connection was previously emitted twice per
            # signal, adding a redundant duplicate comb statement
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        for fanin in fanins:
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
        # connect address
        comb += sram.bus.adr.eq(dbus.adr)

        return m
301