3 # License for original copyright mmu.vhdl by microwatt authors: CC4
4 # License for copyrighted modifications made in mmu.py: LGPLv3+
6 # This derivative work although includes CC4 licensed material is
7 # covered by the LGPLv3+
11 based on Anton Blanchard microwatt mmu.vhdl
from enum import Enum, unique

from nmigen import (C, Const, Module, Signal, Elaboratable, Mux, Cat, Repl)
from nmigen.cli import main
from nmigen.cli import rtlil

from nmutil.iocontrol import RecordObject
from nmutil.byterev import byte_reverse
from nmutil.mask import Mask, masked
from nmutil.util import Display
from nmutil.util import wrap

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle

from soc.experiment.mem_types import (LoadStore1ToMMUType,
                                      MMUToLoadStore1Type,
                                      MMUToDCacheType,
                                      DCacheToMMUType,
                                      MMUToICacheType)
# Radix Tree Page Directory Entry Record, TODO put this somewhere sensible
# v3.0C Book III p1015-1016 section 6.7.10.1
class RTPDE(RecordObject):
    """Radix Tree Page Directory (non-leaf) Entry.

    Fields are declared in LSB0 order; each comment gives the MSB0 bit
    numbers from the ISA figure followed by the LSB0 range.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.nls   = Signal(5)  # Next Level Size bits 59:63 LSB0 0:4
        self.rs1   = Signal(3)  # Reserved bits 56:58 LSB0 5:7
        self.nlb   = Signal(52) # Next Level Base bit 4:55 LSB0 8:59
        self.rs2   = Signal(2)  # Reserved bit 2:3 LSB0 60:61
        self.leaf  = Signal(1)  # leaf bit 1 LSB0 62
        self.valid = Signal(1)  # valid bit 0 LSB0 63
# Radix Tree Page Table Entry Record, TODO put this somewhere sensible
# v3.0C Book III p1016 section 6.7.10.2
class RTPTE(RecordObject):
    """Radix Tree Page Table (leaf) Entry.

    Fields are declared in LSB0 order; each comment gives the MSB0 bit
    numbers from the ISA figure followed by the LSB0 range.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.eaa   = Signal(4)  # Encoded Access Auth bits 60:63 LSB0 0:3
        self.att   = Signal(2)  # Attributes bits 58:59 LSB0 4:5
        self.rs1   = Signal(1)  # Reserved bit 57 LSB0 6
        self.c     = Signal(1)  # Change bit 56 LSB0 7
        self.r     = Signal(1)  # Reference bit 55 LSB0 8
        self.sw    = Signal(3)  # SW bits 1:3 bits 52:54 LSB0 9:11
        self.rpn   = Signal(45) # Real Page Number bits 7:51 LSB0 12:56
        self.rs2   = Signal(4)  # Reserved bit 3:6 LSB0 57-60
        self.sw0   = Signal(1)  # SW bit 0 bit 2 LSB0 61
        self.leaf  = Signal(1)  # leaf bit 1 LSB0 62
        self.valid = Signal(1)  # valid bit 0 LSB0 63
# Encoded Access Authority (EAA) bit positions within RTPTE.eaa.
# The ISA numbers these in MSB0 order; here they are turned round
# to LSB0 order to index the eaa Signal directly.
# TODO: sigh. use botchify and put them in openpower.consts
EAA_PRIV = 3  # bit 0 (in MSB0) set ==> problem-state banned (priv=1 only)
EAA_RD   = 2  # bit 1 (in MSB0) set ==> loads are permitted
EAA_WR   = 1  # bit 2 (in MSB0) set ==> load and stores permitted
EAA_EXE  = 0  # bit 3 (in MSB0) set ==> execute permitted

# when True, extra Display() debug output is emitted on invalid translations
display_invalid = True
77 IDLE
= 0 # zero is default on reset for r.state
# Process Table Record - near-identical to Page Table Record (same format)
# v3.0C Book III Section 6.7.6.2 p1004
class PRTBL(RecordObject):
    """Process Table Record.

    Fields are declared in LSB0 order; each comment gives the MSB0 bit
    numbers from the ISA figure followed by the LSB0 range.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.rpds = Signal(5)  # Root Page Directory Size 59:63 LSB0 0:4
        self.rts2 = Signal(3)  # Radix Tree Size part 2 56:58 LSB0 5:7
        self.rpdb = Signal(52) # Root Page Directory Base 4:55 LSB0 8:59
        self.rsv2 = Signal(1)  # reserved 3 LSB0 60
        self.rts1 = Signal(2)  # Radix Tree Size part 1 1:2 LSB0 61:62
        self.rsv1 = Signal(1)  # reserved 0 LSB0 63
class RegStage(RecordObject):
    """Registered (per-clock) MMU state.

    Holds the latched request from loadstore1, the SPR configuration
    values, the radix-walk state machine state and its intermediate
    results, and the completion/error flags that are multiplexed back
    out to loadstore1.

    NOTE(review): the ``done``, ``err`` and ``priv`` fields are restored
    here because the rest of this module reads/writes them
    (``r.done``/``r.err`` are driven onto ``l_out`` in elaborate(),
    ``v.priv`` is latched from ``l_in.priv`` in radix_tree_idle() and
    ``r.priv`` is tested in radix_read_wait()); they were absent from
    the visible class body.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal()
        self.iside = Signal()      # instruction-side (I-cache) request
        self.store = Signal()      # store (as opposed to load) request
        self.priv = Signal()       # privileged-state request
        self.addr = Signal(64)
        self.inval_all = Signal()
        # config SPRs
        self.prtbl = Signal(64)
        self.pid = Signal(32)
        # internal state
        self.state = Signal(State) # resets to IDLE
        self.done = Signal()       # request completed this cycle
        self.err = Signal()        # request completed with an error
        # there are 4 quadrants (0-3): here we only support 2 (pt0 and pt3)
        # these are bits 62-63 of any given address.
        # except in segment_check, bit 62 is ignored
        # Quadrant Select can be seen in v3.0C 6.7.10 p1015 book III figure 36
        # and is further described in 6.7.11.3 p1019
        self.pgtbl0 = Signal(64)
        self.pt0_valid = Signal()
        self.pgtbl3 = Signal(64)
        self.pt3_valid = Signal()
        # radix-walk working values
        self.shift = Signal(6)
        self.mask_size = Signal(5)
        self.pgbase = Signal(56)
        self.pde = Signal(64)
        # error classification for the finished request
        self.invalid = Signal()
        self.badtree = Signal()
        self.segerror = Signal()
        self.perm_err = Signal()
        self.rc_error = Signal()
# Page Table Record - note that HR bit is treated as part of rts below
# (near-identical to Process Table Record - same format)
# v3.0C Book III Section 6.7.6.1 p1003
class PGTBL(RecordObject):
    """Page Table Record.

    Fields are declared in LSB0 order; each comment gives the MSB0 bit
    numbers from the ISA figure followed by the LSB0 range.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.rpds = Signal(5)  # Root Page Directory Size 59:63 LSB0 0:4
        self.rts2 = Signal(3)  # Radix Tree Size part 2 56:58 LSB0 5:7
        self.rpdb = Signal(52) # Root Page Directory Base 4:55 LSB0 8:59
        self.s    = Signal(1)  # Host Secure 3 LSB0 60
        self.rts1 = Signal(2)  # Radix Tree Size part 1 1:2 LSB0 61:62
        self.hr   = Signal(1)  # Host Radix 0 LSB0 63
157 class MMU(Elaboratable
):
160 Supports 4-level trees as in arch 3.0B, but not the
161 two-step translation for guests under a hypervisor
162 (i.e. there is no gRA -> hRA translation).
165 self
.l_in
= LoadStore1ToMMUType("l_in")
166 self
.l_out
= MMUToLoadStore1Type("l_out")
167 self
.d_out
= MMUToDCacheType("d_out")
168 self
.d_in
= DCacheToMMUType("d_in")
169 self
.i_out
= MMUToICacheType("i_out")
171 def radix_tree_idle(self
, m
, l_in
, r
, v
):
172 """radix_tree_idle - the main decision-point. valid actions include:
173 * LDST incoming TLBIE request (invalidate TLB entry)
174 * LDST incoming RADIX walk request
175 * set either PRTBL or PID SPRs (which then fires a TLB invalidate)
181 pgtbl
= PGTBL("pgtbl")
185 with m
.If(l_in
.addr
[63]): # quadrant 3
186 comb
+= pgtbl
.eq(r
.pgtbl3
)
187 comb
+= pt_valid
.eq(r
.pt3_valid
)
189 comb
+= pgtbl
.eq(r
.pgtbl0
)
190 comb
+= pt_valid
.eq(r
.pt0_valid
)
192 # rts == radix tree size, number of address bits
193 # being translated. takes bits 5:7 and 61:62
194 comb
+= rts
.eq(Cat(pgtbl
.rts2
, pgtbl
.rts1
, Const(0)))
196 # mbits == number of address bits to index top
197 # level of tree. takes bits 0:4
198 comb
+= mbits
.eq(pgtbl
.rpds
)
200 # set v.shift to rts so that we can use finalmask
201 # for the segment check.
202 # note: rpdb (52 bits long) is truncated to 48 bits
203 comb
+= v
.shift
.eq(rts
)
204 comb
+= v
.mask_size
.eq(mbits
[0:5])
206 # create the page base from root page directory base (48 bits with 8 0s)
207 comb
+= v
.pgbase
.eq(Cat(C(0, 8), pgtbl
.rpdb
[:48])) # bits 8:55
209 # request either TLB invalidate
210 # or start a RADIX walk
212 with m
.If(l_in
.valid
):
213 comb
+= v
.addr
.eq(l_in
.addr
)
214 comb
+= v
.iside
.eq(l_in
.iside
)
215 comb
+= v
.store
.eq(~
(l_in
.load | l_in
.iside
))
216 comb
+= v
.priv
.eq(l_in
.priv
)
218 comb
+= Display("state %d l_in.valid addr %x iside %d store %d "
219 "rts %x mbits %x pt_valid %d",
220 v
.state
, v
.addr
, v
.iside
, v
.store
,
221 rts
, mbits
, pt_valid
)
223 with m
.If(l_in
.tlbie
):
224 # Invalidate all iTLB/dTLB entries for
225 # tlbie with RB[IS] != 0 or RB[AP] != 0,
227 comb
+= v
.inval_all
.eq(l_in
.slbia
234 # The RIC field of the tlbie instruction
235 # comes across on the sprn bus as bits 2--3.
236 # RIC=2 flushes process table caches.
237 with m
.If(l_in
.sprn
[3]):
238 comb
+= v
.pt0_valid
.eq(0)
239 comb
+= v
.pt3_valid
.eq(0)
240 comb
+= v
.state
.eq(State
.DO_TLBIE
)
242 comb
+= v
.valid
.eq(1)
243 with m
.If(~pt_valid
):
244 # need to fetch process table entry
245 # set v.shift so we can use finalmask
246 # for generating the process table
248 prtbl
= PRTBL("prtbl")
249 comb
+= prtbl
.eq(r
.prtbl
)
250 comb
+= v
.shift
.eq(prtbl
.rpds
)
251 comb
+= v
.state
.eq(State
.PROC_TBL_READ
)
253 with m
.Elif(mbits
== 0):
254 # Use RPDS = 0 to disable radix tree walks
255 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
256 comb
+= v
.invalid
.eq(1)
258 sync
+= Display("MMUBUG: Use RPDS = 0 to disable"
261 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
263 # set either PID or PRTBL SPRs
264 # (then invalidate TLBs)
266 with m
.If(l_in
.mtspr
):
267 # Move to PID needs to invalidate L1 TLBs
268 # and cached pgtbl0 value.
269 # Move to PRTBL does that plus invalidating the cached
270 # pgtbl3 value as well.
271 with m
.If(~l_in
.sprn
[9]):
272 comb
+= v
.pid
.eq(l_in
.rs
[0:32])
274 comb
+= v
.prtbl
.eq(l_in
.rs
)
275 comb
+= v
.pt3_valid
.eq(0)
277 comb
+= v
.pt0_valid
.eq(0)
278 comb
+= v
.inval_all
.eq(1)
279 comb
+= v
.state
.eq(State
.DO_TLBIE
)
281 def proc_tbl_wait(self
, m
, v
, r
, data
):
285 prtbl
= PRTBL("prtblw")
286 comb
+= prtbl
.eq(data
)
288 with m
.If(r
.addr
[63]): # top bit of quadrant selects pt3
289 comb
+= v
.pgtbl3
.eq(prtbl
)
290 comb
+= v
.pt3_valid
.eq(1)
292 comb
+= v
.pgtbl0
.eq(prtbl
)
293 comb
+= v
.pt0_valid
.eq(1)
295 # rts == radix tree size, # address bits being translated
296 comb
+= rts
.eq(Cat(prtbl
.rts2
, prtbl
.rts1
, Const(0)))
298 # mbits == # address bits to index top level of tree
299 comb
+= mbits
.eq(prtbl
.rpds
)
301 # set v.shift to rts so that we can use finalmask for the segment check
302 comb
+= v
.shift
.eq(rts
)
303 comb
+= v
.mask_size
.eq(mbits
[0:5])
305 # create the page base from root page directory base (48 bits with 8 0s)
306 comb
+= v
.pgbase
.eq(Cat(C(0, 8), prtbl
.rpdb
[:48])) # bits 8:55
309 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
311 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
312 comb
+= v
.invalid
.eq(1)
313 if (display_invalid
): m
.d
.sync
+= Display("MMU: mbits is invalid")
315 def radix_read_wait(self
, m
, v
, r
, d_in
, data
):
319 rpte
= RTPTE(name
="radix_rpte") # page-table (leaf) entry
320 rpde
= RTPDE(name
="radix_rpde") # page-directory (non-leaf) entry
330 comb
+= Display("RDW %016x done %d "
331 "perm %d rc %d mbits %d shf %d "
332 "valid %d leaf %d bad %d",
333 data
, d_in
.done
, perm_ok
, rc_ok
,
334 mbits
, r
.shift
, valid
, leaf
, badtree
)
336 # set pde and interpret as Radix Tree Page Table Entry (leaf=1 case)
337 comb
+= v
.pde
.eq(data
)
338 comb
+= rpte
.eq(data
)
339 comb
+= rpde
.eq(data
)
342 # valid & leaf: RADIX Page-Table Entry
344 # check permissions and RC bits
345 with m
.If(r
.priv | ~eaa
[EAA_PRIV
]):
346 with m
.If(r
.iside
): # instruction-side request
347 # no IAMR, so no KUEP support for now
348 # deny execute permission if cache inhibited
349 comb
+= perm_ok
.eq(eaa
[EAA_EXE
] & ~rpte
.att
[1])
351 # Load/Store (read/write)
352 comb
+= perm_ok
.eq(eaa
[EAA_WR
] |
353 (eaa
[EAA_RD
] & ~r
.store
))
354 comb
+= rc_ok
.eq(rpte
.r
& (rpte
.c | ~r
.store
))
356 # permissions / rc ok, load TLB, otherwise report error
357 with m
.If(perm_ok
& rc_ok
):
358 comb
+= v
.state
.eq(State
.RADIX_LOAD_TLB
)
360 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
361 comb
+= v
.perm_err
.eq(~perm_ok
)
362 # permission error takes precedence over RC error
363 comb
+= v
.rc_error
.eq(perm_ok
)
365 # valid & !leaf: RADIX Page-Directory Entry
367 comb
+= mbits
.eq(rpde
.nls
) # 5 bits NLS into 6-bit-long mbits
368 comb
+= badtree
.eq((mbits
< 5) |
372 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
373 comb
+= v
.badtree
.eq(1)
375 comb
+= v
.shift
.eq(r
.shift
- mbits
)
376 comb
+= v
.mask_size
.eq(mbits
)
377 # pagebase is first 48 bits of NLB, shifted up 1 byte
378 comb
+= v
.pgbase
.eq(Cat(C(0, 8), rpde
.nlb
[:48]))
379 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
382 # non-present PTE, generate a DSI
383 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
384 comb
+= v
.invalid
.eq(1)
385 if (display_invalid
):
386 sync
+= Display("MMU: non-present PTE, generate a DSI")
388 def segment_check(self
, m
, v
, r
, data
, finalmask
):
389 """segment_check: checks validity of the request before doing a
390 RADIX lookup. reports either segment error or bad tree if not ok
396 comb
+= mbits
.eq(r
.mask_size
)
397 comb
+= v
.shift
.eq(r
.shift
+ (31 - 12) - mbits
)
398 comb
+= nonzero
.eq((r
.addr
[31:62] & ~finalmask
[0:31]).bool())
399 with m
.If((r
.addr
[63] ^ r
.addr
[62]) # pt3 == 0b11 and pt1 == 0b00
401 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
402 comb
+= v
.segerror
.eq(1)
403 with m
.Elif((mbits
< 5) |
(mbits
> 16) |
404 (mbits
> (r
.shift
+ (31-12)))):
405 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
406 comb
+= v
.badtree
.eq(1)
408 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
410 def mmu_0(self
, m
, r
, rin
, l_in
, l_out
, d_out
, addrsh
, mask
):
414 # Multiplex internal SPR values back to loadstore1,
415 # selected by l_in.sprn.
416 with m
.If(l_in
.sprn
[9]):
417 comb
+= l_out
.sprval
.eq(r
.prtbl
)
419 comb
+= l_out
.sprval
.eq(r
.pid
)
421 with m
.If(rin
.valid
):
422 sync
+= Display("MMU got tlb miss for %x", rin
.addr
)
424 with m
.If(l_out
.done
):
425 sync
+= Display("MMU completing op without error")
427 with m
.If(l_out
.err
):
428 sync
+= Display("MMU completing op with err invalid="
429 "%d badtree=%d", l_out
.invalid
, l_out
.badtree
)
431 with m
.If(rin
.state
== State
.RADIX_LOOKUP
):
432 sync
+= Display ("radix lookup shift=%d msize=%d",
433 rin
.shift
, rin
.mask_size
)
435 with m
.If(r
.state
== State
.RADIX_LOOKUP
):
436 sync
+= Display(f
"send load addr=%x addrsh=%d mask=%x",
437 d_out
.addr
, addrsh
, mask
)
439 # update the internal register
442 def elaborate(self
, platform
):
450 finalmask
= Signal(44)
452 self
.rin
= rin
= RegStage("r_in")
455 # get access to prtbl and pid for debug / testing purposes ONLY
456 # (actually, not needed, because setup_regs() triggers mmu direct)
457 # self._prtbl = r.prtbl
466 self
.mmu_0(m
, r
, rin
, l_in
, l_out
, d_out
, addrsh
, mask
)
475 prtb_adr
= Signal(64)
476 pgtb_adr
= Signal(64)
478 tlb_data
= Signal(64)
482 comb
+= v
.valid
.eq(0)
486 comb
+= v
.invalid
.eq(0)
487 comb
+= v
.badtree
.eq(0)
488 comb
+= v
.segerror
.eq(0)
489 comb
+= v
.perm_err
.eq(0)
490 comb
+= v
.rc_error
.eq(0)
491 comb
+= tlb_load
.eq(0)
492 comb
+= itlb_load
.eq(0)
493 comb
+= tlbie_req
.eq(0)
494 comb
+= v
.inval_all
.eq(0)
495 comb
+= prtbl_rd
.eq(0)
497 # Radix tree data structures in memory are
498 # big-endian, so we need to byte-swap them
499 data
= byte_reverse(m
, "data", d_in
.data
, 8)
501 # generate mask for extracting address fields for PTE addr generation
502 m
.submodules
.pte_mask
= pte_mask
= Mask(16-5)
503 comb
+= pte_mask
.shift
.eq(r
.mask_size
- 5)
504 comb
+= mask
.eq(Cat(C(0x1f, 5), pte_mask
.mask
))
506 # generate mask for extracting address bits to go in
507 # TLB entry in order to support pages > 4kB
508 m
.submodules
.tlb_mask
= tlb_mask
= Mask(44)
509 comb
+= tlb_mask
.shift
.eq(r
.shift
)
510 comb
+= finalmask
.eq(tlb_mask
.mask
)
512 with m
.If(r
.state
!= State
.IDLE
):
513 sync
+= Display("MMU state %d %016x", r
.state
, data
)
519 with m
.Switch(r
.state
):
520 with m
.Case(State
.IDLE
):
521 self
.radix_tree_idle(m
, l_in
, r
, v
)
523 with m
.Case(State
.DO_TLBIE
):
525 comb
+= tlbie_req
.eq(1)
526 comb
+= v
.state
.eq(State
.TLB_WAIT
)
528 with m
.Case(State
.TLB_WAIT
):
529 with m
.If(d_in
.done
):
530 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
532 with m
.Case(State
.PROC_TBL_READ
):
533 sync
+= Display(" TBL_READ %016x", prtb_adr
)
535 comb
+= prtbl_rd
.eq(1)
536 comb
+= v
.state
.eq(State
.PROC_TBL_WAIT
)
538 with m
.Case(State
.PROC_TBL_WAIT
):
539 with m
.If(d_in
.done
):
540 self
.proc_tbl_wait(m
, v
, r
, data
)
543 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
544 comb
+= v
.badtree
.eq(1)
546 with m
.Case(State
.SEGMENT_CHECK
):
547 self
.segment_check(m
, v
, r
, data
, finalmask
)
549 with m
.Case(State
.RADIX_LOOKUP
):
550 sync
+= Display(" RADIX_LOOKUP")
552 comb
+= v
.state
.eq(State
.RADIX_READ_WAIT
)
554 with m
.Case(State
.RADIX_READ_WAIT
):
555 sync
+= Display(" READ_WAIT")
556 with m
.If(d_in
.done
):
557 self
.radix_read_wait(m
, v
, r
, d_in
, data
)
559 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
560 comb
+= v
.badtree
.eq(1)
562 with m
.Case(State
.RADIX_LOAD_TLB
):
563 comb
+= tlb_load
.eq(1)
566 comb
+= v
.state
.eq(State
.TLB_WAIT
)
568 comb
+= itlb_load
.eq(1)
569 comb
+= v
.state
.eq(State
.IDLE
)
571 with m
.Case(State
.RADIX_FINISH
):
572 sync
+= Display(" RADIX_FINISH")
573 comb
+= v
.state
.eq(State
.IDLE
)
575 # check and report either error or done.
576 with m
.If((v
.state
== State
.RADIX_FINISH
) |
577 ((v
.state
== State
.RADIX_LOAD_TLB
) & r
.iside
)):
578 comb
+= v
.err
.eq(v
.invalid | v
.badtree | v
.segerror
579 | v
.perm_err | v
.rc_error
)
580 comb
+= v
.done
.eq(~v
.err
)
582 # PID is only valid if MSB of address is zero, top 2 bits are Quadrant
583 with m
.If(~r
.addr
[63]): # quadrant 0 (pt0)
584 comb
+= effpid
.eq(r
.pid
)
586 # calculate Process Table Address
587 pr24
= Signal(24, reset_less
=True)
588 prtbla
= PRTBL("prtbla")
589 comb
+= prtbla
.eq(r
.prtbl
)
591 comb
+= pr24
.eq(masked(rpdb
[4:28], effpid
[8:32], finalmask
))
592 comb
+= prtb_adr
.eq(Cat(C(0, 4), effpid
[0:8], pr24
, rpdb
[28:48]))
594 # calculate Page Table Address
595 pg16
= Signal(16, reset_less
=True)
596 comb
+= pg16
.eq(masked(r
.pgbase
[3:19], addrsh
, mask
))
597 comb
+= pgtb_adr
.eq(Cat(C(0, 3), pg16
, r
.pgbase
[19:56]))
599 # calculate Page Table Entry from Real Page Number (leaf=1, RTPTE)
600 rpte
= RTPTE(name
="rpte")
601 comb
+= rpte
.eq(r
.pde
)
602 pd44
= Signal(44, reset_less
=True)
603 comb
+= pd44
.eq(masked(rpte
.rpn
, r
.addr
[12:56], finalmask
))
604 comb
+= pte
.eq(Cat(r
.pde
[0:12], pd44
))
610 with m
.If(tlbie_req
):
611 comb
+= addr
.eq(r
.addr
)
612 with m
.Elif(tlb_load
):
613 comb
+= addr
.eq(Cat(C(0, 12), r
.addr
[12:64]))
614 comb
+= tlb_data
.eq(pte
)
615 with m
.Elif(prtbl_rd
):
616 comb
+= addr
.eq(prtb_adr
)
618 comb
+= addr
.eq(pgtb_adr
)
620 # connect to other interfaces: LDST, D-Cache, I-Cache
621 comb
+= l_out
.done
.eq(r
.done
)
622 comb
+= l_out
.err
.eq(r
.err
)
623 comb
+= l_out
.invalid
.eq(r
.invalid
)
624 comb
+= l_out
.badtree
.eq(r
.badtree
)
625 comb
+= l_out
.segerr
.eq(r
.segerror
)
626 comb
+= l_out
.perm_error
.eq(r
.perm_err
)
627 comb
+= l_out
.rc_error
.eq(r
.rc_error
)
629 comb
+= d_out
.valid
.eq(dcreq
)
630 comb
+= d_out
.tlbie
.eq(tlbie_req
)
631 comb
+= d_out
.doall
.eq(r
.inval_all
)
632 comb
+= d_out
.tlbld
.eq(tlb_load
)
633 comb
+= d_out
.addr
.eq(addr
)
634 comb
+= d_out
.pte
.eq(tlb_data
)
636 comb
+= i_out
.tlbld
.eq(itlb_load
)
637 comb
+= i_out
.tlbie
.eq(tlbie_req
)
638 comb
+= i_out
.doall
.eq(r
.inval_all
)
639 comb
+= i_out
.addr
.eq(addr
)
640 comb
+= i_out
.pte
.eq(tlb_data
)
647 """simulator process for getting memory load requests
653 return int.from_bytes(x
.to_bytes(8, byteorder
='little'),
654 byteorder
='big', signed
=False)
656 mem
= {0x0: 0x000000, # to get mtspr prtbl working
658 0x10000: # PARTITION_TABLE_2
659 # PATB_GR=1 PRTB=0x1000 PRTS=0xb
660 b(0x800000000100000b),
662 0x30000: # RADIX_ROOT_PTE
663 # V = 1 L = 0 NLB = 0x400 NLS = 9
664 b(0x8000000000040009),
666 0x40000: # RADIX_SECOND_LEVEL
667 # V = 1 L = 1 SW = 0 RPN = 0
668 # R = 1 C = 1 ATT = 0 EAA 0x7
669 b(0xc000000000000187),
671 0x1000000: # PROCESS_TABLE_3
672 # RTS1 = 0x2 RPDB = 0x300 RTS2 = 0x5 RPDS = 13
673 b(0x40000000000300ad),
677 while True: # wait for dc_valid
680 dc_valid
= yield (dut
.d_out
.valid
)
684 addr
= yield dut
.d_out
.addr
686 print (" DCACHE LOOKUP FAIL %x" % (addr
))
692 yield dut
.d_in
.data
.eq(data
)
693 print (" DCACHE GET %x data %x" % (addr
, data
))
694 yield dut
.d_in
.done
.eq(1)
696 yield dut
.d_in
.done
.eq(0)
700 while not stop
: # wait for dc_valid / err
701 l_done
= yield (dut
.l_out
.done
)
702 l_err
= yield (dut
.l_out
.err
)
703 l_badtree
= yield (dut
.l_out
.badtree
)
704 l_permerr
= yield (dut
.l_out
.perm_error
)
705 l_rc_err
= yield (dut
.l_out
.rc_error
)
706 l_segerr
= yield (dut
.l_out
.segerr
)
707 l_invalid
= yield (dut
.l_out
.invalid
)
708 if (l_done
or l_err
or l_badtree
or
709 l_permerr
or l_rc_err
or l_segerr
or l_invalid
):
712 yield dut
.l_in
.valid
.eq(0) # data already in MMU by now
713 yield dut
.l_in
.mtspr
.eq(0) # captured by RegStage(s)
714 yield dut
.l_in
.load
.eq(0) # can reset everything safely
719 # MMU MTSPR set prtbl
720 yield dut
.l_in
.mtspr
.eq(1)
721 yield dut
.l_in
.sprn
[9].eq(1) # totally fake way to set SPR=prtbl
722 yield dut
.l_in
.rs
.eq(0x1000000) # set process table
723 yield dut
.l_in
.valid
.eq(1)
724 yield from mmu_wait(dut
)
726 yield dut
.l_in
.sprn
.eq(0)
727 yield dut
.l_in
.rs
.eq(0)
730 prtbl
= yield (dut
.rin
.prtbl
)
731 print ("prtbl after MTSPR %x" % prtbl
)
732 assert prtbl
== 0x1000000
734 #yield dut.rin.prtbl.eq(0x1000000) # manually set process table
739 yield dut
.l_in
.load
.eq(1)
740 yield dut
.l_in
.priv
.eq(1)
741 yield dut
.l_in
.addr
.eq(0x10000)
742 yield dut
.l_in
.valid
.eq(1)
743 yield from mmu_wait(dut
)
745 addr
= yield dut
.d_out
.addr
746 pte
= yield dut
.d_out
.pte
747 l_done
= yield (dut
.l_out
.done
)
748 l_err
= yield (dut
.l_out
.err
)
749 l_badtree
= yield (dut
.l_out
.badtree
)
750 print ("translated done %d err %d badtree %d addr %x pte %x" % \
751 (l_done
, l_err
, l_badtree
, addr
, pte
))
753 yield dut
.l_in
.priv
.eq(0)
754 yield dut
.l_in
.addr
.eq(0)
762 vl
= rtlil
.convert(dut
, ports
=[])#dut.ports())
763 with
open("test_mmu.il", "w") as f
:
767 m
.submodules
.mmu
= dut
773 sim
.add_sync_process(wrap(mmu_sim(dut
)))
774 sim
.add_sync_process(wrap(dcache_get(dut
)))
775 with sim
.write_vcd('test_mmu.vcd'):
778 if __name__
== '__main__':