Continue tidy-up: comment removal/review; use the byte_reverse function.
[soc.git] / src / soc / experiment / mmu.py
1 """MMU
2
3 based on Anton Blanchard microwatt mmu.vhdl
4
5 """
from enum import Enum, unique

from nmigen import (Module, Signal, Elaboratable, Mux, Cat, Repl, Const)
from nmigen.cli import main

from nmutil.iocontrol import RecordObject
from nmutil.byterev import byte_reverse

from soc.experiment.mem_types import (LoadStore1ToMmuType,
                                      MmuToLoadStore1Type,
                                      MmuToDcacheType,
                                      DcacheToMmuType,
                                      MmuToIcacheType)
17
18 # -- Radix MMU
19 # -- Supports 4-level trees as in arch 3.0B, but not the
20 # -- two-step translation
21 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
22
@unique
class State(Enum):
    """MMU state-machine states.

    Value 0 (IDLE) is deliberate: it is the default on reset
    for r.state.
    """
    IDLE = 0             # zero is default on reset for r.state
    DO_TLBIE = 1         # issue a TLB invalidate to the dcache
    TLB_WAIT = 2         # wait for dcache to complete TLB op
    PROC_TBL_READ = 3    # fetch a process-table entry
    PROC_TBL_WAIT = 4    # wait for process-table read data
    SEGMENT_CHECK = 5    # validate the effective-address segment
    RADIX_LOOKUP = 6     # issue a radix-tree PDE/PTE read
    RADIX_READ_WAIT = 7  # wait for radix-tree read data
    RADIX_LOAD_TLB = 8   # load the translated PTE into the TLB
    RADIX_FINISH = 9     # report done/error back to loadstore1
36
class AddrMaskGen(Elaboratable):
    """Generate the mask for extracting address fields for PTE
    address generation.

    Combinational: bits 0-4 of the mask are always set (mask_count
    has to be >= 5); each bit 5..15 is set while its index is below
    mask_size.

    Inputs:  mask_size (5 bits) -- number of valid mask bits
    Outputs: mask (16 bits)     -- the generated field-extraction mask
    """
    def __init__(self):
        super().__init__()
        self.mask_size = Signal(5)  # input: r.mask_size
        self.msk = Signal(16)       # internal/intermediate mask value
        self.mask = Signal(16)      # output

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        msk = self.msk

        # mask_count has to be >= 5: the low five bits are always 1
        # (VHDL: m := x"001f")
        comb += msk[0:5].eq(0x1f)

        # set each bit 5..15 while its index is below mask_size
        for i in range(5, 16):
            comb += msk[i].eq(i < self.mask_size)

        comb += self.mask.eq(msk)

        return m
78
class FinalMaskGen(Elaboratable):
    """Generate the mask for extracting the address bits that go into
    the TLB entry, in order to support pages larger than 4kB.

    Combinational: bit i of finalmask is set while i < shift.

    Inputs:  shift (6 bits)       -- r.shift, the remaining shift count
    Outputs: finalmask (44 bits)  -- TLB-entry address-bit mask
    """
    def __init__(self):
        super().__init__()
        self.shift = Signal(6)      # input: r.shift
        self.msk = Signal(44)       # internal/intermediate mask value
        self.finalmask = Signal(44) # output

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        msk = self.msk

        # set each bit individually while its index is below shift
        # (VHDL: for i in 0 to 43 loop if i < r.shift then m(i) := '1')
        for i in range(44):
            comb += msk[i].eq(i < self.shift)

        comb += self.finalmask.eq(msk)

        return m
116
class RegStage(RecordObject):
    """All MMU state that is carried from one clock cycle to the next.

    One instance holds the current state (r) and another the next
    state (rin); the two are connected with sync += r.eq(rin).
    """
    def __init__(self, name=None):
        # NOTE: RecordObject.__init__ must not be passed self as the
        # layout argument
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)
        self.pid = Signal(32, reset_less=True)
        # internal state
        # state must be a Signal (not a bare enum value) so it can be
        # latched and switched on; resets to State.IDLE (value 0)
        self.state = Signal(State)
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        self.pgtbl0 = Signal(64, reset_less=True)
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)
        self.pt3_valid = Signal(reset_less=True)
        self.shift = Signal(6, reset_less=True)
        self.mask_size = Signal(5, reset_less=True)
        self.pgbase = Signal(56, reset_less=True)
        self.pde = Signal(64, reset_less=True)
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
148
class MMU(Elaboratable):
    """Radix MMU

    Supports 4-level trees as in arch 3.0B, but not the
    two-step translation for guests under a hypervisor
    (i.e. there is no gRA -> hRA translation).
    """
    def __init__(self):
        self.l_in = LoadStore1ToMmuType()
        self.l_out = MmuToLoadStore1Type()
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        self.i_out = MmuToIcacheType()

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # NOTE(review): addrsh, mask and finalmask are not yet driven:
        # AddrMaskGen/FinalMaskGen still need to be instantiated and
        # addrsh derived from r.addr >> r.shift (see microwatt
        # mmu.vhdl) -- TODO confirm and hook up
        addrsh = Signal(16)
        mask = Signal(16)
        finalmask = Signal(44)

        r = RegStage("r")
        rin = RegStage("r_in")

        l_in = self.l_in
        l_out = self.l_out
        d_out = self.d_out
        d_in = self.d_in
        i_out = self.i_out

        # Multiplex internal SPR values back to loadstore1,
        # selected by l_in.sprn.
        with m.If(l_in.sprn[9]):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            comb += l_out.sprval.eq(r.pid)

        # latch the next state (VHDL: rin is the variable "v" below)
        sync += r.eq(rin)

        # "v" plays the role of the VHDL process variable: it is
        # comb-copied from r and then selectively overridden
        # (last assignment wins in the comb domain)
        v = RegStage()
        dcreq = Signal()
        tlb_load = Signal()
        itlb_load = Signal()
        tlbie_req = Signal()
        prtbl_rd = Signal()
        pt_valid = Signal()
        effpid = Signal(32)
        prtable_addr = Signal(64)
        rts = Signal(6)
        mbits = Signal(6)
        pgtable_addr = Signal(64)
        pte = Signal(64)
        tlb_data = Signal(64)
        nonzero = Signal()
        pgtbl = Signal(64)
        perm_ok = Signal()
        rc_ok = Signal()
        addr = Signal(64)

        # per-cycle defaults
        comb += v.eq(r)
        comb += v.valid.eq(0)
        comb += dcreq.eq(0)
        comb += v.done.eq(0)
        comb += v.err.eq(0)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are
        # big-endian, so we need to byte-swap them
        data = byte_reverse(m, "data", d_in.data, 8)

        with m.Switch(r.state):
            with m.Case(State.IDLE):
                # select the cached page-table base for the quadrant
                # addressed (bit 63 selects quadrant 0 vs quadrant 3)
                with m.If(~l_in.addr[63]):
                    comb += pgtbl.eq(r.pgtbl0)
                    comb += pt_valid.eq(r.pt0_valid)
                with m.Else():
                    comb += pgtbl.eq(r.pgtbl3)
                    comb += pt_valid.eq(r.pt3_valid)

                # rts == radix tree size, number of address bits
                # being translated
                comb += rts.eq(Cat(pgtbl[5:8], pgtbl[61:63]))

                # mbits == number of address bits to index top
                # level of tree
                comb += mbits.eq(pgtbl[0:5])

                # set v.shift to rts so that we can use finalmask
                # for the segment check
                comb += v.shift.eq(rts)
                comb += v.mask_size.eq(mbits[0:5])
                # pgbase = pgtbl(55 downto 8) & x"00": low byte zero
                comb += v.pgbase.eq(Cat(Const(0, 8), pgtbl[8:56]))

                with m.If(l_in.valid):
                    comb += v.addr.eq(l_in.addr)
                    comb += v.iside.eq(l_in.iside)
                    comb += v.store.eq(~(l_in.load | l_in.iside))
                    with m.If(l_in.tlbie):
                        # Invalidate all iTLB/dTLB entries for
                        # tlbie with RB[IS] != 0 or RB[AP] != 0,
                        # or for slbia
                        comb += v.inval_all.eq(l_in.slbia
                                               | l_in.addr[11]
                                               | l_in.addr[10]
                                               | l_in.addr[7]
                                               | l_in.addr[6]
                                               | l_in.addr[5])
                        # The RIC field of the tlbie instruction
                        # comes across on the sprn bus as bits 2--3.
                        # RIC=2 flushes process table caches.
                        with m.If(l_in.sprn[3]):
                            comb += v.pt0_valid.eq(0)
                            comb += v.pt3_valid.eq(0)
                        comb += v.state.eq(State.DO_TLBIE)
                    with m.Else():
                        comb += v.valid.eq(1)
                        with m.If(~pt_valid):
                            # need to fetch process table entry:
                            # set v.shift so we can use finalmask
                            # for generating the process table
                            # entry address
                            comb += v.shift.eq(r.prtbl[0:5])
                            comb += v.state.eq(State.PROC_TBL_READ)
                        with m.Elif(mbits == 0):
                            # Use RPDS = 0 to disable radix
                            # tree walks
                            comb += v.state.eq(State.RADIX_FINISH)
                            comb += v.invalid.eq(1)
                        with m.Else():
                            comb += v.state.eq(State.SEGMENT_CHECK)

                with m.If(l_in.mtspr):
                    # Move to PID needs to invalidate L1 TLBs
                    # and cached pgtbl0 value. Move to PRTBL
                    # does that plus invalidating the cached
                    # pgtbl3 value as well.
                    with m.If(~l_in.sprn[9]):
                        comb += v.pid.eq(l_in.rs[0:32])
                    with m.Else():
                        comb += v.prtbl.eq(l_in.rs)
                        comb += v.pt3_valid.eq(0)
                    comb += v.pt0_valid.eq(0)
                    comb += v.inval_all.eq(1)
                    comb += v.state.eq(State.DO_TLBIE)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                comb += dcreq.eq(1)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done):
                    # cache the process-table entry for this quadrant
                    with m.If(r.addr[63]):
                        comb += v.pgtbl3.eq(data)
                        comb += v.pt3_valid.eq(1)
                    with m.Else():
                        comb += v.pgtbl0.eq(data)
                        comb += v.pt0_valid.eq(1)

                    # rts == radix tree size, # address bits
                    # being translated
                    comb += rts.eq(Cat(data[5:8], data[61:63]))

                    # mbits == # address bits to index
                    # top level of tree
                    comb += mbits.eq(data[0:5])

                    # set v.shift to rts so that we can use
                    # finalmask for the segment check
                    comb += v.shift.eq(rts)
                    comb += v.mask_size.eq(mbits[0:5])
                    # pgbase = data(55 downto 8) & x"00"
                    comb += v.pgbase.eq(Cat(Const(0, 8), data[8:56]))

                    with m.If(mbits == 0):
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)
                    with m.Else():
                        comb += v.state.eq(State.SEGMENT_CHECK)

                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                comb += mbits.eq(r.mask_size)
                comb += v.shift.eq(r.shift + (31 - 12) - mbits)
                # address bits 61..31 must be zero after masking off
                # the bits covered by the top-level index
                comb += nonzero.eq((r.addr[31:62]
                                    & ~finalmask[0:31]).bool())
                with m.If((r.addr[63] != r.addr[62]) | nonzero):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.segerror.eq(1)
                with m.Elif((mbits < 5) | (mbits > 16)
                            | (mbits > (r.shift + (31 - 12)))):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.state.eq(State.RADIX_LOOKUP)

            with m.Case(State.RADIX_LOOKUP):
                comb += dcreq.eq(1)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                with m.If(d_in.done):
                    comb += v.pde.eq(data)
                    # test valid bit
                    with m.If(data[63]):
                        # test leaf bit
                        with m.If(data[62]):
                            # check permissions and RC bits
                            comb += perm_ok.eq(0)
                            with m.If(r.priv | ~data[3]):
                                with m.If(~r.iside):
                                    # load needs R, store needs R+W
                                    # (VHDL: data(1) or (data(2)
                                    #  and not r.store))
                                    comb += perm_ok.eq(
                                        data[1]
                                        | (data[2] & ~r.store))
                                with m.Else():
                                    # no IAMR, so no KUEP support
                                    # for now deny execute
                                    # permission if cache inhibited
                                    comb += perm_ok.eq(
                                        data[0] & ~data[5])

                            comb += rc_ok.eq(
                                data[8]
                                & (data[7] | ~r.store))

                            with m.If(perm_ok & rc_ok):
                                comb += v.state.eq(
                                    State.RADIX_LOAD_TLB)
                            with m.Else():
                                comb += v.state.eq(
                                    State.RADIX_FINISH)
                                comb += v.perm_err.eq(~perm_ok)
                                # permission error takes precedence
                                # over RC error
                                comb += v.rc_error.eq(perm_ok)
                        with m.Else():
                            # next level of the radix tree
                            comb += mbits.eq(data[0:5])
                            with m.If((mbits < 5) | (mbits > 16)
                                      | (mbits > r.shift)):
                                comb += v.state.eq(
                                    State.RADIX_FINISH)
                                comb += v.badtree.eq(1)
                            with m.Else():
                                comb += v.shift.eq(r.shift - mbits)
                                comb += v.mask_size.eq(mbits[0:5])
                                # pgbase = data(55 downto 8) & x"00"
                                comb += v.pgbase.eq(
                                    Cat(Const(0, 8), data[8:56]))
                                comb += v.state.eq(
                                    State.RADIX_LOOKUP)
                    with m.Else():
                        # non-present PTE, generate a DSI
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)

                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(~r.iside):
                    comb += dcreq.eq(1)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                comb += v.state.eq(State.IDLE)

        # flag completion (done or err) to loadstore1: iside TLB loads
        # complete immediately, dside waits for the dcache
        with m.If((v.state == State.RADIX_FINISH)
                  | ((v.state == State.RADIX_LOAD_TLB) & r.iside)):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # quadrant 3 (kernel) uses PID 0
        with m.If(r.addr[63]):
            comb += effpid.eq(Const(0x00000000, 32))
        with m.Else():
            comb += effpid.eq(r.pid)

        # prtable_addr = x"00" & r.prtbl(55 downto 36) &
        #   ((r.prtbl(35 downto 12) and not finalmask(23 downto 0)) or
        #    (effpid(31 downto 8) and finalmask(23 downto 0))) &
        #   effpid(7 downto 0) & "0000"
        comb += prtable_addr.eq(Cat(
            Const(0b0000, 4),
            effpid[0:8],
            (
                (r.prtbl[12:36] & ~finalmask[0:24])
                | (effpid[8:32] & finalmask[0:24])
            ),
            r.prtbl[36:56]
        ))

        # pgtable_addr = x"00" & r.pgbase(55 downto 19) &
        #   ((r.pgbase(18 downto 3) and not mask) or
        #    (addrsh and mask)) & "000"
        comb += pgtable_addr.eq(Cat(
            Const(0b000, 3),
            (
                (r.pgbase[3:19] & ~mask)
                | (addrsh & mask)
            ),
            r.pgbase[19:56]
        ))

        # pte = x"00" &
        #   ((r.pde(55 downto 12) and not finalmask) or
        #    (r.addr(55 downto 12) and finalmask)) &
        #   r.pde(11 downto 0)
        comb += pte.eq(Cat(
            r.pde[0:12],
            (
                (r.pde[12:56] & ~finalmask)
                | (r.addr[12:56] & finalmask)
            ),
        ))

        # update registers
        comb += rin.eq(v)

        # drive outputs: address/data for the dcache request
        with m.If(tlbie_req):
            comb += addr.eq(r.addr)
            comb += tlb_data.eq(0)
        with m.Elif(tlb_load):
            # addr = r.addr(63 downto 12) & x"000"
            comb += addr.eq(Cat(Const(0, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)
        with m.Elif(prtbl_rd):
            comb += addr.eq(prtable_addr)
            comb += tlb_data.eq(0)
        with m.Else():
            comb += addr.eq(pgtable_addr)
            comb += tlb_data.eq(0)

        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        comb += i_out.tlbld.eq(itlb_load)
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m
796
797
def mmu_sim():
    """Simulation stimulus generator (work in progress).

    NOTE(review): this body appears to be copied from a register-file
    or memory test: it drives `wp` (write port) and `rp` (read port),
    but neither name is defined in this module and MMU exposes no such
    ports -- presumably a placeholder to be rewritten against the
    MMU's l_in/l_out interface.  `Settle` is also not imported here.
    TODO confirm intended stimulus.
    """
    # write value 2 to address 1, then read it back
    yield wp.waddr.eq(1)
    yield wp.data_i.eq(2)
    yield wp.wen.eq(1)
    yield
    yield wp.wen.eq(0)
    yield rp.ren.eq(1)
    yield rp.raddr.eq(1)
    yield Settle()
    data = yield rp.data_o
    print(data)
    assert data == 2
    yield

    # simultaneous write (addr 5 <- 6) and read of the same address
    yield wp.waddr.eq(5)
    yield rp.raddr.eq(5)
    yield rp.ren.eq(1)
    yield wp.wen.eq(1)
    yield wp.data_i.eq(6)
    yield Settle()
    data = yield rp.data_o
    print(data)
    assert data == 6
    yield
    # deassert both ports; read data is expected to drop to 0
    yield wp.wen.eq(0)
    yield rp.ren.eq(0)
    yield Settle()
    data = yield rp.data_o
    print(data)
    assert data == 0
    yield
    data = yield rp.data_o
    print(data)
def test_mmu():
    """Convert the MMU to RTLIL and run the (placeholder) simulation.

    NOTE(review): MMU does not define read_port()/write_port() or
    ports(), and `rtlil` and `run_simulation` are not imported in this
    module -- this harness is a work-in-progress skeleton copied from
    another test.  TODO confirm intended test setup.
    """
    dut = MMU()
    rp = dut.read_port()
    wp = dut.write_port()
    # emit RTLIL for inspection/synthesis checking
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_mmu.il", "w") as f:
        f.write(vl)

    run_simulation(dut, mmu_sim(), vcd_name='test_mmu.vcd')

if __name__ == '__main__':
    test_mmu()