# based on Anton Blanchard's microwatt mmu.vhdl
from enum import Enum, unique

from nmigen import (Module, Signal, Elaboratable, Mux, Cat, Repl,
                    signed, Const)
from nmigen.cli import main
from nmigen.iocontrol import RecordObject

from experiment.mem_types import (LoadStore1ToMmuType,
                                  MmuToLoadStore1Type,
                                  MmuToDcacheType, DcacheToMmuType,
                                  MmuToIcacheType)
19 # library ieee; use ieee.std_logic_1164.all;
20 # use ieee.numeric_std.all;
22 # library work; use work.common.all;
26 # -- Supports 4-level trees as in arch 3.0B, but not the
27 # -- two-step translation
28 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
43 # architecture behave of mmu is
58 # type reg_stage_t is record
59 # -- latched request from loadstore1
64 # addr : std_ulogic_vector(63 downto 0);
65 # inval_all : std_ulogic;
67 # prtbl : std_ulogic_vector(63 downto 0);
68 # pid : std_ulogic_vector(31 downto 0);
73 # pgtbl0 : std_ulogic_vector(63 downto 0);
74 # pt0_valid : std_ulogic;
75 # pgtbl3 : std_ulogic_vector(63 downto 0);
76 # pt3_valid : std_ulogic;
77 # shift : unsigned(5 downto 0);
78 # mask_size : unsigned(4 downto 0);
79 # pgbase : std_ulogic_vector(55 downto 0);
80 # pde : std_ulogic_vector(63 downto 0);
81 # invalid : std_ulogic;
82 # badtree : std_ulogic;
83 # segerror : std_ulogic;
84 # perm_err : std_ulogic;
85 # rc_error : std_ulogic;
class RegStage(RecordObject):
    """Per-cycle MMU state, latched between clock cycles.

    Translation of ``reg_stage_t`` from the VHDL original; the field
    widths mirror the ``std_ulogic_vector`` ranges given there.
    """
    def __init__(self, name=None):
        # BUG FIX: the original called super().__init__(self, name=name),
        # which passes self twice (bound method already supplies it).
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)
        self.pid = Signal(32, reset_less=True)
        # internal state
        # NOTE(review): State is not defined/imported in this capture —
        # presumably an Enum declared elsewhere in the file; also note
        # this stores a plain enum value, not a Signal — verify upstream.
        self.state = State.IDLE
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        # cached page-table root entries (radix levels 0 and 3)
        self.pgtbl0 = Signal(64, reset_less=True)
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)
        self.pt3_valid = Signal(reset_less=True)
        # radix walk bookkeeping
        self.shift = Signal(6, reset_less=True)
        self.mask_size = Signal(5, reset_less=True)
        self.pgbase = Signal(56, reset_less=True)
        self.pde = Signal(64, reset_less=True)
        # error flags reported back to loadstore1
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
122 # Supports 4-level trees as in arch 3.0B, but not the
123 # two-step translation for guests under a hypervisor
124 # (i.e. there is no gRA -> hRA translation).
class MMU(Elaboratable):
    """Radix-tree MMU, translated from microwatt ``mmu.vhdl``.

    Supports 4-level trees as in arch 3.0B, but not the two-step
    translation for guests under a hypervisor (i.e. there is no
    gRA -> hRA translation).
    """

    def __init__(self):
        # NOTE(review): the "def __init__" header was missing from this
        # capture; reconstructed — verify against upstream.
        # VHDL ports:
        #   l_in  : in  Loadstore1ToMmuType
        #   l_out : out MmuToLoadstore1Type
        #   d_out : out MmuToDcacheType
        #   d_in  : in  DcacheToMmuType
        #   i_out : out MmuToIcacheType
        self.l_in = LoadStore1ToMmuType()
        self.l_out = MmuToLoadStore1Type()
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        self.i_out = MmuToIcacheType()

        # VHDL: signal addrsh, mask : std_ulogic_vector(15 downto 0);
        #       signal finalmask    : std_ulogic_vector(43 downto 0);
        self.addrsh = Signal(16)
        self.mask = Signal(16)
        self.finalmask = Signal(44)

        # VHDL: signal r, rin : reg_stage_t;
        # NOTE(review): only rin was visible in this capture; r is added
        # per the VHDL comment because elaborate() reads it — verify.
        self.r = RegStage()
        self.rin = RegStage()

    def elaborate(self, platform):
        # BUG FIX: the original fragment had no Module()/return m, and
        # its multi-line f-strings spanned raw line breaks (SyntaxError);
        # both are repaired here.
        m = Module()
        comb = m.d.comb

        l_in = self.l_in
        l_out = self.l_out
        d_out = self.d_out
        r = self.r
        rin = self.rin
        addrsh = self.addrsh
        mask = self.mask
        finalmask = self.finalmask  # kept: used by logic missing from capture

        # Multiplex internal SPR values back to loadstore1,
        # selected by l_in.sprn.
        # VHDL: l_out.sprval <= r.prtbl when l_in.sprn(9) = '1'
        #       else x"00000000" & r.pid;
        with m.If(l_in.sprn[9]):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            # Cat is LSB-first: r.pid occupies the low 32 bits,
            # zeros fill the high 32.
            comb += l_out.sprval.eq(Cat(r.pid, Const(0x00000000, 32)))

        # Debug reports (VHDL "report" statements).
        # NOTE(review): these print() calls execute once at elaboration
        # time, NOT per simulated clock cycle — placeholder translation
        # of the VHDL reports; confirm intent upstream.
        with m.If(rin.valid):
            print(f"MMU got tlb miss for {rin.addr}")

        with m.If(l_out.done):
            print("MMU completing op without error")

        with m.If(l_out.err):
            print(f"MMU completing op with err invalid="
                  f"{l_out.invalid} badtree={l_out.badtree}")

        # NOTE(review): State enum not defined in this capture — verify.
        with m.If(rin.state == State.RADIX_LOOKUP):
            print(f"radix lookup shift={rin.shift} "
                  f"msize={rin.mask_size}")

        with m.If(r.state == State.RADIX_LOOKUP):
            print(f"send load addr={d_out.addr} "
                  f"addrsh={addrsh} mask={mask}")

        return m
236 # -- generate mask for extracting address fields for PTE address
238 # addrmaskgen: process(all)
239 # generate mask for extracting address fields for PTE address
class AddrMaskGen(Elaboratable, MMU):
    """Generates the mask used to extract address fields for the PTE
    address (the VHDL ``addrmaskgen`` process): bits [0:5] are always
    set, and bits [5:16] are set while the bit index is below
    ``mask_size``.
    """
    # VHDL: variable m : std_ulogic_vector(15 downto 0);
    def __init__(self):
        # NOTE(review): the "def __init__" header was missing from this
        # capture; reconstructed — verify against upstream.
        super().__init__()
        self.msk = Signal(16)

    def elaborate(self, platform):
        # NOTE(review): original lines 249-260 (Module/comb/local setup)
        # were missing from this capture; reconstructed.
        m = Module()
        comb = m.d.comb
        mask = self.msk
        r = self.rin  # NOTE(review): source of mask_size — TODO confirm

        # mask_count has to be >= 5, so the low 5 bits are always set.
        # BUG FIX: the Const(0x001F, 16) call was missing its closing
        # parenthesis in the original fragment.
        comb += mask.eq(Const(0x001F, 16))

        # VHDL: for i in 5 to 15 loop
        #           if i < to_integer(r.mask_size) then m(i) := '1';
        for i in range(5, 16):
            with m.If(i < r.mask_size):
                # NOTE(review): the loop body (orig lines 270-276) was
                # missing from this capture; reconstructed from the VHDL
                # comment above — verify.
                comb += mask[i].eq(1)

        return m
278 # -- generate mask for extracting address bits to go in
279 # -- TLB entry in order to support pages > 4kB
280 # finalmaskgen: process(all)
281 # generate mask for extracting address bits to go in
282 # TLB entry in order to support pages > 4kB
class FinalMaskGen(Elaboratable, MMU):
    """Generates the mask for extracting address bits that go into the
    TLB entry, in order to support pages > 4kB (the VHDL
    ``finalmaskgen`` process): bit i is set while i < ``shift``.
    """
    # VHDL: variable m : std_ulogic_vector(43 downto 0);
    def __init__(self):
        # NOTE(review): the "def __init__" header was missing from this
        # capture; reconstructed — verify against upstream.
        super().__init__()
        self.msk = Signal(44)

    def elaborate(self, platform):
        # NOTE(review): original lines 291-302 (Module/comb/local setup)
        # were missing from this capture; reconstructed.
        m = Module()
        comb = m.d.comb
        mask = self.msk
        r = self.rin  # NOTE(review): source of shift — TODO confirm

        # VHDL: for i in 0 to 43 loop
        #           if i < to_integer(r.shift) then m(i) := '1';
        for i in range(44):
            with m.If(i < r.shift):
                comb += mask[i].eq(1)

        # BUG FIX: the original read "self.finalmask(mask)", which CALLS
        # the Signal instead of assigning to it; an .eq() assignment is
        # what the surrounding code intends.
        comb += self.finalmask.eq(mask)

        return m
315 # mmu_1: process(all)
316 class MMU1(Elaboratable
):
318 # variable v : reg_stage_t;
319 # variable dcreq : std_ulogic;
320 # variable tlb_load : std_ulogic;
321 # variable itlb_load : std_ulogic;
322 # variable tlbie_req : std_ulogic;
323 # variable prtbl_rd : std_ulogic;
324 # variable pt_valid : std_ulogic;
325 # variable effpid : std_ulogic_vector(31 downto 0);
326 # variable prtable_addr : std_ulogic_vector(63 downto 0);
327 # variable rts : unsigned(5 downto 0);
328 # variable mbits : unsigned(5 downto 0);
329 # variable pgtable_addr : std_ulogic_vector(63 downto 0);
330 # variable pte : std_ulogic_vector(63 downto 0);
331 # variable tlb_data : std_ulogic_vector(63 downto 0);
332 # variable nonzero : std_ulogic;
333 # variable pgtbl : std_ulogic_vector(63 downto 0);
334 # variable perm_ok : std_ulogic;
335 # variable rc_ok : std_ulogic;
336 # variable addr : std_ulogic_vector(63 downto 0);
337 # variable data : std_ulogic_vector(63 downto 0);
340 self
.tlb_load
= Signal()
341 self
.itlb_load
= Signal()
342 self
.tlbie_req
= Signal()
343 self
.prtbl_rd
= Signal()
344 self
.pt_valid
= Signal()
345 self
.effpid
= Signal(32)
346 self
.prtable_addr
= Signal(64)
348 self
.mbits
= Signal(6)
349 self
.pgtable_addr
= Signal(64)
350 self
.pte
= Signal(64)
351 self
.tlb_data
= Signal(64)
352 self
.nonzero
= Signal()
353 self
.pgtbl
= Signal(64)
354 self
.perm_ok
= Signal()
355 self
.rc_ok
= Signal()
356 self
.addr
= Signal(64)
357 self
.data
= Signal(64)
360 def elaborate(self
, platform
):
377 tlb_load
= self
.tlb_load
378 itlb_load
= self
.itlb_load
379 tlbie_req
= self
.tlbie_req
380 prtbl_rd
= self
.prtbl_rd
381 pt_valid
= self
.pt_valid
383 prtable_addr
= self
.prtable_addr
386 pgtable_addr
= self
.pgtable_addr
388 tlb_data
= self
.tlb_data
389 nonzero
= self
.nonzero
391 perm_ok
= self
.perm_ok
409 # v.inval_all := '0';
413 comb
+= v
.valid
.eq(0)
417 comb
+= v
.invalid
.eq(0)
418 comb
+= v
.badtree
.eq(0)
419 comb
+= v
.segerror
.eq(0)
420 comb
+= v
.perm_err
.eq(0)
421 comb
+= v
.rc_error
.eq(0)
422 comb
+= tlb_load
.eq(0)
423 comb
+= itlb_load
.eq(0)
424 comb
+= tlbie_req
.eq(0)
425 comb
+= v
.inval_all
.eq(0)
426 comb
+= prtbl_rd
.eq(0)
429 # -- Radix tree data structures in memory are
430 # -- big-endian, so we need to byte-swap them
431 # for i in 0 to 7 loop
432 # Radix tree data structures in memory are
433 # big-endian, so we need to byte-swap them
435 # data(i * 8 + 7 downto i * 8) := d_in.data(
436 # (7 - i) * 8 + 7 downto (7 - i) * 8);
437 comb
+= data
[i
* 8:i
* 8 + 7 + 1].eq(
439 (7 - i
) * 8:(7 - i
) * 8 + 7 + 1
444 with m
.Switch(r
.state
):
446 with m
.Case(State
.IDLE
):
447 # if l_in.addr(63) = '0' then
449 # pt_valid := r.pt0_valid;
450 with m
.If(~l_in
.addr
[63]):
451 comb
+= pgtbl
.eq(r
.pgtbl0
)
452 comb
+= pt_valid
.eq(r
.pt0_valid
)
455 # pt_valid := r.pt3_valid;
457 comb
+= pgtbl
.eq(r
.pt3_valid
)
458 comb
+= pt_valid
.eq(r
.pt3_valid
)
461 # -- rts == radix tree size, # address bits being
463 # rts := unsigned('0' & pgtbl(62 downto 61) &
464 # pgtbl(7 downto 5));
465 # rts == radix tree size, number of address bits
475 # -- mbits == # address bits to index top level
477 # mbits := unsigned('0' & pgtbl(4 downto 0));
478 # mbits == number of address bits to index top
481 Cat(pgtbl
[0:5], Const(0b0, 1))
483 # -- set v.shift to rts so that we can use finalmask
484 # -- for the segment check
486 # v.mask_size := mbits(4 downto 0);
487 # v.pgbase := pgtbl(55 downto 8) & x"00";
488 # set v.shift to rts so that we can use finalmask
489 # for the segment check
490 comb
+= v
.shift
.eq(rts
)
491 comb
+= v
.mask_size
.eq(mbits
[0:5])
492 comb
+= v
.pgbase
.eq(Cat(
497 # if l_in.valid = '1' then
498 with m
.If(l_in
.valid
):
499 # v.addr := l_in.addr;
500 # v.iside := l_in.iside;
501 # v.store := not (l_in.load or l_in.iside);
502 # v.priv := l_in.priv;
503 comb
+= v
.addr
.eq(l_in
.addr
504 comb
+= v
.iside
.eq(l_in
.iside
)
505 comb
+= v
.store
.eq(~
(l_in
.load | l_in
.iside
))
506 # if l_in.tlbie = '1' then
507 with m
.If(l_in
.tlbie
):
508 # -- Invalidate all iTLB/dTLB entries for
509 # -- tlbie with RB[IS] != 0 or RB[AP] != 0,
511 # v.inval_all := l_in.slbia or l_in.addr(11)
512 # or l_in.addr(10) or
513 # l_in.addr(7) or l_in.addr(6)
515 # Invalidate all iTLB/dTLB entries for
516 # tlbie with RB[IS] != 0 or RB[AP] != 0,
518 comb
+= v
.inval_all
.eq(l_in
.slbia
525 # -- The RIC field of the tlbie instruction
526 # -- comes across on the sprn bus as bits 2--3.
527 # -- RIC=2 flushes process table caches.
528 # if l_in.sprn(3) = '1' then
529 # The RIC field of the tlbie instruction
530 # comes across on the sprn bus as bits 2--3.
531 # RIC=2 flushes process table caches.
532 with m
.If(l_in
.sprn
[3]):
533 # v.pt0_valid := '0';
534 # v.pt3_valid := '0';
535 comb
+= v
.pt0_valid
.eq(0)
536 comb
+= v
.pt3_valid
.eq(0)
538 # v.state := DO_TLBIE;
539 comb
+= v
.state
.eq(State
.DO_TLBIE
)
543 comb
+= v
.valid
.eq(1)
544 # if pt_valid = '0' then
545 with m
.If(~pt_valid
):
546 # -- need to fetch process table entry
547 # -- set v.shift so we can use finalmask
548 # -- for generating the process table
550 # v.shift := unsigned('0' & r.prtbl(
552 # v.state := PROC_TBL_READ;
553 # need to fetch process table entry
554 # set v.shift so we can use finalmask
555 # for generating the process table
557 comb
+= v
.shift
.eq((Cat(
561 comb
+= v
.state
.eq(State
.PROC_TBL_READ
)
563 # elsif mbits = 0 then
565 # -- Use RPDS = 0 to disable radix
567 # v.state := RADIX_FINISH;
569 # Use RPDS = 0 to disable radix
571 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
572 comb
+= v
.invalid
.eq(1)
575 # v.state := SEGMENT_CHECK;
576 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
581 # if l_in.mtspr = '1' then
582 with m
.If(l_in
.mtspr
):
583 # -- Move to PID needs to invalidate L1 TLBs
584 # -- and cached pgtbl0 value. Move to PRTBL
585 # -- does that plus invalidating the cached
586 # -- pgtbl3 value as well.
587 # if l_in.sprn(9) = '0' then
588 # Move to PID needs to invalidate L1 TLBs
589 # and cached pgtbl0 value. Move to PRTBL
590 # does that plus invalidating the cached
591 # pgtbl3 value as well.
592 with m
.If(~l_in
.sprn
[9]):
593 # v.pid := l_in.rs(31 downto 0);
594 comb
+= v
.pid
.eq(l_in
.rs
[0:32])
597 # v.prtbl := l_in.rs;
598 # v.pt3_valid := '0';
599 comb
+= v
.prtbl
.eq(l_in
.rs
)
600 comb
+= v
.pt3_valid
.eq(0)
603 # v.pt0_valid := '0';
604 # v.inval_all := '1';
605 # v.state := DO_TLBIE;
606 comb
+= v
.pt0_valid
.eq(0)
607 comb
+= v
.inval_all
.eq(1)
608 comb
+= v
.state
.eq(State
.DO_TLBIE
)
612 with m
.Case(State
.DO_TLBIE
):
615 # v.state := TLB_WAIT;
617 comb
+= tlbie_req
.eq(1)
618 comb
+= v
.state
.eq(State
.TLB_WAIT
)
621 with m
.Case(State
.TLB_WAIT
):
622 # if d_in.done = '1' then
623 with m
.If(d_in
.done
):
624 # v.state := RADIX_FINISH;
625 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
628 # when PROC_TBL_READ =>
629 with m
.Case(State
.PROC_TBL_READ
):
632 # v.state := PROC_TBL_WAIT;
634 comb
+= prtbl_rd
.eq(1)
635 comb
+= v
.state
.eq(State
.PROC_TBL_WAIT
)
637 # when PROC_TBL_WAIT =>
638 with m
.Case(State
.PROC_TBL_WAIT
):
639 # if d_in.done = '1' then
640 with m
.If(d_in
.done
):
641 # if r.addr(63) = '1' then
642 with m
.If(r
.addr
[63]):
644 # v.pt3_valid := '1';
645 comb
+= v
.pgtbl3
.eq(data
)
646 comb
+= v
.pt3_valid
.eq(1)
650 # v.pt0_valid := '1';
651 comb
+= v
.pgtbl0
.eq(data
)
652 comb
+= v
.pt0_valid
.eq(1)
654 # -- rts == radix tree size, # address bits
655 # -- being translated
656 # rts := unsigned('0' & data(62 downto 61) &
658 # rts == radix tree size, # address bits
670 # -- mbits == # address bits to index
671 # -- top level of tree
672 # mbits := unsigned('0' & data(4 downto 0));
673 # mbits == # address bits to index
681 # -- set v.shift to rts so that we can use
682 # -- finalmask for the segment check
684 # v.mask_size := mbits(4 downto 0);
685 # v.pgbase := data(55 downto 8) & x"00";
686 # set v.shift to rts so that we can use
687 # finalmask for the segment check
688 comb
+= v
.shift
.eq(rts
)
689 comb
+= v
.mask_size
.eq(mbits
[0:5])
699 # v.state := RADIX_FINISH;
701 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
702 comb
+= v
.invalid
.eq(1)
704 # v.state := SEGMENT_CHECK;
705 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
709 # if d_in.err = '1' then
711 # v.state := RADIX_FINISH;
713 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
714 comb
+= v
.badtree
.eq(1)
717 # when SEGMENT_CHECK =>
718 with m
.Case(State
.SEGMENT_CHECK
):
719 # mbits := '0' & r.mask_size;
720 # v.shift := r.shift + (31 - 12) - mbits;
721 # nonzero := or(r.addr(61 downto 31) and
722 # not finalmask(30 downto 0));
729 comb
+= v
.shift
.eq(r
.shift
+ (31 -12) - mbits
)
731 r
.addr
[31:62] & ~finalmask
[0:31]
733 # if r.addr(63) /= r.addr(62) or nonzero = '1' then
734 # v.state := RADIX_FINISH;
736 with m
.If((r
.addr
[63] != r
.addr
[62])
738 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
739 comb
+= v
.segerror
.eq(1)
740 # elsif mbits < 5 or mbits > 16 or mbits
741 # > (r.shift + (31 - 12)) then
742 # v.state := RADIX_FINISH;
744 with m
.If((mbits
< 5) |
(mbits
> 16)
745 |
(mbits
> (r
.shift
+ (31-12)))):
746 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
747 comb
+= v
.badtree
.eq(1)
749 # v.state := RADIX_LOOKUP;
751 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
754 # when RADIX_LOOKUP =>
755 with m
.Case(State
.RADIX_LOOKUP
):
757 # v.state := RADIX_READ_WAIT;
759 comb
+= v
.state
.eq(State
.RADIX_READ_WAIT
)
761 # when RADIX_READ_WAIT =>
762 with m
.Case(State
.RADIX_READ_WAIT
):
763 # if d_in.done = '1' then
764 with m
.If(d_in
.done
):
766 comb
+= v
.pde
.eq(data
)
768 # if data(63) = '1' then
772 # if data(62) = '1' then
775 # -- check permissions and RC bits
777 comb
+= perm_ok
.eq(0)
778 # if r.priv = '1' or data(3) = '0' then
779 with m
.If(r
.priv | ~data
[3])):
780 # if r.iside = '0' then
781 # perm_ok := data(1) or (data(2)
790 # -- no IAMR, so no KUEP support
791 # -- for now deny execute
792 # -- permission if cache inhibited
794 # data(0) and not data(5);
795 # no IAMR, so no KUEP support
796 # for now deny execute
797 # permission if cache inhibited
804 # rc_ok := data(8) and (data(7) or
808 (data
[7] |
(~r
.store
))
810 # if perm_ok = '1' and rc_ok = '1' then
811 # v.state := RADIX_LOAD_TLB;
812 with m
.If(perm_ok
& rc_ok
):
818 # v.state := RADIX_FINISH;
819 # v.perm_err := not perm_ok;
820 # -- permission error takes precedence
822 # v.rc_error := perm_ok;
826 comb
+= v
.perm_err
.eq(~perm_ok
)
827 # permission error takes precedence
829 comb
+= v
.rc_error
.eq(perm_ok
)
833 # mbits := unsigned('0' &
841 # if mbits < 5 or mbits > 16 or
842 # mbits > r.shift then
843 # v.state := RADIX_FINISH;
845 with m
.If((mbits
< 5) |
(mbits
> 16) |
850 comb
+= v
.badtree
.eq(1)
853 # v.shift := v.shift - mbits;
854 # v.mask_size := mbits(4 downto 0);
855 # v.pgbase := data(55 downto 8)
857 # v.state := RADIX_LOOKUP;
858 comb
+= v
.shift
.eq(v
.shif
- mbits
)
859 comb
+= v
.mask_size
.eq(mbits
[0:5])
873 # -- non-present PTE, generate a DSI
874 # v.state := RADIX_FINISH;
876 # non-present PTE, generate a DSI
877 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
878 comb
+= v
.invalid
.eq(1)
882 # if d_in.err = '1' then
884 # v.state := RADIX_FINISH;
886 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
887 comb
+= v
.badtree
.eq(1)
890 # when RADIX_LOAD_TLB =>
891 with m
.Case(State
.RADIX_LOAD_TLB
):
893 comb
+= tlb_load
.eq(1)
894 # if r.iside = '0' then
897 # v.state := TLB_WAIT;
899 comb
+= v
.state
.eq(State
.TLB_WAIT
)
904 comb
+= itlb_load
.eq(1)
905 comb
+= v
.state
.eq(State
.IDLE
)
908 # when RADIX_FINISH =>
910 with m
.Case(State
.RADIX_FINISH
):
912 comb
+= v
.state
.eq(State
.IDLE
)
915 # if v.state = RADIX_FINISH or (v.state = RADIX_LOAD_TLB
916 # and r.iside = '1') then
917 with m
.If(v
.state
== State
.RADIX_FINISH
918 |
(v
.state
== State
.RADIX_LOAD_TLB
& r
.iside
)
920 # v.err := v.invalid or v.badtree or v.segerror
921 # or v.perm_err or v.rc_error;
922 # v.done := not v.err;
923 comb
+= v
.err
.eq(v
.invalid | v
.badtree | v
.segerror
924 | v
.perm_err | v
.rc_error
)
925 comb
+= v
.done
.eq(~v
.err
)
928 # if r.addr(63) = '1' then
929 with m
.If(r
.addr
[63]):
930 # effpid := x"00000000";
931 comb
+= effpid
.eq(Const(0x00000000, 32))
935 comb
+= effpid
.eq(r
.pid
)
937 # prtable_addr := x"00" & r.prtbl(55 downto 36) &
938 # ((r.prtbl(35 downto 12) and not finalmask(
939 # 23 downto 0)) or (effpid(31 downto 8) and
940 # finalmask(23 downto 0))) & effpid(7 downto 0)
942 comb
+= prtable_addr
.eq(
946 Cat(Const(0b0000, 4), effpid
[0:8]),
948 (r
.prtble
[12:36] & ~finalmask
[0:24])
949 | effpid
[8:32] & finalmask
[0:24]
958 # pgtable_addr := x"00" & r.pgbase(55 downto 19) &
959 # ((r.pgbase(18 downto 3) and not mask) or
960 # (addrsh and mask)) & "000";
961 comb
+= pgtable_addr
.eq(
967 (r
.pgbase
[3:19] & ~mask
)
977 # pte := x"00" & ((r.pde(55 downto 12) and not finalmask) or
978 # (r.addr(55 downto 12) and finalmask)) & r.pde(11 downto 0);
984 (r
.pde
[12:56] & ~finalmask
)
985 |
(r
.addr
[12:56] & finalmask
)
992 # -- update registers
998 # if tlbie_req = '1' then
1000 with m
.If(tlbie_req
):
1002 # tlb_data := (others => '0');
1003 comb
+= addr
.eq(r
.addr
)
1004 comb
+= tlb_data
.eq(0)
1005 # elsif tlb_load = '1' then
1006 with m
.If(tlb_load
):
1007 # addr := r.addr(63 downto 12) & x"000";
1009 comb
+= addr
.eq(Cat(Const(0x000, 12), r
.addr
[12:64]))
1010 comb
+= tlb_data
.eq(pte
)
1011 # elsif prtbl_rd = '1' then
1012 with m
.If(prtbl_rd
):
1013 # addr := prtable_addr;
1014 # tlb_data := (others => '0');
1015 comb
+= addr
.eq(prtable_addr
)
1016 comb
+= tlb_data
.eq(0)
1019 # addr := pgtable_addr;
1020 # tlb_data := (others => '0');
1021 comb
+= addr
.eq(pgtable_addr
)
1022 comb
+= tlb_data
.eq(0)
1025 # l_out.done <= r.done;
1026 # l_out.err <= r.err;
1027 # l_out.invalid <= r.invalid;
1028 # l_out.badtree <= r.badtree;
1029 # l_out.segerr <= r.segerror;
1030 # l_out.perm_error <= r.perm_err;
1031 # l_out.rc_error <= r.rc_error;
1032 comb
+= l_out
.done
.eq(r
.done
)
1033 comb
+= l_out
.err
.eq(r
.err
)
1034 comb
+= l_out
.invalid
.eq(r
.invalid
)
1035 comb
+= l_out
.badtree
.eq(r
.badtree
)
1036 comb
+= l_out
.segerr
.eq(r
.segerror
)
1037 comb
+= l_out
.perm_error
.eq(r
.perm_err
)
1038 comb
+= l_out
.rc_error
.eq(r
.rc_error
)
1040 # d_out.valid <= dcreq;
1041 # d_out.tlbie <= tlbie_req;
1042 # d_out.doall <= r.inval_all;
1043 # d_out.tlbld <= tlb_load;
1044 # d_out.addr <= addr;
1045 # d_out.pte <= tlb_data;
1046 comb
+= d_out
.valid
.eq(dcreq
)
1047 comb
+= d_out
.tlbie
.eq(tlbie_req
)
1048 comb
+= d_out
.doall
.eq(r
.inval_all
)
1049 comb
+= d_out
.tlbld
.eeq(tlb_load
)
1050 comb
+= d_out
.addr
.eq(addr
)
1051 comb
+= d_out
.pte
.eq(tlb_data
)
1053 # i_out.tlbld <= itlb_load;
1054 # i_out.tlbie <= tlbie_req;
1055 # i_out.doall <= r.inval_all;
1056 # i_out.addr <= addr;
1057 # i_out.pte <= tlb_data;
1058 comb
+= i_out
.tlbld
.eq(itlb_load
)
1059 comb
+= i_out
.tblie
.eq(tlbie_req
)
1060 comb
+= i_out
.doall
.eq(r
.inval_all
)
1061 comb
+= i_out
.addr
.eq(addr
)
1062 comb
+= i_out
.pte
.eq(tlb_data
)
1069 yield wp
.waddr
.eq(1)
1070 yield wp
.data_i
.eq(2)
1075 yield rp
.raddr
.eq(1)
1077 data
= yield rp
.data_o
1082 yield wp
.waddr
.eq(5)
1083 yield rp
.raddr
.eq(5)
1086 yield wp
.data_i
.eq(6)
1088 data
= yield rp
.data_o
1095 data
= yield rp
.data_o
1099 data
= yield rp
.data_o
1104 rp
= dut
.read_port()
1105 wp
= dut
.write_port()
1106 vl
= rtlil
.convert(dut
, ports
=dut
.ports())
1107 with
open("test_mmu.il", "w") as f
:
1110 run_simulation(dut
, mmu_sim(), vcd_name
='test_mmu.vcd')
1112 if __name__
== '__main__':