# based on Anton Blanchard microwatt mmu.vhdl
from enum import Enum, unique

from nmigen import (Module, Signal, Elaboratable, Mux, Cat, Repl, signed)
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
12 # library ieee; use ieee.std_logic_1164.all;
13 # use ieee.numeric_std.all;
15 # library work; use work.common.all;
18 # start from common.vhdl
19 # type Loadstore1ToMmuType is record
27 # sprn : std_ulogic_vector(9 downto 0);
28 # addr : std_ulogic_vector(63 downto 0);
29 # rs : std_ulogic_vector(63 downto 0);
class LoadStore1ToMmuType(RecordObject):
    """Request record from loadstore1 to the MMU.

    Mirrors the VHDL record ``Loadstore1ToMmuType`` from common.vhdl.
    NOTE(review): this chunk of the source is an excerpt — the VHDL
    record declares more fields than the two visible here; confirm the
    full field list against the complete file before relying on it.
    """
    def __init__(self):
        super().__init__()
        # SPR number for mtspr/mfspr-style accesses
        # (VHDL: sprn : std_ulogic_vector(9 downto 0))
        self.sprn = Signal(10)
        # effective address to translate
        # (VHDL: addr : std_ulogic_vector(63 downto 0))
        self.addr = Signal(64)
45 # type MmuToLoadstore1Type is record
48 # invalid : std_ulogic;
49 # badtree : std_ulogic;
50 # segerr : std_ulogic;
51 # perm_error : std_ulogic;
52 # rc_error : std_ulogic;
53 # sprval : std_ulogic_vector(63 downto 0);
class MmuToLoadStore1Type(RecordObject):
    """Response record from the MMU back to loadstore1.

    Mirrors the VHDL record ``MmuToLoadstore1Type`` from common.vhdl.
    """
    def __init__(self):
        super().__init__()
        # done/err are read elsewhere in this file (l_out.done,
        # l_out.err) but fell into a gap of this excerpt; restored here.
        self.done = Signal()        # translation request completed OK
        self.err = Signal()         # translation request failed
        self.invalid = Signal()     # no valid PTE found
        self.badtree = Signal()     # malformed radix tree entry
        self.segerr = Signal()      # segment (quadrant) check failed
        self.perm_error = Signal()  # permission check failed
        self.rc_error = Signal()    # reference/change bits not set
        # SPR value multiplexed back for mfspr
        # (VHDL: sprval : std_ulogic_vector(63 downto 0))
        self.sprval = Signal(64)
67 # type MmuToDcacheType is record
72 # addr : std_ulogic_vector(63 downto 0);
73 # pte : std_ulogic_vector(63 downto 0);
class MmuToDcacheType(RecordObject):
    """Request record from the MMU to the dcache.

    Mirrors the VHDL record ``MmuToDcacheType`` from common.vhdl.
    NOTE(review): only ``addr`` is visible in this excerpt; the other
    fields are restored from their uses later in this file
    (d_out.valid / tlbie / doall / tlbld / pte) — confirm against the
    full file.
    """
    def __init__(self):
        super().__init__()
        self.valid = Signal()   # request valid (dcreq)
        self.tlbie = Signal()   # TLB invalidate request
        self.doall = Signal()   # invalidate all TLB entries
        self.tlbld = Signal()   # TLB load request
        # (VHDL: addr : std_ulogic_vector(63 downto 0))
        self.addr = Signal(64)
        # (VHDL: pte : std_ulogic_vector(63 downto 0))
        self.pte = Signal(64)
85 # type DcacheToMmuType is record
89 # data : std_ulogic_vector(63 downto 0);
class DcacheToMmuType(RecordObject):
    """Response record from the dcache back to the MMU.

    Mirrors the VHDL record ``DcacheToMmuType`` from common.vhdl.
    NOTE(review): only ``data`` is visible in this excerpt; done/err
    are restored from their uses later in this file (d_in.done,
    d_in.err) — confirm against the full file.
    """
    def __init__(self):
        super().__init__()
        self.done = Signal()    # dcache operation completed
        self.err = Signal()     # dcache operation failed
        # (VHDL: data : std_ulogic_vector(63 downto 0))
        self.data = Signal(64)
100 # type MmuToIcacheType is record
101 # tlbld : std_ulogic;
102 # tlbie : std_ulogic;
103 # doall : std_ulogic;
104 # addr : std_ulogic_vector(63 downto 0);
105 # pte : std_ulogic_vector(63 downto 0);
class MmuToIcacheType(RecordObject):
    """Request record from the MMU to the icache TLB.

    Mirrors the VHDL record ``MmuToIcacheType`` from common.vhdl; all
    five fields of the VHDL record are present.
    """
    def __init__(self):
        super().__init__()
        self.tlbld = Signal()   # iTLB load request
        self.tlbie = Signal()   # iTLB invalidate request
        self.doall = Signal()   # invalidate all iTLB entries
        # (VHDL: addr : std_ulogic_vector(63 downto 0))
        self.addr = Signal(64)
        # (VHDL: pte : std_ulogic_vector(63 downto 0))
        self.pte = Signal(64)
114 # end from common.vhdl
122 # -- Supports 4-level trees as in arch 3.0B, but not the
123 # -- two-step translation
124 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
139 # architecture behave of mmu is
154 # type reg_stage_t is record
155 # -- latched request from loadstore1
156 # valid : std_ulogic;
157 # iside : std_ulogic;
158 # store : std_ulogic;
160 # addr : std_ulogic_vector(63 downto 0);
161 # inval_all : std_ulogic;
163 # prtbl : std_ulogic_vector(63 downto 0);
164 # pid : std_ulogic_vector(31 downto 0);
169 # pgtbl0 : std_ulogic_vector(63 downto 0);
170 # pt0_valid : std_ulogic;
171 # pgtbl3 : std_ulogic_vector(63 downto 0);
172 # pt3_valid : std_ulogic;
173 # shift : unsigned(5 downto 0);
174 # mask_size : unsigned(4 downto 0);
175 # pgbase : std_ulogic_vector(55 downto 0);
176 # pde : std_ulogic_vector(63 downto 0);
177 # invalid : std_ulogic;
178 # badtree : std_ulogic;
179 # segerror : std_ulogic;
180 # perm_err : std_ulogic;
181 # rc_error : std_ulogic;
class RegStage(RecordObject):
    """Internal MMU state latched each clock cycle.

    Mirrors the VHDL ``reg_stage_t`` record: the request latched from
    loadstore1, cached SPR/page-table values, radix-walk working state
    and the error flags reported back on completion.
    """
    def __init__(self, name=None):
        # BUG FIX: the original called super().__init__(self, name=name);
        # on a bound call, self is supplied implicitly, so it was being
        # passed twice (as both self and the first positional argument).
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)      # instruction-side request
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)       # privileged access
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)  # invalidate-all request
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)  # process table base
        self.pid = Signal(32, reset_less=True)
        # internal state
        # NOTE(review): State is an Enum defined elsewhere in this file
        # (not visible in this excerpt) — confirm it exists there.
        self.state = State.IDLE
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        self.pgtbl0 = Signal(64, reset_less=True)     # cached quadrant-0 pgtbl entry
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)     # cached quadrant-3 pgtbl entry
        self.pt3_valid = Signal(reset_less=True)
        self.shift = Signal(6, reset_less=True)       # remaining address bits to translate
        self.mask_size = Signal(5, reset_less=True)   # bits indexing current tree level
        self.pgbase = Signal(56, reset_less=True)     # current page-table base address
        self.pde = Signal(64, reset_less=True)        # latest page directory entry
        # error flags for the completed walk
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
218 # Supports 4-level trees as in arch 3.0B, but not the
219 # two-step translation for guests under a hypervisor
220 # (i.e. there is no gRA -> hRA translation).
class MMU(Elaboratable):
    """Radix MMU.

    Supports 4-level trees as in arch 3.0B, but not the two-step
    translation for guests under a hypervisor (i.e. there is no
    gRA -> hRA translation).

    NOTE(review): this excerpt shows only the interface declarations;
    the elaborate() body is incomplete in this chunk of the source.
    """
    def __init__(self):
        # interfaces to loadstore1, dcache and icache
        # (VHDL ports: l_in/l_out, d_out/d_in, i_out)
        self.l_in = LoadStore1ToMmuType()
        self.l_out = MmuToLoadStore1Type()
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        self.i_out = MmuToIcacheType()
        # masks used when forming PTE addresses
        # (VHDL: addrsh/mask 16 bits, finalmask 44 bits)
        self.addrsh = Signal(16)
        self.mask = Signal(16)
        self.finalmask = Signal(44)
        # registered state (VHDL: signal r, rin : reg_stage_t)
        # NOTE(review): the VHDL declares both r and rin; only rin is
        # visible in this excerpt — self.r presumably exists in the
        # full file; confirm.
        self.rin = RegStage()
255 def elaborate(self
, platform
):
256 # -- Multiplex internal SPR values back to loadstore1,
257 # -- selected by l_in.sprn.
258 # Multiplex internal SPR values back to loadstore1,
259 # selected by l_in.sprn.
275 finalmask
= self
.finalmask
280 # l_out.sprval <= r.prtbl when l_in.sprn(9) = '1'
281 with m
.If(l_in
.sprn
[9]):
282 comb
+= l_out
.sprval
.eq(r
.prtbl
)
284 # else x"00000000" & r.pid;
286 comb
+= l_out
.sprval
.eq(Cat(r
.pid
,
287 Const(0x00000000, 8))
289 # if rin.valid = '1' then
290 # report "MMU got tlb miss for "
291 # & to_hstring(rin.addr);
293 with m
.If(rin
.valid
):
294 print(f
"MMU got tlb miss for {rin.addr}")
296 # if l_out.done = '1' then
297 # report "MMU completing op without error";
299 with m
.If(l_out
.done
):
300 print("MMU completing op without error")
302 # if l_out.err = '1' then
303 # report "MMU completing op with err invalid=" &
304 # std_ulogic'image(l_out.invalid) &
305 # " badtree=" & std_ulogic'image(
308 with m
.If(l_out
.err
):
309 print(f
"MMU completing op with err invalid=
310 {l_out.invalid} badtree={l_out.badtree}")
312 # if rin.state = RADIX_LOOKUP then
313 # report "radix lookup shift=" & integer'image(
314 # to_integer(rin.shift)) & " msize=" &
315 # integer'image(to_integer(rin.mask_size));
317 with m
.If(rin
.state
== State
.RADIX_LOOKUP
):
318 print(f
"radix lookup shift={rin.shift}
319 msize={rin.mask_size}")
321 # if r.state = RADIX_LOOKUP then
322 # report "send load addr=" & to_hstring(d_out.addr)
323 # & " addrsh=" & to_hstring(addrsh) &
324 # " mask=" & to_hstring(mask);
326 with m
.If(r
.state
== State
.RADIX_LOOKUP
):
327 print(f
"send load addr={d_out.addr}
328 addrsh={addrsh} mask={mask}")
334 # -- generate mask for extracting address fields for PTE address
336 # addrmaskgen: process(all)
337 # generate mask for extracting address fields for PTE address
class AddrMaskGen(Elaboratable, MMU):
    """Generates the mask for extracting address fields for the PTE
    address (translation of the VHDL ``addrmaskgen`` process).

    NOTE(review): the elaborate() body for this class is incomplete in
    this excerpt of the source.
    """
    def __init__(self):
        # working mask (VHDL: variable m : std_ulogic_vector(15 downto 0))
        self.msk = Signal(16)
346 def elaborate(self
, platform
):
359 # -- mask_count has to be >= 5
361 # mask_count has to be >= 5
362 comb
+= mask
.eq(Const(0x001F, 16)
364 # for i in 5 to 15 loop
365 for i
in range(5,16):
366 # if i < to_integer(r.mask_size) then
367 with m
.If(i
< r
.mask_size
):
376 # -- generate mask for extracting address bits to go in
377 # -- TLB entry in order to support pages > 4kB
378 # finalmaskgen: process(all)
379 # generate mask for extracting address bits to go in
380 # TLB entry in order to support pages > 4kB
class FinalMaskGen(Elaboratable, MMU):
    """Generates the mask for extracting the address bits that go into
    a TLB entry, in order to support pages > 4kB (translation of the
    VHDL ``finalmaskgen`` process).

    NOTE(review): the elaborate() body for this class is incomplete in
    this excerpt of the source.
    """
    def __init__(self):
        # working mask (VHDL: variable m : std_ulogic_vector(43 downto 0))
        self.msk = Signal(44)
388 def elaborate(self
, platform
):
401 # for i in 0 to 43 loop
403 # if i < to_integer(r.shift) then
404 with m
.If(i
< r
.shift
):
410 comb
+= self
.finalmask(mask
)
413 # mmu_1: process(all)
class MMU1(Elaboratable):
    """Main MMU state machine (translation of the VHDL ``mmu_1``
    process).

    The VHDL process variables become Signals here; the radix-walk
    state machine itself lives in elaborate(), which is incomplete in
    this excerpt of the source.
    """
    def __init__(self):
        # NOTE(review): dcreq and rts are listed in the VHDL variable
        # declarations (and dcreq is driven onto d_out.valid later in
        # this file) but their assignments fell into a gap of this
        # excerpt; restored here — confirm against the full file.
        self.dcreq = Signal()           # issue a request to dcache
        self.rts = Signal(6)            # radix tree size
        self.tlb_load = Signal()        # load dTLB entry
        self.itlb_load = Signal()       # load iTLB entry
        self.tlbie_req = Signal()       # TLB invalidate request
        self.prtbl_rd = Signal()        # process-table read request
        self.pt_valid = Signal()        # cached page-table entry valid
        self.effpid = Signal(32)        # effective PID for this access
        self.prtable_addr = Signal(64)  # process-table entry address
        self.mbits = Signal(6)          # address bits indexing this level
        self.pgtable_addr = Signal(64)  # page-table entry address
        self.pte = Signal(64)           # final PTE to load into the TLB
        self.tlb_data = Signal(64)
        self.nonzero = Signal()         # segment-check nonzero flag
        self.pgtbl = Signal(64)         # selected page-table base
        self.perm_ok = Signal()         # permission check result
        self.rc_ok = Signal()           # reference/change check result
        self.addr = Signal(64)          # address sent to dcache
        self.data = Signal(64)          # byte-swapped data from dcache
458 def elaborate(self
, platform
):
478 tlb_load
= self
.tlb_load
479 itlb_load
= self
.itlb_load
480 tlbie_req
= self
.tlbie_req
481 prtbl_rd
= self
.prtbl_rd
482 pt_valid
= self
.pt_valid
484 prtable_addr
= self
.prtable_addr
487 pgtable_addr
= self
.pgtable_addr
489 tlb_data
= self
.tlb_data
490 nonzero
= self
.nonzero
492 perm_ok
= self
.perm_ok
510 # v.inval_all := '0';
514 comb
+= v
.valid
.eq(0)
518 comb
+= v
.invalid
.eq(0)
519 comb
+= v
.badtree
.eq(0)
520 comb
+= v
.segerror
.eq(0)
521 comb
+= v
.perm_err
.eq(0)
522 comb
+= v
.rc_error
.eq(0)
523 comb
+= tlb_load
.eq(0)
524 comb
+= itlb_load
.eq(0)
525 comb
+= tlbie_req
.eq(0)
526 comb
+= v
.inval_all
.eq(0)
527 comb
+= prtbl_rd
.eq(0)
530 # -- Radix tree data structures in memory are
531 # -- big-endian, so we need to byte-swap them
532 # for i in 0 to 7 loop
533 # Radix tree data structures in memory are
534 # big-endian, so we need to byte-swap them
536 # data(i * 8 + 7 downto i * 8) := d_in.data(
537 # (7 - i) * 8 + 7 downto (7 - i) * 8);
538 comb
+= data
[i
* 8:i
* 8 + 7 + 1].eq(
540 (7 - i
) * 8:(7 - i
) * 8 + 7 + 1
545 with m
.Switch(r
.state
):
547 with m
.Case(State
.IDLE
):
548 # if l_in.addr(63) = '0' then
550 # pt_valid := r.pt0_valid;
551 with m
.If(~l_in
.addr
[63]):
552 comb
+= pgtbl
.eq(r
.pgtbl0
)
553 comb
+= pt_valid
.eq(r
.pt0_valid
)
556 # pt_valid := r.pt3_valid;
558 comb
+= pgtbl
.eq(r
.pt3_valid
)
559 comb
+= pt_valid
.eq(r
.pt3_valid
)
562 # -- rts == radix tree size, # address bits being
564 # rts := unsigned('0' & pgtbl(62 downto 61) &
565 # pgtbl(7 downto 5));
566 # rts == radix tree size, number of address bits
576 # -- mbits == # address bits to index top level
578 # mbits := unsigned('0' & pgtbl(4 downto 0));
579 # mbits == number of address bits to index top
582 Cat(pgtbl
[0:5], Const(0b0, 1))
584 # -- set v.shift to rts so that we can use finalmask
585 # -- for the segment check
587 # v.mask_size := mbits(4 downto 0);
588 # v.pgbase := pgtbl(55 downto 8) & x"00";
589 # set v.shift to rts so that we can use finalmask
590 # for the segment check
591 comb
+= v
.shift
.eq(rts
)
592 comb
+= v
.mask_size
.eq(mbits
[0:5])
593 comb
+= v
.pgbase
.eq(Cat(
598 # if l_in.valid = '1' then
599 with m
.If(l_in
.valid
):
600 # v.addr := l_in.addr;
601 # v.iside := l_in.iside;
602 # v.store := not (l_in.load or l_in.iside);
603 # v.priv := l_in.priv;
604 comb
+= v
.addr
.eq(l_in
.addr
605 comb
+= v
.iside
.eq(l_in
.iside
)
606 comb
+= v
.store
.eq(~
(l_in
.load | l_in
.siside
))
607 # if l_in.tlbie = '1' then
608 with m
.If(l_in
.tlbie
):
609 # -- Invalidate all iTLB/dTLB entries for
610 # -- tlbie with RB[IS] != 0 or RB[AP] != 0,
612 # v.inval_all := l_in.slbia or l_in.addr(11)
613 # or l_in.addr(10) or
614 # l_in.addr(7) or l_in.addr(6)
616 # Invalidate all iTLB/dTLB entries for
617 # tlbie with RB[IS] != 0 or RB[AP] != 0,
619 comb
+= v
.inval_all
.eq(l_in
.slbia
626 # -- The RIC field of the tlbie instruction
627 # -- comes across on the sprn bus as bits 2--3.
628 # -- RIC=2 flushes process table caches.
629 # if l_in.sprn(3) = '1' then
630 # The RIC field of the tlbie instruction
631 # comes across on the sprn bus as bits 2--3.
632 # RIC=2 flushes process table caches.
633 with m
.If(l_in
.sprn
[3]):
634 # v.pt0_valid := '0';
635 # v.pt3_valid := '0';
636 comb
+= v
.pt0_valid
.eq(0)
637 comb
+= v
.pt3_valid
.eq(0)
639 # v.state := DO_TLBIE;
640 comb
+= v
.state
.eq(State
.DO_TLBIE
)
644 comb
+= v
.valid
.eq(1)
645 # if pt_valid = '0' then
646 with m
.If(~pt_valid
):
647 # -- need to fetch process table entry
648 # -- set v.shift so we can use finalmask
649 # -- for generating the process table
651 # v.shift := unsigned('0' & r.prtbl(
653 # v.state := PROC_TBL_READ;
654 # need to fetch process table entry
655 # set v.shift so we can use finalmask
656 # for generating the process table
658 comb
+= v
.shift
.eq((Cat(
662 comb
+= v
.state
.eq(State
.PROC_TBL_READ
)
664 # elsif mbits = 0 then
666 # -- Use RPDS = 0 to disable radix
668 # v.state := RADIX_FINISH;
670 # Use RPDS = 0 to disable radix
672 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
673 comb
+= v
.invalid
.eq(1)
676 # v.state := SEGMENT_CHECK;
677 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
682 # if l_in.mtspr = '1' then
683 with m
.If(l_in
.mtspr
):
684 # -- Move to PID needs to invalidate L1 TLBs
685 # -- and cached pgtbl0 value. Move to PRTBL
686 # -- does that plus invalidating the cached
687 # -- pgtbl3 value as well.
688 # if l_in.sprn(9) = '0' then
689 # Move to PID needs to invalidate L1 TLBs
690 # and cached pgtbl0 value. Move to PRTBL
691 # does that plus invalidating the cached
692 # pgtbl3 value as well.
693 with m
.If(~l_in
.sprn
[9]):
694 # v.pid := l_in.rs(31 downto 0);
695 comb
+= v
.pid
.eq(l_in
.rs
[0:32])
698 # v.prtbl := l_in.rs;
699 # v.pt3_valid := '0';
700 comb
+= v
.prtbl
.eq(l_in
.rs
)
701 comb
+= v
.pt3_valid
.eq(0)
704 # v.pt0_valid := '0';
705 # v.inval_all := '1';
706 # v.state := DO_TLBIE;
707 comb
+= v
.pt0_valid
.eq(0)
708 comb
+= v
.inval_all
.eq(0)
709 comb
+= v
.state
.eq(State
.DO_TLBIE
)
713 with m
.Case(State
.DO_TLBIE
):
716 # v.state := TLB_WAIT;
718 comb
+= tlbie_req
.eq(1)
719 comb
+= v
.state
.eq(State
.TLB_WAIT
)
722 with m
.Case(State
.TLB_WAIT
):
723 # if d_in.done = '1' then
724 with m
.If(d_in
.done
):
725 # v.state := RADIX_FINISH;
726 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
729 # when PROC_TBL_READ =>
730 with m
.Case(State
.PROC_TBL_READ
):
733 # v.state := PROC_TBL_WAIT;
735 comb
+= prtbl_rd
.eq(1)
736 comb
+= v
.state
.eq(State
.PROC_TBL_WAIT
)
738 # when PROC_TBL_WAIT =>
739 with m
.Case(State
.PROC_TBL_WAIT
):
740 # if d_in.done = '1' then
741 with m
.If(d_in
.done
):
742 # if r.addr(63) = '1' then
743 with m
.If(r
.addr
[63]):
745 # v.pt3_valid := '1';
746 comb
+= v
.pgtbl3
.eq(data
)
747 comb
+= v
.pt3_valid
.eq(1)
751 # v.pt0_valid := '1';
752 comb
+= v
.pgtbl0
.eq(data
)
753 comb
+= v
.pt0_valid
.eq(1)
755 # -- rts == radix tree size, # address bits
756 # -- being translated
757 # rts := unsigned('0' & data(62 downto 61) &
759 # rts == radix tree size, # address bits
762 0 & data
[61:63] & data
[5:8]
764 # -- mbits == # address bits to index
765 # -- top level of tree
766 # mbits := unsigned('0' & data(4 downto 0));
767 # mbits == # address bits to index
772 # -- set v.shift to rts so that we can use
773 # -- finalmask for the segment check
775 # v.mask_size := mbits(4 downto 0);
776 # v.pgbase := data(55 downto 8) & x"00";
777 # set v.shift to rts so that we can use
778 # finalmask for the segment check
779 comb
+= v
.shift
.eq(rts
)
780 comb
+= v
.mask_size
.eq(mbits
[0:5])
781 comb
+= v
.pgbase
.eq(data
[8:56] & 0x00)
784 # v.state := RADIX_FINISH;
786 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
787 comb
+= v
.invalid
.eq(1)
789 # v.state := SEGMENT_CHECK;
790 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
794 # if d_in.err = '1' then
795 with m
.If(d_in
.err
=== 1):
796 # v.state := RADIX_FINISH;
798 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
799 comb
+= v
.badtree
.eq(1)
802 # when SEGMENT_CHECK =>
803 with m
.Case(State
.SEGMENT_CHECK
):
804 # mbits := '0' & r.mask_size;
805 # v.shift := r.shift + (31 - 12) - mbits;
806 # nonzero := or(r.addr(61 downto 31) and
807 # not finalmask(30 downto 0));
808 comb
+= mbits
.eq(0 & r
.mask_size
)
809 comb
+= v
.shift
.eq(r
.shift
+ (31 -12) - mbits
)
811 r
.addr
[31:62] & ~finalmask
[0:31]
813 # if r.addr(63) /= r.addr(62) or nonzero = '1' then
814 # v.state := RADIX_FINISH;
816 with m
.If((r
.addr
[63] != r
.addr
[62])
818 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
819 comb
+= v
.segerror
.eq(1)
820 # elsif mbits < 5 or mbits > 16 or mbits
821 # > (r.shift + (31 - 12)) then
822 # v.state := RADIX_FINISH;
824 with m
.If((mbits
< 5) |
(mbits
> 16)
825 |
(mbits
> (r
.shift
+ (31-12)))):
826 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
827 comb
+= v
.badtree
.eq(1)
829 # v.state := RADIX_LOOKUP;
831 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
834 # when RADIX_LOOKUP =>
835 with m
.Case(State
.RADIX_LOOKUP
):
837 # v.state := RADIX_READ_WAIT;
839 comb
+= v
.state
.eq(State
.RADIX_READ_WAIT
)
841 # when RADIX_READ_WAIT =>
842 with m
.Case(State
.RADIX_READ_WAIT
):
843 # if d_in.done = '1' then
844 with m
.If(d_in
.done
):
846 comb
+= v
.pde
.eq(data
)
848 # if data(63) = '1' then
852 # if data(62) = '1' then
855 # -- check permissions and RC bits
857 comb
+= perm_ok
.eq(0)
858 # if r.priv = '1' or data(3) = '0' then
859 with m
.If((r
.priv
== 1) |
(data
[3] == 0)):
860 # if r.iside = '0' then
861 # perm_ok := data(1) or (data(2)
863 with m
.If(r
.iside
== 0):
870 # -- no IAMR, so no KUEP support
871 # -- for now deny execute
872 # -- permission if cache inhibited
874 # data(0) and not data(5);
875 # no IAMR, so no KUEP support
876 # for now deny execute
877 # permission if cache inhibited
884 # rc_ok := data(8) and (data(7) or
888 (data
[7] |
(~r
.store
))
890 # if perm_ok = '1' and rc_ok = '1' then
891 # v.state := RADIX_LOAD_TLB;
892 with m
.If(perm_ok
& rc_ok
):
898 # v.state := RADIX_FINISH;
899 # v.perm_err := not perm_ok;
900 # -- permission error takes precedence
902 # v.rc_error := perm_ok;
906 comb
+= v
.perm_err
.eq(~perm_ok
)
907 # permission error takes precedence
909 comb
+= v
.rc_error
.eq(perm_ok
)
913 # mbits := unsigned('0' &
915 comb
+= mbits
.eq((Cat(
919 # if mbits < 5 or mbits > 16 or
920 # mbits > r.shift then
921 # v.state := RADIX_FINISH;
923 with m
.If((mbits
< 5) & (mbits
> 16) |
928 comb
+= v
.badtree
.eq(1)
931 # v.shift := v.shift - mbits;
932 # v.mask_size := mbits(4 downto 0);
933 # v.pgbase := data(55 downto 8)
935 # v.state := RADIX_LOOKUP;
936 comb
+= v
.shift
.eq(v
.shif
- mbits
)
937 comb
+= v
.mask_size
.eq(mbits
[0:5])
938 comb
+= v
.pgbase
.eq(Cat(
949 # -- non-present PTE, generate a DSI
950 # v.state := RADIX_FINISH;
952 # non-present PTE, generate a DSI
953 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
954 comb
+= v
.invalid
.eq(1)
958 # if d_in.err = '1' then
960 # v.state := RADIX_FINISH;
962 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
963 comb
+= v
.badtree
.eq(1)
966 # when RADIX_LOAD_TLB =>
967 with m
.Case(State
.RADIX_LOAD_TLB
):
969 comb
+= tlb_load
.eq(1)
970 # if r.iside = '0' then
973 # v.state := TLB_WAIT;
975 comb
+= v
.state
.eq(State
.TLB_WAIT
)
980 comb
+= itlb_load
.eq(1)
981 comb
+= v
.state
.eq(State
.IDLE
)
984 # when RADIX_FINISH =>
986 with m
.Case(State
.RADIX_FINISH
):
988 comb
+= v
.state
.eq(State
.IDLE
)
991 # if v.state = RADIX_FINISH or (v.state = RADIX_LOAD_TLB
992 # and r.iside = '1') then
993 with m
.If(v
.state
== State
.RADIX_FINISH
994 |
(v
.state
== State
.RADIX_LOAD_TLB
& r
.iside
)
996 # v.err := v.invalid or v.badtree or v.segerror
997 # or v.perm_err or v.rc_error;
998 # v.done := not v.err;
999 comb
+= v
.err
.eq(v
.invalid | v
.badtree | v
.segerror
1000 | v
.perm_err | v
.rc_error
)
1001 comb
+= v
.done
.eq(~v
.err
)
1004 # if r.addr(63) = '1' then
1005 with m
.If(r
.addr
[63]):
1006 # effpid := x"00000000";
1007 comb
+= effpid
.eq(Const(0x00000000,1))
1011 comb
+= effpid
.eq(r
.pid
)
1013 # prtable_addr := x"00" & r.prtbl(55 downto 36) &
1014 # ((r.prtbl(35 downto 12) and not finalmask(
1015 # 23 downto 0)) or (effpid(31 downto 8) and
1016 # finalmask(23 downto 0))) & effpid(7 downto 0)
1018 comb
+= prtable_addr
.eq(
1022 Cat(Const(0b000, 4), effpid
[0:8]),
1024 (r
.prtble
[12:36] & (~finalmask
[0:24]))
1025 | effpid
[8:32] & finalmask
[0:24]
1034 # pgtable_addr := x"00" & r.pgbase(55 downto 19) &
1035 # ((r.pgbase(18 downto 3) and not mask) or
1036 # (addrsh and mask)) & "000";
1037 comb
+= pgtable_addr
.eq(
1042 (r
.pgbase
[3:19] & (~mask
))
1050 # pte := x"00" & ((r.pde(55 downto 12) and not finalmask) or
1051 # (r.addr(55 downto 12) and finalmask)) & r.pde(11 downto 0);
1057 (r
.pde
[12:56] & (~finalmask
))
1058 |
(r
.addr
[12:56] & finalmask
)
1065 # -- update registers
1071 # if tlbie_req = '1' then
1073 with m
.If(tlbie_req
):
1075 # tlb_data := (others => '0');
1076 comb
+= addr
.eq(r
.addr
)
1077 # elsif tlb_load = '1' then
1078 with m
.If(tlb_load
):
1079 # addr := r.addr(63 downto 12) & x"000";
1081 comb
+= addr
.eq(Cat(Const(0x000, 3), r
.addr
[12:64]))
1082 # elsif prtbl_rd = '1' then
1083 with m
.If(prtbl_rd
):
1084 # addr := prtable_addr;
1085 # tlb_data := (others => '0');
1086 comb
+= addr
.eq(prtable_addr
)
1089 # addr := pgtable_addr;
1090 # tlb_data := (others => '0');
1091 comb
+= addr
.eq(pgtable_addr
)
1094 # l_out.done <= r.done;
1095 # l_out.err <= r.err;
1096 # l_out.invalid <= r.invalid;
1097 # l_out.badtree <= r.badtree;
1098 # l_out.segerr <= r.segerror;
1099 # l_out.perm_error <= r.perm_err;
1100 # l_out.rc_error <= r.rc_error;
1101 comb
+= l_out
.done
.eq(r
.done
)
1102 comb
+= l_out
.err
.eq(r
.err
)
1103 comb
+= l_out
.invalid
.eq(r
.invalid
)
1104 comb
+= l_out
.badtree
.eq(r
.badtree
)
1105 comb
+= l_out
.segerr
.eq(r
.segerror
)
1106 comb
+= l_out
.perm_error
.eq(r
.perm_err
)
1107 comb
+= l_out
.rc_error
.eq(r
.rc_error
)
1109 # d_out.valid <= dcreq;
1110 # d_out.tlbie <= tlbie_req;
1111 # d_out.doall <= r.inval_all;
1112 # d_out.tlbld <= tlb_load;
1113 # d_out.addr <= addr;
1114 # d_out.pte <= tlb_data;
1115 comb
+= d_out
.valid
.eq(dcreq
)
1116 comb
+= d_out
.tlbie
.eq(tlbie_req
)
1117 comb
+= d_out
.doall
.eq(r
.inval_all
)
1118 comb
+= d_out
.tlbld
.eeq(tlb_load
)
1119 comb
+= d_out
.addr
.eq(addr
)
1120 comb
+= d_out
.pte
.eq(tlb_data
)
1122 # i_out.tlbld <= itlb_load;
1123 # i_out.tlbie <= tlbie_req;
1124 # i_out.doall <= r.inval_all;
1125 # i_out.addr <= addr;
1126 # i_out.pte <= tlb_data;
1127 comb
+= i_out
.tlbld
.eq(itlb_load
)
1128 comb
+= i_out
.tblie
.eq(tlbie_req
)
1129 comb
+= i_out
.doall
.eq(r
.inval_all
)
1130 comb
+= i_out
.addr
.eq(addr
)
1131 comb
+= i_out
.pte
.eq(tlb_data
)