# Based on Anton Blanchard's microwatt mmu.vhdl
6 from enum
import Enum
, unique
7 from nmigen
import (Module
, Signal
, Elaboratable
, Mux
, Cat
, Repl
, signed
,
9 from nmigen
.cli
import main
10 from nmigen
.iocontrol
import RecordObject
12 # library ieee; use ieee.std_logic_1164.all; use ieee.numeric_std.all;
14 # library work; use work.common.all;
17 # -- Supports 4-level trees as in arch 3.0B, but not the two-step translation
18 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
33 # architecture behave of mmu is
48 # type reg_stage_t is record
49 # -- latched request from loadstore1
54 # addr : std_ulogic_vector(63 downto 0);
55 # inval_all : std_ulogic;
57 # prtbl : std_ulogic_vector(63 downto 0);
58 # pid : std_ulogic_vector(31 downto 0);
63 # pgtbl0 : std_ulogic_vector(63 downto 0);
64 # pt0_valid : std_ulogic;
65 # pgtbl3 : std_ulogic_vector(63 downto 0);
66 # pt3_valid : std_ulogic;
67 # shift : unsigned(5 downto 0);
68 # mask_size : unsigned(4 downto 0);
69 # pgbase : std_ulogic_vector(55 downto 0);
70 # pde : std_ulogic_vector(63 downto 0);
71 # invalid : std_ulogic;
72 # badtree : std_ulogic;
73 # segerror : std_ulogic;
74 # perm_err : std_ulogic;
75 # rc_error : std_ulogic;
class RegStage(RecordObject):
    """Register stage for the MMU state machine: latches the request from
    loadstore1 plus the internal radix-walk state (port of microwatt's
    ``reg_stage_t`` record).

    :param name: optional record name, passed through to RecordObject.
    """

    def __init__(self, name=None):
        # BUG FIX: was super().__init__(self, name=name) — the bound call
        # already supplies self, so it was being passed twice.
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)
        self.pid = Signal(32, reset_less=True)
        # internal state
        # NOTE(review): this stores the State enum member itself, not a
        # Signal shaped by the enum — presumably should be
        # Signal(State, reset=State.IDLE); `State` is also not visibly
        # imported in this file.  TODO confirm.
        self.state = State.IDLE
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        self.pgtbl0 = Signal(64, reset_less=True)
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)
        self.pt3_valid = Signal(reset_less=True)
        self.shift = Signal(6, reset_less=True)
        self.mask_size = Signal(5, reset_less=True)
        self.pgbase = Signal(56, reset_less=True)
        self.pde = Signal(64, reset_less=True)
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
112 # Supports 4-level trees as in arch 3.0B, but not the two-step translation
113 # for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
# Supports 4-level trees as in arch 3.0B, but not the two-step translation
# for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
class MMU(Elaboratable):
    """Radix MMU (WIP port of microwatt mmu.vhdl).

    Ports (mirroring the VHDL entity):
        l_in  : in  Loadstore1ToMmuType
        l_out : out MmuToLoadstore1Type
        d_out : out MmuToDcacheType
        d_in  : in  DcacheToMmuType
        i_out : out MmuToIcacheType
    """

    def __init__(self):
        # BUG FIX: these port assignments had no enclosing __init__,
        # leaving `self` undefined at class-body level.
        self.l_in = Loadstore1ToMmuType()
        self.l_out = MmuToLoadstore1Type()
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        self.i_out = MmuToIcacheType()

    def elaborate(self, platform):
        # BUG FIX: bind the Module, the comb domain and the ports; the
        # original referenced bare names (l_in, l_out, ...) that were
        # never in scope, and never returned the Module.
        m = Module()
        comb = m.d.comb
        l_in, l_out = self.l_in, self.l_out
        d_out = self.d_out

        # signal r, rin : reg_stage_t;
        # NOTE(review): r/rin (current/next RegStage) plus addrsh/mask are
        # still unbound in this WIP — they must be created and wired to the
        # address shifter / mask generators before this can elaborate.
        # signal addrsh : std_ulogic_vector(15 downto 0);
        # signal mask : std_ulogic_vector(15 downto 0);
        # signal finalmask : std_ulogic_vector(43 downto 0);
        finalmask = Signal(44)

        # Multiplex internal SPR values back to loadstore1, selected by
        # l_in.sprn(9): PRTBL when set, else the zero-extended PID.
        # l_out.sprval <= r.prtbl when l_in.sprn(9) = '1'
        #                 else x"00000000" & r.pid;
        with m.If(l_in.sprn[9] == 1):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            # BUG FIX: was an unconditional `0x00000000 & r` — a bitwise
            # AND on the whole record (always 0), outside any Else.  The
            # VHDL `&` is concatenation, i.e. a zero-extended r.pid, which
            # nmigen's .eq() performs implicitly on the narrower r.pid.
            comb += l_out.sprval.eq(r.pid)

        # Debug reports from the VHDL.  NOTE(review): these print() calls
        # run once at elaboration time, not per simulation cycle like the
        # VHDL `report` statements — replace with simulation hooks later.
        # if rin.valid = '1' then
        #     report "MMU got tlb miss for " & to_hstring(rin.addr);
        with m.If(rin.valid == 1):
            print(f"MMU got tlb miss for {rin.addr}")

        # if l_out.done = '1' then
        #     report "MMU completing op without error";
        with m.If(l_out.done == 1):
            print("MMU completing op without error")

        # if l_out.err = '1' then
        #     report "MMU completing op with err invalid=" & ... badtree=...
        with m.If(l_out.err == 1):
            print(f"MMU completing op with err invalid={l_out.invalid} "
                  f"badtree={l_out.badtree}")

        # if rin.state = RADIX_LOOKUP then
        #     report "radix lookup shift=" & ... " msize=" & ...
        with m.If(rin.state == State.RADIX_LOOKUP):
            print(f"radix lookup shift={rin.shift} "
                  f"msize={rin.mask_size}")

        # if r.state = RADIX_LOOKUP then
        #     report "send load addr=" & ... " addrsh=" & ... " mask=" & ...
        with m.If(r.state == State.RADIX_LOOKUP):
            print(f"send load addr={d_out.addr} "
                  f"addrsh={addrsh} mask={mask}")

        # BUG FIX: elaborate() must return the Module.
        return m
222 # -- Shift address bits 61--12 right by 0--47 bits and
223 # -- supply the least significant 16 bits of the result.
224 # addrshifter: process(all)
226 # Shift address bits 61--12 right by 0--47 bits and
227 # supply the least significant 16 bits of the result.
# Shift address bits 61--12 right by 0--47 bits and
# supply the least significant 16 bits of the result.
class AddrShifter(Elaboratable):
    """Three-level barrel shifter (port of microwatt's ``addrshifter``
    process): shifts address bits 61..12 right by 0..47 (``r.shift``) and
    supplies the least significant 16 bits of the result on ``addrsh``.
    """

    def __init__(self):
        # BUG FIX: these assignments had no enclosing __init__.
        # Intermediate stages of the shifter:
        #   sh1: after shifting by multiples of 16
        #   sh2: after shifting by multiples of 4
        self.sh1 = Signal(31)
        self.sh2 = Signal(19)
        self.result = Signal(16)

    def elaborate(self, platform):
        # NOTE(review): `m`, `comb`, `r` and `self.addrsh` are not bound
        # in this scope in this WIP — confirm wiring before use.

        # First level: shift by 0/16/32 (selected by r.shift(5 downto 4)).
        # BUG FIX: the three assignments were unconditional — restored the
        # VHDL `case` arms as m.Case/m.Default.
        with m.Switch(r.shift[4:6]):
            with m.Case(0b00):
                # sh1 := r.addr(42 downto 12);
                comb += sh1.eq(r.addr[12:43])
            with m.Case(0b01):
                # sh1 := r.addr(58 downto 28);
                comb += sh1.eq(r.addr[28:59])
            with m.Default():
                # sh1 := "0000000000000" & r.addr(61 downto 44);
                comb += sh1.eq(r.addr[44:62])

        # Second level: shift by 0/4/8/12 (r.shift(3 downto 2)).
        with m.Switch(r.shift[2:4]):
            with m.Case(0b00):
                # sh2 := sh1(18 downto 0);
                comb += sh2.eq(sh1[0:19])
            with m.Case(0b01):
                # sh2 := sh1(22 downto 4);
                comb += sh2.eq(sh1[4:23])
            with m.Case(0b10):
                # sh2 := sh1(26 downto 8);
                comb += sh2.eq(sh1[8:27])
            with m.Default():
                # sh2 := sh1(30 downto 12);
                comb += sh2.eq(sh1[12:31])

        # Third level: shift by 0..3 (r.shift(1 downto 0)).
        # BUG FIX: this level read from sh1; the VHDL reads sh2
        # (result := sh2(...)), otherwise the second level is dead.
        with m.Switch(r.shift[0:2]):
            with m.Case(0b00):
                # result := sh2(15 downto 0);
                comb += result.eq(sh2[0:16])
            with m.Case(0b01):
                # result := sh2(16 downto 1);
                comb += result.eq(sh2[1:17])
            with m.Case(0b10):
                # result := sh2(17 downto 2);
                comb += result.eq(sh2[2:18])
            with m.Default():
                # result := sh2(18 downto 3);
                comb += result.eq(sh2[3:19])

        comb += self.addrsh.eq(result)
312 # -- generate mask for extracting address fields for PTE address generation
313 # addrmaskgen: process(all)
314 # generate mask for extracting address fields for PTE address generation
# generate mask for extracting address fields for PTE address generation
class AddrMaskGen(Elaboratable):
    """Generates the 16-bit mask used to extract address fields for PTE
    address generation (port of microwatt's ``addrmaskgen`` process — WIP).
    """
    # NOTE(review): no `def __init__(self):` is visible around this
    # assignment — presumably lost in translation; confirm.
    # variable m : std_ulogic_vector(15 downto 0);
    self.mask = Signal(16)

    def elaborate(self, platform):
        # NOTE(review): `m`, `comb`, `r` and the local `mask` are not bound
        # in this scope in this WIP — TODO create Module()/comb and wire the
        # register-stage record before this can elaborate.
        # mask_count has to be >= 5, so the low 5 bits are always set
        comb += mask.eq(0x001F)
        # set bits 5..15 while still below r.mask_size
        # for i in 5 to 15 loop
        for i in range(5,16):
            # if i < to_integer(r.mask_size) then
            with m.If(i < r.mask_size):
                comb += mask[i].eq(1)
        # publish the computed mask on the module output
        comb += self.mask.eq(mask)
348 # -- generate mask for extracting address bits to go in TLB entry
349 # -- in order to support pages > 4kB
350 # finalmaskgen: process(all)
352 # generate mask for extracting address bits to go in TLB entry
353 # in order to support pages > 4kB
# generate mask for extracting address bits to go in TLB entry
# in order to support pages > 4kB
class FinalMaskGen(Elaboratable):
    """Generates the 44-bit mask selecting the address bits that go into a
    TLB entry, to support pages larger than 4kB (port of microwatt's
    ``finalmaskgen`` process — WIP).
    """
    # NOTE(review): no `def __init__(self):` is visible around this
    # assignment — presumably lost in translation; confirm.
    # variable m : std_ulogic_vector(43 downto 0);
    self.mask = Signal(44)

    def elaborate(self, platform):
        # m := (others => '0');
        # TODO value should be vhdl (others => '0') in nmigen
        # NOTE(review): the VHDL wraps this If in `for i in 0 to 43 loop`
        # and sets m(i) := '1' inside it; neither the loop, the per-bit
        # assignment, nor bindings for `m`, `comb`, `r`, `i`, `mask`
        # exist yet in this WIP — TODO.
        # for i in 0 to 43 loop
        # if i < to_integer(r.shift) then
        with m.If(i < r.shift):
            # NOTE(review): looks like this should be
            # `self.finalmask.eq(mask)` — `.eq` is missing, and `finalmask`
            # is not declared on self (only self.mask is).  Confirm intent.
            comb += self.finalmask(mask)
385 # mmu_1: process(all)
386 class MMU1(Elaboratable
):
389 # variable v : reg_stage_t;
390 # variable dcreq : std_ulogic;
391 # variable tlb_load : std_ulogic;
392 # variable itlb_load : std_ulogic;
393 # variable tlbie_req : std_ulogic;
394 # variable prtbl_rd : std_ulogic;
395 # variable pt_valid : std_ulogic;
396 # variable effpid : std_ulogic_vector(31 downto 0);
397 # variable prtable_addr : std_ulogic_vector(63 downto 0);
398 # variable rts : unsigned(5 downto 0);
399 # variable mbits : unsigned(5 downto 0);
400 # variable pgtable_addr : std_ulogic_vector(63 downto 0);
401 # variable pte : std_ulogic_vector(63 downto 0);
402 # variable tlb_data : std_ulogic_vector(63 downto 0);
403 # variable nonzero : std_ulogic;
404 # variable pgtbl : std_ulogic_vector(63 downto 0);
405 # variable perm_ok : std_ulogic;
406 # variable rc_ok : std_ulogic;
407 # variable addr : std_ulogic_vector(63 downto 0);
408 # variable data : std_ulogic_vector(63 downto 0);
411 self
.tlb_load
= Signal()
412 self
.itlb_load
= Signal()
413 self
.tlbie_req
= Signal()
414 self
.prtbl_rd
= Signal()
415 self
.pt_valid
= Signal()
416 self
.effpid
= Signal(32)
417 self
.prtable_addr
= Signal(64)
419 self
.mbits
= Signal(6)
420 self
.pgtable_addr
= Signal(64)
421 self
.pte
= Signal(64)
422 self
.tlb_data
= Signal(64)
423 self
.nonzero
= Signal()
424 self
.pgtbl
= Signal(64)
425 self
.perm_ok
= Signal()
426 self
.rc_ok
= Signal()
427 self
.addr
= Signal(64)
428 self
.data
= Signal(64)
431 def elaborate(self
, platform
):
451 tlb_load
= self
.tlb_load
452 itlb_load
= self
.itlb_load
453 tlbie_req
= self
.tlbie_req
454 prtbl_rd
= self
.prtbl_rd
455 pt_valid
= self
.pt_valid
457 prtable_addr
= self
.prtable_addr
460 pgtable_addr
= self
.pgtable_addr
462 tlb_data
= self
.tlb_data
463 nonzero
= self
.nonzero
465 perm_ok
= self
.perm_ok
483 # v.inval_all := '0';
487 comb
+= v
.valid
.eq(0)
491 comb
+= v
.invalid
.eq(0)
492 comb
+= v
.badtree
.eq(0)
493 comb
+= v
.segerror
.eq(0)
494 comb
+= v
.perm_err
.eq(0)
495 comb
+= v
.rc_error
.eq(0)
496 comb
+= tlb_load
.eq(0)
497 comb
+= itlb_load
.eq(0)
498 comb
+= tlbie_req
.eq(0)
499 comb
+= v
.inval_all
.eq(0)
500 comb
+= prtbl_rd
.eq(0)
503 # -- Radix tree data structures in memory are big-endian,
504 # -- so we need to byte-swap them
505 # for i in 0 to 7 loop
506 # Radix tree data structures in memory are big-endian,
507 # so we need to byte-swap them
509 # data(i * 8 + 7 downto i * 8) := d_in.data((7 - i) * 8 + 7 downto
514 (7 - i
) * 8:(7 - i
) * 8 + 7 + 1
519 with m
.Switch(r
.state
):
521 with m
.Case(State
.IDLE
):
522 # if l_in.addr(63) = '0' then
524 # pt_valid := r.pt0_valid;
525 with m
.If(l_in
.addr
[63] == 0):
526 comb
+= pgtbl
.eq(r
.pgtbl0
)
527 comb
+= pt_valid
.eq(r
.pt0_valid
)
530 # pt_valid := r.pt3_valid;
532 comb
+= pgtbl
.eq(r
.pt3_valid
)
533 comb
+= pt_valid
.eq(r
.pt3_valid
)
536 # -- rts == radix tree size, # address bits being translated
537 # rts := unsigned('0' & pgtbl(62 downto 61) & pgtbl(7 downto 5));
538 # rts == radix tree size, number of address bits being translated
539 comb
+= rts
.eq(((Cat(Const(0b0, 1) , Cat(pgtbl
[61:63], pgtbl
[5:8]))).as_unsigned())
541 # -- mbits == # address bits to index top level of tree
542 # mbits := unsigned('0' & pgtbl(4 downto 0));
543 # mbits == number of address bits to index top level of tree
544 comb
+= mbits
.eq((0 & pgtbl
[0:5]).as_unsigned())
545 # -- set v.shift to rts so that we can use finalmask for the
548 # v.mask_size := mbits(4 downto 0);
549 # v.pgbase := pgtbl(55 downto 8) & x"00";
550 # set v.shift to rts so that we can use finalmask for the segment
552 comb
+= v
.shift
.eq(rts
)
553 comb
+= v
.mask_size
.eq(mbits
[0:5])
554 comb
+= v
.pgbase
.eq(pgtbl
[8:56] & 0x00)
556 # if l_in.valid = '1' then
557 with m
.If(l_in
.valid
== 1):
558 # v.addr := l_in.addr;
559 # v.iside := l_in.iside;
560 # v.store := not (l_in.load or l_in.iside);
561 # v.priv := l_in.priv;
562 comb
+= v
.addr
.eq(l_in
.addr
563 comb
+= v
.iside
.eq(l_in
.iside
)
564 comb
+= v
.store
.eq(~
(l_in
.load ^ l_in
.siside
))
565 # if l_in.tlbie = '1' then
566 with m
.If(l_in
.tlbie
== 1):
567 # -- Invalidate all iTLB/dTLB entries for tlbie with
568 # -- RB[IS] != 0 or RB[AP] != 0, or for slbia
569 # v.inval_all := l_in.slbia or l_in.addr(11) or l_in.
570 # addr(10) or l_in.addr(7) or l_in.addr(6)
572 # Invalidate all iTLB/dTLB entries for tlbie with
573 # RB[IS] != 0 or RB[AP] != 0, or for slbia
574 comb
+= v
.inval_all
.eq(l_in
.slbia ^ l_in
.addr
[11] ^
575 l_in
.addr
[10] ^ l_in
.addr
[7] ^
576 l_in
.addr
[6] ^ l_in
.addr
[5])
577 # -- The RIC field of the tlbie instruction comes across
578 # -- on the sprn bus as bits 2--3. RIC=2 flushes process
580 # if l_in.sprn(3) = '1' then
581 # The RIC field of the tlbie instruction comes across
582 # on the sprn bus as bits 2--3. RIC=2 flushes process
584 with m
.If(l_in
.sprn
[3] == 1):
585 # v.pt0_valid := '0';
586 # v.pt3_valid := '0';
587 comb
+= v
.pt0_valid
.eq(0)
588 comb
+= v
.pt3_valid
.eq(0)
590 # v.state := DO_TLBIE;
591 comb
+= v
.state
.eq(State
.DO_TLBIE
)
595 comb
+= v
.valid
.eq(1)
596 # if pt_valid = '0' then
597 with m
.If(pt_valid
== 0):
598 # -- need to fetch process table entry
599 # -- set v.shift so we can use finalmask for generating
600 # -- the process table entry address
601 # v.shift := unsigned('0' & r.prtbl(4 downto 0));
602 # v.state := PROC_TBL_READ;
603 # need to fetch process table entry
604 # set v.shift so we can use finalmask for generating
605 # the process table entry address
606 comb
+= v
.shift
.eq((0 & r
.prtble
[0:5]).as_unsigned())
607 comb
+= v
.state
.eq(State
.PROC_TBL_READ
)
609 # elsif mbits = 0 then
610 with m
.If(mbits
== 0):
611 # -- Use RPDS = 0 to disable radix tree walks
612 # v.state := RADIX_FINISH;
614 # Use RPDS = 0 to disable radix tree walks
615 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
616 comb
+= v
.invalid
.eq(1)
619 # v.state := SEGMENT_CHECK;
620 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
625 # if l_in.mtspr = '1' then
626 with m
.If(l_in
.mtspr
== 1):
627 # -- Move to PID needs to invalidate L1 TLBs and cached
628 # -- pgtbl0 value. Move to PRTBL does that plus
629 # -- invalidating the cached pgtbl3 value as well.
630 # if l_in.sprn(9) = '0' then
631 # Move to PID needs to invalidate L1 TLBs and cached
632 # pgtbl0 value. Move to PRTBL does that plus
633 # invalidating the cached pgtbl3 value as well.
634 with m
.If(l_in
.sprn
[9] == 0):
635 # v.pid := l_in.rs(31 downto 0);
636 comb
+= v
.pid
.eq(l_in
.rs
[0:32])
639 # v.prtbl := l_in.rs;
640 # v.pt3_valid := '0';
641 comb
+= v
.prtbl
.eq(l_in
.rs
)
642 comb
+= v
.pt3_valid
.eq(0)
645 # v.pt0_valid := '0';
646 # v.inval_all := '1';
647 # v.state := DO_TLBIE;
648 comb
+= v
.pt0_valid
.eq(0)
649 comb
+= v
.inval_all
.eq(0)
650 comb
+= v
.state
.eq(State
.DO_TLBIE
)
654 with m
.Case(State
.DO_TLBIE
):
657 # v.state := TLB_WAIT;
659 comb
+= tlbie_req
.eq(1)
660 comb
+= v
.state
.eq(State
.TLB_WAIT
)
663 with m
.Case(State
.TLB_WAIT
):
664 # if d_in.done = '1' then
665 with m
.If(d_in
.done
== 1):
666 # v.state := RADIX_FINISH;
667 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
670 # when PROC_TBL_READ =>
671 with m
.Case(State
.PROC_TBL_READ
):
674 # v.state := PROC_TBL_WAIT;
676 comb
+= prtbl_rd
.eq(1)
677 comb
+= v
.state
.eq(State
.PROC_TBL_WAIT
)
679 # when PROC_TBL_WAIT =>
680 with m
.Case(State
.PROC_TBL_WAIT
):
681 # if d_in.done = '1' then
682 with m
.If(d_in
.done
== 1):
683 # if r.addr(63) = '1' then
684 with m
.If(r
.addr
[63] == 1):
686 # v.pt3_valid := '1';
687 comb
+= v
.pgtbl3
.eq(data
)
688 comb
+= v
.pt3_valid
.eq(1)
692 # v.pt0_valid := '1';
693 comb
+= v
.pgtbl0
.eq(data
)
694 comb
+= v
.pt0_valid
.eq(1)
696 # -- rts == radix tree size, # address bits being translated
697 # rts := unsigned('0' & data(62 downto 61) & data(7 downto 5));
698 # rts == radix tree size, # address bits being translated
699 comb
+= rts
.eq((0 & data
[61:63] & data
[5:8]).as_unsigned())
700 # -- mbits == # address bits to index top level of tree
701 # mbits := unsigned('0' & data(4 downto 0));
702 # mbits == # address bits to index top level of tree
703 comb
+= mbits
.eq((0 & data
[0:5]).as_unsigned())
704 # -- set v.shift to rts so that we can use finalmask for the
707 # v.mask_size := mbits(4 downto 0);
708 # v.pgbase := data(55 downto 8) & x"00";
709 # set v.shift to rts so that we can use finalmask for the
711 comb
+= v
.shift
.eq(rts
)
712 comb
+= v
.mask_size
.eq(mbits
[0:5])
713 comb
+= v
.pgbase
.eq(data
[8:56] & 0x00)
715 with m
.If(mbits
== 0):
716 # v.state := RADIX_FINISH;
718 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
719 comb
+= v
.invalid
.eq(1)
721 # v.state := SEGMENT_CHECK;
722 comb
+= v
.state
.eq(State
.SEGMENT_CHECK
)
726 # if d_in.err = '1' then
727 with m
.If(d_in
.err
=== 1):
728 # v.state := RADIX_FINISH;
730 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
731 comb
+= v
.badtree
.eq(1)
734 # when SEGMENT_CHECK =>
735 with m
.Case(State
.SEGMENT_CHECK
):
736 # mbits := '0' & r.mask_size;
737 # v.shift := r.shift + (31 - 12) - mbits;
738 # nonzero := or(r.addr(61 downto 31) and not finalmask(
740 comb
+= mbits
.eq(0 & r
.mask_size
)
741 comb
+= v
.shift
.eq(r
.shift
+ (31 -12) - mbits
)
742 comb
+= nonzero
.eq('''TODO wrap in or (?)'''r
.addr
[31:62]
743 & (~finalmask
[0:31]))
744 # if r.addr(63) /= r.addr(62) or nonzero = '1' then
745 # v.state := RADIX_FINISH;
747 with m
.If((r
.addr
[63] != r
.addr
[62]) ^
(nonzero
== 1)):
748 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
749 comb
+= v
.segerror
.eq(1)
750 # elsif mbits < 5 or mbits > 16 or mbits >
751 # (r.shift + (31 - 12)) then
752 # v.state := RADIX_FINISH;
754 with m
.If((mbits
< 5) ^
(mbits
> 16) ^
(mbits
> (r
.shift
+
756 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
757 comb
+= v
.badtree
.eq(1)
759 # v.state := RADIX_LOOKUP;
761 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
764 # when RADIX_LOOKUP =>
765 with m
.Case(State
.RADIX_LOOKUP
):
767 # v.state := RADIX_READ_WAIT;
769 comb
+= v
.state
.eq(State
.RADIX_READ_WAIT
)
771 # when RADIX_READ_WAIT =>
772 with m
.Case(State
.RADIX_READ_WAIT
)
773 # if d_in.done = '1' then
774 with m
.If(d_in
.done
== 1):
776 comb
+= v
.pde
.eq(data
)
778 # if data(63) = '1' then
780 with m
.If(data
[63] == 1):
782 # if data(62) = '1' then
784 with m
.If(data
[62] == 1):
785 # -- check permissions and RC bits
787 comb
+= perm_ok
.eq(0)
788 # if r.priv = '1' or data(3) = '0' then
789 with m
.If((r
.priv
== 1) ^
(data
[3] == 0)):
790 # if r.iside = '0' then
791 # perm_ok := data(1) or (data(2) and not
793 with m
.If(r
.iside
== 0):
794 comb
+= perm_ok
.eq((data
[1] ^ data
[2]) &
798 # -- no IAMR, so no KUEP support for now
799 # -- deny execute permission if cache inhibited
800 # perm_ok := data(0) and not data(5);
801 # no IAMR, so no KUEP support for now
802 # deny execute permission if cache inhibited
803 comb
+= perm_ok
.eq(data
[0] & (~data
[5]))
807 # rc_ok := data(8) and (data(7) or not r.store);
808 comb
+= rc_ok
.eq(data
[8] & (data
[7] ^
(~r
.store
)))
809 # if perm_ok = '1' and rc_ok = '1' then
810 # v.state := RADIX_LOAD_TLB;
811 with m
.If(perm_ok
== 1 & rc_ok
== 1):
812 comb
+= v
.state
.eq(State
.RADIX_LOAD_TLB
)
815 # v.state := RADIX_FINISH;
816 # v.perm_err := not perm_ok;
817 # -- permission error takes precedence over
819 # v.rc_error := perm_ok;
820 comb
+= vl
.state
.eq(State
.RADIX_FINISH
)
821 comb
+= v
.perm_err
.eq(~perm_ok
)
822 # permission error takes precedence over
824 comb
+= v
.rc_error
.eq(perm_ok
)
828 # mbits := unsigned('0' & data(4 downto 0));
829 comb
+= mbits
.eq((0 & data
[0:5]).as_unsigned())
830 # if mbits < 5 or mbits > 16 or mbits > r.shift then
831 # v.state := RADIX_FINISH;
833 with m
.If((mbits
< 5) & (mbits
> 16) ^
835 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
836 comb
+= v
.badtree
.eq(1)
839 # v.shift := v.shift - mbits;
840 # v.mask_size := mbits(4 downto 0);
841 # v.pgbase := data(55 downto 8) & x"00";
842 # v.state := RADIX_LOOKUP;
843 comb
+= v
.shift
.eq(v
.shif
- mbits
)
844 comb
+= v
.mask_size
.eq(mbits
[0:5])
845 comb
+= v
.pgbase
.eq(mbits
[8:56] & 0x00)
846 comb
+= v
.state
.eq(State
.RADIX_LOOKUP
)
851 # -- non-present PTE, generate a DSI
852 # v.state := RADIX_FINISH;
854 # non-present PTE, generate a DSI
855 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
856 comb
+= v
.invalid
.eq(1)
860 # if d_in.err = '1' then
861 with m
.If(d_in
.err
== 1):
862 # v.state := RADIX_FINISH;
864 comb
+= v
.state
.eq(State
.RADIX_FINISH
)
865 comb
+= v
.badtree
.eq(1)
868 # when RADIX_LOAD_TLB =>
869 with m
.Case(State
.RADIX_LOAD_TLB
):
871 comb
+= tlb_load
.eq(1)
872 # if r.iside = '0' then
873 with m
.If(r
.iside
== 0):
875 # v.state := TLB_WAIT;
877 comb
+= v
.state
.eq(State
.TLB_WAIT
)
882 comb
+= itlb_load
.eq(1)
883 comb
+= v
.state
.eq(State
.IDLE
)
886 # when RADIX_FINISH =>
888 with m
.Case(State
.RADIX_FINISH
):
889 comb
+= v
.state
.eq(State
.IDLE
)
892 # if v.state = RADIX_FINISH or (v.state = RADIX_LOAD_TLB
893 # and r.iside = '1') then
894 with m
.If(v
.state
== State
.RADIX_FINISH ^
(v
.state
==
895 State
.RADIX_LOAD_TLB
& r
.iside
== 1))
896 # v.err := v.invalid or v.badtree or v.segerror or v.perm_err
898 # v.done := not v.err;
899 comb
+= v
.err
.eq(v
.invalid ^ v
.badtree ^ v
.segerror ^ v
.perm_err ^
901 comb
+= v
.done
.eq(~v
.err
)
904 # if r.addr(63) = '1' then
905 # effpid := x"00000000";
906 with m
.If(r
.addr
[63] == 1):
907 comb
+= effpid
.eq(0x00000000)
911 comb
+= effpid
.eq(r
.pid
)
913 # prtable_addr := x"00" & r.prtbl(55 downto 36) &
914 # ((r.prtbl(35 downto 12) and not finalmask(
915 # 23 downto 0)) or (effpid(31 downto 8) and
916 # finalmask(23 downto 0))) & effpid(7 downto 0)
918 comb
+= prtable_addr
.eq(0x00 & r
.prtble
[36:56] & ((r
.prtble
[12:36] &
919 (~finalmask
[0:24])) ^ effpid
[8:32] &
920 finalmask
[0:24]) & effpid
[0:8] & 0x0000)
922 # pgtable_addr := x"00" & r.pgbase(55 downto 19) &
923 # ((r.pgbase(18 downto 3) and not mask) or
924 # (addrsh and mask)) & "000";
925 comb
+= pgtable_addr
.eq(0x00 & r
.pgbase
[19:56] & ((r
.pgbase
[3:19] &
926 (~mask
)) ^
(addrsh
& mask
)) & 0x000)
928 # pte := x"00" & ((r.pde(55 downto 12) and not finalmask) or
929 # (r.addr(55 downto 12) and finalmask)) & r.pde(11 downto 0);
930 comb
+= pte
.eq(0x00 & ((r
.pde
[12:56] & (~finalmask
)) ^
(r
.addr
[12:56]
931 & finalmask
)) & r
.pde
[0:12])
933 # -- update registers
939 # if tlbie_req = '1' then
941 with m
.If(tlbie_req
== 1):
943 # tlb_data := (others => '0');
944 comb
+= addr
.eq(r
.addr
)
945 comb
+= tlb_data
.eq('''TODO ()others => '0') ''')
946 # elsif tlb_load = '1' then
947 with m
.If(tlb_load
== 1):
948 # addr := r.addr(63 downto 12) & x"000";
950 comb
+= addr
.eq(r
.addr
[12:64] & 0x000)
951 # elsif prtbl_rd = '1' then
952 with m
.If(prtbl_rd
== 1):
953 # addr := prtable_addr;
954 # tlb_data := (others => '0');
955 comb
+= addr
.eq(prtable_addr
)
956 comb
+= tlb_data
.eq('''TODO (others => '0')''')
959 # addr := pgtable_addr;
960 # tlb_data := (others => '0');
961 comb
+= addr
.eq(pgtable_addr
)
962 comb
+= tlb_data
.eq('''TODO (others => '0')''')
965 # l_out.done <= r.done;
966 # l_out.err <= r.err;
967 # l_out.invalid <= r.invalid;
968 # l_out.badtree <= r.badtree;
969 # l_out.segerr <= r.segerror;
970 # l_out.perm_error <= r.perm_err;
971 # l_out.rc_error <= r.rc_error;
972 comb
+= l_out
.done
.eq(r
.done
)
973 comb
+= l_out
.err
.eq(r
.err
)
974 comb
+= l_out
.invalid
.eq(r
.invalid
)
975 comb
+= l_out
.badtree
.eq(r
.badtree
)
976 comb
+= l_out
.segerr
.eq(r
.segerror
)
977 comb
+= l_out
.perm_error
.eq(r
.perm_err
)
978 comb
+= l_out
.rc_error
.eq(r
.rc_error
)
980 # d_out.valid <= dcreq;
981 # d_out.tlbie <= tlbie_req;
982 # d_out.doall <= r.inval_all;
983 # d_out.tlbld <= tlb_load;
984 # d_out.addr <= addr;
985 # d_out.pte <= tlb_data;
986 comb
+= d_out
.valid
.eq(dcreq
)
987 comb
+= d_out
.tlbie
.eq(tlbie_req
)
988 comb
+= d_out
.doall
.eq(r
.inval_all
)
989 comb
+= d_out
.tlbld
.eeq(tlb_load
)
990 comb
+= d_out
.addr
.eq(addr
)
991 comb
+= d_out
.pte
.eq(tlb_data
)
993 # i_out.tlbld <= itlb_load;
994 # i_out.tlbie <= tlbie_req;
995 # i_out.doall <= r.inval_all;
996 # i_out.addr <= addr;
997 # i_out.pte <= tlb_data;
998 comb
+= i_out
.tlbld
.eq(itlb_load
)
999 comb
+= i_out
.tblie
.eq(tlbie_req
)
1000 comb
+= i_out
.doall
.eq(r
.inval_all
)
1001 comb
+= i_out
.addr
.eq(addr
)
1002 comb
+= i_out
.pte
.eq(tlb_data
)