# based on Anton Blanchard microwatt mmu.vhdl
from enum import Enum, unique

from nmigen import (Cat, Const, Elaboratable, Module, Mux, Repl,
                    Signal, signed)
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
12 # library ieee; use ieee.std_logic_1164.all; use ieee.numeric_std.all;
14 # library work; use work.common.all;
17 # -- Supports 4-level trees as in arch 3.0B, but not the two-step translation
18 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
33 # architecture behave of mmu is
48 # type reg_stage_t is record
49 # -- latched request from loadstore1
54 # addr : std_ulogic_vector(63 downto 0);
55 # inval_all : std_ulogic;
57 # prtbl : std_ulogic_vector(63 downto 0);
58 # pid : std_ulogic_vector(31 downto 0);
63 # pgtbl0 : std_ulogic_vector(63 downto 0);
64 # pt0_valid : std_ulogic;
65 # pgtbl3 : std_ulogic_vector(63 downto 0);
66 # pt3_valid : std_ulogic;
67 # shift : unsigned(5 downto 0);
68 # mask_size : unsigned(4 downto 0);
69 # pgbase : std_ulogic_vector(55 downto 0);
70 # pde : std_ulogic_vector(63 downto 0);
71 # invalid : std_ulogic;
72 # badtree : std_ulogic;
73 # segerror : std_ulogic;
74 # perm_err : std_ulogic;
75 # rc_error : std_ulogic;
class RegStage(RecordObject):
    """Latched MMU state (port of microwatt's ``reg_stage_t`` record).

    Two copies of this record form the current/next register pair
    (``r``/``rin``) of the MMU state machine.
    """
    def __init__(self, name=None):
        # BUG FIX: the original called super().__init__(self, name=name),
        # passing the instance a second time as a positional argument.
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)
        self.pid = Signal(32, reset_less=True)
        # internal state
        # NOTE(review): a bare enum value stored on a RecordObject is
        # suspect for synthesizable state -- probably meant to be a
        # Signal decoded over State.  TODO confirm against the project's
        # State enum declaration.
        self.state = State.IDLE
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        self.pgtbl0 = Signal(64, reset_less=True)
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)
        self.pt3_valid = Signal(reset_less=True)
        self.shift = Signal(6, reset_less=True)
        self.mask_size = Signal(5, reset_less=True)
        self.pgbase = Signal(56, reset_less=True)
        self.pde = Signal(64, reset_less=True)
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
112 # Supports 4-level trees as in arch 3.0B, but not the two-step translation
113 # for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
class MMU(Elaboratable):
    """MMU top level (port of microwatt mmu.vhdl).

    Supports 4-level radix trees as in arch 3.0B, but not the two-step
    translation for guests under a hypervisor (i.e. there is no
    gRA -> hRA translation).

    Ports (mirroring the VHDL entity):
        l_in  : in  Loadstore1ToMmuType
        l_out : out MmuToLoadstore1Type
        d_out : out MmuToDcacheType
        d_in  : in  DcacheToMmuType
        i_out : out MmuToIcacheType
    """
    def __init__(self):
        # NOTE(review): the ``def __init__`` header line was missing in
        # the pasted source; restored so these assignments are legal.
        self.l_in = Loadstore1ToMmuType()
        self.l_out = MmuToLoadstore1Type()
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        self.i_out = MmuToIcacheType()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        l_in, l_out = self.l_in, self.l_out
        d_out = self.d_out

        # VHDL: signal r, rin : reg_stage_t
        # TODO(review): the register/next-state wiring (rin -> r on the
        # clock edge) was lost in the pasted source; confirm against
        # the original mmu.vhdl and restore it.
        r = RegStage("r")
        rin = RegStage("rin")

        # VHDL: addrsh(15:0), mask(15:0), finalmask(43:0)
        addrsh = Signal(16)
        mask = Signal(16)
        finalmask = Signal(44)

        # Multiplex internal SPR values back to loadstore1, selected by
        # l_in.sprn(9): PRTBL when set, otherwise zero-extended PID.
        with m.If(l_in.sprn[9] == 1):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            # BUG FIX: original wrote 0x00000000 & r -- Python's `&` is
            # bitwise AND, not VHDL concatenation.  x"00000000" & r.pid
            # is simply r.pid zero-extended into the 64-bit sprval.
            comb += l_out.sprval.eq(r.pid)

        # NOTE(review): the VHDL "report" debug statements only make
        # sense under simulation; these prints fire once at elaboration
        # time and should be replaced with simulation hooks.  The
        # broken multi-line f-strings from the paste are repaired below.
        with m.If(rin.valid == 1):
            print(f"MMU got tlb miss for {rin.addr}")

        with m.If(l_out.done == 1):
            print("MMU completing op without error")

        with m.If(l_out.err == 1):
            print(f"MMU completing op with err invalid={l_out.invalid}"
                  f" badtree={l_out.badtree}")

        with m.If(rin.state == State.RADIX_LOOKUP):
            print(f"radix lookup shift={rin.shift}"
                  f" msize={rin.mask_size}")

        with m.If(r.state == State.RADIX_LOOKUP):
            print(f"send load addr={d_out.addr}"
                  f" addrsh={addrsh} mask={mask}")

        return m
222 # -- Shift address bits 61--12 right by 0--47 bits and
223 # -- supply the least significant 16 bits of the result.
224 # addrshifter: process(all)
226 # Shift address bits 61--12 right by 0--47 bits and
227 # supply the least significant 16 bits of the result.
class AddrShifter(Elaboratable):
    """Shift address bits 61..12 right by 0..47 bits and supply the
    least significant 16 bits of the result (mmu.vhdl ``addrshifter``).

    The 6-bit shift amount is consumed two bits at a time over three
    stages: sh1 shifts by 0/16/32, sh2 by 0/4/8/12, result by 0/1/2/3.
    """
    def __init__(self):
        # VHDL process variables
        self.sh1 = Signal(31)
        self.sh2 = Signal(19)
        self.result = Signal(16)
        # output: the shifted 16-bit address slice
        self.addrsh = Signal(16)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sh1, sh2, result = self.sh1, self.sh2, self.result

        # NOTE(review): `r` (the MMU register stage, providing r.shift
        # and r.addr) is not wired into this submodule in the pasted
        # source -- TODO: connect it from the parent MMU.

        # stage 1: shift by 0, 16 or 32 according to r.shift(5:4)
        # (the m.Case wrappers were lost in the paste; restored per the
        # VHDL case statement)
        with m.Switch(r.shift[4:6]):
            with m.Case(0):
                comb += sh1.eq(r.addr[12:43])
            with m.Case(1):
                comb += sh1.eq(r.addr[28:59])
            with m.Default():
                # VHDL: "0000000000000" & r.addr(61:44) -- the 13 upper
                # zero bits come from nmigen's implicit zero extension
                comb += sh1.eq(r.addr[44:62])

        # stage 2: shift by 0, 4, 8 or 12 according to r.shift(3:2)
        with m.Switch(r.shift[2:4]):
            with m.Case(0):
                comb += sh2.eq(sh1[0:19])
            with m.Case(1):
                comb += sh2.eq(sh1[4:23])
            with m.Case(2):
                comb += sh2.eq(sh1[8:27])
            with m.Default():
                comb += sh2.eq(sh1[12:31])

        # stage 3: shift by 0..3 according to r.shift(1:0)
        # BUG FIX: the original read from sh1 in all four arms; the
        # VHDL reads sh2 (result := sh2(15:0) etc.).
        with m.Switch(r.shift[0:2]):
            with m.Case(0):
                comb += result.eq(sh2[0:16])
            with m.Case(1):
                comb += result.eq(sh2[1:17])
            with m.Case(2):
                comb += result.eq(sh2[2:18])
            with m.Default():
                comb += result.eq(sh2[3:19])

        comb += self.addrsh.eq(result)
        return m
312 # -- generate mask for extracting address fields for PTE address generation
313 # addrmaskgen: process(all)
314 # generate mask for extracting address fields for PTE address generation
class AddrMaskGen(Elaboratable):
    """Generate the mask used to extract address fields for PTE address
    generation (mmu.vhdl ``addrmaskgen`` process)."""
    def __init__(self):
        # output: 16-bit mask, low r.mask_size bits set (min 5)
        self.mask = Signal(16)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # local accumulator (the VHDL process variable ``m``)
        mask = Signal(16)

        # NOTE(review): r.mask_size belongs to the parent MMU register
        # stage and is not wired here -- TODO: connect it.

        # mask_count has to be >= 5, so the low five bits are always
        # set; this full-width assign also defaults the upper bits to 0
        comb += mask.eq(0x001F)
        for i in range(5, 16):
            with m.If(i < r.mask_size):
                comb += mask[i].eq(1)

        comb += self.mask.eq(mask)
        return m
348 # -- generate mask for extracting address bits to go in TLB entry
349 # -- in order to support pages > 4kB
350 # finalmaskgen: process(all)
352 # generate mask for extracting address bits to go in TLB entry
353 # in order to support pages > 4kB
class FinalMaskGen(Elaboratable):
    """Generate the mask used to extract the address bits that go into
    a TLB entry, in order to support pages > 4kB
    (mmu.vhdl ``finalmaskgen`` process)."""
    def __init__(self):
        # output: 44-bit mask, low r.shift bits set
        self.mask = Signal(44)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        # local accumulator; VHDL initializes it with (others => '0'),
        # which nmigen comb signals already default to
        mask = Signal(44)

        # NOTE(review): r.shift is owned by the parent MMU register
        # stage and is not wired here -- TODO: connect it.
        # (the ``for`` header was lost in the paste; restored from the
        # VHDL "for i in 0 to 43 loop" comment)
        for i in range(44):
            with m.If(i < r.shift):
                comb += mask[i].eq(1)

        # BUG FIX: the original wrote `self.finalmask(mask)` -- a call,
        # not an assignment -- and named an attribute that __init__
        # never created.  Drive the declared output with .eq().
        comb += self.mask.eq(mask)
        return m
385 # mmu_1: process(all)
class MMU1(Elaboratable):
    """Main MMU state machine (port of the ``mmu_1`` process from
    microwatt mmu.vhdl).

    Performs radix page-table walks: process-table reads, per-level
    PDE fetches through the d-cache, permission/RC checking and
    finally TLB loads (d-side or i-side), plus tlbie/slbia
    invalidations.

    NOTE(review): the pasted source lost this submodule's wiring to
    its parent -- the current register state ``r``, the ports
    ``l_in``/``l_out``/``d_in``/``d_out``/``i_out`` and the shifter and
    mask signals (``addrsh``, ``mask``, ``finalmask``) are referenced
    but must be supplied by the enclosing MMU module.  TODO: restore
    that wiring (and the clocked ``rin <= v`` register update).
    """

    def __init__(self):
        # request strobes (VHDL process variables)
        # NOTE(review): dcreq and rts were referenced but their
        # declarations were missing from the paste; restored here.
        self.dcreq = Signal()        # issue a d-cache request
        self.tlb_load = Signal()     # load a dTLB entry
        self.itlb_load = Signal()    # load an iTLB entry
        self.tlbie_req = Signal()    # TLB invalidation request
        self.prtbl_rd = Signal()     # process-table read request
        # walk bookkeeping
        self.pt_valid = Signal()     # cached page-table root valid
        self.effpid = Signal(32)     # effective PID (0 for kernel)
        self.prtable_addr = Signal(64)
        self.rts = Signal(6)         # radix tree size
        self.mbits = Signal(6)       # addr bits consumed per level
        self.pgtable_addr = Signal(64)
        self.pte = Signal(64)
        self.tlb_data = Signal(64)
        self.nonzero = Signal()
        self.pgtbl = Signal(64)
        self.perm_ok = Signal()
        self.rc_ok = Signal()
        self.addr = Signal(64)
        self.data = Signal(64)       # byte-swapped d-cache data

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # local aliases for the process "variables"
        dcreq = self.dcreq
        tlb_load = self.tlb_load
        itlb_load = self.itlb_load
        tlbie_req = self.tlbie_req
        prtbl_rd = self.prtbl_rd
        pt_valid = self.pt_valid
        effpid = self.effpid
        prtable_addr = self.prtable_addr
        rts = self.rts
        mbits = self.mbits
        pgtable_addr = self.pgtable_addr
        pte = self.pte
        tlb_data = self.tlb_data
        nonzero = self.nonzero
        pgtbl = self.pgtbl
        perm_ok = self.perm_ok
        rc_ok = self.rc_ok
        addr = self.addr
        data = self.data

        # next-state record (the VHDL variable v); current state ``r``
        # and the external ports are owned by the parent module.
        v = RegStage("v")

        # per-cycle defaults, as in the VHDL process prologue
        comb += v.valid.eq(0)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are big-endian, so
        # byte-swap the incoming d-cache doubleword.
        # (the ``for`` header was lost in the paste; restored)
        for i in range(8):
            comb += data[i * 8:i * 8 + 8].eq(
                        d_in.data[(7 - i) * 8:(7 - i) * 8 + 8])

        with m.Switch(r.state):

            with m.Case(State.IDLE):
                # select the cached page-table root for the address
                # space (quadrant 0 or quadrant 3)
                with m.If(l_in.addr[63] == 0):
                    comb += pgtbl.eq(r.pgtbl0)
                    comb += pt_valid.eq(r.pt0_valid)
                with m.Else():
                    # BUG FIX: original loaded r.pt3_valid into pgtbl;
                    # the VHDL loads r.pgtbl3.
                    comb += pgtbl.eq(r.pgtbl3)
                    comb += pt_valid.eq(r.pt3_valid)

                # rts == radix tree size, number of address bits being
                # translated.  VHDL: rts := '0' & pgtbl(62:61) &
                # pgtbl(7:5); Cat() is LSB-first, so build it reversed.
                comb += rts.eq(Cat(pgtbl[5:8], pgtbl[61:63],
                                   Const(0, 1)))

                # mbits == number of address bits indexing the top
                # level of the tree.  (0 & x in Python is bitwise AND,
                # i.e. always 0 -- the zero extension is implicit.)
                comb += mbits.eq(pgtbl[0:5])

                # set v.shift to rts so that finalmask can be used for
                # the segment check
                comb += v.shift.eq(rts)
                comb += v.mask_size.eq(mbits[0:5])
                # pgbase := pgtbl(55:8) & x"00" -- eight zero bits
                # below the root address
                comb += v.pgbase.eq(Cat(Const(0, 8), pgtbl[8:56]))

                with m.If(l_in.valid == 1):
                    comb += v.addr.eq(l_in.addr)
                    comb += v.iside.eq(l_in.iside)
                    # BUG FIX: original referenced `l_in.siside`
                    comb += v.store.eq(~(l_in.load | l_in.iside))
                    comb += v.priv.eq(l_in.priv)
                    with m.If(l_in.tlbie == 1):
                        # invalidate all iTLB/dTLB entries for tlbie
                        # with RB[IS] != 0 or RB[AP] != 0, or for slbia
                        comb += v.inval_all.eq(l_in.slbia
                                               | l_in.addr[11]
                                               | l_in.addr[10]
                                               | l_in.addr[7]
                                               | l_in.addr[6]
                                               | l_in.addr[5])
                        # the RIC field of tlbie comes across on the
                        # sprn bus as bits 2..3; RIC=2 also flushes the
                        # cached process-table roots
                        with m.If(l_in.sprn[3] == 1):
                            comb += v.pt0_valid.eq(0)
                            comb += v.pt3_valid.eq(0)
                        comb += v.state.eq(State.DO_TLBIE)
                    with m.Else():
                        comb += v.valid.eq(1)
                        with m.If(pt_valid == 0):
                            # need to fetch the process table entry;
                            # set v.shift so finalmask can generate
                            # the process table entry address
                            # BUG FIX: original had `r.prtble` (typo)
                            comb += v.shift.eq(r.prtbl[0:5])
                            comb += v.state.eq(State.PROC_TBL_READ)
                        with m.Elif(mbits == 0):
                            # RPDS = 0 disables radix tree walks
                            comb += v.state.eq(State.RADIX_FINISH)
                            comb += v.invalid.eq(1)
                        with m.Else():
                            comb += v.state.eq(State.SEGMENT_CHECK)

                with m.If(l_in.mtspr == 1):
                    # mtspr PID invalidates the L1 TLBs and the cached
                    # pgtbl0 value; mtspr PRTBL additionally
                    # invalidates the cached pgtbl3 value
                    with m.If(l_in.sprn[9] == 0):
                        comb += v.pid.eq(l_in.rs[0:32])
                    with m.Else():
                        comb += v.prtbl.eq(l_in.rs)
                        comb += v.pt3_valid.eq(0)
                    comb += v.pt0_valid.eq(0)
                    # BUG FIX: VHDL sets v.inval_all := '1' here; the
                    # original wrote 0.
                    comb += v.inval_all.eq(1)
                    comb += v.state.eq(State.DO_TLBIE)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done == 1):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                comb += dcreq.eq(1)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done == 1):
                    with m.If(r.addr[63] == 1):
                        comb += v.pgtbl3.eq(data)
                        comb += v.pt3_valid.eq(1)
                    with m.Else():
                        comb += v.pgtbl0.eq(data)
                        comb += v.pt0_valid.eq(1)
                    # rts := '0' & data(62:61) & data(7:5)
                    comb += rts.eq(Cat(data[5:8], data[61:63],
                                       Const(0, 1)))
                    # mbits := '0' & data(4:0)
                    comb += mbits.eq(data[0:5])
                    # set v.shift to rts so that finalmask can be used
                    # for the segment check
                    comb += v.shift.eq(rts)
                    comb += v.mask_size.eq(mbits[0:5])
                    comb += v.pgbase.eq(Cat(Const(0, 8), data[8:56]))
                    with m.If(mbits == 0):
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)
                    with m.Else():
                        comb += v.state.eq(State.SEGMENT_CHECK)
                with m.If(d_in.err == 1):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                comb += mbits.eq(r.mask_size)
                comb += v.shift.eq(r.shift + (31 - 12) - mbits)
                # nonzero iff any address bit above the translated
                # range is set (reduction-or of the masked slice)
                comb += nonzero.eq((r.addr[31:62]
                                    & ~finalmask[0:31]).bool())
                with m.If((r.addr[63] != r.addr[62])
                          | (nonzero == 1)):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.segerror.eq(1)
                with m.Elif((mbits < 5) | (mbits > 16)
                            | (mbits > (r.shift + (31 - 12)))):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.state.eq(State.RADIX_LOOKUP)

            with m.Case(State.RADIX_LOOKUP):
                comb += dcreq.eq(1)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                # BUG FIX: original Case line was missing its colon
                with m.If(d_in.done == 1):
                    comb += v.pde.eq(data)
                    with m.If(data[63] == 1):      # valid PDE/PTE
                        with m.If(data[62] == 1):  # leaf PTE
                            # check permissions and RC bits
                            comb += perm_ok.eq(0)
                            with m.If((r.priv == 1) | (data[3] == 0)):
                                with m.If(r.iside == 0):
                                    # BUG FIX: VHDL is data(1) or
                                    # (data(2) and not r.store)
                                    comb += perm_ok.eq(
                                        data[1]
                                        | (data[2] & ~r.store))
                                with m.Else():
                                    # no IAMR, so no KUEP support for
                                    # now; deny execute permission if
                                    # cache-inhibited
                                    comb += perm_ok.eq(data[0]
                                                       & ~data[5])
                            comb += rc_ok.eq(data[8]
                                             & (data[7] | ~r.store))
                            # BUG FIX: original lacked parentheses, so
                            # `1 & rc_ok` bound before the comparisons
                            with m.If((perm_ok == 1) & (rc_ok == 1)):
                                comb += v.state.eq(
                                            State.RADIX_LOAD_TLB)
                            with m.Else():
                                # BUG FIX: original had `vl.state`
                                comb += v.state.eq(State.RADIX_FINISH)
                                comb += v.perm_err.eq(~perm_ok)
                                # permission error takes precedence
                                # over RC error
                                comb += v.rc_error.eq(perm_ok)
                        with m.Else():
                            # inner node: descend one level
                            comb += mbits.eq(data[0:5])
                            # BUG FIX: VHDL condition is all or-ed;
                            # original mixed & and |
                            with m.If((mbits < 5) | (mbits > 16)
                                      | (mbits > r.shift)):
                                comb += v.state.eq(State.RADIX_FINISH)
                                comb += v.badtree.eq(1)
                            with m.Else():
                                # BUG FIX: `v.shif` typo, and pgbase
                                # must come from data(55:8), not mbits
                                comb += v.shift.eq(v.shift - mbits)
                                comb += v.mask_size.eq(mbits[0:5])
                                comb += v.pgbase.eq(
                                            Cat(Const(0, 8),
                                                data[8:56]))
                                comb += v.state.eq(State.RADIX_LOOKUP)
                    with m.Else():
                        # non-present PTE, generate a DSI
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)
                # BUG FIX: original used `===` (invalid Python)
                with m.If(d_in.err == 1):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(r.iside == 0):
                    comb += dcreq.eq(1)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                comb += v.state.eq(State.IDLE)

        # completion / error reporting back to loadstore1.
        # BUG FIX: original lacked parentheses around the comparisons
        # and the trailing colon.
        with m.If((v.state == State.RADIX_FINISH)
                  | ((v.state == State.RADIX_LOAD_TLB)
                     & (r.iside == 1))):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # kernel addresses (addr(63) = 1) translate with PID 0
        with m.If(r.addr[63] == 1):
            comb += effpid.eq(0x00000000)
        with m.Else():
            comb += effpid.eq(r.pid)

        # VHDL: prtable_addr := x"00" & r.prtbl(55:36) &
        #   ((r.prtbl(35:12) and not finalmask(23:0)) or
        #    (effpid(31:8) and finalmask(23:0))) &
        #   effpid(7:0) & "0000";
        # BUG FIX: `r.prtble` typo, and `&` misused as concatenation --
        # rebuilt with Cat() (LSB-first).
        comb += prtable_addr.eq(Cat(
            Const(0, 4),
            effpid[0:8],
            (r.prtbl[12:36] & ~finalmask[0:24])
            | (effpid[8:32] & finalmask[0:24]),
            r.prtbl[36:56],
            Const(0, 8)))

        # pgtable_addr := x"00" & r.pgbase(55:19) &
        #   ((r.pgbase(18:3) and not mask) or (addrsh and mask)) &
        #   "000"
        comb += pgtable_addr.eq(Cat(
            Const(0, 3),
            (r.pgbase[3:19] & ~mask) | (addrsh & mask),
            r.pgbase[19:56],
            Const(0, 8)))

        # pte := x"00" & ((r.pde(55:12) and not finalmask) or
        #                 (r.addr(55:12) and finalmask)) & r.pde(11:0)
        comb += pte.eq(Cat(
            r.pde[0:12],
            (r.pde[12:56] & ~finalmask)
            | (r.addr[12:56] & finalmask),
            Const(0, 8)))

        # update registers: VHDL does `rin <= v` here; the clocked
        # transfer was lost in the pasted source.
        # TODO(review): restore the register update in the parent.

        # select the address/data presented to the d-cache
        with m.If(tlbie_req == 1):
            comb += addr.eq(r.addr)
            comb += tlb_data.eq(0)          # (others => '0')
        with m.Elif(tlb_load == 1):
            # addr := r.addr(63:12) & x"000"
            comb += addr.eq(Cat(Const(0, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)
        with m.Elif(prtbl_rd == 1):
            comb += addr.eq(prtable_addr)
            comb += tlb_data.eq(0)
        with m.Else():
            comb += addr.eq(pgtable_addr)
            comb += tlb_data.eq(0)

        # outputs to loadstore1
        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        # outputs to the d-cache
        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        # BUG FIX: original had `.eeq(`
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        # outputs to the i-cache
        comb += i_out.tlbld.eq(itlb_load)
        # BUG FIX: original had `i_out.tblie`
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m