1 // See LICENSE for license details.
// Construct an MMU over a flat, simulator-owned host buffer: `_mem` is the
// base of guest physical memory and `_memsz` its size in bytes.  `proc`
// starts out NULL, so translation code must tolerate having no processor
// attached yet (refill_tlb reads mstatus as 0 in that case).
// NOTE(review): the constructor body is not visible in this view of the
// file — presumably it flushes the TLB/caches; confirm against the full
// source before editing.
mmu_t::mmu_t(char* _mem, size_t _memsz)
  : mem(_mem), memsz(_memsz), proc(NULL)
// Invalidate every entry of the decoded-instruction cache.
// NOTE(review): the loop body (the per-entry invalidation statement) is
// missing from this view of the file — only the loop header is visible.
void mmu_t::flush_icache()
  for (size_t i = 0; i < ICACHE_ENTRIES; i++)
// Invalidate all TLB entries by filling every tag array with -1.
// -1 can never equal a real tag (tags are `addr >> PGSHIFT`, see
// refill_tlb), so every subsequent access misses and re-translates.
// NOTE(review): the function's braces and any trailing statements (e.g. a
// flush_icache() call) are not visible in this view of the file.
void mmu_t::flush_tlb()
  // Clear instruction, load, and store tags separately — the three TLBs
  // are permission-specific (see the tag updates at the end of refill_tlb).
  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
// Translate `addr` (a `bytes`-wide access; `store`/`fetch` select the
// access type, plain load when both are false), check permissions, refill
// the TLB entry for its page, and record the access with the tracer.
// Throws trap_{instruction,load,store}_access_fault on a permission or
// legality failure.
// NOTE(review): many interior lines are missing from this view — the
// declaration of `pte`, the else-branch that obtains `pte` when virtual
// memory is enabled (presumably via walk()), the fetch/store dispatch
// around the three throw statements, and the final return.  Comments
// below describe only what the visible code shows.
void* mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
  // Direct-mapped TLB: index by VPN modulo table size; the full VPN is
  // the tag that a later lookup must match.
  reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
  reg_t expected_tag = addr >> PGSHIFT;

  // With no processor attached, treat machine state as all-zero.
  reg_t mstatus = proc ? proc->state.mstatus : 0;

  // Decode the translation-relevant mstatus fields once up front.
  bool vm_disabled = get_field(mstatus, MSTATUS_VM) == VM_MBARE;
  bool mode_m = get_field(mstatus, MSTATUS_PRV) == PRV_M;
  bool mode_s = get_field(mstatus, MSTATUS_PRV) == PRV_S;
  bool mprv_m = get_field(mstatus, MSTATUS_MPRV) == PRV_M;
  bool mprv_s = get_field(mstatus, MSTATUS_MPRV) == PRV_S;

  // No translation when VM is off, or in M-mode for fetches and for
  // data accesses with MPRV selecting M-mode.
  if (vm_disabled || (mode_m && (mprv_m || fetch))) {
    // virtual memory is disabled. merely check legality of physical address.
    // produce a fake PTE for the TLB's benefit.
    pte = PTE_V | PTE_UX | PTE_SX | ((addr >> PGSHIFT) << PGSHIFT);
    // Grant read/write too, except for M-mode data accesses that MPRV
    // redirects to a lower privilege (those keep only execute rights here).
    if (vm_disabled || !(mode_m && !mprv_m))
      pte |= PTE_UR | PTE_SR | PTE_UW | PTE_SW;

  reg_t pte_perm = pte & PTE_PERM;
  // For supervisor-privilege accesses (real S-mode, or M-mode data access
  // with MPRV = S), shift the supervisor permission bits down into the
  // user-bit positions so one uniform check below suffices.
  // (PTE_SX/PTE_UX is the power-of-two distance between the S and U
  // permission bit groups.)
  if (mode_s || (mode_m && mprv_s && !fetch))
    pte_perm = (pte_perm/(PTE_SX/PTE_UX)) & PTE_PERM;
  pte_perm |= pte & PTE_V;

  // Required permission for this access: execute, write, or read — plus
  // the valid bit in every case.
  reg_t perm = (fetch ? PTE_UX : store ? PTE_UW : PTE_UR) | PTE_V;
  if(unlikely((pte_perm & perm) != perm))
    // NOTE(review): the conditionals selecting which fault to raise
    // (presumably on fetch/store/load respectively) are missing from this
    // view; only the three throw statements are visible.
    throw trap_instruction_access_fault(addr);
    throw trap_store_access_fault(addr);
    throw trap_load_access_fault(addr);

  // Recombine the page base from the PTE with the offset from the
  // virtual address to form the physical address.
  reg_t pgoff = addr & (PGSIZE-1);
  reg_t pgbase = pte >> PGSHIFT << PGSHIFT;
  reg_t paddr = pgbase + pgoff;

  // Report the access to the memory tracer if it watches this page.
  if (unlikely(tracer.interested_in_range(pgbase, pgbase + PGSIZE, store, fetch)))
    tracer.trace(paddr, bytes, store, fetch);

  // Refill the per-access-type TLB tags: an entry is valid for a given
  // access type only if the (possibly demoted) permissions allow it.
  tlb_load_tag[idx] = (pte_perm & PTE_UR) ? expected_tag : -1;
  tlb_store_tag[idx] = (pte_perm & PTE_UW) ? expected_tag : -1;
  tlb_insn_tag[idx] = (pte_perm & PTE_UX) ? expected_tag : -1;
  // tlb_data holds host-pointer minus guest-page-base, so a later lookup
  // can compute host address as tlb_data[idx] + addr.
  tlb_data[idx] = mem + pgbase - (addr & ~(PGSIZE-1));
// Walk the page table rooted at sptbr for virtual address `addr`.
// Returns the (possibly synthesized) leaf PTE, or 0 on failure — the
// visible `return 0` for an improperly sign-extended address shows 0 is
// the failure sentinel.
// NOTE(review): several interior lines are missing from this view — the
// declaration of `ptd`, the statements executed when a bounds check or
// validity check fails, and the successful-leaf return.  Comments below
// describe only what the visible code shows.
pte_t mmu_t::walk(reg_t addr)
  // Virtual addresses must be sign-extended from bit VA_BITS-1: all bits
  // above it must be all-zero or all-one.
  reg_t msb_mask = -(reg_t(1) << (VA_BITS-1));
  if ((addr & msb_mask) != 0 && (addr & msb_mask) != msb_mask)
    return 0; // address isn't properly sign-extended

  // Root of the page table; requires an attached processor (proc is
  // dereferenced unconditionally here, unlike in refill_tlb).
  reg_t base = proc->get_state()->sptbr;

  // Walk from the top level down; ptshift is the VPN bit offset of the
  // index field for the current level.
  int ptshift = (LEVELS-1)*PTIDXBITS;
  for (reg_t i = 0; i < LEVELS; i++, ptshift -= PTIDXBITS) {
    reg_t idx = (addr >> (PGSHIFT+ptshift)) & ((1<<PTIDXBITS)-1);

    // check that physical address of PTE is legal
    reg_t pte_addr = base + idx*sizeof(pte_t);
    if (pte_addr >= memsz)
    // NOTE(review): the out-of-bounds handling statement is missing from
    // this view of the file.

    // Fetch the PTE directly from simulated physical memory.
    ptd = *(pte_t*)(mem+pte_addr);

    if (!(ptd & PTE_V)) { // invalid mapping
    } else if (ptd & PTE_T) { // next level of page table
      base = (ptd >> PGSHIFT) << PGSHIFT;

    // we've found the PTE.
    // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
    // Fold the untranslated low VPN bits of the superpage into the PPN so
    // the caller can treat it as a base-page-size leaf.
    reg_t vpn = addr >> PGSHIFT;
    ptd |= (vpn & ((1<<(ptshift))-1)) << PGSHIFT;

    // check that physical address is legal
    if (((ptd >> PGSHIFT) << PGSHIFT) >= memsz)
    // NOTE(review): the failure statement and the successful return of
    // `ptd` are missing from this view of the file.
// Register a memory tracer `t` with this MMU (refill_tlb consults a
// `tracer` member to report translated accesses).
// NOTE(review): the function body is not visible in this view of the file.
void mmu_t::register_memtracer(memtracer_t* t)