// See LICENSE for license details.

#include "mmu.h"
#include "sim.h"
#include "processor.h"
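
// mmu_t models a processor's MMU: a small software TLB in front of the
// simulated memory system, plus a page-table walker for the slow path.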
mmu_t::mmu_t(sim_t* sim, processor_t* proc)
 : sim(sim), proc(proc),
  check_triggers_fetch(false),
  check_triggers_load(false),
  check_triggers_store(false),
  matched_trigger(NULL)
{
  flush_tlb();
}
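
// Invalidate the decoded-instruction cache by giving every entry an
// unmatchable tag.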
void mmu_t::flush_icache()
{
  for (size_t i = 0; i < ICACHE_ENTRIES; i++)
    icache[i].tag = -1;
}
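
// Invalidate all TLB entries; a tag of -1 never matches a real VPN, so the
// next access to each page takes the slow path. The instruction cache holds
// decoded instructions fetched through the TLB, so it is flushed as well.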
void mmu_t::flush_tlb()
{
  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));

  flush_icache();
}
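
// Compute the effective privilege mode for this access, then walk the page
// tables. When MPRV is set (outside debug mode), loads and stores are
// translated as though in the mode held in MPP; fetches always use the
// current mode.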
reg_t mmu_t::translate(reg_t addr, access_type type)
{
  if (!proc)
    return addr;

  reg_t mode = proc->state.prv;
  if (type != FETCH) {
    if (!proc->state.dcsr.cause && get_field(proc->state.mstatus, MSTATUS_MPRV))
      mode = get_field(proc->state.mstatus, MSTATUS_MPP);
  }

  return walk(addr, type, mode) | (addr & (PGSIZE-1));
}
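
// Slow path for instruction fetches that miss the TLB. Fetches from
// ordinary memory refill the TLB; fetches from MMIO regions go through the
// device model into the fetch_temp staging buffer and are not cached.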
tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
{
  reg_t paddr = translate(vaddr, FETCH);

  if (auto host_addr = sim->addr_to_mem(paddr)) {
    return refill_tlb(vaddr, paddr, host_addr, FETCH);
  } else {
    if (!sim->mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
      throw trap_instruction_access_fault(vaddr);
    tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
    return entry;
  }
}
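
// Assemble a little-endian sequence of 1, 2, 4, or 8 bytes into a reg_t,
// so a load's or store's data can be matched against a trigger.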
reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
{
  switch (len) {
    case 1:
      return bytes[0];
    case 2:
      return bytes[0] |
        (((reg_t) bytes[1]) << 8);
    case 4:
      return bytes[0] |
        (((reg_t) bytes[1]) << 8) |
        (((reg_t) bytes[2]) << 16) |
        (((reg_t) bytes[3]) << 24);
    case 8:
      return bytes[0] |
        (((reg_t) bytes[1]) << 8) |
        (((reg_t) bytes[2]) << 16) |
        (((reg_t) bytes[3]) << 24) |
        (((reg_t) bytes[4]) << 32) |
        (((reg_t) bytes[5]) << 40) |
        (((reg_t) bytes[6]) << 48) |
        (((reg_t) bytes[7]) << 56);
  }
  abort();
}
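
// Slow path for loads that miss the TLB: translate, perform the access from
// memory or MMIO, refill the TLB when no tracer is watching the page, then
// check the loaded value against the data triggers.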
void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes)
{
  reg_t paddr = translate(addr, LOAD);

  if (auto host_addr = sim->addr_to_mem(paddr)) {
    memcpy(bytes, host_addr, len);
    if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
      tracer.trace(paddr, len, LOAD);
    else
      refill_tlb(addr, paddr, host_addr, LOAD);
  } else if (!sim->mmio_load(paddr, len, bytes)) {
    throw trap_load_access_fault(addr);
  }

  if (!matched_trigger) {
    reg_t data = reg_from_bytes(len, bytes);
    matched_trigger = trigger_exception(OPERATION_LOAD, addr, data);
    if (matched_trigger)
      throw *matched_trigger;
  }
}
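
// Slow path for stores. Unlike loads, triggers are checked before the store
// is performed, so a matching watchpoint traps without modifying memory.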
void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes)
{
  reg_t paddr = translate(addr, STORE);

  if (!matched_trigger) {
    reg_t data = reg_from_bytes(len, bytes);
    matched_trigger = trigger_exception(OPERATION_STORE, addr, data);
    if (matched_trigger)
      throw *matched_trigger;
  }

  if (auto host_addr = sim->addr_to_mem(paddr)) {
    memcpy(host_addr, bytes, len);
    if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
      tracer.trace(paddr, len, STORE);
    else
      refill_tlb(addr, paddr, host_addr, STORE);
  } else if (!sim->mmio_store(paddr, len, bytes)) {
    throw trap_store_access_fault(addr);
  }
}
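
// Install a translation in the direct-mapped TLB. Entries store
// host_addr - vaddr and paddr - vaddr, so the fast path recovers the host
// pointer or physical address by adding the virtual address back. If a
// trigger might match this access type, the tag is marked with
// TLB_CHECK_TRIGGERS, forcing matching accesses back onto the slow path.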
tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
{
  reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
  reg_t expected_tag = vaddr >> PGSHIFT;

  if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
    tlb_load_tag[idx] = -1;
  if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
    tlb_store_tag[idx] = -1;
  if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
    tlb_insn_tag[idx] = -1;

  if ((check_triggers_fetch && type == FETCH) ||
      (check_triggers_load && type == LOAD) ||
      (check_triggers_store && type == STORE))
    expected_tag |= TLB_CHECK_TRIGGERS;

  if (type == FETCH) tlb_insn_tag[idx] = expected_tag;
  else if (type == STORE) tlb_store_tag[idx] = expected_tag;
  else tlb_load_tag[idx] = expected_tag;

  tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr};
  tlb_data[idx] = entry;
  return entry;
}
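
// Walk the page table rooted at sptbr. Each failed check breaks out of the
// loop, and falling out of the walk raises a page fault matching the
// access type.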
reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
{
  vm_info vm = decode_vm_info(proc->max_xlen, mode, proc->get_state()->sptbr);
  if (vm.levels == 0)
    return addr & ((reg_t(2) << (proc->xlen-1))-1); // zero-extend from xlen

  bool s_mode = mode == PRV_S;
  bool sum = get_field(proc->state.mstatus, MSTATUS_SUM);
  bool mxr = get_field(proc->state.mstatus, MSTATUS_MXR);

  // verify bits xlen-1:va_bits-1 are all equal
  int va_bits = PGSHIFT + vm.levels * vm.idxbits;
  reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
  reg_t masked_msbs = (addr >> (va_bits-1)) & mask;
  if (masked_msbs != 0 && masked_msbs != mask)
    vm.levels = 0;

  reg_t base = vm.ptbase;
  for (int i = vm.levels - 1; i >= 0; i--) {
    int ptshift = i * vm.idxbits;
    reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);

    // check that physical address of PTE is legal
    auto ppte = sim->addr_to_mem(base + idx * vm.ptesize);
    if (!ppte)
      throw trap_load_access_fault(addr);

    reg_t pte = vm.ptesize == 4 ? *(uint32_t*)ppte : *(uint64_t*)ppte;
    reg_t ppn = pte >> PTE_PPN_SHIFT;

    if (PTE_TABLE(pte)) { // next level of page table
      base = ppn << PGSHIFT;
    } else if ((pte & PTE_U) ? s_mode && (type == FETCH || !sum) : !s_mode) {
      break; // U/S privilege mismatch
    } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
      break; // invalid PTE, or reserved W-without-R encoding
    } else if (type == FETCH ? !(pte & PTE_X) :
               type == LOAD ?  !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
                               !((pte & PTE_R) && (pte & PTE_W))) {
      break; // R/W/X permission check failed for this access type
    } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
      break; // misaligned superpage
    } else {
      reg_t ad = PTE_A | ((type == STORE) * PTE_D);
#ifdef RISCV_ENABLE_DIRTY
      // set accessed and possibly dirty bits.
      *(uint32_t*)ppte |= ad;
#else
      // take exception if access or possibly dirty bit is not set.
      if ((pte & ad) != ad)
        break;
#endif
      // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
      reg_t vpn = addr >> PGSHIFT;
      reg_t value = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
      return value;
    }
  }

  switch (type) {
    case FETCH: throw trap_instruction_page_fault(addr);
    case LOAD: throw trap_load_page_fault(addr);
    case STORE: throw trap_store_page_fault(addr);
    default: abort();
  }
}
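
// Hook a memory tracer into the MMU. The TLB is flushed so stale entries
// can't let traced accesses bypass the tracer on the fast path.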
void mmu_t::register_memtracer(memtracer_t* t)
{
  flush_tlb();
  tracer.hook(t);
}