// See LICENSE for license details.
#include "memtracer.h"
#include "processor.h"
// virtual memory configuration
18 const reg_t PGSIZE
= 1 << PGSHIFT
;
19 const reg_t PGMASK
= ~(PGSIZE
-1);
27 struct icache_entry_t
{
33 class trigger_matched_t
36 trigger_matched_t(int index
,
37 trigger_operation_t operation
, reg_t address
, reg_t data
) :
38 index(index
), operation(operation
), address(address
), data(data
) {}
41 trigger_operation_t operation
;
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
51 mmu_t(sim_t
* sim
, processor_t
* proc
);
54 inline reg_t
misaligned_load(reg_t addr
, size_t size
)
56 #ifdef RISCV_ENABLE_MISALIGNED
58 for (size_t i
= 0; i
< size
; i
++)
59 res
+= (reg_t
)load_uint8(addr
+ i
) << (i
* 8);
62 throw trap_load_address_misaligned(addr
);
66 inline void misaligned_store(reg_t addr
, reg_t data
, size_t size
)
68 #ifdef RISCV_ENABLE_MISALIGNED
69 for (size_t i
= 0; i
< size
; i
++)
70 store_uint8(addr
+ i
, data
>> (i
* 8));
72 throw trap_store_address_misaligned(addr
);
76 // template for functions that load an aligned value from memory
77 #define load_func(type) \
78 inline type##_t load_##type(reg_t addr) { \
79 if (unlikely(addr & (sizeof(type##_t)-1))) \
80 return misaligned_load(addr, sizeof(type##_t)); \
81 reg_t vpn = addr >> PGSHIFT; \
82 if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
83 return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
84 if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
85 type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
86 if (!matched_trigger) { \
87 matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
88 if (matched_trigger) \
89 throw *matched_trigger; \
94 load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
98 // load value from memory at aligned address; zero extend to register width
104 // load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory.
// Mirrors load_func: TLB fast path, trigger-check path, then slow path.
// NOTE(review): the closing braces and the `else` before the slow-path call
// were truncated in this copy; reconstructed from the visible if/else-if
// chain.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if (unlikely(addr & (sizeof(type##_t)-1))) \
      return misaligned_store(addr, val, sizeof(type##_t)); \
    reg_t vpn = addr >> PGSHIFT; \
    if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
      *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
    else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
      if (!matched_trigger) { \
        matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
        if (matched_trigger) \
          throw *matched_trigger; \
      } \
      *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
    } \
    else \
      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
  }
// template for functions that perform an atomic memory operation.
// Implemented as load-then-store of f(loaded value); any load-side fault is
// rethrown as the corresponding store fault, per the AMO semantics.
// NOTE(review): `try {`, `return lhs;`, and the closing braces were
// truncated in this copy; they are forced by the visible catch clauses and
// the `auto lhs` load.
#define amo_func(type) \
  template<typename op> \
  type##_t amo_##type(reg_t addr, op f) { \
    if (addr & (sizeof(type##_t)-1)) \
      throw trap_store_address_misaligned(addr); \
    try { \
      auto lhs = load_##type(addr); \
      store_##type(addr, f(lhs)); \
      return lhs; \
    } catch (trap_load_page_fault& t) { \
      /* AMO faults should be reported as store faults */ \
      throw trap_store_page_fault(t.get_badaddr()); \
    } catch (trap_load_access_fault& t) { \
      /* AMO faults should be reported as store faults */ \
      throw trap_store_access_fault(t.get_badaddr()); \
    } \
  }
149 // store value to memory at aligned address
155 // perform an atomic memory operation at an aligned address
159 static const reg_t ICACHE_ENTRIES
= 1024;
161 inline size_t icache_index(reg_t addr
)
163 return (addr
/ PC_ALIGN
) % ICACHE_ENTRIES
;
166 inline icache_entry_t
* refill_icache(reg_t addr
, icache_entry_t
* entry
)
168 const uint16_t* iaddr
= translate_insn_addr(addr
);
169 insn_bits_t insn
= *iaddr
;
170 int length
= insn_length(insn
);
172 if (likely(length
== 4)) {
173 insn
|= (insn_bits_t
)*(const int16_t*)translate_insn_addr(addr
+ 2) << 16;
174 } else if (length
== 2) {
175 insn
= (int16_t)insn
;
176 } else if (length
== 6) {
177 insn
|= (insn_bits_t
)*(const int16_t*)translate_insn_addr(addr
+ 4) << 32;
178 insn
|= (insn_bits_t
)*(const uint16_t*)translate_insn_addr(addr
+ 2) << 16;
180 static_assert(sizeof(insn_bits_t
) == 8, "insn_bits_t must be uint64_t");
181 insn
|= (insn_bits_t
)*(const int16_t*)translate_insn_addr(addr
+ 6) << 48;
182 insn
|= (insn_bits_t
)*(const uint16_t*)translate_insn_addr(addr
+ 4) << 32;
183 insn
|= (insn_bits_t
)*(const uint16_t*)translate_insn_addr(addr
+ 2) << 16;
186 insn_fetch_t fetch
= {proc
->decode_insn(insn
), insn
};
190 reg_t paddr
= sim
->mem_to_addr((char*)iaddr
);
191 if (tracer
.interested_in_range(paddr
, paddr
+ 1, FETCH
)) {
193 tracer
.trace(paddr
, length
, FETCH
);
198 inline icache_entry_t
* access_icache(reg_t addr
)
200 icache_entry_t
* entry
= &icache
[icache_index(addr
)];
201 if (likely(entry
->tag
== addr
))
203 return refill_icache(addr
, entry
);
206 inline insn_fetch_t
load_insn(reg_t addr
)
208 icache_entry_t entry
;
209 return refill_icache(addr
, &entry
)->data
;
215 void register_memtracer(memtracer_t
*);
220 memtracer_list_t tracer
;
223 // implement an instruction cache for simulator performance
224 icache_entry_t icache
[ICACHE_ENTRIES
];
226 // implement a TLB for simulator performance
227 static const reg_t TLB_ENTRIES
= 256;
228 // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
229 // trigger match before completing an access.
230 static const reg_t TLB_CHECK_TRIGGERS
= reg_t(1) << 63;
231 char* tlb_data
[TLB_ENTRIES
];
232 reg_t tlb_insn_tag
[TLB_ENTRIES
];
233 reg_t tlb_load_tag
[TLB_ENTRIES
];
234 reg_t tlb_store_tag
[TLB_ENTRIES
];
236 // finish translation on a TLB miss and update the TLB
237 void refill_tlb(reg_t vaddr
, reg_t paddr
, access_type type
);
238 const char* fill_from_mmio(reg_t vaddr
, reg_t paddr
);
240 // perform a page table walk for a given VA; set referenced/dirty bits
241 reg_t
walk(reg_t addr
, access_type type
, reg_t prv
);
243 // handle uncommon cases: TLB misses, page faults, MMIO
244 const uint16_t* fetch_slow_path(reg_t addr
);
245 void load_slow_path(reg_t addr
, reg_t len
, uint8_t* bytes
);
246 void store_slow_path(reg_t addr
, reg_t len
, const uint8_t* bytes
);
247 reg_t
translate(reg_t addr
, access_type type
);
250 inline const uint16_t* translate_insn_addr(reg_t addr
) {
251 reg_t vpn
= addr
>> PGSHIFT
;
252 if (likely(tlb_insn_tag
[vpn
% TLB_ENTRIES
] == vpn
))
253 return (uint16_t*)(tlb_data
[vpn
% TLB_ENTRIES
] + addr
);
254 if (unlikely(tlb_insn_tag
[vpn
% TLB_ENTRIES
] == (vpn
| TLB_CHECK_TRIGGERS
))) {
255 uint16_t* ptr
= (uint16_t*)(tlb_data
[vpn
% TLB_ENTRIES
] + addr
);
256 int match
= proc
->trigger_match(OPERATION_EXECUTE
, addr
, *ptr
);
258 throw trigger_matched_t(match
, OPERATION_EXECUTE
, addr
, *ptr
);
261 return fetch_slow_path(addr
);
264 inline trigger_matched_t
*trigger_exception(trigger_operation_t operation
,
265 reg_t address
, reg_t data
)
270 int match
= proc
->trigger_match(operation
, address
, data
);
273 if (proc
->state
.mcontrol
[match
].timing
== 0) {
274 throw trigger_matched_t(match
, operation
, address
, data
);
276 return new trigger_matched_t(match
, operation
, address
, data
);
279 bool check_triggers_fetch
;
280 bool check_triggers_load
;
281 bool check_triggers_store
;
282 // The exception describing a matched trigger, or NULL.
283 trigger_matched_t
*matched_trigger
;
285 friend class processor_t
;
295 inline vm_info
decode_vm_info(int xlen
, reg_t prv
, reg_t sptbr
)
299 } else if (prv
<= PRV_S
&& xlen
== 32) {
300 switch (get_field(sptbr
, SPTBR32_MODE
)) {
301 case SPTBR_MODE_OFF
: return {0, 0, 0, 0};
302 case SPTBR_MODE_SV32
: return {2, 10, 4, (sptbr
& SPTBR32_PPN
) << PGSHIFT
};
305 } else if (prv
<= PRV_S
&& xlen
== 64) {
306 switch (get_field(sptbr
, SPTBR64_MODE
)) {
307 case SPTBR_MODE_OFF
: return {0, 0, 0, 0};
308 case SPTBR_MODE_SV39
: return {3, 9, 8, (sptbr
& SPTBR64_PPN
) << PGSHIFT
};
309 case SPTBR_MODE_SV48
: return {4, 9, 8, (sptbr
& SPTBR64_PPN
) << PGSHIFT
};
310 case SPTBR_MODE_SV57
: return {5, 9, 8, (sptbr
& SPTBR64_PPN
) << PGSHIFT
};
311 case SPTBR_MODE_SV64
: return {6, 9, 8, (sptbr
& SPTBR64_PPN
) << PGSHIFT
};