1 // See LICENSE for license details.
11 #include "processor.h"
12 #include "memtracer.h"
// virtual memory configuration
// Size of one page in bytes; PGSHIFT is defined elsewhere in the project.
const reg_t PGSIZE = 1 << PGSHIFT;
// All address bits above the page offset: addr & PGMASK is the
// page-aligned base, addr & ~PGMASK is the offset within the page.
const reg_t PGMASK = ~(PGSIZE-1);
// One slot of the simulator's direct-mapped instruction cache.
// NOTE(review): the struct body is elided in this view; usage below shows
// at least a `tag` field (compared against the fetch PC in access_icache)
// and a `data` field of type insn_fetch_t (returned by load_insn).
struct icache_entry_t
{
// Describes a hardware debug trigger that matched a memory access.
// Thrown directly for "before" timing (see trigger_exception below) or
// heap-allocated into matched_trigger for deferred reporting.
// NOTE(review): class header brace, access specifiers, and the index /
// address / data member declarations are elided in this view.
class trigger_matched_t
  // Record which trigger fired (index) and the operation/address/data of
  // the access that caused the match.
  trigger_matched_t(int index,
    trigger_operation_t operation, reg_t address, reg_t data) :
    index(index), operation(operation), address(address), data(data) {}

  // The kind of access that matched (load/store/execute).
  trigger_operation_t operation;
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
// NOTE(review): the `class mmu_t` header line itself is elided in this view.
  // Bind the MMU to the simulator (for host memory access) and to the
  // hart that owns it (for decode and trigger state).
  mmu_t(sim_t* sim, processor_t* proc);
// template for functions that load an aligned value from memory
// Fast path: if tlb_load_tag matches the VPN, read directly through the
// host pointer cached in tlb_data (tlb_data[idx] + vaddr is a host
// address).  A tag with TLB_CHECK_TRIGGERS or'd in means a debug trigger
// may watch this page: consult trigger_exception() before completing the
// load.  Anything else falls through to load_slow_path().
// NOTE(review): the trailing lines of this macro (closing braces, the
// declaration of `res`, and its return) are elided in this view.
#define load_func(type) \
  inline type##_t load_##type(reg_t addr) { \
    if (addr & (sizeof(type##_t)-1)) \
      throw trap_load_address_misaligned(addr); \
    reg_t vpn = addr >> PGSHIFT; \
    if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
      return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
    if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
      type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
      if (!matched_trigger) { \
        matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
        if (matched_trigger) \
          throw *matched_trigger; \
    load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
76 // load value from memory at aligned address; zero extend to register width
82 // load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory
// Mirrors load_func: fast path writes through the cached host pointer on
// a tlb_store_tag hit; a TLB_CHECK_TRIGGERS-tagged page checks debug
// triggers (OPERATION_STORE) first, then performs the store; otherwise
// the access is handled by store_slow_path().
// NOTE(review): the closing-brace lines of this macro are elided in this
// view.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if (addr & (sizeof(type##_t)-1)) \
      throw trap_store_address_misaligned(addr); \
    reg_t vpn = addr >> PGSHIFT; \
    if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
      *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
    else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
      if (!matched_trigger) { \
        matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
        if (matched_trigger) \
          throw *matched_trigger; \
      *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
    store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
// template for functions that perform an atomic memory operation
// Implemented as load + apply `f` + store; misalignment and any load
// access fault are both reported as *store* exceptions, per the AMO
// semantics noted in the catch clause below.
// NOTE(review): the `try {` opener, the return of `lhs`, and the closing
// braces of this macro are elided in this view.
#define amo_func(type) \
  template<typename op> \
  type##_t amo_##type(reg_t addr, op f) { \
    if (addr & (sizeof(type##_t)-1)) \
      throw trap_store_address_misaligned(addr); \
      auto lhs = load_##type(addr); \
      store_##type(addr, f(lhs)); \
    } catch (trap_load_access_fault& t) { \
      /* AMO faults should be reported as store faults */ \
      throw trap_store_access_fault(t.get_badaddr()); \

// store value to memory at aligned address
// perform an atomic memory operation at an aligned address
// Number of slots in the direct-mapped instruction cache below.
static const reg_t ICACHE_ENTRIES = 1024;

// Map an instruction address to its icache slot (direct-mapped; one slot
// per PC_ALIGN-sized instruction address, modulo the cache size).
// NOTE(review): the function's brace lines are elided in this view.
inline size_t icache_index(reg_t addr)
  return (addr / PC_ALIGN) % ICACHE_ENTRIES;
// Fetch and decode the instruction at `addr`, filling `entry`.
// Reads the first 16-bit parcel via the instruction TLB, determines the
// instruction's length, then assembles the remaining parcels with
// additional per-parcel translations (so an instruction may straddle a
// page boundary safely).  The decoded result is stored alongside the raw
// bits and the fetch is reported to the memory tracer if requested.
// NOTE(review): several interior lines are elided in this view — the
// opening brace, the final `else` arm header that owns the static_assert
// branch (lengths other than 2/4/6), the tag/data assignments into
// *entry, and the closing `return entry;`.
inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  const uint16_t* iaddr = translate_insn_addr(addr);
  insn_bits_t insn = *iaddr;
  int length = insn_length(insn);

  if (likely(length == 4)) {
    // 32-bit instruction: fetch the high parcel (sign-extended into the
    // top of the 48-bit window via the int16_t cast).
    insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
  } else if (length == 2) {
    // Compressed instruction: sign-extend the single parcel.
    insn = (int16_t)insn;
  } else if (length == 6) {
    // 48-bit instruction: three parcels, highest one sign-extended.
    insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
    insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    // (belongs to the elided final else-arm: 64-bit instructions)
    static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
    insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
    insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
    insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;

  // Pair the decoded handler with the raw bits for the icache entry.
  insn_fetch_t fetch = {proc->decode_insn(insn), insn};

  // Report the fetch to any registered memory tracers (by host-derived
  // physical address).
  reg_t paddr = sim->mem_to_addr((char*)iaddr);
  if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
    tracer.trace(paddr, length, FETCH);
// Return the icache entry for `addr`, refilling it on a tag miss.
// NOTE(review): the opening brace and the hit-path body (returning the
// matching entry) are elided in this view; only the miss path's
// refill_icache call is visible.
inline icache_entry_t* access_icache(reg_t addr)
  icache_entry_t* entry = &icache[icache_index(addr)];
  if (likely(entry->tag == addr))
  return refill_icache(addr, entry);
// Fetch and decode one instruction without touching the persistent
// icache: a throwaway entry is filled and only its decoded data is
// returned.
inline insn_fetch_t load_insn(reg_t addr)
  icache_entry_t entry;
  return refill_icache(addr, &entry)->data;
  // Attach a memory tracer that will observe accesses via `tracer`.
  void register_memtracer(memtracer_t*);

  // List of registered tracers; consulted on instruction fetch above.
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
  // trigger match before completing an access.
  static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
  // Per-slot host-pointer bias: tlb_data[i] + vaddr is the host address
  // of the guest byte (see the fast paths in load_func/store_func).
  char* tlb_data[TLB_ENTRIES];
  // Per-access-type tags (VPN, optionally | TLB_CHECK_TRIGGERS), kept
  // separate so fetch/load/store permissions can diverge per page.
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
  // finish translation on a TLB miss and update the TLB
  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);
  // Obtain a host pointer for an MMIO-backed address (not cacheable in
  // the TLB fast path).
  const char* fill_from_mmio(reg_t vaddr, reg_t paddr);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, access_type type, reg_t prv);

  // handle uncommon cases: TLB misses, page faults, MMIO
  const uint16_t* fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  // Full VA->PA translation for an access of the given type.
  reg_t translate(reg_t addr, access_type type);
  // Translate a fetch address to a host pointer to its first 16-bit
  // parcel.  Fast path: tlb_insn_tag hit.  A TLB_CHECK_TRIGGERS-tagged
  // page checks execute triggers against the fetched parcel and throws
  // trigger_matched_t on a match; otherwise the fetch goes through
  // fetch_slow_path().
  // NOTE(review): the guard between trigger_match and the throw
  // (presumably `if (match >= 0)`) and the closing-brace/return lines of
  // the triggers branch are elided in this view — confirm against the
  // full file.
  inline const uint16_t* translate_insn_addr(reg_t addr) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
      uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
      int match = proc->trigger_match(OPERATION_EXECUTE, addr, *ptr);
        throw trigger_matched_t(match, OPERATION_EXECUTE, addr, *ptr);
    return fetch_slow_path(addr);
  // Check load/store triggers for an access.  If a trigger with
  // "before" timing (mcontrol[match].timing == 0) matches, the exception
  // is thrown immediately; a matching "after"-timing trigger is returned
  // heap-allocated so the caller can stash it in matched_trigger and
  // throw once the access has completed.
  // NOTE(review): the early-out lines (no-trigger / no-match cases
  // guarding the mcontrol[match] indexing) and closing braces are elided
  // in this view.
  inline trigger_matched_t *trigger_exception(trigger_operation_t operation,
    reg_t address, reg_t data)
    int match = proc->trigger_match(operation, address, data);
    if (proc->state.mcontrol[match].timing == 0) {
      throw trigger_matched_t(match, operation, address, data);
    return new trigger_matched_t(match, operation, address, data);
  // Cached flags indicating whether any trigger could match each access
  // type, so the fast paths can skip trigger checks entirely.
  bool check_triggers_fetch;
  bool check_triggers_load;
  bool check_triggers_store;
  // The exception describing a matched trigger, or NULL.
  trigger_matched_t *matched_trigger;

  // processor_t manages this MMU's TLB/trigger state directly.
  friend class processor_t;