struct icache_entry_t {
  reg_t tag;
- reg_t pad;
+ // Pointer to the icache slot for the sequentially-next instruction
+ // (set up in refill_icache), letting straight-line fetch follow this
+ // link instead of recomputing the icache index.  Replaces the old
+ // padding word, so the entry size is unchanged.
+ struct icache_entry_t* next;
  insn_fetch_t data;
};
+// One data entry of the software TLB.  Both fields are page-relative
+// offsets keyed by guest *virtual* address: adding the guest vaddr to
+// host_offset yields the host pointer backing the access, and adding
+// it to target_offset yields the guest physical address.
+struct tlb_entry_t {
+  char* host_offset;
+  reg_t target_offset;
+};
+
class trigger_matched_t
{
public:
class mmu_t
{
public:
- mmu_t(sim_t* sim, processor_t* proc);
+ mmu_t(simif_t* sim, processor_t* proc);
~mmu_t();
inline reg_t misaligned_load(reg_t addr, size_t size)
return misaligned_load(addr, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
- return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
+ return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
- type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
+ type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
if (matched_trigger) \
return misaligned_store(addr, val, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
- *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
+ *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
if (matched_trigger) \
throw *matched_trigger; \
} \
- *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
+ *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
} \
else \
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
return lhs; \
} catch (trap_load_page_fault& t) { \
/* AMO faults should be reported as store faults */ \
- throw trap_store_page_fault(t.get_badaddr()); \
+ throw trap_store_page_fault(t.get_tval()); \
} catch (trap_load_access_fault& t) { \
/* AMO faults should be reported as store faults */ \
- throw trap_store_access_fault(t.get_badaddr()); \
+ throw trap_store_access_fault(t.get_tval()); \
} \
}
+  // Store a 128-bit float as two 64-bit stores: val.v[0] at addr and
+  // val.v[1] at addr+8 (softfloat's limb order — presumably low half
+  // first; confirm against the softfloat float128_t layout).
+  // Unless built with RISCV_ENABLE_MISALIGNED, addr must be aligned
+  // to sizeof(float128_t) or a store-misaligned trap is raised.
+  void store_float128(reg_t addr, float128_t val)
+  {
+#ifndef RISCV_ENABLE_MISALIGNED
+    if (unlikely(addr & (sizeof(float128_t)-1)))
+      throw trap_store_address_misaligned(addr);
+#endif
+    store_uint64(addr, val.v[0]);
+    store_uint64(addr + 8, val.v[1]);
+  }
+
+  // Load a 128-bit float as two 64-bit loads, mirroring
+  // store_float128: v[0] from addr, v[1] from addr+8.  Unless built
+  // with RISCV_ENABLE_MISALIGNED, addr must be aligned to
+  // sizeof(float128_t) or a load-misaligned trap is raised.
+  float128_t load_float128(reg_t addr)
+  {
+#ifndef RISCV_ENABLE_MISALIGNED
+    if (unlikely(addr & (sizeof(float128_t)-1)))
+      throw trap_load_address_misaligned(addr);
+#endif
+    return (float128_t){load_uint64(addr), load_uint64(addr + 8)};
+  }
+
// store value to memory at aligned address
store_func(uint8)
store_func(uint16)
inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
{
- const uint16_t* iaddr = translate_insn_addr(addr);
- insn_bits_t insn = *iaddr;
+ auto tlb_entry = translate_insn_addr(addr);
+ insn_bits_t insn = *(uint16_t*)(tlb_entry.host_offset + addr);
int length = insn_length(insn);
if (likely(length == 4)) {
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 2) << 16;
} else if (length == 2) {
insn = (int16_t)insn;
} else if (length == 6) {
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 4) << 32;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
} else {
static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 6) << 48;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 4) << 32;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
}
insn_fetch_t fetch = {proc->decode_insn(insn), insn};
entry->tag = addr;
+ entry->next = &icache[icache_index(addr + length)];
entry->data = fetch;
- reg_t paddr = sim->mem_to_addr((char*)iaddr);
+ reg_t paddr = tlb_entry.target_offset + addr;;
if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
entry->tag = -1;
tracer.trace(paddr, length, FETCH);
  void register_memtracer(memtracer_t*);
 private:
-  sim_t* sim;
+  // Simulator handle, narrowed to the simif_t interface so the MMU no
+  // longer depends on the concrete sim_t class.
+  simif_t* sim;
  processor_t* proc;
  memtracer_list_t tracer;
  uint16_t fetch_temp;
  // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
  // trigger match before completing an access.
  static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
-  char* tlb_data[TLB_ENTRIES];
+  // Each data entry now carries both the host-pointer offset and the
+  // guest-physical offset for its page (see tlb_entry_t), indexed in
+  // parallel with the three tag arrays below.
+  tlb_entry_t tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
  // finish translation on a TLB miss and update the TLB
-  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);
+  // Now also takes the backing host address and returns the installed
+  // entry, so callers can complete the access without a second lookup.
+  tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type);
  const char* fill_from_mmio(reg_t vaddr, reg_t paddr);
  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, access_type type, reg_t prv);
  // handle uncommon cases: TLB misses, page faults, MMIO
-  const uint16_t* fetch_slow_path(reg_t addr);
+  // Returns the TLB entry for the fetched page (was a host pointer).
+  tlb_entry_t fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);
  // ITLB lookup
-  inline const uint16_t* translate_insn_addr(reg_t addr) {
+  // Translate a fetch address via the ITLB, returning the matching
+  // tlb_entry_t (host/target offsets for the page) instead of a raw
+  // host pointer.  If the tag carries TLB_CHECK_TRIGGERS, the fetched
+  // halfword is first checked against execute triggers, which may
+  // throw trigger_matched_t.  A tag miss falls back to
+  // fetch_slow_path(), which refills the TLB and returns the entry.
+  inline tlb_entry_t translate_insn_addr(reg_t addr) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
-      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
+      return tlb_data[vpn % TLB_ENTRIES];
    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
-      uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
+      uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
      int match = proc->trigger_match(OPERATION_EXECUTE, addr, *ptr);
      if (match >= 0)
        throw trigger_matched_t(match, OPERATION_EXECUTE, addr, *ptr);
-      return ptr;
+      return tlb_data[vpn % TLB_ENTRIES];
    }
    return fetch_slow_path(addr);
  }
+  // Convenience wrapper over translate_insn_addr(): convert the TLB
+  // entry straight into a host pointer to the instruction halfword at
+  // `addr`, for callers that only need the host-side bytes.
+  inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
+    return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
+  }
+
inline trigger_matched_t *trigger_exception(trigger_operation_t operation,
reg_t address, reg_t data)
{
reg_t ptbase;
};
-inline vm_info decode_vm_info(int xlen, reg_t prv, reg_t sptbr)
+// Decode the satp CSR (renamed from sptbr in priv-spec v1.10) into the
+// page-table geometry for the current privilege level.  The four
+// vm_info fields appear to be {levels, index bits per level, PTE size,
+// root table base} — confirm against the vm_info definition.
+inline vm_info decode_vm_info(int xlen, reg_t prv, reg_t satp)
{
  if (prv == PRV_M) {
+    // M-mode always runs with translation off.
    return {0, 0, 0, 0};
  } else if (prv <= PRV_S && xlen == 32) {
-    switch (get_field(sptbr, SPTBR32_MODE)) {
-    case SPTBR_MODE_OFF: return {0, 0, 0, 0};
-    case SPTBR_MODE_SV32: return {2, 10, 4, (sptbr & SPTBR32_PPN) << PGSHIFT};
+    switch (get_field(satp, SATP32_MODE)) {
+    case SATP_MODE_OFF: return {0, 0, 0, 0};
+    case SATP_MODE_SV32: return {2, 10, 4, (satp & SATP32_PPN) << PGSHIFT};
    default: abort();
    }
  } else if (prv <= PRV_S && xlen == 64) {
-    switch (get_field(sptbr, SPTBR64_MODE)) {
-    case SPTBR_MODE_OFF: return {0, 0, 0, 0};
-    case SPTBR_MODE_SV39: return {3, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
-    case SPTBR_MODE_SV48: return {4, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
-    case SPTBR_MODE_SV57: return {5, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
-    case SPTBR_MODE_SV64: return {6, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
+    switch (get_field(satp, SATP64_MODE)) {
+    case SATP_MODE_OFF: return {0, 0, 0, 0};
+    case SATP_MODE_SV39: return {3, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
+    case SATP_MODE_SV48: return {4, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
+    case SATP_MODE_SV57: return {5, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
+    case SATP_MODE_SV64: return {6, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
    default: abort();
    }
  } else {