#define MSTATUS_XS 0x00018000
#define MSTATUS_MPRV 0x00020000
#define MSTATUS_PUM 0x00040000
+#define MSTATUS_MXR 0x00080000
#define MSTATUS_VM 0x1F000000
#define MSTATUS32_SD 0x80000000
#define MSTATUS64_SD 0x8000000000000000
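The MSTATUS_* masks above are consumed through the get_field(reg, mask) helper that appears throughout this patch. A minimal sketch of that pattern, assuming a contiguous mask; csr_get_field is an illustrative stand-in, not the helper's actual definition:

#include <stdint.h>

/* Extract a right-justified CSR field: dividing by the mask's lowest set
 * bit shifts the field down to bit 0, so no explicit shift amount is
 * needed. Illustrative only; assumes the mask bits are contiguous. */
static inline uint64_t csr_get_field(uint64_t reg, uint64_t mask)
{
  return (reg & mask) / (mask & ~(mask << 1));
}

/* e.g. csr_get_field(mstatus, MSTATUS_VM) yields the 5-bit VM field and
 * csr_get_field(mstatus, MSTATUS_MXR) yields 0 or 1. */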
// page table entry (PTE) fields
#define PTE_V 0x001 // Valid
-#define PTE_TYPE 0x01E // Type
-#define PTE_R 0x020 // Referenced
-#define PTE_D 0x040 // Dirty
-#define PTE_SOFT 0x380 // Reserved for Software
-
-#define PTE_TYPE_TABLE 0x00
-#define PTE_TYPE_TABLE_GLOBAL 0x02
-#define PTE_TYPE_URX_SR 0x04
-#define PTE_TYPE_URWX_SRW 0x06
-#define PTE_TYPE_UR_SR 0x08
-#define PTE_TYPE_URW_SRW 0x0A
-#define PTE_TYPE_URX_SRX 0x0C
-#define PTE_TYPE_URWX_SRWX 0x0E
-#define PTE_TYPE_SR 0x10
-#define PTE_TYPE_SRW 0x12
-#define PTE_TYPE_SRX 0x14
-#define PTE_TYPE_SRWX 0x16
-#define PTE_TYPE_SR_GLOBAL 0x18
-#define PTE_TYPE_SRW_GLOBAL 0x1A
-#define PTE_TYPE_SRX_GLOBAL 0x1C
-#define PTE_TYPE_SRWX_GLOBAL 0x1E
+#define PTE_R 0x002 // Read
+#define PTE_W 0x004 // Write
+#define PTE_X 0x008 // Execute
+#define PTE_U 0x010 // User
+#define PTE_G 0x020 // Global
+#define PTE_A 0x040 // Accessed
+#define PTE_D 0x080 // Dirty
+#define PTE_SOFT 0x300 // Reserved for Software
#define PTE_PPN_SHIFT 10
-#define PTE_TABLE(PTE) ((0x0000000AU >> ((PTE) & 0x1F)) & 1)
-#define PTE_UR(PTE) ((0x0000AAA0U >> ((PTE) & 0x1F)) & 1)
-#define PTE_UW(PTE) ((0x00008880U >> ((PTE) & 0x1F)) & 1)
-#define PTE_UX(PTE) ((0x0000A0A0U >> ((PTE) & 0x1F)) & 1)
-#define PTE_SR(PTE) ((0xAAAAAAA0U >> ((PTE) & 0x1F)) & 1)
-#define PTE_SW(PTE) ((0x88888880U >> ((PTE) & 0x1F)) & 1)
-#define PTE_SX(PTE) ((0xA0A0A000U >> ((PTE) & 0x1F)) & 1)
-
-#define PTE_CHECK_PERM(PTE, SUPERVISOR, STORE, FETCH) \
- ((STORE) ? ((SUPERVISOR) ? PTE_SW(PTE) : PTE_UW(PTE)) : \
- (FETCH) ? ((SUPERVISOR) ? PTE_SX(PTE) : PTE_UX(PTE)) : \
- ((SUPERVISOR) ? PTE_SR(PTE) : PTE_UR(PTE)))
+#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
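With the old type field gone, a PTE's role falls out of the V/R/W/X bits alone: all of R/W/X clear marks a pointer to the next level (what PTE_TABLE now tests), W set without R is a reserved encoding, and anything else is a leaf whose permissions are read directly from the bits. A minimal sketch of that classification, built on the PTE_* macros above; classify_pte is a hypothetical helper, not part of the patch:

#include <stdint.h>

enum pte_kind { PTE_KIND_INVALID, PTE_KIND_POINTER, PTE_KIND_RESERVED, PTE_KIND_LEAF };

static inline enum pte_kind classify_pte(uint64_t pte)
{
  if (!(pte & PTE_V))
    return PTE_KIND_INVALID;                /* not present */
  if (!(pte & (PTE_R | PTE_W | PTE_X)))
    return PTE_KIND_POINTER;                /* next-level page table */
  if ((pte & PTE_W) && !(pte & PTE_R))
    return PTE_KIND_RESERVED;               /* W without R is reserved */
  return PTE_KIND_LEAF;                     /* R/W/X grant access directly */
}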
#ifdef __riscv
return addr;
reg_t mode = proc->state.prv;
- bool pum = false;
if (type != FETCH) {
if (!proc->state.dcsr.cause && get_field(proc->state.mstatus, MSTATUS_MPRV))
mode = get_field(proc->state.mstatus, MSTATUS_MPP);
- pum = (mode == PRV_S && get_field(proc->state.mstatus, MSTATUS_PUM));
}
if (get_field(proc->state.mstatus, MSTATUS_VM) == VM_MBARE)
mode = PRV_M;
reg_t msb_mask = (reg_t(2) << (proc->xlen-1))-1; // zero-extend from xlen
return addr & msb_mask;
}
- return walk(addr, type, mode > PRV_U, pum) | (addr & (PGSIZE-1));
+ return walk(addr, type, mode) | (addr & (PGSIZE-1));
}
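The translation path thus has two exits: when no translation is in effect the address is only zero-extended to xlen via msb_mask, otherwise walk() returns a page-aligned physical address and the low bits of the virtual address carry the offset within the page. A rough worked example with assumed values (PGSIZE = 4096; the walk() result is made up for illustration):

/* vaddr                   = 0x40016234
 * walk(vaddr, LOAD, mode) = 0x80042000      (page-aligned PA, hypothetical)
 * vaddr & (PGSIZE-1)      = 0x00000234      (offset inside the 4 KiB page)
 * returned paddr          = 0x80042234
 * In the bare case with xlen == 32, msb_mask = (2 << 31) - 1 = 0xFFFFFFFF,
 * so only bits above bit 31 are dropped. */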
const uint16_t* mmu_t::fetch_slow_path(reg_t vaddr)
tlb_data[idx] = sim->addr_to_mem(paddr) - vaddr;
}
-reg_t mmu_t::walk(reg_t addr, access_type type, bool supervisor, bool pum)
+reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
{
int levels, ptidxbits, ptesize;
switch (get_field(proc->get_state()->mstatus, MSTATUS_VM))
default: abort();
}
+ bool supervisor = mode == PRV_S;
+ bool pum = get_field(proc->state.mstatus, MSTATUS_PUM);
+ bool mxr = get_field(proc->state.mstatus, MSTATUS_MXR);
+
// verify bits xlen-1:va_bits-1 are all equal
int va_bits = PGSHIFT + levels * ptidxbits;
reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
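A worked instance of this check, with values assumed for illustration:

  /* Sv39: levels = 3, ptidxbits = 9, so va_bits = 12 + 3*9 = 39.
   * With xlen = 64, mask = (1 << (64 - 38)) - 1 spans bits 63..38 after the
   * shift, and the (elided) comparison requires those bits of the virtual
   * address to be all zeros or all ones, i.e. sign-extended from bit 38.
   * Sv32 with xlen = 32: va_bits = 32, so the check degenerates to a single
   * bit and can never fail. */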
if (PTE_TABLE(pte)) { // next level of page table
base = ppn << PGSHIFT;
- } else if (pum && PTE_CHECK_PERM(pte, 0, type == STORE, type == FETCH)) {
+ } else if ((pte & PTE_U) ? supervisor && pum : !supervisor) {
+ break;
+ } else if (!(pte & PTE_R) && (pte & PTE_W)) { // reserved
break;
- } else if (!PTE_CHECK_PERM(pte, supervisor, type == STORE, type == FETCH)) {
+ } else if (type == FETCH ? !(pte & PTE_X) :
+ type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
+ !((pte & PTE_R) && (pte & PTE_W))) {
break;
} else {
- // set referenced and possibly dirty bits.
- *(uint32_t*)ppte |= PTE_R | ((type == STORE) * PTE_D);
+ // set accessed and possibly dirty bits.
+ *(uint32_t*)ppte |= PTE_A | ((type == STORE) * PTE_D);
// for superpage mappings, make a fake leaf PTE for the TLB's benefit.
reg_t vpn = addr >> PGSHIFT;
reg_t value = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
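Taken together, these leaf checks replace PTE_CHECK_PERM: U pages are inaccessible to S-mode while PUM is set, non-U pages are inaccessible to U-mode, fetches need X, loads need R (or X when MXR is set), and stores need both R and W. A standalone sketch of the same decision, reusing the PTE_* macros and access_type values from this patch; leaf_permits is a hypothetical helper, not code from the patch:

#include <stdint.h>

static inline int leaf_permits(uint64_t pte, access_type type,
                               int supervisor, int pum, int mxr)
{
  if ((pte & PTE_U) ? (supervisor && pum) : !supervisor)
    return 0;                                /* wrong privilege for this page */
  if ((pte & PTE_W) && !(pte & PTE_R))
    return 0;                                /* reserved W-without-R encoding */
  switch (type) {
  case FETCH: return (pte & PTE_X) != 0;
  case LOAD:  return (pte & PTE_R) != 0 || (mxr && (pte & PTE_X));
  case STORE: return (pte & PTE_R) && (pte & PTE_W);
  }
  return 0;
}

On success the walker also sets PTE_A (and PTE_D for stores) in place, and for superpages it folds the low VPN bits into the PPN to build a per-4KiB fake leaf, so the TLB never has to track superpage granularity.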
const char* fill_from_mmio(reg_t vaddr, reg_t paddr);
// perform a page table walk for a given VA; set referenced/dirty bits
- reg_t walk(reg_t addr, access_type type, bool supervisor, bool pum);
+ reg_t walk(reg_t addr, access_type type, reg_t prv);
// handle uncommon cases: TLB misses, page faults, MMIO
const uint16_t* fetch_slow_path(reg_t addr);
break;
case CSR_MSTATUS: {
if ((val ^ state.mstatus) &
- (MSTATUS_VM | MSTATUS_MPP | MSTATUS_MPRV | MSTATUS_PUM))
+ (MSTATUS_VM | MSTATUS_MPP | MSTATUS_MPRV | MSTATUS_PUM | MSTATUS_MXR))
mmu->flush_tlb();
reg_t mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE
| MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_PUM
- | (ext ? MSTATUS_XS : 0);
+ | MSTATUS_MXR | (ext ? MSTATUS_XS : 0);
if (validate_vm(max_xlen, get_field(val, MSTATUS_VM)))
mask |= MSTATUS_VM;
.balign 0x1000
page_table:
- .word ((0x80000000 >> 2) | PTE_V | PTE_TYPE_URWX_SRWX)
+ .word ((0x80000000 >> 2) | PTE_V | PTE_U | PTE_R | PTE_W | PTE_X)
.word 0
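For reference, the rewritten entry packs the same mapping into the new format: the physical address is shifted right by PGSHIFT - PTE_PPN_SHIFT = 2 so it lands in the PPN field, and the permission bits replace the old type code. Checking the constant against the PTE_* values defined above:

/* 0x80000000 >> 2                       = 0x20000000   (PPN = 0x80000)
 * PTE_V | PTE_U | PTE_R | PTE_W | PTE_X = 0x0000001F
 * encoded PTE word                      = 0x2000001F
 * i.e. a leaf PTE for physical address 0x80000000 that is readable,
 * writable and executable from U-mode, and from S-mode while mstatus.PUM
 * is clear, matching what PTE_TYPE_URWX_SRWX granted before. */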