Separate page faults from physical memory access exceptions
riscv-tests.git: benchmarks/pmp/pmp.c
1 // See LICENSE for license details.
2
3 // Test of PMP functionality.
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include "util.h"
9
// Set by the test driver before a probe access that should fault; the trap
// handler clears it. If it is still set after the probe, the expected trap
// never arrived. volatile: written from trap context, read by mainline code.
volatile int trap_expected;

// Force inlining of the probe helpers — presumably to keep the mstatus-swap /
// probe-load sequences compact and free of call overhead (TODO confirm).
#define INLINE inline __attribute__((always_inline))
13
14 uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
15 {
16 if (cause == CAUSE_ILLEGAL_INSTRUCTION)
17 exit(0); // no PMP support
18
19 if (!trap_expected || cause != CAUSE_LOAD_ACCESS)
20 exit(1);
21 trap_expected = 0;
22 return epc + insn_len(epc);
23 }
24
// Virtual address at which the scratch page is mapped (second page of VA space).
#define SCRATCH RISCV_PGSIZE
// One physical page that the PMP rules under test will cover.
uintptr_t scratch[RISCV_PGSIZE / sizeof(uintptr_t)] __attribute__((aligned(RISCV_PGSIZE)));
// Page tables: root (l1pt) plus the level(s) needed for the chosen VM mode.
uintptr_t l1pt[RISCV_PGSIZE / sizeof(uintptr_t)] __attribute__((aligned(RISCV_PGSIZE)));
uintptr_t l2pt[RISCV_PGSIZE / sizeof(uintptr_t)] __attribute__((aligned(RISCV_PGSIZE)));
#if __riscv_xlen == 64
uintptr_t l3pt[RISCV_PGSIZE / sizeof(uintptr_t)] __attribute__((aligned(RISCV_PGSIZE)));
#else
// Sv32 has only two levels, so the leaf table is the level-2 table.
#define l3pt l2pt
#endif
34
// Build a minimal page table mapping VA SCRATCH -> PA scratch, turn on
// address translation, and install a background PMP rule (entry 2) that
// grants read access to all of memory.
static void init_pt()
{
  // Non-leaf PTE: root slot 0 points at the next-level table.
  l1pt[0] = ((uintptr_t)l2pt >> RISCV_PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
  // Leaf PTE for the scratch page: read/write, with A and D preset so the
  // hardware never needs to update them during the test.
  l3pt[SCRATCH / RISCV_PGSIZE] = ((uintptr_t)scratch >> RISCV_PGSHIFT << PTE_PPN_SHIFT) | PTE_A | PTE_D | PTE_V | PTE_R | PTE_W;
#if __riscv_xlen == 64
  // Sv39 needs one more level: level-2 slot 0 points at the leaf table.
  l2pt[0] = ((uintptr_t)l3pt >> RISCV_PGSHIFT << PTE_PPN_SHIFT) | PTE_V;
  uintptr_t vm_choice = SPTBR_MODE_SV39;
#else
  uintptr_t vm_choice = SPTBR_MODE_SV32;
#endif
  // Install root PPN and translation mode. Multiplying vm_choice by the
  // lowest set bit of SPTBR_MODE shifts it into the MODE field.
  write_csr(sptbr, ((uintptr_t)l1pt >> RISCV_PGSHIFT) |
                   (vm_choice * (SPTBR_MODE & ~(SPTBR_MODE<<1))));
  // PMP entry 2 (byte 2 of pmpcfg0): NAPOT read-only rule matching all of
  // memory (pmpaddr2 = all ones). NOTE(review): PMP_EN looks like a
  // pre-ratification draft encoding of the cfg byte — confirm for target.
  write_csr(pmpcfg0, (PMP_EN | PMP_NAPOT | PMP_R) << 16);
  write_csr(pmpaddr2, -1);
}
50
51 INLINE uintptr_t va2pa(uintptr_t va)
52 {
53 if (va < SCRATCH || va >= SCRATCH + RISCV_PGSIZE)
54 exit(3);
55 return va - SCRATCH + (uintptr_t)scratch;
56 }
57
// Smallest unit of PMP matching: pmpaddr registers hold addresses in
// units of 2^PMP_SHIFT bytes.
#define GRANULE (1UL << PMP_SHIFT)

// Software shadow of the PMP entry programmed by set_pmp().
typedef struct {
  uintptr_t cfg; // pmpNcfg byte: permissions + address-matching mode
  uintptr_t a0;  // pmpaddr0 value (TOR lower bound; unused otherwise)
  uintptr_t a1;  // pmpaddr1 value (TOR upper bound, or NAPOT address)
} pmpcfg_t;
65
66 INLINE int pmp_ok(pmpcfg_t p, uintptr_t addr, uintptr_t size)
67 {
68 if (!(p.cfg & PMP_TOR)) {
69 uintptr_t range = 1;
70
71 if (p.cfg & PMP_NAPOT) {
72 range <<= 1;
73 for (uintptr_t i = 1; i; i <<= 1) {
74 if ((p.a1 & i) == 0)
75 break;
76 p.a1 &= ~i;
77 range <<= 1;
78 }
79 }
80
81 p.a0 = p.a1;
82 p.a1 = p.a0 + range;
83 }
84
85 p.a0 *= GRANULE;
86 p.a1 *= GRANULE;
87 addr = va2pa(addr);
88
89 uintptr_t hits = 0;
90 for (uintptr_t i = 0; i < size; i += GRANULE) {
91 if (p.a0 <= addr + i && addr + i < p.a1)
92 hits += GRANULE;
93 }
94
95 return hits == 0 || hits >= size;
96 }
97
// Issue one load of `size` bytes from `addr` with translation and PMP
// checks applied as for supervisor mode: MPP is forced to 01 (S) and MPRV
// makes loads/stores use that privilege. The csrrw swaps the doctored
// mstatus in just for the probe load and the trailing csrw restores the
// original; on a fault the handler resumes past the load, so the restore
// still executes.
INLINE void test_one(uintptr_t addr, uintptr_t size)
{
  // MSTATUS_MPP & (MSTATUS_MPP >> 1) isolates the low bit of the MPP
  // field, i.e. MPP = 0b01 (supervisor).
  uintptr_t new_mstatus = (read_csr(mstatus) & ~MSTATUS_MPP) | (MSTATUS_MPP & (MSTATUS_MPP >> 1)) | MSTATUS_MPRV;
  switch (size) {
    case 1: asm volatile ("csrrw %0, mstatus, %0; lb x0, (%1); csrw mstatus, %0" : "+&r" (new_mstatus) : "r" (addr)); break;
    case 2: asm volatile ("csrrw %0, mstatus, %0; lh x0, (%1); csrw mstatus, %0" : "+&r" (new_mstatus) : "r" (addr)); break;
    case 4: asm volatile ("csrrw %0, mstatus, %0; lw x0, (%1); csrw mstatus, %0" : "+&r" (new_mstatus) : "r" (addr)); break;
#if __riscv_xlen >= 64
    case 8: asm volatile ("csrrw %0, mstatus, %0; ld x0, (%1); csrw mstatus, %0" : "+&r" (new_mstatus) : "r" (addr)); break;
#endif
    // Callers only pass power-of-two sizes up to XLEN/8.
    default: __builtin_unreachable();
  }
}
111
112 INLINE void test_all_sizes(pmpcfg_t p, uintptr_t addr)
113 {
114 for (size_t size = 1; size <= sizeof(uintptr_t); size *= 2) {
115 if (addr & (size - 1))
116 continue;
117 trap_expected = !pmp_ok(p, addr, size);
118 test_one(addr, size);
119 if (trap_expected)
120 exit(2);
121 }
122 }
123
124 INLINE void test_range_once(pmpcfg_t p, uintptr_t base, uintptr_t range)
125 {
126 for (uintptr_t addr = base; addr < base + range; addr += GRANULE)
127 test_all_sizes(p, addr);
128 }
129
// Program PMP entry 1 with the given configuration and address registers,
// returning `p` unchanged for the caller's software model. The entry's
// cfg byte is cleared before the address registers change so a partially
// updated rule is never live; sfence.vma then flushes any cached
// translations that may have captured the old PMP state.
INLINE pmpcfg_t set_pmp(pmpcfg_t p)
{
  uintptr_t cfg0 = read_csr(pmpcfg0);
  write_csr(pmpcfg0, cfg0 & ~0xff00);  // disable entry 1 during the update
  write_csr(pmpaddr0, p.a0);
  write_csr(pmpaddr1, p.a1);
  // Re-enable entry 1 with the new cfg byte, preserving the other entries.
  write_csr(pmpcfg0, ((p.cfg << 8) & 0xff00) | (cfg0 & ~0xff00));
  asm volatile ("sfence.vma");
  return p;
}
140
141 INLINE pmpcfg_t set_pmp_range(uintptr_t base, uintptr_t range)
142 {
143 pmpcfg_t p;
144 p.cfg = PMP_EN | PMP_TOR | PMP_M | PMP_R;
145 p.a0 = base >> PMP_SHIFT;
146 p.a1 = (base + range) >> PMP_SHIFT;
147 return set_pmp(p);
148 }
149
150 INLINE pmpcfg_t set_pmp_napot(uintptr_t base, uintptr_t range)
151 {
152 pmpcfg_t p;
153 p.cfg = PMP_EN | PMP_M | PMP_R | (range > GRANULE ? PMP_NAPOT : 0);
154 p.a0 = 0;
155 p.a1 = (base + (range/2 - 1)) >> PMP_SHIFT;
156 return set_pmp(p);
157 }
158
// Exercise the window [addr, addr + range) through the TOR encoding, and
// additionally through NAPOT when the window is a naturally-aligned
// power of two (the only shapes NAPOT can express).
static void test_range(uintptr_t addr, uintptr_t range)
{
  test_range_once(set_pmp_range(va2pa(addr), range), addr, range);

  int power_of_two = (range & (range - 1)) == 0;
  int nat_aligned  = (addr & (range - 1)) == 0;
  if (power_of_two && nat_aligned)
    test_range_once(set_pmp_napot(va2pa(addr), range), addr, range);
}
169
170 static void test_ranges(uintptr_t addr, uintptr_t size)
171 {
172 for (uintptr_t range = GRANULE; range <= size; range += GRANULE)
173 test_range(addr, range);
174 }
175
176 static void exhaustive_test(uintptr_t addr, uintptr_t size)
177 {
178 for (uintptr_t base = addr; base < addr + size; base += GRANULE)
179 test_ranges(base, size - (base - addr));
180 }
181
182 int main()
183 {
184 init_pt();
185
186 const int max_exhaustive = 32;
187 exhaustive_test(SCRATCH, max_exhaustive);
188 exhaustive_test(SCRATCH + RISCV_PGSIZE - max_exhaustive, max_exhaustive);
189
190 test_range(SCRATCH, RISCV_PGSIZE);
191 test_range(SCRATCH, RISCV_PGSIZE / 2);
192 test_range(SCRATCH + RISCV_PGSIZE / 2, RISCV_PGSIZE / 2);
193
194 return 0;
195 }