arch-riscv: Stop "using namespace std"
[gem5.git] src/arch/riscv/tlb.cc
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * Copyright (c) 2020 Barkhausen Institut
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/riscv/tlb.hh"

#include <string>
#include <vector>

#include "arch/riscv/faults.hh"
#include "arch/riscv/fs_workload.hh"
#include "arch/riscv/mmu.hh"
#include "arch/riscv/pagetable.hh"
#include "arch/riscv/pagetable_walker.hh"
#include "arch/riscv/pra_constants.hh"
#include "arch/riscv/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/RiscvTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/system.hh"

using namespace RiscvISA;

///////////////////////////////////////////////////////////////////////
//
// RISC-V TLB
//

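// Pack the ASID into the bits above the virtual address so that entries
// belonging to different address spaces never collide in the trie.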
static Addr
buildKey(Addr vpn, uint16_t asid)
{
    return (static_cast<Addr>(asid) << 48) | vpn;
}

TLB::TLB(const Params &p)
    : BaseTLB(p), size(p.size), tlb(size), lruSeq(0), stats(this)
{
    for (size_t x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p.walker;
    walker->setTLB(this);
}

Walker *
TLB::getWalker()
{
    return walker;
}

void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    size_t lru = 0;
    for (size_t i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    remove(lru);
}

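// Look up the entry matching the given VPN and ASID. Unless the lookup is
// "hidden", bump the entry's LRU sequence number and update the hit/miss
// statistics.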
TlbEntry *
TLB::lookup(Addr vpn, uint16_t asid, Mode mode, bool hidden)
{
    TlbEntry *entry = trie.lookup(buildKey(vpn, asid));

    if (!hidden) {
        if (entry)
            entry->lruSeq = nextSeq();

        if (mode == Write)
            stats.write_accesses++;
        else
            stats.read_accesses++;

        if (!entry) {
            if (mode == Write)
                stats.write_misses++;
            else
                stats.read_misses++;
        }
        else {
            if (mode == Write)
                stats.write_hits++;
            else
                stats.read_hits++;
        }

        DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
                vpn, asid, entry ? "hit" : "miss", entry ? entry->paddr : 0);
    }

    return entry;
}

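// Install a translation for the given VPN, reusing an existing entry if one
// is already present and evicting the LRU entry when the TLB is full.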
TlbEntry *
TLB::insert(Addr vpn, const TlbEntry &entry)
{
    DPRINTF(TLB, "insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
            vpn, entry.asid, entry.paddr, entry.pte, entry.size());

    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = lookup(vpn, entry.asid, Mode::Read, true);
    if (newEntry) {
        // update PTE flags (maybe we set the dirty/writable flag)
        newEntry->pte = entry.pte;
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    Addr key = buildKey(vpn, entry.asid);
    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
        trie.insert(key, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

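// Invalidate entries matching the given VPN and ASID; a value of zero for
// either argument acts as a wildcard.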
void
TLB::demapPage(Addr vpn, uint64_t asid)
{
    asid &= 0xFFFF;

    if (vpn == 0 && asid == 0)
        flushAll();
    else {
        DPRINTF(TLB, "flush(vpn=%#x, asid=%#x)\n", vpn, asid);
        if (vpn != 0 && asid != 0) {
            TlbEntry *newEntry = lookup(vpn, asid, Mode::Read, true);
            if (newEntry)
                remove(newEntry - tlb.data());
        }
        else {
            for (size_t i = 0; i < size; i++) {
                if (tlb[i].trieHandle) {
                    Addr mask = ~(tlb[i].size() - 1);
                    if ((vpn == 0 || (vpn & mask) == tlb[i].vaddr) &&
                        (asid == 0 || tlb[i].asid == asid))
                        remove(i);
                }
            }
        }
    }
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll()\n");
    for (size_t i = 0; i < size; i++) {
        if (tlb[i].trieHandle)
            remove(i);
    }
}

void
TLB::remove(size_t idx)
{
    DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
            tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
            tlb[idx].size());

    assert(tlb[idx].trieHandle);
    trie.remove(tlb[idx].trieHandle);
    tlb[idx].trieHandle = NULL;
    freeList.push_back(&tlb[idx]);
}

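// Check the R/W/X bits of the PTE against the access mode, and the U bit
// against the current privilege mode (honoring status.sum for S-mode).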
Fault
TLB::checkPermissions(STATUS status, PrivilegeMode pmode, Addr vaddr,
                      Mode mode, PTESv39 pte)
{
    Fault fault = NoFault;

    if (mode == TLB::Read && !pte.r) {
        DPRINTF(TLB, "PTE has no read perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }
    else if (mode == TLB::Write && !pte.w) {
        DPRINTF(TLB, "PTE has no write perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }
    else if (mode == TLB::Execute && !pte.x) {
        DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }

    if (fault == NoFault) {
        // check pte.u
        if (pmode == PrivilegeMode::PRV_U && !pte.u) {
            DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
            fault = createPagefault(vaddr, mode);
        }
        else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
            DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
            fault = createPagefault(vaddr, mode);
        }
    }

    return fault;
}

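// Build a page fault of the flavor (load, store or fetch) that matches the
// access mode.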
Fault
TLB::createPagefault(Addr vaddr, Mode mode)
{
    ExceptionCode code;
    if (mode == TLB::Read)
        code = ExceptionCode::LOAD_PAGE;
    else if (mode == TLB::Write)
        code = ExceptionCode::STORE_PAGE;
    else
        code = ExceptionCode::INST_PAGE;
    return std::make_shared<AddressFault>(vaddr, code);
}

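// Translate a vaddr that is expected to already have a TLB entry (asserts
// on a miss).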
Addr
TLB::translateWithTLB(Addr vaddr, uint16_t asid, Mode mode)
{
    TlbEntry *e = lookup(vaddr, asid, mode, false);
    assert(e != nullptr);
    return e->paddr << PageShift | (vaddr & mask(e->logBytes));
}

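// Core translation path: consult the TLB and, on a miss, start a page table
// walk. In timing mode the walk completes asynchronously and `delayed` is
// set so the caller knows the translation is still pending.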
Fault
TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
                 Translation *translation, Mode mode, bool &delayed)
{
    delayed = false;

    Addr vaddr = req->getVaddr() & ((static_cast<Addr>(1) << VADDR_BITS) - 1);
    SATP satp = tc->readMiscReg(MISCREG_SATP);

    TlbEntry *e = lookup(vaddr, satp.asid, mode, false);
    if (!e) {
        Fault fault = walker->start(tc, translation, req, mode);
        if (translation != nullptr || fault != NoFault) {
            // This gets ignored in atomic mode.
            delayed = true;
            return fault;
        }
        e = lookup(vaddr, satp.asid, mode, false);
        assert(e != nullptr);
    }

    STATUS status = tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = getMemPriv(tc, mode);
    Fault fault = checkPermissions(status, pmode, vaddr, mode, e->pte);
    if (fault != NoFault) {
        // if we want to write and it isn't writable, do a page table walk
        // again to update the dirty flag.
        if (mode == TLB::Write && !e->pte.w) {
            DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
            fault = walker->start(tc, translation, req, mode);
            if (translation != nullptr || fault != NoFault) {
                delayed = true;
                return fault;
            }
        }
        if (fault != NoFault)
            return fault;
    }

    Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
    DPRINTF(TLBVerbose, "translate(vpn=%#x, asid=%#x): %#x\n",
            vaddr, satp.asid, paddr);
    req->setPaddr(paddr);

    return NoFault;
}

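// Effective privilege mode for this access: loads and stores use
// status.mpp instead of the current mode when status.mprv is set.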
PrivilegeMode
TLB::getMemPriv(ThreadContext *tc, Mode mode)
{
    STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = (PrivilegeMode)tc->readMiscReg(MISCREG_PRV);
    if (mode != Mode::Execute && status.mprv == 1)
        pmode = (PrivilegeMode)(RegVal)status.mpp;
    return pmode;
}

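// Top-level entry point: in full-system mode, accesses from M-mode or with
// SATP in Bare mode bypass translation entirely; in SE mode, the process
// page table is used instead of the hardware TLB.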
Fault
TLB::translate(const RequestPtr &req, ThreadContext *tc,
               Translation *translation, Mode mode, bool &delayed)
{
    delayed = false;

    if (FullSystem) {
        PrivilegeMode pmode = getMemPriv(tc, mode);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        if (pmode == PrivilegeMode::PRV_M || satp.mode == AddrXlateMode::BARE)
            req->setFlags(Request::PHYSICAL);

        Fault fault;
        if (req->getFlags() & Request::PHYSICAL) {
            /**
             * we simply set the virtual address to physical address
             */
            req->setPaddr(req->getVaddr());
            fault = NoFault;
        } else {
            fault = doTranslate(req, tc, translation, mode, delayed);
        }

        // according to the RISC-V tests, negative physical addresses trigger
        // an illegal address exception.
        // TODO where is that written in the manual?
        if (!delayed && fault == NoFault && bits(req->getPaddr(), 63)) {
            ExceptionCode code;
            if (mode == TLB::Read)
                code = ExceptionCode::LOAD_ACCESS;
            else if (mode == TLB::Write)
                code = ExceptionCode::STORE_ACCESS;
            else
                code = ExceptionCode::INST_ACCESS;
            fault = std::make_shared<AddressFault>(req->getVaddr(), code);
        }

        return fault;
    } else {
        // In the O3 CPU model, sometimes a memory access will be speculatively
        // executed along a branch that will end up not being taken where the
        // address is invalid. In that case, return a fault rather than trying
        // to translate it (which will cause a panic). Since RISC-V allows
        // unaligned memory accesses, this should only happen if the request's
        // length is long enough to wrap around from the end of the memory to
        // the start.
        assert(req->getSize() > 0);
        if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        Process * p = tc->getProcessPtr();

        Fault fault = p->pTable->translate(req);
        if (fault != NoFault)
            return fault;

        return NoFault;
    }
}

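// translateAtomic() and translateTiming() are thin wrappers around
// translate() for the atomic and timing memory modes, respectively.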
Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    bool delayed;
    return translate(req, tc, nullptr, mode, delayed);
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    bool delayed;
    assert(translation);
    Fault fault = translate(req, tc, translation, mode, delayed);
    if (!delayed)
        translation->finish(fault, req, tc, mode);
    else
        translation->markDelayed();
}

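// Functional translation: walk the page tables directly in FS mode (or use
// the process page table in SE mode) without modifying TLB state.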
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    const Addr vaddr = req->getVaddr();
    Addr paddr = vaddr;

    if (FullSystem) {
        MMU *mmu = static_cast<MMU *>(tc->getMMUPtr());

        PrivilegeMode pmode = mmu->getMemPriv(tc, mode);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        if (pmode != PrivilegeMode::PRV_M &&
            satp.mode != AddrXlateMode::BARE) {
            Walker *walker = mmu->getDataWalker();
            unsigned logBytes;
            Fault fault = walker->startFunctional(
                    tc, paddr, logBytes, mode);
            if (fault != NoFault)
                return fault;

            Addr masked_addr = vaddr & mask(logBytes);
            paddr |= masked_addr;
        }
    }
    else {
        Process *process = tc->getProcessPtr();
        const auto *pte = process->pTable->lookup(vaddr);

        if (!pte && mode != Execute) {
            // Check if we just need to grow the stack.
            if (process->fixupFault(vaddr)) {
                // If we did, lookup the entry for the new page.
                pte = process->pTable->lookup(vaddr);
            }
        }

        if (!pte)
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        paddr = pte->paddr | process->pTable->pageOffset(vaddr);
    }

    DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
    req->setPaddr(paddr);
    return NoFault;
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    // Do not allow to restore with a smaller tlb.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        Addr key = buildKey(newEntry->vaddr, newEntry->asid);
        newEntry->trieHandle = trie.insert(key,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

TLB::TlbStats::TlbStats(Stats::Group *parent)
    : Stats::Group(parent),
      ADD_STAT(read_hits, "read hits"),
      ADD_STAT(read_misses, "read misses"),
      ADD_STAT(read_accesses, "read accesses"),
      ADD_STAT(write_hits, "write hits"),
      ADD_STAT(write_misses, "write misses"),
      ADD_STAT(write_accesses, "write accesses"),
      ADD_STAT(hits, "Total TLB (read and write) hits",
               read_hits + write_hits),
      ADD_STAT(misses, "Total TLB (read and write) misses",
               read_misses + write_misses),
      ADD_STAT(accesses, "Total TLB (read and write) accesses",
               read_accesses + write_accesses)
{
}