/*
 * Copyright (c) 2010-2013, 2016-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmapped_ipr.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);

    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    if (sys)
        m5opRange = sys->m5opRange();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
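        // A worked example, assuming the m5op range base is 64 KiB
        // aligned as gem5 configures it: for an access at base + 0x1234,
        // bits [15:8] of the address (0x12) select the pseudo-inst func
        // and bits [7:0] (0x34) the subfunc passed below.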
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    }

    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, ExceptionLevel target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
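            // (rangeMRU marks how many leading slots are treated as
            // already most-recently-used; a hit further down is promoted
            // to slot 0 by shifting the intervening entries back one
            // place, approximating LRU without per-entry timestamps.
            // Functional accesses skip the promotion so they don't
            // perturb replacement state.)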
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // Insert at the MRU position, evicting the LRU entry
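    // (Every entry shifts back one slot, so the old contents of
    // table[size - 1] are dropped and the new entry lands in table[0],
    // where the pseudo-LRU promotion in lookup() expects it.)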

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el,
                      bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        const bool el_match = ignore_el ?
            true : te->checkELMatch(target_el);

        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) && el_match) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, EL1, true);
    }
}

void
TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el)
{
    bool hyp = target_el == EL2;

    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        const bool el_match = ignore_el ?
            true : te->checkELMatch(target_el);

        if (te->valid && te->nstid && te->isHyp == hyp && el_match) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(EL1, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            te->checkELMatch(target_el)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup,
               bool ignore_asn, ExceptionLevel target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
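    // (sext<56> treats mva as a 56-bit signed value and copies bit 55
    // into bits [63:56]: e.g. 0x0080000000000000 becomes
    // 0xff80000000000000, while addresses with bit 55 clear pass
    // through unchanged.)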

    bool hyp = target_el == EL2;

    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el);
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        stage2DescReq = otlb->stage2DescReq;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::regStats()
{
    BaseTLB::regStats();
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne || req->isPrefetch());
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
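                // (AlignmentMask extracts the log2 of the access size
                // from the request flags and mask(n) is (1 << n) - 1,
                // so a 4-byte access checks vaddr & 0x3; any set bit
                // means the access is misaligned.)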
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
    // A data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1'b0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;
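    // (When the access flag model is enabled, AP[0] no longer encodes a
    // permission, so forcing the low bit keeps the 3-bit switch below
    // consistent: e.g. a short-descriptor ap of 0b010 is evaluated as
    // 0b011, the simplified full-access encoding.)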

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
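    // (The stage-2 HAP field is a read/write permission pair: bit 0
    // grants reads and bit 1 grants writes, so a write aborts unless
    // hap & 2 and a read aborts unless hap & 1.)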
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 || !abt, tranMethod);
    }
    return NoFault;
}


Fault
TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;

    // grant_read is used for faults from an atomic instruction that
    // both reads and writes from a memory location. From an ISS point
    // of view they count as read if a read to that address would have
    // generated the fault; they count as writes otherwise
    bool grant_read = true;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        // D4-1802
        uint8_t hap = 0x3 & te->hap;
        grant_read = hap & 0x1;
        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = grant_read;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                grant_read = ap & 0x1;
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                if (checkPAN(tc, ap, req, mode)) {
                    grant = false;
                    grant_read = false;
                    break;
                }

                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
            if (hcr.e2h && checkPAN(tc, ap, req, mode)) {
                grant = false;
                grant_read = false;
                break;
            }
            M5_FALLTHROUGH;
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain,
                (is_atomic && !grant_read) ? false : is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

bool
TLB::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode)
{
    // The PAN bit has no effect on:
    // 1) Instruction accesses.
    // 2) Data Cache instructions other than DC ZVA
    // 3) Address translation instructions, other than ATS1E1RP and
    //    ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
    //    gem5)
    // 4) Unprivileged instructions (Unimplemented in gem5)
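    // (The ap value tested below carries descriptor bit AP[1] in its
    // low bit: when set, the page is accessible from EL0, which is
    // precisely the class of pages a privileged access must fault on
    // while PAN is set.)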
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    if (mmfr1.pan && cpsr.pan && (ap & 0x1) && mode != Execute &&
        (!req->isCacheMaintenance() ||
         (req->getFlags() & Request::CACHE_BLOCK_ZERO))) {
        return true;
    } else {
        return false;
    }
}

Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne || req->isPrefetch());
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
            nmrr.or0 == 0 || prrr.tr0 != 0x2) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    if (fault == NoFault) {
        // Don't try to finalize a physical address unless the
        // translation has completed (i.e., there is a table entry).
        return te ? finalizePhysical(req, tc, mode) : NoFault;
    } else {
        return fault;
    }
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
        return;
    }

    assert(translation);

    translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
    bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

Port *
TLB::getTableWalkerPort()
{
    return &stage2Mmu->getDMAPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    aarch64EL = tranTypeEL(cpsr, tranType);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);

    if (aarch64) { // AArch64
        // determine EL we need to translate in
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
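                // (With 16-bit ASIDs implemented and enabled via
                // TCR_ELx.AS, the ASID occupies TTBR bits [63:48];
                // otherwise only bits [55:48] are used.)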
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = aarch64EL == EL2;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran)); // <--- FIX THIS HACK
            stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
                            (aarch64EL < EL2));
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
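            // (Three cached flags with distinct roles: stage2Req says
            // this translation itself needs a stage-2 lookup,
            // stage2DescReq says the descriptors fetched by the table
            // walker must themselves be translated by stage 2, and
            // directToStage2 says stage 1 is off, so the VA is handed
            // to the stage-2 TLB unchanged as an IPA.)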
        } else {
            vmid = 0;
            isHyp = false;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else { // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                               !isSecure));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                               !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                                          MISCREG_TTBR0,
                               tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                                              !isSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                                              !isSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                                              !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure;
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}

ExceptionLevel
TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
        return EL1;

      case S1E2Tran:
        return EL2;

      case S1E3Tran:
        return EL3;

      case NormalTran:
      case S1CTran:
      case S1S2NsTran:
      case HypMode:
        return currEL(cpsr);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    // In a 2-stage system, the IPA->PA translation can be started via this
    // call so make sure the miscRegs are correct.
    if (isStage2) {
        updateMiscReg(tc, tranType);
    }
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType, stage2DescReq);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        *te = s2Te;
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault =
                        reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}

Fault
TLB::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}

Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}


ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}