arch, cpu, dev, gpu, mem, sim, python: start using getPort.
[gem5.git] / src / arch / arm / tlb.cc
/*
 * Copyright (c) 2010-2013, 2016-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmapped_ipr.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);

    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    if (sys)
        m5opRange = sys->m5opRange();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
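
// Usage sketch (illustrative, not part of the simulator logic): this
// functional variant only probes the TLB and never starts a table walk,
// so a miss simply reports failure instead of faulting:
//
//     Addr pa;
//     if (tlb->translateFunctional(tc, vaddr, pa))
//         DPRINTF(TLB, "va %#x -> pa %#x\n", vaddr, pa);
//     else
//         DPRINTF(TLB, "va %#x is not cached in the TLB\n", vaddr);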

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    }

    return NoFault;
}
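
// Worked example (the base address is hypothetical): if m5opRange starts
// at 0x10010000, an access to 0x10010142 lands in the range, so bits
// [15:8] of the address select pseudo-instruction function 0x01 and bits
// [7:0] select subfunction 0x42; iprAddressPseudoInst() re-encodes that
// pair as the generic IPR address the request is redirected to.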

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{

    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}
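
// Illustrative example of the MRU promotion above: a non-functional hit
// at index x = 3 with rangeMRU = 1 shifts the leading entries down and
// promotes the hit entry to the front,
//
//     before: [A, B, C, D, E, ...]
//     after:  [D, A, B, C, E, ...]
//
// while hits at index <= rangeMRU and functional lookups leave the array
// untouched.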

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
            entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // inserting to MRU position and evicting the LRU one

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}
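
// Illustrative example: inserting F into a full four-entry table
// [A, B, C, D] shifts every entry down one slot and drops the LRU entry
// D, giving [F, A, B, C]; a new entry always lands in the MRU position
// at index 0.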

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}
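
// Worked example of the sign extension above: sext<56>() replicates bit
// 55 into bits [63:56], so an MVA such as 0x0080000000001000 becomes
// 0xff80000000001000 before the lookup, matching the sign-extended form
// the entries were installed with; addresses with bit 55 clear pass
// through unchanged.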

void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el);
}

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}
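
// The matching rules above, spelled out: with ignore_el == false, a
// target of EL2 or EL3 only matches entries installed at exactly that
// level, while a target of EL0 or EL1 matches entries from either EL0 or
// EL1 (the two share a translation regime); with ignore_el == true every
// entry matches.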

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        stage2DescReq = otlb->stage2DescReq;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(stage2DescReq);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(stage2DescReq);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::regStats()
{
    BaseTLB::regStats();
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
    // a data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
              (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt || hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
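
// Summary of the short-descriptor AP[2:0] cases handled above (derived
// from the switch itself): 0b000 aborts unless the legacy SCTLR.RS
// scheme grants access, 0b001 is privileged-only read/write, 0b010 adds
// unprivileged reads, 0b011 is read/write at any privilege, 0b101 is
// privileged-only read-only, 0b110 and 0b111 are read-only at any
// privilege, and 0b100 is UNPREDICTABLE. Stage 2 accesses skip this
// check entirely and rely on the HAP bits instead.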


Fault
TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        // D4-1802
        uint8_t hap = 0x3 & te->hap;
        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = hap & 0x1;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}
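
// Worked example of the AArch64 stage 1 decode above: a descriptor with
// AP = 0b01 (read/write at EL0 and EL1), XN = 0 and PXN = 0 gives
// perm = (1 << 2) | (0 << 1) | 0 = 4. At EL0 that grants loads, stores
// and, unless SCTLR.WXN forbids writable-executable pages, instruction
// fetches; at EL1 the same perm value grants only loads and stores,
// since pages writable at EL0 are never executable at EL1.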

Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
            nmrr.or0 == 0 || prrr.tr0 != 0x2) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    if (fault == NoFault) {
        // Don't try to finalize a physical address unless the
        // translation has completed (i.e., there is a table entry).
        return te ? finalizePhysical(req, tc, mode) : NoFault;
    } else {
        return fault;
    }
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
        return;
    }

    assert(translation);

    translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or that
    // it's going to take a while. By not doing this when we're in the middle
    // of a stage 2 translation we prevent marking the translation as delayed
    // twice, once when the translation starts and again when the stage 1
    // translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

Port *
TLB::getTableWalkerPort()
{
    return &stage2Mmu->getPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    aarch64EL = tranTypeEL(cpsr, tranType);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran)); // <--- FIX THIS HACK
            stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
                            (aarch64EL < EL2));
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            isHyp = false;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                               !isSecure));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                               !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                                          MISCREG_TTBR0,
                               tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {  // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                                              !isSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                                              !isSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                                              !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure;
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}
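
// Caching note: because miscRegValid, miscRegContext and curTranType are
// remembered above, back-to-back translations from the same thread
// context with the same translation type skip every readMiscReg() call;
// a context switch, a drainResume(), or (for a stage 1 TLB) a change of
// ArmTranslationType invalidates the cached state and forces a re-read.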

ExceptionLevel
TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
        return EL1;

      case S1E2Tran:
        return EL2;

      case S1E3Tran:
        return EL3;

      case NormalTran:
      case S1CTran:
      case S1S2NsTran:
      case HypMode:
        return opModeToEL((OperatingMode)(uint8_t)cpsr.mode);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType, stage2DescReq);
        // for timing mode, return and wait for the table walk to complete
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        *te = s2Te;
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(
                        fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}

Fault
TLB::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}

Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}


ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}