arm: Mark some miscregs (timer counter) registers as unverifiable.
src/arch/arm/tlb.cc
1 /*
2 * Copyright (c) 2010-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Steve Reinhardt
43 */
44
45 #include <memory>
46 #include <string>
47 #include <vector>
48
49 #include "arch/arm/faults.hh"
50 #include "arch/arm/pagetable.hh"
51 #include "arch/arm/stage2_lookup.hh"
52 #include "arch/arm/stage2_mmu.hh"
53 #include "arch/arm/system.hh"
54 #include "arch/arm/table_walker.hh"
55 #include "arch/arm/tlb.hh"
56 #include "arch/arm/utility.hh"
57 #include "base/inifile.hh"
58 #include "base/str.hh"
59 #include "base/trace.hh"
60 #include "cpu/base.hh"
61 #include "cpu/thread_context.hh"
62 #include "debug/Checkpoint.hh"
63 #include "debug/TLB.hh"
64 #include "debug/TLBVerbose.hh"
65 #include "mem/page_table.hh"
66 #include "params/ArmTLB.hh"
67 #include "sim/full_system.hh"
68 #include "sim/process.hh"
69
70 using namespace std;
71 using namespace ArmISA;
72
73 TLB::TLB(const ArmTLBParams *p)
74 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
75 isStage2(p->is_stage2), stage2Req(false), _attr(0),
76 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
77 stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
78 miscRegValid(false), curTranType(NormalTran)
79 {
80 tableWalker->setTlb(this);
81
82 // Cache system-level properties
83 haveLPAE = tableWalker->haveLPAE();
84 haveVirtualization = tableWalker->haveVirtualization();
85 haveLargeAsid64 = tableWalker->haveLargeAsid64();
86 }
87
88 TLB::~TLB()
89 {
90 delete[] table;
91 }
92
93 void
94 TLB::init()
95 {
96 if (stage2Mmu && !isStage2)
97 stage2Tlb = stage2Mmu->stage2Tlb();
98 }
99
100 void
101 TLB::setMMU(Stage2MMU *m)
102 {
103 stage2Mmu = m;
104 tableWalker->setMMU(m);
105 }
106
107 bool
108 TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
109 {
110 updateMiscReg(tc);
111
112 if (directToStage2) {
113 assert(stage2Tlb);
114 return stage2Tlb->translateFunctional(tc, va, pa);
115 }
116
117 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
118 aarch64 ? aarch64EL : EL1);
119 if (!e)
120 return false;
121 pa = e->pAddr(va);
122 return true;
123 }
124
125 Fault
126 TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
127 {
128 return NoFault;
129 }
130
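// Linear search of the fully-associative TLB array. On a hit past the
// rangeMRU position (for non-functional accesses) the entry is moved to the
// front (MRU) slot; returns NULL on a miss.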
131 TlbEntry*
132 TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
133 bool functional, bool ignore_asn, uint8_t target_el)
134 {
135
136 TlbEntry *retval = NULL;
137
138 // Maintaining LRU array
139 int x = 0;
140 while (retval == NULL && x < size) {
141 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
142 target_el)) ||
143 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
144 // We only move the hit entry ahead when the position is higher
145 // than rangeMRU
146 if (x > rangeMRU && !functional) {
147 TlbEntry tmp_entry = table[x];
148 for(int i = x; i > 0; i--)
149 table[i] = table[i - 1];
150 table[0] = tmp_entry;
151 retval = &table[0];
152 } else {
153 retval = &table[x];
154 }
155 break;
156 }
157 ++x;
158 }
159
160 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
161 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
162 "el: %d\n",
163 va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
164 retval ? retval->pfn : 0, retval ? retval->size : 0,
165 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
166 retval ? retval->ns : 0, retval ? retval->nstid : 0,
167 retval ? retval->global : 0, retval ? retval->asid : 0,
168 retval ? retval->el : 0);
169
170 return retval;
171 }
172
173 // insert a new TLB entry
174 void
175 TLB::insert(Addr addr, TlbEntry &entry)
176 {
177 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
178 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
179 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
180 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
181 entry.global, entry.valid, entry.nonCacheable, entry.xn,
182 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
183 entry.isHyp);
184
185 if (table[size - 1].valid)
186 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
187 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
188 table[size-1].vpn << table[size-1].N, table[size-1].asid,
189 table[size-1].vmid, table[size-1].pfn << table[size-1].N,
190 table[size-1].size, table[size-1].ap, table[size-1].ns,
191 table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
192 table[size-1].el);
193
194 // Insert at the MRU position, evicting the LRU entry
195
196 for (int i = size - 1; i > 0; --i)
197 table[i] = table[i-1];
198 table[0] = entry;
199
200 inserts++;
201 ppRefills->notify(1);
202 }
203
204 void
205 TLB::printTlb() const
206 {
207 int x = 0;
208 TlbEntry *te;
209 DPRINTF(TLB, "Current TLB contents:\n");
210 while (x < size) {
211 te = &table[x];
212 if (te->valid)
213 DPRINTF(TLB, " * %s\n", te->print());
214 ++x;
215 }
216 }
217
218 void
219 TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
220 {
221 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
222 (secure_lookup ? "secure" : "non-secure"));
223 int x = 0;
224 TlbEntry *te;
225 while (x < size) {
226 te = &table[x];
227 if (te->valid && secure_lookup == !te->nstid &&
228 (te->vmid == vmid || secure_lookup) &&
229 checkELMatch(target_el, te->el, ignore_el)) {
230
231 DPRINTF(TLB, " - %s\n", te->print());
232 te->valid = false;
233 flushedEntries++;
234 }
235 ++x;
236 }
237
238 flushTlb++;
239
240 // If there's a second stage TLB (and we're not it) then flush it as well
241 // if we're currently in hyp mode
242 if (!isStage2 && isHyp) {
243 stage2Tlb->flushAllSecurity(secure_lookup, true);
244 }
245 }
246
247 void
248 TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
249 {
250 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
251 (hyp ? "hyp" : "non-hyp"));
252 int x = 0;
253 TlbEntry *te;
254 while (x < size) {
255 te = &table[x];
256 if (te->valid && te->nstid && te->isHyp == hyp &&
257 checkELMatch(target_el, te->el, ignore_el)) {
258
259 DPRINTF(TLB, " - %s\n", te->print());
260 flushedEntries++;
261 te->valid = false;
262 }
263 ++x;
264 }
265
266 flushTlb++;
267
268 // If there's a second stage TLB (and we're not it) then flush it as well
269 if (!isStage2 && !hyp) {
270 stage2Tlb->flushAllNs(false, true);
271 }
272 }
273
274 void
275 TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
276 {
277 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
278 "(%s lookup)\n", mva, asn, (secure_lookup ?
279 "secure" : "non-secure"));
280 _flushMva(mva, asn, secure_lookup, false, false, target_el);
281 flushTlbMvaAsid++;
282 }
283
284 void
285 TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
286 {
287 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
288 (secure_lookup ? "secure" : "non-secure"));
289
290 int x = 0;
291 TlbEntry *te;
292
293 while (x < size) {
294 te = &table[x];
295 if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
296 (te->vmid == vmid || secure_lookup) &&
297 checkELMatch(target_el, te->el, false)) {
298
299 te->valid = false;
300 DPRINTF(TLB, " - %s\n", te->print());
301 flushedEntries++;
302 }
303 ++x;
304 }
305 flushTlbAsid++;
306 }
307
308 void
309 TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
310 {
311 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
312 (secure_lookup ? "secure" : "non-secure"));
313 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
314 flushTlbMva++;
315 }
316
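// Flush helper: sign-extend the MVA, then repeatedly look up and invalidate
// matching entries (optionally ignoring the ASID) until no match remains.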
317 void
318 TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
319 bool ignore_asn, uint8_t target_el)
320 {
321 TlbEntry *te;
322 // D5.7.2: Sign-extend address to 64 bits
323 mva = sext<56>(mva);
324 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
325 target_el);
326 while (te != NULL) {
327 if (secure_lookup == !te->nstid) {
328 DPRINTF(TLB, " - %s\n", te->print());
329 te->valid = false;
330 flushedEntries++;
331 }
332 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
333 target_el);
334 }
335 }
336
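// Decide whether a TLB entry's exception level is targeted by a flush:
// EL2 and EL3 must match exactly, EL0 and EL1 are treated as one regime,
// and ignore_el bypasses the check altogether.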
337 bool
338 TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
339 {
340 bool elMatch = true;
341 if (!ignore_el) {
342 if (target_el == 2 || target_el == 3) {
343 elMatch = (tentry_el == target_el);
344 } else {
345 elMatch = (tentry_el == 0) || (tentry_el == 1);
346 }
347 }
348 return elMatch;
349 }
350
351 void
352 TLB::drainResume()
353 {
354 // We might have unserialized something or switched CPUs, so make
355 // sure to re-read the misc regs.
356 miscRegValid = false;
357 }
358
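// Copy the cached translation state from another ARM TLB when switching
// CPUs; the stage 2 TLBs are synced as well if both CPUs have one.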
359 void
360 TLB::takeOverFrom(BaseTLB *_otlb)
361 {
362 TLB *otlb = dynamic_cast<TLB*>(_otlb);
363 /* Make sure we actually have a valid type */
364 if (otlb) {
365 _attr = otlb->_attr;
366 haveLPAE = otlb->haveLPAE;
367 directToStage2 = otlb->directToStage2;
368 stage2Req = otlb->stage2Req;
369 bootUncacheability = otlb->bootUncacheability;
370
371 /* Sync the stage 2 TLBs if they exist in both
372 * the old CPU and the new one
373 */
374 if (!isStage2 &&
375 stage2Tlb && otlb->stage2Tlb) {
376 stage2Tlb->takeOverFrom(otlb->stage2Tlb);
377 }
378 } else {
379 panic("Incompatible TLB type!");
380 }
381 }
382
383 void
384 TLB::serialize(ostream &os)
385 {
386 DPRINTF(Checkpoint, "Serializing Arm TLB\n");
387
388 SERIALIZE_SCALAR(_attr);
389 SERIALIZE_SCALAR(haveLPAE);
390 SERIALIZE_SCALAR(directToStage2);
391 SERIALIZE_SCALAR(stage2Req);
392 SERIALIZE_SCALAR(bootUncacheability);
393
394 int num_entries = size;
395 SERIALIZE_SCALAR(num_entries);
396 for (int i = 0; i < size; i++) {
397 nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
398 table[i].serialize(os);
399 }
400 }
401
402 void
403 TLB::unserialize(Checkpoint *cp, const string &section)
404 {
405 DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
406
407 UNSERIALIZE_SCALAR(_attr);
408 UNSERIALIZE_SCALAR(haveLPAE);
409 UNSERIALIZE_SCALAR(directToStage2);
410 UNSERIALIZE_SCALAR(stage2Req);
411 UNSERIALIZE_SCALAR(bootUncacheability);
412
413 int num_entries;
414 UNSERIALIZE_SCALAR(num_entries);
415 for (int i = 0; i < min(size, num_entries); i++) {
416 table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
417 }
418 }
419
420 void
421 TLB::regStats()
422 {
423 instHits
424 .name(name() + ".inst_hits")
425 .desc("ITB inst hits")
426 ;
427
428 instMisses
429 .name(name() + ".inst_misses")
430 .desc("ITB inst misses")
431 ;
432
433 instAccesses
434 .name(name() + ".inst_accesses")
435 .desc("ITB inst accesses")
436 ;
437
438 readHits
439 .name(name() + ".read_hits")
440 .desc("DTB read hits")
441 ;
442
443 readMisses
444 .name(name() + ".read_misses")
445 .desc("DTB read misses")
446 ;
447
448 readAccesses
449 .name(name() + ".read_accesses")
450 .desc("DTB read accesses")
451 ;
452
453 writeHits
454 .name(name() + ".write_hits")
455 .desc("DTB write hits")
456 ;
457
458 writeMisses
459 .name(name() + ".write_misses")
460 .desc("DTB write misses")
461 ;
462
463 writeAccesses
464 .name(name() + ".write_accesses")
465 .desc("DTB write accesses")
466 ;
467
468 hits
469 .name(name() + ".hits")
470 .desc("DTB hits")
471 ;
472
473 misses
474 .name(name() + ".misses")
475 .desc("DTB misses")
476 ;
477
478 accesses
479 .name(name() + ".accesses")
480 .desc("DTB accesses")
481 ;
482
483 flushTlb
484 .name(name() + ".flush_tlb")
485 .desc("Number of times complete TLB was flushed")
486 ;
487
488 flushTlbMva
489 .name(name() + ".flush_tlb_mva")
490 .desc("Number of times TLB was flushed by MVA")
491 ;
492
493 flushTlbMvaAsid
494 .name(name() + ".flush_tlb_mva_asid")
495 .desc("Number of times TLB was flushed by MVA & ASID")
496 ;
497
498 flushTlbAsid
499 .name(name() + ".flush_tlb_asid")
500 .desc("Number of times TLB was flushed by ASID")
501 ;
502
503 flushedEntries
504 .name(name() + ".flush_entries")
505 .desc("Number of entries that have been flushed from TLB")
506 ;
507
508 alignFaults
509 .name(name() + ".align_faults")
510 .desc("Number of TLB faults due to alignment restrictions")
511 ;
512
513 prefetchFaults
514 .name(name() + ".prefetch_faults")
515 .desc("Number of TLB faults due to prefetch")
516 ;
517
518 domainFaults
519 .name(name() + ".domain_faults")
520 .desc("Number of TLB faults due to domain restrictions")
521 ;
522
523 permsFaults
524 .name(name() + ".perms_faults")
525 .desc("Number of TLB faults due to permissions restrictions")
526 ;
527
528 instAccesses = instHits + instMisses;
529 readAccesses = readHits + readMisses;
530 writeAccesses = writeHits + writeMisses;
531 hits = readHits + writeHits + instHits;
532 misses = readMisses + writeMisses + instMisses;
533 accesses = readAccesses + writeAccesses + instAccesses;
534 }
535
536 void
537 TLB::regProbePoints()
538 {
539 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
540 }
541
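// Syscall-emulation (SE) mode translation: perform the alignment check,
// then map the virtual address through the process page table.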
542 Fault
543 TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
544 Translation *translation, bool &delay, bool timing)
545 {
546 updateMiscReg(tc);
547 Addr vaddr_tainted = req->getVaddr();
548 Addr vaddr = 0;
549 if (aarch64)
550 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
551 else
552 vaddr = vaddr_tainted;
553 uint32_t flags = req->getFlags();
554
555 bool is_fetch = (mode == Execute);
556 bool is_write = (mode == Write);
557
558 if (!is_fetch) {
559 assert(flags & MustBeOne);
560 if (sctlr.a || !(flags & AllowUnaligned)) {
561 if (vaddr & mask(flags & AlignmentMask)) {
562 // LPAE is always disabled in SE mode
563 return std::make_shared<DataAbort>(
564 vaddr_tainted,
565 TlbEntry::DomainType::NoAccess, is_write,
566 ArmFault::AlignmentFault, isStage2,
567 ArmFault::VmsaTran);
568 }
569 }
570 }
571
572 Addr paddr;
573 Process *p = tc->getProcessPtr();
574
575 if (!p->pTable->translate(vaddr, paddr))
576 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
577 req->setPaddr(paddr);
578
579 return NoFault;
580 }
581
582 Fault
583 TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
584 {
585 return NoFault;
586 }
587
588 Fault
589 TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
590 bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
591 {
592 return NoFault;
593 }
594
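// AArch32 permission check: HCR.PTW for stage 2 walks of stage 1 tables,
// alignment for device/strongly-ordered memory, prefetches to uncacheable
// regions, domain checks (short-descriptor format only), and finally the
// AP/HAP/XN access permission checks.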
595 Fault
596 TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
597 {
598 Addr vaddr = req->getVaddr(); // 32-bit addresses don't have to be purified
599 uint32_t flags = req->getFlags();
600 bool is_fetch = (mode == Execute);
601 bool is_write = (mode == Write);
602 bool is_priv = isPriv && !(flags & UserMode);
603
604 // Get the translation type from the actual table entry
605 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
606 : ArmFault::VmsaTran;
607
608 // If this is the second stage of translation and the request is for a
609 // stage 1 page table walk then we need to check the HCR.PTW bit. This
610 // allows us to generate a fault if the request targets an area marked
611 // as a device or strongly ordered.
612 if (isStage2 && req->isPTWalk() && hcr.ptw &&
613 (te->mtype != TlbEntry::MemoryType::Normal)) {
614 return std::make_shared<DataAbort>(
615 vaddr, te->domain, is_write,
616 ArmFault::PermissionLL + te->lookupLevel,
617 isStage2, tranMethod);
618 }
619
620 // Generate an alignment fault for unaligned data accesses to device or
621 // strongly ordered memory
622 if (!is_fetch) {
623 if (te->mtype != TlbEntry::MemoryType::Normal) {
624 if (vaddr & mask(flags & AlignmentMask)) {
625 alignFaults++;
626 return std::make_shared<DataAbort>(
627 vaddr, TlbEntry::DomainType::NoAccess, is_write,
628 ArmFault::AlignmentFault, isStage2,
629 tranMethod);
630 }
631 }
632 }
633
634 if (te->nonCacheable) {
635 // Prevent prefetching from I/O devices.
636 if (req->isPrefetch()) {
637 // Here we can safely use the fault status for the short
638 // desc. format in all cases
639 return std::make_shared<PrefetchAbort>(
640 vaddr, ArmFault::PrefetchUncacheable,
641 isStage2, tranMethod);
642 }
643 }
644
645 if (!te->longDescFormat) {
646 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
647 case 0:
648 domainFaults++;
649 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
650 " domain: %#x write:%d\n", dacr,
651 static_cast<uint8_t>(te->domain), is_write);
652 if (is_fetch)
653 return std::make_shared<PrefetchAbort>(
654 vaddr,
655 ArmFault::DomainLL + te->lookupLevel,
656 isStage2, tranMethod);
657 else
658 return std::make_shared<DataAbort>(
659 vaddr, te->domain, is_write,
660 ArmFault::DomainLL + te->lookupLevel,
661 isStage2, tranMethod);
662 case 1:
663 // Continue with permissions check
664 break;
665 case 2:
666 panic("UNPRED domain\n");
667 case 3:
668 return NoFault;
669 }
670 }
671
672 // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
673 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
674 uint8_t hap = te->hap;
675
676 if (sctlr.afe == 1 || te->longDescFormat)
677 ap |= 1;
678
679 bool abt;
680 bool isWritable = true;
681 // If this is a stage 2 access (e.g. for reading stage 1 page table
682 // entries) then don't perform the AP permissions check; we still do the
683 // HAP check below.
684 if (isStage2) {
685 abt = false;
686 } else {
687 switch (ap) {
688 case 0:
689 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
690 (int)sctlr.rs);
691 if (!sctlr.xp) {
692 switch ((int)sctlr.rs) {
693 case 2:
694 abt = is_write;
695 break;
696 case 1:
697 abt = is_write || !is_priv;
698 break;
699 case 0:
700 case 3:
701 default:
702 abt = true;
703 break;
704 }
705 } else {
706 abt = true;
707 }
708 break;
709 case 1:
710 abt = !is_priv;
711 break;
712 case 2:
713 abt = !is_priv && is_write;
714 isWritable = is_priv;
715 break;
716 case 3:
717 abt = false;
718 break;
719 case 4:
720 panic("UNPRED premissions\n");
721 case 5:
722 abt = !is_priv || is_write;
723 isWritable = false;
724 break;
725 case 6:
726 case 7:
727 abt = is_write;
728 isWritable = false;
729 break;
730 default:
731 panic("Unknown permissions %#x\n", ap);
732 }
733 }
734
735 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
736 bool xn = te->xn || (isWritable && sctlr.wxn) ||
737 (ap == 3 && sctlr.uwxn && is_priv);
738 if (is_fetch && (abt || xn ||
739 (te->longDescFormat && te->pxn && !is_priv) ||
740 (isSecure && te->ns && scr.sif))) {
741 permsFaults++;
742 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
743 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
744 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
745 return std::make_shared<PrefetchAbort>(
746 vaddr,
747 ArmFault::PermissionLL + te->lookupLevel,
748 isStage2, tranMethod);
749 } else if (abt | hapAbt) {
750 permsFaults++;
751 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
752 " write:%d\n", ap, is_priv, is_write);
753 return std::make_shared<DataAbort>(
754 vaddr, te->domain, is_write,
755 ArmFault::PermissionLL + te->lookupLevel,
756 isStage2 | !abt, tranMethod);
757 }
758 return NoFault;
759 }
760
761
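// AArch64 permission check: combines the 2-bit AP field with XN/PXN to
// grant or deny the access based on the current exception level.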
762 Fault
763 TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
764 ThreadContext *tc)
765 {
766 assert(aarch64);
767
768 Addr vaddr_tainted = req->getVaddr();
769 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
770
771 uint32_t flags = req->getFlags();
772 bool is_fetch = (mode == Execute);
773 bool is_write = (mode == Write);
774 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
775
776 updateMiscReg(tc, curTranType);
777
778 // If this is the second stage of translation and the request is for a
779 // stage 1 page table walk then we need to check the HCR.PTW bit. This
780 // allows us to generate a fault if the request targets an area marked
781 // as a device or strongly ordered.
782 if (isStage2 && req->isPTWalk() && hcr.ptw &&
783 (te->mtype != TlbEntry::MemoryType::Normal)) {
784 return std::make_shared<DataAbort>(
785 vaddr_tainted, te->domain, is_write,
786 ArmFault::PermissionLL + te->lookupLevel,
787 isStage2, ArmFault::LpaeTran);
788 }
789
790 // Generate an alignment fault for unaligned accesses to device or
791 // strongly ordered memory
792 if (!is_fetch) {
793 if (te->mtype != TlbEntry::MemoryType::Normal) {
794 if (vaddr & mask(flags & AlignmentMask)) {
795 alignFaults++;
796 return std::make_shared<DataAbort>(
797 vaddr_tainted,
798 TlbEntry::DomainType::NoAccess, is_write,
799 ArmFault::AlignmentFault, isStage2,
800 ArmFault::LpaeTran);
801 }
802 }
803 }
804
805 if (te->nonCacheable) {
806 // Prevent prefetching from I/O devices.
807 if (req->isPrefetch()) {
808 // Here we can safely use the fault status for the short
809 // desc. format in all cases
810 return std::make_shared<PrefetchAbort>(
811 vaddr_tainted,
812 ArmFault::PrefetchUncacheable,
813 isStage2, ArmFault::LpaeTran);
814 }
815 }
816
817 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
818 bool grant = false;
819
820 uint8_t xn = te->xn;
821 uint8_t pxn = te->pxn;
822 bool r = !is_write && !is_fetch;
823 bool w = is_write;
824 bool x = is_fetch;
825 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
826 "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
827
828 if (isStage2) {
829 panic("Virtualization in AArch64 state is not supported yet");
830 } else {
831 switch (aarch64EL) {
832 case EL0:
833 {
834 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
835 switch (perm) {
836 case 0:
837 case 1:
838 case 8:
839 case 9:
840 grant = x;
841 break;
842 case 4:
843 case 5:
844 grant = r || w || (x && !sctlr.wxn);
845 break;
846 case 6:
847 case 7:
848 grant = r || w;
849 break;
850 case 12:
851 case 13:
852 grant = r || x;
853 break;
854 case 14:
855 case 15:
856 grant = r;
857 break;
858 default:
859 grant = false;
860 }
861 }
862 break;
863 case EL1:
864 {
865 uint8_t perm = (ap << 2) | (xn << 1) | pxn;
866 switch (perm) {
867 case 0:
868 case 2:
869 grant = r || w || (x && !sctlr.wxn);
870 break;
871 case 1:
872 case 3:
873 case 4:
874 case 5:
875 case 6:
876 case 7:
877 // regions that are writeable at EL0 should not be
878 // executable at EL1
879 grant = r || w;
880 break;
881 case 8:
882 case 10:
883 case 12:
884 case 14:
885 grant = r || x;
886 break;
887 case 9:
888 case 11:
889 case 13:
890 case 15:
891 grant = r;
892 break;
893 default:
894 grant = false;
895 }
896 }
897 break;
898 case EL2:
899 case EL3:
900 {
901 uint8_t perm = (ap & 0x2) | xn;
902 switch (perm) {
903 case 0:
904 grant = r || w || (x && !sctlr.wxn);
905 break;
906 case 1:
907 grant = r || w;
908 break;
909 case 2:
910 grant = r || x;
911 break;
912 case 3:
913 grant = r;
914 break;
915 default:
916 grant = false;
917 }
918 }
919 break;
920 }
921 }
922
923 if (!grant) {
924 if (is_fetch) {
925 permsFaults++;
926 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
927 "AP:%d priv:%d write:%d ns:%d sif:%d "
928 "sctlr.afe: %d\n",
929 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
930 // Use the PC value instead of vaddr because vaddr might be aligned to
931 // a cache line and should not be the address reported in FAR
932 return std::make_shared<PrefetchAbort>(
933 req->getPC(),
934 ArmFault::PermissionLL + te->lookupLevel,
935 isStage2, ArmFault::LpaeTran);
936 } else {
937 permsFaults++;
938 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
939 "priv:%d write:%d\n", ap, is_priv, is_write);
940 return std::make_shared<DataAbort>(
941 vaddr_tainted, te->domain, is_write,
942 ArmFault::PermissionLL + te->lookupLevel,
943 isStage2, ArmFault::LpaeTran);
944 }
945 }
946
947 return NoFault;
948 }
949
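// Full-system translation: handles the CLREX and MMU-off special cases
// (setting default memory attributes), otherwise looks up the TLB, walking
// the page tables on a miss, and runs the permission checks.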
950 Fault
951 TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
952 Translation *translation, bool &delay, bool timing,
953 TLB::ArmTranslationType tranType, bool functional)
954 {
955 // No such thing as a functional timing access
956 assert(!(timing && functional));
957
958 updateMiscReg(tc, tranType);
959
960 Addr vaddr_tainted = req->getVaddr();
961 Addr vaddr = 0;
962 if (aarch64)
963 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
964 else
965 vaddr = vaddr_tainted;
966 uint32_t flags = req->getFlags();
967
968 bool is_fetch = (mode == Execute);
969 bool is_write = (mode == Write);
970 bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
971 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
972 : ArmFault::VmsaTran;
973
974 req->setAsid(asid);
975
976 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
977 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
978
979 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
980 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
981 scr, sctlr, flags, tranType);
982
983 // Generate an alignment fault for unaligned PC
984 if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
985 return std::make_shared<PCAlignmentFault>(req->getPC());
986 }
987
988 // If this is a clrex instruction, provide a PA of 0 with no fault
989 // This will force the monitor to set the tracked address to 0
990 // a bit of a hack, but this effectively clears this processor's monitor
991 if (flags & Request::CLEAR_LL){
992 // @todo: check implications of security extensions
993 req->setPaddr(0);
994 req->setFlags(Request::UNCACHEABLE);
995 req->setFlags(Request::CLEAR_LL);
996 return NoFault;
997 }
998 if ((req->isInstFetch() && (!sctlr.i)) ||
999 ((!req->isInstFetch()) && (!sctlr.c))){
1000 req->setFlags(Request::UNCACHEABLE);
1001 }
1002 if (!is_fetch) {
1003 assert(flags & MustBeOne);
1004 if (sctlr.a || !(flags & AllowUnaligned)) {
1005 if (vaddr & mask(flags & AlignmentMask)) {
1006 alignFaults++;
1007 return std::make_shared<DataAbort>(
1008 vaddr_tainted,
1009 TlbEntry::DomainType::NoAccess, is_write,
1010 ArmFault::AlignmentFault, isStage2,
1011 tranMethod);
1012 }
1013 }
1014 }
1015
1016 // If the MMU is off (stage 1: sctlr.m == 0, stage 2: hcr.vm == 0) use the address as-is
1017 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1018
1019 req->setPaddr(vaddr);
1020 // When the MMU is off the security attribute corresponds to the
1021 // security state of the processor
1022 if (isSecure)
1023 req->setFlags(Request::SECURE);
1024
1025 // @todo: double check this (ARM ARM issue C B3.2.1)
1026 if (long_desc_format || sctlr.tre == 0) {
1027 req->setFlags(Request::UNCACHEABLE);
1028 } else {
1029 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
1030 req->setFlags(Request::UNCACHEABLE);
1031 }
1032
1033 // Set memory attributes
1034 TlbEntry temp_te;
1035 temp_te.ns = !isSecure;
1036 if (isStage2 || hcr.dc == 0 || isSecure ||
1037 (isHyp && !(tranType & S1CTran))) {
1038
1039 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
1040 : TlbEntry::MemoryType::StronglyOrdered;
1041 temp_te.innerAttrs = 0x0;
1042 temp_te.outerAttrs = 0x0;
1043 temp_te.shareable = true;
1044 temp_te.outerShareable = true;
1045 } else {
1046 temp_te.mtype = TlbEntry::MemoryType::Normal;
1047 temp_te.innerAttrs = 0x3;
1048 temp_te.outerAttrs = 0x3;
1049 temp_te.shareable = false;
1050 temp_te.outerShareable = false;
1051 }
1052 temp_te.setAttributes(long_desc_format);
1053 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1054 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1055 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1056 isStage2);
1057 setAttr(temp_te.attributes);
1058
1059 return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
1060 }
1061
1062 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1063 isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1064 // Translation enabled
1065
1066 TlbEntry *te = NULL;
1067 TlbEntry mergeTe;
1068 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1069 functional, &mergeTe);
1070 // only proceed if we have a valid table entry
1071 if ((te == NULL) && (fault == NoFault)) delay = true;
1072
1073 // If we have the table entry transfer some of the attributes to the
1074 // request that triggered the translation
1075 if (te != NULL) {
1076 // Set memory attributes
1077 DPRINTF(TLBVerbose,
1078 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1079 "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1080 te->shareable, te->innerAttrs, te->outerAttrs,
1081 static_cast<uint8_t>(te->mtype), isStage2);
1082 setAttr(te->attributes);
1083 if (te->nonCacheable) {
1084 req->setFlags(Request::UNCACHEABLE);
1085 }
1086
1087 if (!bootUncacheability &&
1088 ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
1089 req->setFlags(Request::UNCACHEABLE);
1090 }
1091
1092 req->setPaddr(te->pAddr(vaddr));
1093 if (isSecure && !te->ns) {
1094 req->setFlags(Request::SECURE);
1095 }
1096 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
1097 (te->mtype != TlbEntry::MemoryType::Normal)) {
1098 // Unaligned accesses to Device memory should always cause an
1099 // abort regardless of sctlr.a
1100 alignFaults++;
1101 return std::make_shared<DataAbort>(
1102 vaddr_tainted,
1103 TlbEntry::DomainType::NoAccess, is_write,
1104 ArmFault::AlignmentFault, isStage2,
1105 tranMethod);
1106 }
1107
1108 // Check for a trickbox generated address fault
1109 if (fault == NoFault) {
1110 fault = trickBoxCheck(req, mode, te->domain);
1111 }
1112 }
1113
1114 // Generate Illegal Inst Set State fault if IL bit is set in CPSR
1115 if (fault == NoFault) {
1116 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1117 if (aarch64 && is_fetch && cpsr.il == 1) {
1118 return std::make_shared<IllegalInstSetStateFault>();
1119 }
1120 }
1121
1122 return fault;
1123 }
1124
1125 Fault
1126 TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
1127 TLB::ArmTranslationType tranType)
1128 {
1129 updateMiscReg(tc, tranType);
1130
1131 if (directToStage2) {
1132 assert(stage2Tlb);
1133 return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1134 }
1135
1136 bool delay = false;
1137 Fault fault;
1138 if (FullSystem)
1139 fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1140 else
1141 fault = translateSe(req, tc, mode, NULL, delay, false);
1142 assert(!delay);
1143 return fault;
1144 }
1145
1146 Fault
1147 TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
1148 TLB::ArmTranslationType tranType)
1149 {
1150 updateMiscReg(tc, tranType);
1151
1152 if (directToStage2) {
1153 assert(stage2Tlb);
1154 return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1155 }
1156
1157 bool delay = false;
1158 Fault fault;
1159 if (FullSystem)
1160 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1161 else
1162 fault = translateSe(req, tc, mode, NULL, delay, false);
1163 assert(!delay);
1164 return fault;
1165 }
1166
1167 Fault
1168 TLB::translateTiming(RequestPtr req, ThreadContext *tc,
1169 Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1170 {
1171 updateMiscReg(tc, tranType);
1172
1173 if (directToStage2) {
1174 assert(stage2Tlb);
1175 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1176 }
1177
1178 assert(translation);
1179
1180 return translateComplete(req, tc, translation, mode, tranType, isStage2);
1181 }
1182
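// Run the translation and, unless a stage 2 translation is still in flight,
// report the result (or a delay) back to the Translation object.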
1183 Fault
1184 TLB::translateComplete(RequestPtr req, ThreadContext *tc,
1185 Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1186 bool callFromS2)
1187 {
1188 bool delay = false;
1189 Fault fault;
1190 if (FullSystem)
1191 fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1192 else
1193 fault = translateSe(req, tc, mode, translation, delay, true);
1194 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1195 NoFault);
1196 // If we have a translation and we're not in the middle of doing a stage
1197 // 2 translation, tell the translation that we've either finished or it's
1198 // going to take a while. By not doing this when we're in the middle of a
1199 // stage 2 translation we prevent marking the translation as delayed twice:
1200 // once when the translation starts and again when the stage 1 translation
1201 // completes.
1202 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1203 if (!delay)
1204 translation->finish(fault, req, tc, mode);
1205 else
1206 translation->markDelayed();
1207 }
1208 return fault;
1209 }
1210
1211 BaseMasterPort*
1212 TLB::getMasterPort()
1213 {
1214 return &tableWalker->getMasterPort("port");
1215 }
1216
1217 DmaPort&
1218 TLB::getWalkerPort()
1219 {
1220 return tableWalker->getWalkerPort();
1221 }
1222
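// Cache the translation-related system registers (SCTLR, TCR/TTBCR, SCR,
// HCR, ASID, VMID, ...) for the current state; re-reading is skipped when
// they are still valid and the translation type hasn't changed.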
1223 void
1224 TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
1225 {
1226 // check if the regs have changed, or the translation mode is different.
1227 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1228 // one type of translation anyway
1229 if (miscRegValid && ((tranType == curTranType) || isStage2)) {
1230 return;
1231 }
1232
1233 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1234 CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
1235 // Dependencies: SCR/SCR_EL3, CPSR
1236 isSecure = inSecureState(tc);
1237 isSecure &= (tranType & HypMode) == 0;
1238 isSecure &= (tranType & S1S2NsTran) == 0;
1239 aarch64 = !cpsr.width;
1240 if (aarch64) { // AArch64
1241 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
1242 switch (aarch64EL) {
1243 case EL0:
1244 case EL1:
1245 {
1246 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1247 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1248 uint64_t ttbr_asid = ttbcr.a1 ?
1249 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1250 tc->readMiscReg(MISCREG_TTBR0_EL1);
1251 asid = bits(ttbr_asid,
1252 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1253 }
1254 break;
1255 case EL2:
1256 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1257 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1258 asid = -1;
1259 break;
1260 case EL3:
1261 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1262 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1263 asid = -1;
1264 break;
1265 }
1266 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1267 isPriv = aarch64EL != EL0;
1268 // @todo: modify this behaviour to support Virtualization in
1269 // AArch64
1270 vmid = 0;
1271 isHyp = false;
1272 directToStage2 = false;
1273 stage2Req = false;
1274 } else { // AArch32
1275 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
1276 !isSecure));
1277 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
1278 !isSecure));
1279 scr = tc->readMiscReg(MISCREG_SCR);
1280 isPriv = cpsr.mode != MODE_USER;
1281 if (haveLPAE && ttbcr.eae) {
1282 // Long-descriptor translation table format in use
1283 uint64_t ttbr_asid = tc->readMiscReg(
1284 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
1285 : MISCREG_TTBR0,
1286 tc, !isSecure));
1287 asid = bits(ttbr_asid, 55, 48);
1288 } else {
1289 // Short-descriptor translation table format in use
1290 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
1291 MISCREG_CONTEXTIDR, tc,!isSecure));
1292 asid = context_id.asid;
1293 }
1294 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
1295 !isSecure));
1296 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
1297 !isSecure));
1298 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
1299 !isSecure));
1300 hcr = tc->readMiscReg(MISCREG_HCR);
1301
1302 if (haveVirtualization) {
1303 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1304 isHyp = cpsr.mode == MODE_HYP;
1305 isHyp |= tranType & HypMode;
1306 isHyp &= (tranType & S1S2NsTran) == 0;
1307 isHyp &= (tranType & S1CTran) == 0;
1308 if (isHyp) {
1309 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1310 }
1311 // Work out if we should skip the first stage of translation and go
1312 // directly to stage 2. This value is cached so we don't have to
1313 // compute it for every translation.
1314 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1315 !(tranType & S1CTran);
1316 directToStage2 = stage2Req && !sctlr.m;
1317 } else {
1318 vmid = 0;
1319 stage2Req = false;
1320 isHyp = false;
1321 directToStage2 = false;
1322 }
1323 }
1324 miscRegValid = true;
1325 curTranType = tranType;
1326 }
1327
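// Look up the TLB entry for vaddr. On a miss (other than for a prefetch)
// start a table walk; for atomic/functional accesses the lookup is retried
// once the walk has completed.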
1328 Fault
1329 TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1330 Translation *translation, bool timing, bool functional,
1331 bool is_secure, TLB::ArmTranslationType tranType)
1332 {
1333 bool is_fetch = (mode == Execute);
1334 bool is_write = (mode == Write);
1335
1336 Addr vaddr_tainted = req->getVaddr();
1337 Addr vaddr = 0;
1338 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1339 if (aarch64) {
1340 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
1341 } else {
1342 vaddr = vaddr_tainted;
1343 }
1344 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1345 if (*te == NULL) {
1346 if (req->isPrefetch()) {
1347 // if the request is a prefetch don't attempt to fill the TLB or go
1348 // any further with the memory access (here we can safely use the
1349 // fault status for the short desc. format in all cases)
1350 prefetchFaults++;
1351 return std::make_shared<PrefetchAbort>(
1352 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1353 }
1354
1355 if (is_fetch)
1356 instMisses++;
1357 else if (is_write)
1358 writeMisses++;
1359 else
1360 readMisses++;
1361
1362 // Start a translation table walk, passing variables rather than
1363 // re-retrieving them in the table walker, for speed
1364 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1365 vaddr_tainted, asid, vmid);
1366 Fault fault;
1367 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1368 translation, timing, functional, is_secure,
1369 tranType);
1370 // for timing mode, return and wait for table walk,
1371 if (timing || fault != NoFault) {
1372 return fault;
1373 }
1374
1375 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1376 if (!*te)
1377 printTlb();
1378 assert(*te);
1379 } else {
1380 if (is_fetch)
1381 instHits++;
1382 else if (is_write)
1383 writeHits++;
1384 else
1385 readHits++;
1386 }
1387 return NoFault;
1388 }
1389
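// Get the stage 1 entry and check its permissions. If a stage 2 translation
// is also required, a Stage2LookUp combines the stage 1 and stage 2 results
// into mergeTe.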
1390 Fault
1391 TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
1392 Translation *translation, bool timing, bool functional,
1393 TlbEntry *mergeTe)
1394 {
1395 Fault fault;
1396 TlbEntry *s1Te = NULL;
1397
1398 Addr vaddr_tainted = req->getVaddr();
1399
1400 // Get the stage 1 table entry
1401 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1402 isSecure, curTranType);
1403 // only proceed if we have a valid table entry
1404 if ((s1Te != NULL) && (fault == NoFault)) {
1405 // Check stage 1 permissions before checking stage 2
1406 if (aarch64)
1407 fault = checkPermissions64(s1Te, req, mode, tc);
1408 else
1409 fault = checkPermissions(s1Te, req, mode);
1410 if (stage2Req && (fault == NoFault)) {
1411 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1412 req, translation, mode, timing, functional, curTranType);
1413 fault = s2Lookup->getTe(tc, mergeTe);
1414 if (s2Lookup->isComplete()) {
1415 *te = mergeTe;
1416 // We've finished with the lookup so delete it
1417 delete s2Lookup;
1418 } else {
1419 // The lookup hasn't completed, so we can't delete it now. We
1420 // get round this by asking the object to self delete when the
1421 // translation is complete.
1422 s2Lookup->setSelfDelete();
1423 }
1424 } else {
1425 // This case deals with an S1 hit (or bypass), followed by
1426 // an S2 hit-but-perms issue
1427 if (isStage2) {
1428 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1429 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1430 if (fault != NoFault) {
1431 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1432 armFault->annotate(ArmFault::S1PTW, false);
1433 armFault->annotate(ArmFault::OVA, vaddr_tainted);
1434 }
1435 }
1436 *te = s1Te;
1437 }
1438 }
1439 return fault;
1440 }
1441
1442 ArmISA::TLB *
1443 ArmTLBParams::create()
1444 {
1445 return new ArmISA::TLB(this);
1446 }