arm: allow DC instructions by default so SE mode works
[gem5.git] / src / arch / arm / tlb.cc
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */
44
45 #include <string>
46 #include <vector>
47
48 #include "arch/arm/faults.hh"
49 #include "arch/arm/pagetable.hh"
50 #include "arch/arm/system.hh"
51 #include "arch/arm/table_walker.hh"
52 #include "arch/arm/stage2_lookup.hh"
53 #include "arch/arm/stage2_mmu.hh"
54 #include "arch/arm/tlb.hh"
55 #include "arch/arm/utility.hh"
56 #include "base/inifile.hh"
57 #include "base/str.hh"
58 #include "base/trace.hh"
59 #include "cpu/base.hh"
60 #include "cpu/thread_context.hh"
61 #include "debug/Checkpoint.hh"
62 #include "debug/TLB.hh"
63 #include "debug/TLBVerbose.hh"
64 #include "mem/page_table.hh"
65 #include "params/ArmTLB.hh"
66 #include "sim/full_system.hh"
67 #include "sim/process.hh"
68
using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

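// Cache a pointer to the stage 2 TLB if a stage 2 MMU has been attached
// via setMMU()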
void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m)
{
    stage2Mmu = m;
    tableWalker->setMMU(m);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

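// Look up an entry matching the virtual address and translation context
// (ASID, VMID, hyp/secure state, target EL). On a non-functional hit the
// entry may be promoted towards the MRU end of the table.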
TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Scan the table in MRU-to-LRU order
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// Insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // Insert into the MRU position and evict the LRU entry

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn, (secure_lookup ?
            "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
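    // The ASID argument is unused here since ignore_asn is set, so pass an
    // arbitrary dummy value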
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

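// Decide whether a TLB entry's exception level matches the target EL of an
// invalidation: EL2 and EL3 entries must match exactly, while EL0 and EL1
// entries are treated as a single (EL0/EL1) translation regime.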
bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        bootUncacheability = otlb->bootUncacheability;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(ostream &os)
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(bootUncacheability);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(bootUncacheability);

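    // Restore at most 'size' entries; if the checkpoint was taken with a
    // larger TLB the surplus entries are dropped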
    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
    }
}

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

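    // The totals are formula stats derived from the base counters above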
    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return Fault(new GenericPageTableFault(vaddr_tainted));
    req->setPaddr(paddr);

    return NoFault;
}

Fault
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    return NoFault;
}

Fault
TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
    bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit accesses don't need to be purified
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess,
                                     is_write, ArmFault::AlignmentFault,
                                     isStage2, tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
                                     isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return new PrefetchAbort(vaddr,
                                         ArmFault::DomainLL + te->lookupLevel,
                                         isStage2, tranMethod);
            else
                return new DataAbort(vaddr, te->domain, is_write,
                                     ArmFault::DomainLL + te->lookupLevel,
                                     isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
              (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return new PrefetchAbort(vaddr,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return new DataAbort(vaddr, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2 | !abt, tranMethod);
    }
    return NoFault;
}

Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return new DataAbort(vaddr_tainted, te->domain, is_write,
                             ArmFault::PermissionLL + te->lookupLevel,
                             isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchUncacheable,
                                     isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

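    // The switches below encode the stage 1 permission check: at EL0/EL1
    // the AP[2:1], XN and PXN bits are combined into a 4-bit selector,
    // while at EL2/EL3 only AP[2] and XN are relevant.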
    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return new PrefetchAbort(req->getPC(),
                                     ArmFault::PermissionLL + te->lookupLevel,
                                     isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return new DataAbort(vaddr_tainted, te->domain, is_write,
                                 ArmFault::PermissionLL + te->lookupLevel,
                                 isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    // Generate an alignment fault for unaligned PC
    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
        return new PCAlignmentFault(req->getPC());
    }

    // If this is a clrex instruction, provide a PA of 0 with no fault
    // This will force the monitor to set the tracked address to 0
    // a bit of a hack but this effectively clears this processor's monitor
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setPaddr(0);
        req->setFlags(Request::UNCACHEABLE);
        req->setFlags(Request::CLEAR_LL);
        return NoFault;
    }
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return new DataAbort(vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, is_write,
                                     ArmFault::AlignmentFault, isStage2,
                                     tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);
        if (te->nonCacheable) {
            req->setFlags(Request::UNCACHEABLE);
        }

        if (!bootUncacheability &&
            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
            req->setFlags(Request::UNCACHEABLE);
        }

        req->setPaddr(te->pAddr(vaddr));
        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return new DataAbort(vaddr_tainted,
                                 TlbEntry::DomainType::NoAccess, is_write,
                                 ArmFault::AlignmentFault, isStage2,
                                 tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);
        }
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return new IllegalInstSetStateFault();
        }
    }

    return fault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation, tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice:
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &tableWalker->getMasterPort("port");
}

DmaPort&
TLB::getWalkerPort()
{
    return tableWalker->getWalkerPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc);
    isSecure &= (tranType & HypMode) == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) { // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid = 0;
        isHyp = false;
        directToStage2 = false;
        stage2Req = false;
    } else { // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                               !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
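            // The current VMID lives in VTTBR[55:48]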
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    curTranType = tranType;
}

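// Find the TLB entry covering the request's virtual address, kicking off a
// hardware table walk on a miss. In timing mode a miss returns immediately
// and the walk completes asynchronously.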
Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            prefetchFaults++;
            return new PrefetchAbort(vaddr_tainted,
                                     ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType);
        // for timing mode, return and wait for the table walk
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

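// Produce the final table entry for a request: check the stage 1 entry's
// permissions and, when a stage 2 translation is required, merge the stage 1
// and stage 2 entries' attributes into mergeTe.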
Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(
                        fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}