arm: Add support for ARMv8 (AArch64 & AArch32)
[gem5.git] / src / arch / arm / tlb.cc
index bbf9232c5bdbf56f577e18b89db8785ee8c69b8f..037f7490e63592f8bc092f650d677b2e94c26cd5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010 ARM Limited
+ * Copyright (c) 2010-2013 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
 
 #include "arch/arm/faults.hh"
 #include "arch/arm/pagetable.hh"
+#include "arch/arm/system.hh"
+#include "arch/arm/table_walker.hh"
+#include "arch/arm/stage2_lookup.hh"
+#include "arch/arm/stage2_mmu.hh"
 #include "arch/arm/tlb.hh"
 #include "arch/arm/utility.hh"
 #include "base/inifile.hh"
 #include "base/str.hh"
 #include "base/trace.hh"
+#include "cpu/base.hh"
 #include "cpu/thread_context.hh"
+#include "debug/Checkpoint.hh"
+#include "debug/TLB.hh"
+#include "debug/TLBVerbose.hh"
 #include "mem/page_table.hh"
 #include "params/ArmTLB.hh"
+#include "sim/full_system.hh"
 #include "sim/process.hh"
 
-
 using namespace std;
 using namespace ArmISA;
 
-TLB::TLB(const Params *p)
-    : BaseTLB(p), size(p->size), nlu(0)
+TLB::TLB(const ArmTLBParams *p)
+    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
+    isStage2(p->is_stage2), tableWalker(p->walker), stage2Tlb(NULL),
+    stage2Mmu(NULL), rangeMRU(1), bootUncacheability(false),
+    miscRegValid(false), curTranType(NormalTran)
 {
-    table = new ArmISA::PTE[size];
-    memset(table, 0, sizeof(ArmISA::PTE[size]));
+    tableWalker->setTlb(this);
 
+    // Cache system-level properties
+    haveLPAE = tableWalker->haveLPAE();
+    haveVirtualization = tableWalker->haveVirtualization();
+    haveLargeAsid64 = tableWalker->haveLargeAsid64();
 }
 
 TLB::~TLB()
 {
-    if (table)
-        delete [] table;
+    delete[] table;
+}
+
+void
+TLB::init()
+{
+    if (stage2Mmu && !isStage2)
+        stage2Tlb = stage2Mmu->stage2Tlb();
 }
 
-ArmISA::PTE *
-TLB::lookup(Addr vpn, uint8_t asn) const
+void
+TLB::setMMU(Stage2MMU *m)
 {
-    panic("lookup() not implemented for ARM\n");
+    stage2Mmu = m;
+    tableWalker->setMMU(m);
+}
+
+bool
+TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
+{
+    updateMiscReg(tc);
+
+    if (directToStage2) {
+        assert(stage2Tlb);
+        return stage2Tlb->translateFunctional(tc, va, pa);
+    }
+
+    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
+                         aarch64 ? aarch64EL : EL1);
+    if (!e)
+        return false;
+    pa = e->pAddr(va);
+    return true;
+}
+
+Fault
+TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
+{
+    return NoFault;
+}
+
+TlbEntry*
+TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
+            bool functional, bool ignore_asn, uint8_t target_el)
+{
+
+    TlbEntry *retval = NULL;
+
+    // Walk the table; on a hit, maintain the pseudo-LRU ordering
+    int x = 0;
+    while (retval == NULL && x < size) {
+        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
+             target_el)) ||
+            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
+            // We only move the hit entry ahead when the position is higher
+            // than rangeMRU
+            if (x > rangeMRU && !functional) {
+                TlbEntry tmp_entry = table[x];
+                for (int i = x; i > 0; i--)
+                    table[i] = table[i - 1];
+                table[0] = tmp_entry;
+                retval = &table[0];
+            } else {
+                retval = &table[x];
+            }
+            break;
+        }
+        ++x;
+    }
+
+    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
+            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
+            "el: %d\n",
+            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
+            retval ? retval->pfn       : 0, retval ? retval->size  : 0,
+            retval ? retval->pAddr(va) : 0, retval ? retval->ap    : 0,
+            retval ? retval->ns        : 0, retval ? retval->nstid : 0,
+            retval ? retval->global    : 0, retval ? retval->asid  : 0,
+            retval ? retval->el        : 0);
+
+    return retval;
 }
 
 // insert a new TLB entry
 void
-TLB::insert(Addr addr, ArmISA::PTE &pte)
+TLB::insert(Addr addr, TlbEntry &entry)
+{
+    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
+            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
+            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
+            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
+            entry.global, entry.valid, entry.nonCacheable, entry.xn,
+            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
+            entry.isHyp);
+
+    if (table[size - 1].valid)
+        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
+                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
+                table[size-1].vpn << table[size-1].N, table[size-1].asid,
+                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
+                table[size-1].size, table[size-1].ap, table[size-1].ns,
+                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
+                table[size-1].el);
+
+    // Insert at the MRU position, evicting the LRU entry
+
+    for (int i = size - 1; i > 0; --i)
+        table[i] = table[i-1];
+    table[0] = entry;
+
+    inserts++;
+}
+
+void
+TLB::printTlb() const
+{
+    int x = 0;
+    TlbEntry *te;
+    DPRINTF(TLB, "Current TLB contents:\n");
+    while (x < size) {
+        te = &table[x];
+        if (te->valid)
+            DPRINTF(TLB, " *  %s\n", te->print());
+        ++x;
+    }
+}
+
+void
+TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
+{
+    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
+            (secure_lookup ? "secure" : "non-secure"));
+    int x = 0;
+    TlbEntry *te;
+    while (x < size) {
+        te = &table[x];
+        if (te->valid && secure_lookup == !te->nstid &&
+            (te->vmid == vmid || secure_lookup) &&
+            checkELMatch(target_el, te->el, ignore_el)) {
+
+            DPRINTF(TLB, " -  %s\n", te->print());
+            te->valid = false;
+            flushedEntries++;
+        }
+        ++x;
+    }
+
+    flushTlb++;
+
+    // If there's a second stage TLB (and we're not it) then flush it as well
+    // if we're currently in hyp mode
+    if (!isStage2 && isHyp) {
+        stage2Tlb->flushAllSecurity(secure_lookup, true);
+    }
+}
+
+void
+TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
+{
+    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
+            (hyp ? "hyp" : "non-hyp"));
+    int x = 0;
+    TlbEntry *te;
+    while (x < size) {
+        te = &table[x];
+        if (te->valid && te->nstid && te->isHyp == hyp &&
+            checkELMatch(target_el, te->el, ignore_el)) {
+
+            DPRINTF(TLB, " -  %s\n", te->print());
+            flushedEntries++;
+            te->valid = false;
+        }
+        ++x;
+    }
+
+    flushTlb++;
+
+    // If there's a second stage TLB (and we're not it) then flush it as well
+    if (!isStage2 && !hyp) {
+        stage2Tlb->flushAllNs(false, true);
+    }
+}
+
+void
+TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
+{
+    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
+            "(%s lookup)\n", mva, asn, (secure_lookup ?
+            "secure" : "non-secure"));
+    _flushMva(mva, asn, secure_lookup, false, false, target_el);
+    flushTlbMvaAsid++;
+}
+
+void
+TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
+{
+    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
+            (secure_lookup ? "secure" : "non-secure"));
+
+    int x = 0;
+    TlbEntry *te;
+
+    while (x < size) {
+        te = &table[x];
+        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
+            (te->vmid == vmid || secure_lookup) &&
+            checkELMatch(target_el, te->el, false)) {
+
+            te->valid = false;
+            DPRINTF(TLB, " -  %s\n", te->print());
+            flushedEntries++;
+        }
+        ++x;
+    }
+    flushTlbAsid++;
+}
+
+void
+TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
+{
+    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
+            (secure_lookup ? "secure" : "non-secure"));
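+    // 0xbeef is a dummy ASID; ignore_asn is set so it is never compared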
+    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
+    flushTlbMva++;
+}
+
+void
+TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
+               bool ignore_asn, uint8_t target_el)
+{
+    TlbEntry *te;
+    // D5.7.2: Sign-extend address to 64 bits
+    mva = sext<56>(mva);
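+    // Invalidate every matching entry, repeating the lookup until no entry
+    // matches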
+    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
+                target_el);
+    while (te != NULL) {
+        if (secure_lookup == !te->nstid) {
+            DPRINTF(TLB, " -  %s\n", te->print());
+            te->valid = false;
+            flushedEntries++;
+        }
+        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
+                    target_el);
+    }
+}
+
+bool
+TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
 {
-  fatal("TLB Insert not yet implemented\n");
+    bool elMatch = true;
+    if (!ignore_el) {
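+        // EL2 and EL3 targets must match the entry's EL exactly; EL0 and EL1
+        // entries are treated as a single translation regime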
+        if (target_el == 2 || target_el == 3) {
+            elMatch = (tentry_el  == target_el);
+        } else {
+            elMatch = (tentry_el == 0) || (tentry_el  == 1);
+        }
+    }
+    return elMatch;
 }
 
 void
-TLB::flushAll()
+TLB::drainResume()
 {
-    DPRINTF(TLB, "flushAll\n");
-    memset(table, 0, sizeof(ArmISA::PTE[size]));
-    lookupTable.clear();
-    nlu = 0;
+    // We might have unserialized something or switched CPUs, so make
+    // sure to re-read the misc regs.
+    miscRegValid = false;
 }
 
 void
 TLB::serialize(ostream &os)
 {
-    SERIALIZE_SCALAR(size);
-    SERIALIZE_SCALAR(nlu);
+    DPRINTF(Checkpoint, "Serializing Arm TLB\n");
 
-    for (int i = 0; i < size; i++) {
-        nameOut(os, csprintf("%s.PTE%d", name(), i));
+    SERIALIZE_SCALAR(_attr);
+    SERIALIZE_SCALAR(haveLPAE);
+    SERIALIZE_SCALAR(directToStage2);
+    SERIALIZE_SCALAR(stage2Req);
+    SERIALIZE_SCALAR(bootUncacheability);
+
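+    // Store the entry count so a checkpoint can be restored into a TLB of a
+    // different size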
+    int num_entries = size;
+    SERIALIZE_SCALAR(num_entries);
+    for (int i = 0; i < size; i++) {
+        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
         table[i].serialize(os);
     }
 }
@@ -112,46 +375,65 @@ TLB::serialize(ostream &os)
 void
 TLB::unserialize(Checkpoint *cp, const string &section)
 {
-    UNSERIALIZE_SCALAR(size);
-    UNSERIALIZE_SCALAR(nlu);
+    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");
+
+    UNSERIALIZE_SCALAR(_attr);
+    UNSERIALIZE_SCALAR(haveLPAE);
+    UNSERIALIZE_SCALAR(directToStage2);
+    UNSERIALIZE_SCALAR(stage2Req);
+    UNSERIALIZE_SCALAR(bootUncacheability);
 
-    panic("Need to properly unserialize TLB\n");
-    for (int i = 0; i < size; i++) {
-        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
+    int num_entries;
+    UNSERIALIZE_SCALAR(num_entries);
+    for (int i = 0; i < min(size, num_entries); i++) {
+        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
     }
 }
 
 void
 TLB::regStats()
 {
-    read_hits
+    instHits
+        .name(name() + ".inst_hits")
+        .desc("ITB inst hits")
+        ;
+
+    instMisses
+        .name(name() + ".inst_misses")
+        .desc("ITB inst misses")
+        ;
+
+    instAccesses
+        .name(name() + ".inst_accesses")
+        .desc("ITB inst accesses")
+        ;
+
+    readHits
         .name(name() + ".read_hits")
         .desc("DTB read hits")
         ;
 
-    read_misses
+    readMisses
         .name(name() + ".read_misses")
         .desc("DTB read misses")
         ;
 
-
-    read_accesses
+    readAccesses
         .name(name() + ".read_accesses")
         .desc("DTB read accesses")
         ;
 
-    write_hits
+    writeHits
         .name(name() + ".write_hits")
         .desc("DTB write hits")
         ;
 
-    write_misses
+    writeMisses
         .name(name() + ".write_misses")
         .desc("DTB write misses")
         ;
 
-
-    write_accesses
+    writeAccesses
         .name(name() + ".write_accesses")
         .desc("DTB write accesses")
         ;
@@ -166,66 +448,946 @@ TLB::regStats()
         .desc("DTB misses")
         ;
 
-    invalids
-        .name(name() + ".invalids")
-        .desc("DTB access violations")
-        ;
-
     accesses
         .name(name() + ".accesses")
         .desc("DTB accesses")
         ;
 
-    hits = read_hits + write_hits;
-    misses = read_misses + write_misses;
-    accesses = read_accesses + write_accesses;
+    flushTlb
+        .name(name() + ".flush_tlb")
+        .desc("Number of times complete TLB was flushed")
+        ;
+
+    flushTlbMva
+        .name(name() + ".flush_tlb_mva")
+        .desc("Number of times TLB was flushed by MVA")
+        ;
+
+    flushTlbMvaAsid
+        .name(name() + ".flush_tlb_mva_asid")
+        .desc("Number of times TLB was flushed by MVA & ASID")
+        ;
+
+    flushTlbAsid
+        .name(name() + ".flush_tlb_asid")
+        .desc("Number of times TLB was flushed by ASID")
+        ;
+
+    flushedEntries
+        .name(name() + ".flush_entries")
+        .desc("Number of entries that have been flushed from TLB")
+        ;
+
+    alignFaults
+        .name(name() + ".align_faults")
+        .desc("Number of TLB faults due to alignment restrictions")
+        ;
+
+    prefetchFaults
+        .name(name() + ".prefetch_faults")
+        .desc("Number of TLB faults due to prefetch")
+        ;
+
+    domainFaults
+        .name(name() + ".domain_faults")
+        .desc("Number of TLB faults due to domain restrictions")
+        ;
+
+    permsFaults
+        .name(name() + ".perms_faults")
+        .desc("Number of TLB faults due to permissions restrictions")
+        ;
+
+    instAccesses = instHits + instMisses;
+    readAccesses = readHits + readMisses;
+    writeAccesses = writeHits + writeMisses;
+    hits = readHits + writeHits + instHits;
+    misses = readMisses + writeMisses + instMisses;
+    accesses = readAccesses + writeAccesses + instAccesses;
 }
 
 Fault
-TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
+TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
+                 Translation *translation, bool &delay, bool timing)
 {
-    Addr vaddr = req->getVaddr() & ~PcModeMask;
-    SCTLR sctlr = tc->readMiscReg(MISCREG_SCTLR);
+    updateMiscReg(tc);
+    Addr vaddr_tainted = req->getVaddr();
+    Addr vaddr = 0;
+    if (aarch64)
+        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
+    else
+        vaddr = vaddr_tainted;
     uint32_t flags = req->getFlags();
 
-    if (mode != Execute) {
-        assert(flags & MustBeOne);
+    bool is_fetch = (mode == Execute);
+    bool is_write = (mode == Write);
 
-        if (sctlr.a || (flags & AllowUnaligned) == 0) {
-            if ((vaddr & flags & AlignmentMask) != 0) {
-                return new DataAbort(vaddr, (mode == Write), 0,
-                            ArmFault::AlignmentFault);
+    if (!is_fetch) {
+        assert(flags & MustBeOne);
+        if (sctlr.a || !(flags & AllowUnaligned)) {
+            if (vaddr & mask(flags & AlignmentMask)) {
+                // LPAE is always disabled in SE mode
+                return new DataAbort(vaddr_tainted,
+                        TlbEntry::DomainType::NoAccess, is_write,
+                                     ArmFault::AlignmentFault, isStage2,
+                                     ArmFault::VmsaTran);
             }
         }
     }
-#if !FULL_SYSTEM
-    Process * p = tc->getProcessPtr();
 
     Addr paddr;
+    Process *p = tc->getProcessPtr();
+
     if (!p->pTable->translate(vaddr, paddr))
-        return Fault(new GenericPageTableFault(vaddr));
+        return Fault(new GenericPageTableFault(vaddr_tainted));
     req->setPaddr(paddr);
 
     return NoFault;
-#else
-    if (!sctlr.m) {
-        req->setPaddr(vaddr);
-        return NoFault;
+}
+
+Fault
+TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
+{
+    return NoFault;
+}
+
+Fault
+TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec,
+        bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level)
+{
+    return NoFault;
+}
+
+Fault
+TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
+{
+    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
+    uint32_t flags = req->getFlags();
+    bool is_fetch  = (mode == Execute);
+    bool is_write  = (mode == Write);
+    bool is_priv   = isPriv && !(flags & UserMode);
+
+    // Get the translation type from the actual table entry
+    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
+                                                         : ArmFault::VmsaTran;
+
+    // If this is the second stage of translation and the request is for a
+    // stage 1 page table walk then we need to check the HCR.PTW bit. This
+    // allows us to generate a fault if the request targets an area marked
+    // as a device or strongly ordered.
+    if (isStage2 && req->isPTWalk() && hcr.ptw &&
+        (te->mtype != TlbEntry::MemoryType::Normal)) {
+        return new DataAbort(vaddr, te->domain, is_write,
+                             ArmFault::PermissionLL + te->lookupLevel,
+                             isStage2, tranMethod);
+    }
+
+    // Generate an alignment fault for unaligned data accesses to device or
+    // strongly ordered memory
+    if (!is_fetch) {
+        if (te->mtype != TlbEntry::MemoryType::Normal) {
+            if (vaddr & mask(flags & AlignmentMask)) {
+                alignFaults++;
+                return new DataAbort(vaddr, TlbEntry::DomainType::NoAccess, is_write,
+                                     ArmFault::AlignmentFault, isStage2,
+                                     tranMethod);
+            }
+        }
+    }
+
+    if (te->nonCacheable) {
+        // Prevent prefetching from I/O devices.
+        if (req->isPrefetch()) {
+            // Here we can safely use the fault status for the short
+            // desc. format in all cases
+            return new PrefetchAbort(vaddr, ArmFault::PrefetchUncacheable,
+                                     isStage2, tranMethod);
+        }
+    }
+
+    if (!te->longDescFormat) {
+        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
+          case 0:
+            domainFaults++;
+            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
+                    " domain: %#x write:%d\n", dacr,
+                    static_cast<uint8_t>(te->domain), is_write);
+            if (is_fetch)
+                return new PrefetchAbort(vaddr,
+                                         ArmFault::DomainLL + te->lookupLevel,
+                                         isStage2, tranMethod);
+            else
+                return new DataAbort(vaddr, te->domain, is_write,
+                                     ArmFault::DomainLL + te->lookupLevel,
+                                     isStage2, tranMethod);
+          case 1:
+            // Continue with permissions check
+            break;
+          case 2:
+            panic("UNPRED domain\n");
+          case 3:
+            return NoFault;
+        }
+    }
+
+    // The 'ap' variable is AP[2:0] or {AP[2:1], 1'b0}, i.e. always three bits
+    uint8_t ap  = te->longDescFormat ? te->ap << 1 : te->ap;
+    uint8_t hap = te->hap;
+
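+    // When AFE is enabled or the long-descriptor format is in use, AP[0] is
+    // not a permission bit, so treat it as set for the checks below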
+    if (sctlr.afe == 1 || te->longDescFormat)
+        ap |= 1;
+
+    bool abt;
+    bool isWritable = true;
+    // If this is a stage 2 access (e.g. for reading stage 1 page table
+    // entries) then don't perform the AP permissions check; we still do the
+    // HAP check below.
+    if (isStage2) {
+        abt = false;
+    } else {
+        switch (ap) {
+          case 0:
+            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
+                    (int)sctlr.rs);
+            if (!sctlr.xp) {
+                switch ((int)sctlr.rs) {
+                  case 2:
+                    abt = is_write;
+                    break;
+                  case 1:
+                    abt = is_write || !is_priv;
+                    break;
+                  case 0:
+                  case 3:
+                  default:
+                    abt = true;
+                    break;
+                }
+            } else {
+                abt = true;
+            }
+            break;
+          case 1:
+            abt = !is_priv;
+            break;
+          case 2:
+            abt = !is_priv && is_write;
+            isWritable = is_priv;
+            break;
+          case 3:
+            abt = false;
+            break;
+          case 4:
+            panic("UNPRED premissions\n");
+          case 5:
+            abt = !is_priv || is_write;
+            isWritable = false;
+            break;
+          case 6:
+          case 7:
+            abt        = is_write;
+            isWritable = false;
+            break;
+          default:
+            panic("Unknown permissions %#x\n", ap);
+        }
+    }
+
+    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
+    bool xn     = te->xn || (isWritable && sctlr.wxn) ||
+                            (ap == 3    && sctlr.uwxn && is_priv);
+    if (is_fetch && (abt || xn ||
+                     (te->longDescFormat && te->pxn && !is_priv) ||
+                     (isSecure && te->ns && scr.sif))) {
+        permsFaults++;
+        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
+                     "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
+                     ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
+        return new PrefetchAbort(vaddr,
+                                 ArmFault::PermissionLL + te->lookupLevel,
+                                 isStage2, tranMethod);
+    } else if (abt || hapAbt) {
+        permsFaults++;
+        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
+               " write:%d\n", ap, is_priv, is_write);
+        return new DataAbort(vaddr, te->domain, is_write,
+                             ArmFault::PermissionLL + te->lookupLevel,
+                             isStage2 | !abt, tranMethod);
     }
-    warn_once("MPU translation not implemented\n");
-    req->setPaddr(vaddr);
     return NoFault;
-    
+}
+
+
+Fault
+TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
+                        ThreadContext *tc)
+{
+    assert(aarch64);
 
-#endif
+    Addr vaddr_tainted = req->getVaddr();
+    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
+
+    uint32_t flags = req->getFlags();
+    bool is_fetch  = (mode == Execute);
+    bool is_write  = (mode == Write);
+    bool is_priv M5_VAR_USED  = isPriv && !(flags & UserMode);
+
+    updateMiscReg(tc, curTranType);
+
+    // If this is the second stage of translation and the request is for a
+    // stage 1 page table walk then we need to check the HCR.PTW bit. This
+    // allows us to generate a fault if the request targets an area marked
+    // as a device or strongly ordered.
+    if (isStage2 && req->isPTWalk() && hcr.ptw &&
+        (te->mtype != TlbEntry::MemoryType::Normal)) {
+        return new DataAbort(vaddr_tainted, te->domain, is_write,
+                             ArmFault::PermissionLL + te->lookupLevel,
+                             isStage2, ArmFault::LpaeTran);
+    }
+
+    // Generate an alignment fault for unaligned accesses to device or
+    // strongly ordered memory
+    if (!is_fetch) {
+        if (te->mtype != TlbEntry::MemoryType::Normal) {
+            if (vaddr & mask(flags & AlignmentMask)) {
+                alignFaults++;
+                return new DataAbort(vaddr_tainted,
+                                     TlbEntry::DomainType::NoAccess, is_write,
+                                     ArmFault::AlignmentFault, isStage2,
+                                     ArmFault::LpaeTran);
+            }
+        }
+    }
+
+    if (te->nonCacheable) {
+        // Prevent prefetching from I/O devices.
+        if (req->isPrefetch()) {
+            // Here we can safely use the fault status for the short
+            // desc. format in all cases
+            return new PrefetchAbort(vaddr_tainted,
+                                     ArmFault::PrefetchUncacheable,
+                                     isStage2, ArmFault::LpaeTran);
+        }
+    }
+
+    uint8_t ap  = 0x3 & (te->ap);  // 2-bit access protection field
+    bool grant = false;
+
+    uint8_t xn =  te->xn;
+    uint8_t pxn = te->pxn;
+    bool r = !is_write && !is_fetch;
+    bool w = is_write;
+    bool x = is_fetch;
+    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
+                        "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
+
+    if (isStage2) {
+        panic("Virtualization in AArch64 state is not supported yet");
+    } else {
+        switch (aarch64EL) {
+          case EL0:
+            {
+                uint8_t perm = (ap << 2)  | (xn << 1) | pxn;
+                switch (perm) {
+                  case 0:
+                  case 1:
+                  case 8:
+                  case 9:
+                    grant = x;
+                    break;
+                  case 4:
+                  case 5:
+                    grant = r || w || (x && !sctlr.wxn);
+                    break;
+                  case 6:
+                  case 7:
+                    grant = r || w;
+                    break;
+                  case 12:
+                  case 13:
+                    grant = r || x;
+                    break;
+                  case 14:
+                  case 15:
+                    grant = r;
+                    break;
+                  default:
+                    grant = false;
+                }
+            }
+            break;
+          case EL1:
+            {
+                uint8_t perm = (ap << 2)  | (xn << 1) | pxn;
+                switch (perm) {
+                  case 0:
+                  case 2:
+                    grant = r || w || (x && !sctlr.wxn);
+                    break;
+                  case 1:
+                  case 3:
+                  case 4:
+                  case 5:
+                  case 6:
+                  case 7:
+                    // regions that are writeable at EL0 should not be
+                    // executable at EL1
+                    grant = r || w;
+                    break;
+                  case 8:
+                  case 10:
+                  case 12:
+                  case 14:
+                    grant = r || x;
+                    break;
+                  case 9:
+                  case 11:
+                  case 13:
+                  case 15:
+                    grant = r;
+                    break;
+                  default:
+                    grant = false;
+                }
+            }
+            break;
+          case EL2:
+          case EL3:
+            {
+                uint8_t perm = (ap & 0x2) | xn;
+                switch (perm) {
+                  case 0:
+                    grant = r || w || (x && !sctlr.wxn);
+                    break;
+                  case 1:
+                    grant = r || w;
+                    break;
+                  case 2:
+                    grant = r || x;
+                    break;
+                  case 3:
+                    grant = r;
+                    break;
+                  default:
+                    grant = false;
+                }
+            }
+            break;
+        }
+    }
+
+    if (!grant) {
+        if (is_fetch) {
+            permsFaults++;
+            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
+                    "AP:%d priv:%d write:%d ns:%d sif:%d "
+                    "sctlr.afe: %d\n",
+                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
+            // Use PC value instead of vaddr because vaddr might be aligned to
+            // cache line and should not be the address reported in FAR
+            return new PrefetchAbort(req->getPC(),
+                                     ArmFault::PermissionLL + te->lookupLevel,
+                                     isStage2, ArmFault::LpaeTran);
+        } else {
+            permsFaults++;
+            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
+                    "priv:%d write:%d\n", ap, is_priv, is_write);
+            return new DataAbort(vaddr_tainted, te->domain, is_write,
+                                 ArmFault::PermissionLL + te->lookupLevel,
+                                 isStage2, ArmFault::LpaeTran);
+        }
+    }
+
+    return NoFault;
 }
 
-void
+Fault
+TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
+        Translation *translation, bool &delay, bool timing,
+        TLB::ArmTranslationType tranType, bool functional)
+{
+    // No such thing as a functional timing access
+    assert(!(timing && functional));
+
+    updateMiscReg(tc, tranType);
+
+    Addr vaddr_tainted = req->getVaddr();
+    Addr vaddr = 0;
+    if (aarch64)
+        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL);
+    else
+        vaddr = vaddr_tainted;
+    uint32_t flags = req->getFlags();
+
+    bool is_fetch  = (mode == Execute);
+    bool is_write  = (mode == Write);
+    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
+    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
+                                                       : ArmFault::VmsaTran;
+
+    req->setAsid(asid);
+
+    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
+            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
+
+    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
+                 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
+                 scr, sctlr, flags, tranType);
+
+    // Generate an alignment fault for unaligned PC
+    if (aarch64 && is_fetch && (req->getPC() & mask(2))) {
+        return new PCAlignmentFault(req->getPC());
+    }
+
+    // If this is a clrex instruction, provide a PA of 0 with no fault
+    // This will force the monitor to set the tracked address to 0,
+    // a bit of a hack but this effectively clears this processor's monitor
+    if (flags & Request::CLEAR_LL) {
+        // @todo: check implications of security extensions
+        req->setPaddr(0);
+        req->setFlags(Request::UNCACHEABLE);
+        req->setFlags(Request::CLEAR_LL);
+        return NoFault;
+    }
+    if ((req->isInstFetch() && (!sctlr.i)) ||
+        ((!req->isInstFetch()) && (!sctlr.c))) {
+        req->setFlags(Request::UNCACHEABLE);
+    }
+    if (!is_fetch) {
+        assert(flags & MustBeOne);
+        if (sctlr.a || !(flags & AllowUnaligned)) {
+            if (vaddr & mask(flags & AlignmentMask)) {
+                alignFaults++;
+                return new DataAbort(vaddr_tainted,
+                                     TlbEntry::DomainType::NoAccess, is_write,
+                                     ArmFault::AlignmentFault, isStage2,
+                                     tranMethod);
+            }
+        }
+    }
+
+    // If guest MMU is off or hcr.vm=0 go straight to stage2
+    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
+
+        req->setPaddr(vaddr);
+        // When the MMU is off the security attribute corresponds to the
+        // security state of the processor
+        if (isSecure)
+            req->setFlags(Request::SECURE);
+
+        // @todo: double check this (ARM ARM issue C B3.2.1)
+        if (long_desc_format || sctlr.tre == 0) {
+            req->setFlags(Request::UNCACHEABLE);
+        } else {
+            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
+                req->setFlags(Request::UNCACHEABLE);
+        }
+
+        // Set memory attributes
+        TlbEntry temp_te;
+        temp_te.ns = !isSecure;
+        if (isStage2 || hcr.dc == 0 || isSecure ||
+           (isHyp && !(tranType & S1CTran))) {
+
+            temp_te.mtype      = is_fetch ? TlbEntry::MemoryType::Normal
+                                          : TlbEntry::MemoryType::StronglyOrdered;
+            temp_te.innerAttrs = 0x0;
+            temp_te.outerAttrs = 0x0;
+            temp_te.shareable  = true;
+            temp_te.outerShareable = true;
+        } else {
+            temp_te.mtype      = TlbEntry::MemoryType::Normal;
+            temp_te.innerAttrs = 0x3;
+            temp_te.outerAttrs = 0x3;
+            temp_te.shareable  = false;
+            temp_te.outerShareable = false;
+        }
+        temp_te.setAttributes(long_desc_format);
+        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable:\
+                %d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
+                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
+                isStage2);
+        setAttr(temp_te.attributes);
+
+        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
+    }
+
+    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
+            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
+    // Translation enabled
+
+    TlbEntry *te = NULL;
+    TlbEntry mergeTe;
+    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
+                              functional, &mergeTe);
+    // only proceed if we have a valid table entry
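+    // (a null entry with no fault means the table walk is still in progress,
+    // so the translation is marked as delayed)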
+    if ((te == NULL) && (fault == NoFault)) delay = true;
+
+    // If we have the table entry, transfer some of its attributes to the
+    // request that triggered the translation
+    if (te != NULL) {
+        // Set memory attributes
+        DPRINTF(TLBVerbose,
+                "Setting memory attributes: shareable: %d, innerAttrs: %d, \
+                outerAttrs: %d, mtype: %d, isStage2: %d\n",
+                te->shareable, te->innerAttrs, te->outerAttrs,
+                static_cast<uint8_t>(te->mtype), isStage2);
+        setAttr(te->attributes);
+        if (te->nonCacheable) {
+            req->setFlags(Request::UNCACHEABLE);
+        }
+
+        if (!bootUncacheability &&
+            ((ArmSystem*)tc->getSystemPtr())->adderBootUncacheable(vaddr)) {
+            req->setFlags(Request::UNCACHEABLE);
+        }
+
+        req->setPaddr(te->pAddr(vaddr));
+        if (isSecure && !te->ns) {
+            req->setFlags(Request::SECURE);
+        }
+        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
+            (te->mtype != TlbEntry::MemoryType::Normal)) {
+                // Unaligned accesses to Device memory should always cause an
+                // abort regardless of sctlr.a
+                alignFaults++;
+                return new DataAbort(vaddr_tainted,
+                                     TlbEntry::DomainType::NoAccess, is_write,
+                                     ArmFault::AlignmentFault, isStage2,
+                                     tranMethod);
+        }
+
+        // Check for a trickbox generated address fault
+        if (fault == NoFault) {
+            fault = trickBoxCheck(req, mode, te->domain);
+        }
+    }
+
+    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
+    if (fault == NoFault) {
+        CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+        if (aarch64 && is_fetch && cpsr.il == 1) {
+            return new IllegalInstSetStateFault();
+        }
+    }
+
+    return fault;
+}
+
+Fault
+TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
+    TLB::ArmTranslationType tranType)
+{
+    updateMiscReg(tc, tranType);
+
+    if (directToStage2) {
+        assert(stage2Tlb);
+        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
+    }
+
+    bool delay = false;
+    Fault fault;
+    if (FullSystem)
+        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
+    else
+        fault = translateSe(req, tc, mode, NULL, delay, false);
+    assert(!delay);
+    return fault;
+}
+
+Fault
+TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
+    TLB::ArmTranslationType tranType)
+{
+    updateMiscReg(tc, tranType);
+
+    if (directToStage2) {
+        assert(stage2Tlb);
+        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
+    }
+
+    bool delay = false;
+    Fault fault;
+    if (FullSystem)
+        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
+    else
+        fault = translateSe(req, tc, mode, NULL, delay, false);
+    assert(!delay);
+    return fault;
+}
+
+Fault
 TLB::translateTiming(RequestPtr req, ThreadContext *tc,
-        Translation *translation, Mode mode)
+    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
 {
+    updateMiscReg(tc, tranType);
+
+    if (directToStage2) {
+        assert(stage2Tlb);
+        return stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
+    }
+
     assert(translation);
-    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
+
+    return translateComplete(req, tc, translation, mode, tranType, isStage2);
+}
+
+Fault
+TLB::translateComplete(RequestPtr req, ThreadContext *tc,
+        Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
+        bool callFromS2)
+{
+    bool delay = false;
+    Fault fault;
+    if (FullSystem)
+        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
+    else
+        fault = translateSe(req, tc, mode, translation, delay, true);
+    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
+            NoFault);
+    // If we have a translation, and we're not in the middle of doing a stage
+    // 2 translation, tell the translation that we've either finished or it's
+    // going to take a while. By not doing this when we're in the middle of a
+    // stage 2 translation we prevent marking the translation as delayed
+    // twice, once when the translation starts and again when the stage 1
+    // translation completes.
+    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
+        fault != NoFault)) {
+        if (!delay)
+            translation->finish(fault, req, tc, mode);
+        else
+            translation->markDelayed();
+    }
+    return fault;
+}
+
+BaseMasterPort*
+TLB::getMasterPort()
+{
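+    // The TLB has no memory port of its own; expose the table walker's port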
+    return &tableWalker->getMasterPort("port");
+}
+
+DmaPort&
+TLB::getWalkerPort()
+{
+    return tableWalker->getWalkerPort();
+}
+
+void
+TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
+{
+    // Check if the regs have changed, or the translation mode is different.
+    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
+    // one type of translation anyway
+    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
+        return;
+    }
+
+    DPRINTF(TLBVerbose, "TLB variables changed!\n");
+    CPSR cpsr = tc->readMiscReg(MISCREG_CPSR);
+    // Dependencies: SCR/SCR_EL3, CPSR
+    isSecure  = inSecureState(tc);
+    isSecure &= (tranType & HypMode)    == 0;
+    isSecure &= (tranType & S1S2NsTran) == 0;
+    aarch64 = !cpsr.width;
+    if (aarch64) {  // AArch64
+        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
+        switch (aarch64EL) {
+          case EL0:
+          case EL1:
+            {
+                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
+                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
+                uint64_t ttbr_asid = ttbcr.a1 ?
+                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
+                    tc->readMiscReg(MISCREG_TTBR0_EL1);
+                asid = bits(ttbr_asid,
+                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
+            }
+            break;
+          case EL2:
+            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
+            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
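+            // The EL2 translation regime is not tagged with an ASID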
+            asid = -1;
+            break;
+          case EL3:
+            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
+            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
+            asid = -1;
+            break;
+        }
+        scr = tc->readMiscReg(MISCREG_SCR_EL3);
+        isPriv = aarch64EL != EL0;
+        // @todo: modify this behaviour to support Virtualization in
+        // AArch64
+        vmid           = 0;
+        isHyp          = false;
+        directToStage2 = false;
+        stage2Req      = false;
+    } else {  // AArch32
+        sctlr  = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
+                                 !isSecure));
+        ttbcr  = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
+                                 !isSecure));
+        scr    = tc->readMiscReg(MISCREG_SCR);
+        isPriv = cpsr.mode != MODE_USER;
+        if (haveLPAE && ttbcr.eae) {
+            // Long-descriptor translation table format in use
+            uint64_t ttbr_asid = tc->readMiscReg(
+                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
+                                                : MISCREG_TTBR0,
+                                       tc, !isSecure));
+            asid = bits(ttbr_asid, 55, 48);
+        } else {
+            // Short-descriptor translation table format in use
+            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
+                MISCREG_CONTEXTIDR, tc, !isSecure));
+            asid = context_id.asid;
+        }
+        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
+                               !isSecure));
+        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
+                               !isSecure));
+        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
+                               !isSecure));
+        hcr  = tc->readMiscReg(MISCREG_HCR);
+
+        if (haveVirtualization) {
+            vmid   = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
+            isHyp  = cpsr.mode == MODE_HYP;
+            isHyp |=  tranType & HypMode;
+            isHyp &= (tranType & S1S2NsTran) == 0;
+            isHyp &= (tranType & S1CTran)    == 0;
+            if (isHyp) {
+                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
+            }
+            // Work out if we should skip the first stage of translation and go
+            // directly to stage 2. This value is cached so we don't have to
+            // compute it for every translation.
+            stage2Req      = hcr.vm && !isStage2 && !isHyp && !isSecure &&
+                             !(tranType & S1CTran);
+            directToStage2 = stage2Req && !sctlr.m;
+        } else {
+            vmid           = 0;
+            stage2Req      = false;
+            isHyp          = false;
+            directToStage2 = false;
+        }
+    }
+    miscRegValid = true;
+    curTranType  = tranType;
+}
+
+Fault
+TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
+        Translation *translation, bool timing, bool functional,
+        bool is_secure, TLB::ArmTranslationType tranType)
+{
+    bool is_fetch = (mode == Execute);
+    bool is_write = (mode == Write);
+
+    Addr vaddr_tainted = req->getVaddr();
+    Addr vaddr = 0;
+    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
+    if (aarch64) {
+        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el);
+    } else {
+        vaddr = vaddr_tainted;
+    }
+    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
+    if (*te == NULL) {
+        if (req->isPrefetch()) {
+            // if the request is a prefetch don't attempt to fill the TLB or go
+            // any further with the memory access (here we can safely use the
+            // fault status for the short desc. format in all cases)
+            prefetchFaults++;
+            return new PrefetchAbort(vaddr_tainted, ArmFault::PrefetchTLBMiss,
+                                     isStage2);
+        }
+
+        if (is_fetch)
+            instMisses++;
+        else if (is_write)
+            writeMisses++;
+        else
+            readMisses++;
+
+        // Start the translation table walk, passing variables rather than
+        // re-retrieving them in the table walker, for speed
+        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
+                vaddr_tainted, asid, vmid);
+        Fault fault;
+        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
+                                  translation, timing, functional, is_secure,
+                                  tranType);
+        // For timing mode, return and wait for the table walk to complete
+        if (timing || fault != NoFault) {
+            return fault;
+        }
+
+        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
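+        // The walk completed synchronously, so an entry should now be
+        // present; dump the TLB contents before asserting to aid debugging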
+        if (!*te)
+            printTlb();
+        assert(*te);
+    } else {
+        if (is_fetch)
+            instHits++;
+        else if (is_write)
+            writeHits++;
+        else
+            readHits++;
+    }
+    return NoFault;
+}
+
+Fault
+TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
+        Translation *translation, bool timing, bool functional,
+        TlbEntry *mergeTe)
+{
+    Fault fault;
+    TlbEntry *s1Te = NULL;
+
+    Addr vaddr_tainted = req->getVaddr();
+
+    // Get the stage 1 table entry
+    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
+                  isSecure, curTranType);
+    // only proceed if we have a valid table entry
+    if ((s1Te != NULL) && (fault == NoFault)) {
+        // Check stage 1 permissions before checking stage 2
+        if (aarch64)
+            fault = checkPermissions64(s1Te, req, mode, tc);
+        else
+            fault = checkPermissions(s1Te, req, mode);
+        if (stage2Req && (fault == NoFault)) {
+            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
+                req, translation, mode, timing, functional, curTranType);
+            fault = s2Lookup->getTe(tc, mergeTe);
+            if (s2Lookup->isComplete()) {
+                *te = mergeTe;
+                // We've finished with the lookup so delete it
+                delete s2Lookup;
+            } else {
+                // The lookup hasn't completed, so we can't delete it now. We
+                // get around this by asking the object to delete itself when
+                // the translation is complete.
+                s2Lookup->setSelfDelete();
+            }
+        } else {
+            // This case deals with an S1 hit (or bypass), followed by
+            // an S2 hit-but-perms issue
+            if (isStage2) {
+                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
+                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
+                if (fault != NoFault) {
+                    ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
+                    armFault->annotate(ArmFault::S1PTW, false);
+                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
+                }
+            }
+            *te = s1Te;
+        }
+    }
+    return fault;
 }
 
 ArmISA::TLB *