SPARC: Make sure unaligned accesses are caught on cached translations as well.
authorGabe Black <gblack@eecs.umich.edu>
Mon, 27 Aug 2007 03:15:29 +0000 (20:15 -0700)
committerGabe Black <gblack@eecs.umich.edu>
Mon, 27 Aug 2007 03:15:29 +0000 (20:15 -0700)
--HG--
extra : convert_revision : 5c1f3f585817a19a771164f809dfc2fdc1ab3fb2

src/arch/sparc/tlb.cc

index e184429ab83b0a62d463e88167684dbcf492b39f..12891e2b3f7691375ceb45a70a110cc971009dd0 100644 (file)
@@ -569,6 +569,7 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
     asi = (ASI)req->getAsi();
     bool implicit = false;
     bool hpriv = bits(tlbdata,0,0);
+    bool unaligned = (vaddr & size-1);
 
     DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
             vaddr, size, asi);
@@ -579,43 +580,47 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
     if (asi == ASI_IMPLICIT)
         implicit = true;
 
-    if (hpriv && implicit) {
-        req->setPaddr(vaddr & PAddrImplMask);
-        return NoFault;
-    }
-
-    // Be fast if we can!
-    if (cacheValid &&  cacheState == tlbdata) {
-
-
+    // Only use the fast path here if there doesn't need to be an unaligned
+    // trap later
+    if (!unaligned) {
+        if (hpriv && implicit) {
+            req->setPaddr(vaddr & PAddrImplMask);
+            return NoFault;
+        }
 
-        if (cacheEntry[0]) {
-            TlbEntry *ce = cacheEntry[0];
-            Addr ce_va = ce->range.va;
-            if (cacheAsi[0] == asi &&
-                ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
-                (!write || ce->pte.writable())) {
-                    req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
-                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
-                        req->setFlags(req->getFlags() | UNCACHEABLE);
-                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
-                    return NoFault;
-            } // if matched
-        } // if cache entry valid
-        if (cacheEntry[1]) {
-            TlbEntry *ce = cacheEntry[1];
-            Addr ce_va = ce->range.va;
-            if (cacheAsi[1] == asi &&
-                ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
-                (!write || ce->pte.writable())) {
-                    req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
-                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
-                        req->setFlags(req->getFlags() | UNCACHEABLE);
-                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
-                    return NoFault;
-            } // if matched
-        } // if cache entry valid
-     }
+        // Be fast if we can!
+        if (cacheValid &&  cacheState == tlbdata) {
+
+
+
+            if (cacheEntry[0]) {
+                TlbEntry *ce = cacheEntry[0];
+                Addr ce_va = ce->range.va;
+                if (cacheAsi[0] == asi &&
+                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
+                    (!write || ce->pte.writable())) {
+                        req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
+                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
+                            req->setFlags(req->getFlags() | UNCACHEABLE);
+                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
+                        return NoFault;
+                } // if matched
+            } // if cache entry valid
+            if (cacheEntry[1]) {
+                TlbEntry *ce = cacheEntry[1];
+                Addr ce_va = ce->range.va;
+                if (cacheAsi[1] == asi &&
+                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
+                    (!write || ce->pte.writable())) {
+                        req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
+                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
+                            req->setFlags(req->getFlags() | UNCACHEABLE);
+                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
+                        return NoFault;
+                } // if matched
+            } // if cache entry valid
+        }
+    }
 
     bool red = bits(tlbdata,1,1);
     bool priv = bits(tlbdata,2,2);
@@ -707,7 +712,7 @@ DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
     }
 
     // If the asi is unaligned trap
-    if (vaddr & size-1) {
+    if (unaligned) {
         writeSfsr(vaddr, false, ct, false, OtherFault, asi);
         return new MemAddressNotAligned;
     }