     Addr effAddr;
 
     /** The effective physical address. */
-    Addr physEffAddr;
+    Addr physEffAddrLow;
+
+    /** The effective physical address of the second part of a
+     *  split request; remains zero for a non-split request.
+     */
+    Addr physEffAddrHigh;
 
     /** The memory request flags (from translation). */
     unsigned memReqFlags;
     instFlags[IsStrictlyOrdered] = state->isStrictlyOrdered();
 
     if (fault == NoFault) {
-        physEffAddr = state->getPaddr();
+        // Save the physical address for a single (non-split) request.
+        physEffAddrLow = state->getPaddr();
+
+        // For a split request, record the physical addresses of both
+        // halves instead.
+        if (state->isSplit) {
+            physEffAddrLow = state->sreqLow->getPaddr();
+            physEffAddrHigh = state->sreqHigh->getPaddr();
+        }
+
         memReqFlags = state->getFlags();
 
         if (state->mainReq->isCondSwap()) {
 
 {
     memData = NULL;
     effAddr = 0;
-    physEffAddr = 0;
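+    // Reset both halves of the physical address; physEffAddrHigh is
+    // only set again if the access is split.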
+    physEffAddrLow = 0;
+    physEffAddrHigh = 0;
     readyRegs = 0;
     memReqFlags = 0;
 
 
                 DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: %s "
                         "[sn:%lli], inst PC: %s [sn:%lli]. Addr is: %#x.\n",
                         violator->pcState(), violator->seqNum,
-                        inst->pcState(), inst->seqNum, inst->physEffAddr);
+                        inst->pcState(), inst->seqNum, inst->physEffAddrLow);
 
                 fetchRedirect[tid] = true;
 
                 DPRINTF(IEW, "LDSTQ detected a violation.  Violator PC: "
                         "%s, inst PC: %s.  Addr is: %#x.\n",
                         violator->pcState(), inst->pcState(),
-                        inst->physEffAddr);
+                        inst->physEffAddrLow);
                 DPRINTF(IEW, "Violation will not be handled because "
                         "already squashing\n");
 
 
 
     DynInstPtr ld_inst = loadQueue[load_idx];
     if (ld_inst) {
-        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
+        Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
+        Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;
+
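+        // For a split access, either half may map to the invalidated
+        // block, so both block-aligned addresses are checked below.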
         // Check that this snoop didn't just invalidate our lock flag
-        if (ld_inst->effAddrValid() && load_addr == invalidate_addr &&
-            ld_inst->memReqFlags & Request::LLSC)
+        if (ld_inst->effAddrValid() &&
+            (load_addr_low == invalidate_addr ||
+             load_addr_high == invalidate_addr) &&
+            (ld_inst->memReqFlags & Request::LLSC))
             TheISA::handleLockedSnoopHit(ld_inst.get());
     }
 
             continue;
         }
 
-        Addr load_addr = ld_inst->physEffAddr & cacheBlockMask;
+        Addr load_addr_low = ld_inst->physEffAddrLow & cacheBlockMask;
+        Addr load_addr_high = ld_inst->physEffAddrHigh & cacheBlockMask;
+
-        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr: %#x to pktAddr:%#x\n",
-                    ld_inst->seqNum, load_addr, invalidate_addr);
+        DPRINTF(LSQUnit, "-- inst [sn:%lli] load_addr_low: %#x to pktAddr:%#x\n",
+                    ld_inst->seqNum, load_addr_low, invalidate_addr);
 
-        if (load_addr == invalidate_addr || force_squash) {
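+        // Match if either half of a (possibly split) access falls in
+        // the invalidated block.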
+        if (load_addr_low == invalidate_addr ||
+            load_addr_high == invalidate_addr || force_squash) {
             if (needsTSO) {
                 // If we have a TSO system, as all loads must be ordered with
                 // all other loads, this load as well as *all* subsequent loads