Yet another merge with the main repository.
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
index 6fd7ae36cbf50fff011f1e6bd801285c49f94782..0ab9f0579a084eb0153bb8f48d047e288f5e4350 100644 (file)
  *
  */
 
-#include <vector>
 #include <list>
+#include <vector>
+
 #include "arch/isa_traits.hh"
-#include "arch/mips/locked_mem.hh"
+#include "arch/locked_mem.hh"
+#include "arch/predecoder.hh"
 #include "arch/utility.hh"
+#include "config/the_isa.hh"
 #include "cpu/inorder/resources/cache_unit.hh"
-#include "cpu/inorder/pipeline_traits.hh"
 #include "cpu/inorder/cpu.hh"
+#include "cpu/inorder/pipeline_traits.hh"
+#include "cpu/inorder/resource_pool.hh"
+#include "debug/Activity.hh"
+#include "debug/AddrDep.hh"
+#include "debug/InOrderCachePort.hh"
+#include "debug/InOrderStall.hh"
+#include "debug/InOrderTLB.hh"
+#include "debug/LLSC.hh"
+#include "debug/RefCount.hh"
+#include "debug/ThreadModel.hh"
 #include "mem/request.hh"
 
 using namespace std;
 using namespace TheISA;
 using namespace ThePipeline;
 
+#if TRACING_ON
+static std::string
+printMemData(uint8_t *data, unsigned size)
+{
+    std::stringstream dataStr;
+    for (unsigned pos = 0; pos < size; pos++) {
+        ccprintf(dataStr, "%02x", data[pos]);
+    }
+    return dataStr.str();
+}
+#endif
+
 Tick
 CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
 {
-    panic("DefaultFetch doesn't expect recvAtomic callback!");
-    return curTick;
+    panic("%s doesn't expect recvAtomic callback!", cachePortUnit->name());
+    return curTick();
 }
 
 void
 CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
 {
-    panic("DefaultFetch doesn't expect recvFunctional callback!");
+    DPRINTF(InOrderCachePort, "Doesn't update state on a recvFunctional. "
+            "Ignoring packet for %x.\n", pkt->getAddr());
 }
 
 void
-CacheUnit::CachePort::recvStatusChange(Status status)
+CacheUnit::CachePort::recvRangeChange()
 {
-    if (status == RangeChange)
-        return;
-
-    panic("DefaultFetch doesn't expect recvStatusChange callback!");
 }
 
 bool
 CacheUnit::CachePort::recvTiming(Packet *pkt)
 {
-    cachePortUnit->processCacheCompletion(pkt);
+    if (pkt->isError())
+        DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n",
+                pkt->getAddr());
+    else if (pkt->isResponse())
+        cachePortUnit->processCacheCompletion(pkt);
+    else {
+        //@note: depending on consistency model, update here
+        DPRINTF(InOrderCachePort, "Received snoop pkt %x, Ignoring\n", pkt->getAddr());
+    }
+
     return true;
 }
 
@@ -81,14 +111,29 @@ CacheUnit::CachePort::recvRetry()
 CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
         int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
     : Resource(res_name, res_id, res_width, res_latency, _cpu),
-      retryPkt(NULL), retrySlot(-1)
+      cachePortBlocked(false)
 {
-    //cacheData.resize(res_width);
-    //slotStatus = new CachePortStatus[width];
-    //fetchPC = new Addr[width];
     cachePort = new CachePort(this);
 
-    cacheBlocked = false;
+    // Hard-Code Selection For Now
+    if (res_name == "icache_port")
+        _tlb = params->itb;
+    else if (res_name == "dcache_port")
+        _tlb = params->dtb;
+    else
+        fatal("Unrecognized TLB name passed by user");
+
+    for (int i=0; i < MaxThreads; i++) {
+        tlbBlocked[i] = false;
+        tlbBlockSeqNum[i] = 0;
+    }
+}
+
+TheISA::TLB*
+CacheUnit::tlb()
+{
+    return _tlb;
+
 }
 
 Port *
@@ -100,118 +145,207 @@ CacheUnit::getPort(const string &if_name, int idx)
         return NULL;
 }
 
+void
+CacheUnit::init()
+{
+    for (int i = 0; i < width; i++) {
+        reqs[i] = new CacheRequest(this);
+    }
+
+    cacheBlkSize = this->cachePort->peerBlockSize();
+    cacheBlkMask = cacheBlkSize  - 1;
+
+    initSlots();
+}
+
 int
 CacheUnit::getSlot(DynInstPtr inst)
 {
-    if (!inst->validMemAddr()) {
-        panic("Mem. Addr. must be set before requesting cache access\n");
+    ThreadID tid = inst->readTid();
+    if (tlbBlocked[tid]) {
+        return -1;
+    }
+
+    // For a Split-Load, the instruction would have processed once already
+    // causing the address to be unset.
+    if (!inst->validMemAddr() && !inst->splitInst) {
+        panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
+              "cache access\n", inst->readTid(), inst->seqNum);
     }
 
+    int new_slot = Resource::getSlot(inst);
+    inst->memTime = curTick();
+    //@note: add back in if you want speculative load/store capability
+    //setAddrDependency(inst);
+    return new_slot;
+}
+
+void
+CacheUnit::setAddrDependency(DynInstPtr inst)
+{
     Addr req_addr = inst->getMemAddr();
+    ThreadID tid = inst->readTid();
+
+    addrList[tid].push_back(req_addr);
+    addrMap[tid][req_addr] = inst->seqNum;
+
+    DPRINTF(AddrDep,
+            "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
+            inst->readTid(), inst->seqNum, req_addr, addrList[tid].size());
+
+    //@NOTE: 10 is an arbitrarily "high" number, but to be exact
+    //       we would need to know the # of outstanding accesses
+    //       a priori. Information like fetch width, stage width,
+    //       fetch buffer, and the branch resolution stage would be
+    //       useful for the icache_port. For the dcache port, the #
+    //       of outstanding cache accesses (mshrs) would be a good
+    //       sanity check here.
+    //assert(addrList[tid].size() < 10);
+}
 
-    if (resName == "icache_port" ||
-        find(addrList.begin(), addrList.end(), req_addr) == addrList.end()) {
+void
+CacheUnit::removeAddrDependency(DynInstPtr inst)
+{
+    ThreadID tid = inst->readTid();
 
-        int new_slot = Resource::getSlot(inst);
+    Addr mem_addr = inst->getMemAddr();
+    
+    inst->unsetMemAddr();
 
-        if (new_slot == -1)
-            return -1;
+    // Erase from Address List
+    std::list<Addr>::iterator list_it = find(addrList[tid].begin(),
+                                          addrList[tid].end(),
+                                          mem_addr);
+    assert(list_it != addrList[tid].end() || inst->splitInst);
 
-        inst->memTime = curTick;
-        addrList.push_back(req_addr);
-        addrMap[req_addr] = inst->seqNum;
-        DPRINTF(InOrderCachePort,
-                "[tid:%i]: [sn:%i]: Address %08p added to dependency list\n",
-                inst->readTid(), inst->seqNum, req_addr);
-        return new_slot;
-    } else {
-        DPRINTF(InOrderCachePort,
-                "Denying request because there is an outstanding"
-                " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
-                req_addr, addrMap[req_addr], inst->memTime);
-        return -1;
+    if (list_it != addrList[tid].end()) {
+        DPRINTF(AddrDep,
+                "[tid:%i]: [sn:%i] Address %08p removed from dependency "
+                "list\n", inst->readTid(), inst->seqNum, (*list_it));
+
+        addrList[tid].erase(list_it);
+
+        // Erase From Address Map (Used for Debugging)
+        addrMap[tid].erase(addrMap[tid].find(mem_addr));
     }
+    
+
 }
 
-void
-CacheUnit::freeSlot(int slot_num)
+ResReqPtr
+CacheUnit::findRequest(DynInstPtr inst)
 {
-    vector<Addr>::iterator vect_it = find(addrList.begin(), addrList.end(),
-            reqMap[slot_num]->inst->getMemAddr());
-    assert(vect_it != addrList.end());
+    for (int i = 0; i < width; i++) {
+        CacheRequest* cache_req =
+            dynamic_cast<CacheRequest*>(reqs[i]);
+        assert(cache_req);
+
+        if (cache_req->valid &&
+            cache_req->getInst() == inst &&
+            cache_req->instIdx == inst->curSkedEntry->idx) {
+            return cache_req;
+        }
+    }
 
-    DPRINTF(InOrderCachePort,
-            "[tid:%i]: Address %08p removed from dependency list\n",
-            reqMap[slot_num]->inst->readTid(), (*vect_it));
+    return NULL;
+}
 
-    addrList.erase(vect_it);
+ResReqPtr
+CacheUnit::findRequest(DynInstPtr inst, int idx)
+{
+    for (int i = 0; i < width; i++) {
+        CacheRequest* cache_req =
+            dynamic_cast<CacheRequest*>(reqs[i]);
+        assert(cache_req);
+
+        if (cache_req->valid &&
+            cache_req->getInst() == inst &&
+            cache_req->instIdx == idx) {
+            return cache_req;
+        }
+    }
 
-    Resource::freeSlot(slot_num);
+    return NULL;
 }
 
+
 ResReqPtr
 CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
 {
-    ScheduleEntry* sched_entry = inst->resSched.top();
+    ScheduleEntry* sched_entry = *inst->curSkedEntry;
+    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);
 
     if (!inst->validMemAddr()) {
         panic("Mem. Addr. must be set before requesting cache access\n");
     }
 
-    int req_size = 0;
     MemCmd::Command pkt_cmd;
 
-    if (sched_entry->cmd == InitiateReadData) {
+    switch (sched_entry->cmd)
+    {
+      case InitSecondSplitRead:
         pkt_cmd = MemCmd::ReadReq;
-        req_size = inst->getMemAccSize();
 
         DPRINTF(InOrderCachePort,
-                "[tid:%i]: %i byte Read request from [sn:%i] for addr %08p\n",
-                inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
-    } else if (sched_entry->cmd == InitiateWriteData) {
+                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
+                inst->readTid(), inst->seqNum, inst->split2ndAddr);
+        break;
+
+      case InitiateReadData:
+        pkt_cmd = MemCmd::ReadReq;
+
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
+                inst->readTid(), inst->seqNum, inst->getMemAddr());
+        break;
+
+      case InitSecondSplitWrite:
         pkt_cmd = MemCmd::WriteReq;
-        req_size = inst->getMemAccSize();
 
         DPRINTF(InOrderCachePort,
-                "[tid:%i]: %i byte Write request from [sn:%i] for addr %08p\n",
-                inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
-    } else if (sched_entry->cmd == InitiateFetch){
-        pkt_cmd = MemCmd::ReadReq;
-        req_size = sizeof(MachInst); //@TODO: mips16e
+                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
+                inst->readTid(), inst->seqNum, inst->split2ndAddr);
+        break;
+
+      case InitiateWriteData:
+        pkt_cmd = MemCmd::WriteReq;
 
         DPRINTF(InOrderCachePort,
-                "[tid:%i]: %i byte Fetch request from [sn:%i] for addr %08p\n",
-                inst->readTid(), req_size, inst->seqNum, inst->getMemAddr());
-    } else {
-        panic("%i: Unexpected request type (%i) to %s", curTick,
+                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
+                inst->readTid(), inst->seqNum, inst->getMemAddr());
+        break;
+
+      default:
+        panic("%i: Unexpected request type (%i) to %s", curTick(),
               sched_entry->cmd, name());
     }
 
-    return new CacheRequest(this, inst, stage_num, id, slot_num,
-                            sched_entry->cmd, req_size, pkt_cmd,
-                            0/*flags*/, this->cpu->readCpuId());
+    cache_req->setRequest(inst, stage_num, id, slot_num,
+                          sched_entry->cmd, pkt_cmd,
+                          inst->curSkedEntry->idx);
+    return cache_req;
 }
 
 void
 CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
 {
-    //service_request = false;
-
     CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
     assert(cache_req);
 
     // Check to see if this instruction is requesting the same command
     // or a different one
-    if (cache_req->cmd != inst->resSched.top()->cmd) {
+    if (cache_req->cmd != inst->curSkedEntry->cmd &&
+        cache_req->instIdx == inst->curSkedEntry->idx) {
         // If different, then update command in the request
-        cache_req->cmd = inst->resSched.top()->cmd;
+        cache_req->cmd = inst->curSkedEntry->cmd;
         DPRINTF(InOrderCachePort,
-                "[tid:%i]: [sn:%i]: the command for this instruction\n",
-                inst->readTid(), inst->seqNum);
+                "[tid:%i]: [sn:%i]: Updating the command for this "
+                "instruction\n", inst->readTid(), inst->seqNum);
 
         service_request = true;
-    } else {
+    } else if (inst->curSkedEntry->idx != CacheUnit::InitSecondSplitRead &&
+               inst->curSkedEntry->idx != CacheUnit::InitSecondSplitWrite) {
         // If same command, just check to see if memory access was completed
         // but dont try to re-execute
         DPRINTF(InOrderCachePort,
@@ -223,184 +357,583 @@ CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
 }
 
 void
-CacheUnit::execute(int slot_num)
+CacheUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
+                           int acc_size, int flags)
 {
-    if (cacheBlocked) {
-        DPRINTF(InOrderCachePort, "Cache Blocked. Cannot Access\n");
-        return;
+    ThreadID tid = inst->readTid();
+    Addr aligned_addr = inst->getMemAddr();
+
+    if (!cache_req->is2ndSplit()) {
+        if (cache_req->memReq == NULL) {
+            cache_req->memReq =
+                new Request(cpu->asid[tid], aligned_addr, acc_size, flags,
+                            inst->instAddr(),
+                            cpu->readCpuId(), //@todo: use context id
+                            tid);
+        }
+    } else {
+        assert(inst->splitInst);
+
+        if (inst->splitMemReq == NULL) {
+            inst->splitMemReq = new Request(cpu->asid[tid], 
+                                            inst->split2ndAddr,
+                                            acc_size, 
+                                            flags, 
+                                            inst->instAddr(),
+                                            cpu->readCpuId(), 
+                                            tid);
+        }
+
+        cache_req->memReq = inst->splitMemReq;
     }
+}
 
-    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqMap[slot_num]);
-    assert(cache_req);
+void
+CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
+                       int flags, TheISA::TLB::Mode tlb_mode)
+{
+    ThreadID tid = inst->readTid();
 
-    DynInstPtr inst = cache_req->inst;
-    int tid;
-    tid = inst->readTid();
-    int seq_num;
-    seq_num = inst->seqNum;
-    //int stage_num = cache_req->getStageNum();
+    setupMemRequest(inst, cache_req, acc_size, flags);
 
-    cache_req->fault = NoFault;
+    //@todo: HACK: the DTB expects the correct PC in the ThreadContext
+    //       but what if the memory accesses are speculative? Shouldn't
+    //       we send along the requestor's PC to the translate functions?
+    ThreadContext *tc = cpu->thread[tid]->getTC();
+    PCState old_pc = tc->pcState();
+    tc->pcState() = inst->pcState();
 
-    switch (cache_req->cmd)
-    {
-      case InitiateFetch:
-        DPRINTF(InOrderCachePort,
-                "[tid:%u]: Initiating fetch access to %s for addr. %08p\n",
-                tid, name(), cache_req->inst->getMemAddr());
+    inst->fault =
+        _tlb->translateAtomic(cache_req->memReq, tc, tlb_mode);
+    tc->pcState() = old_pc;
 
-        DPRINTF(InOrderCachePort,
-                "[tid:%u]: Fetching new cache block from addr: %08p\n",
-                tid, cache_req->memReq->getVaddr());
+    if (inst->fault != NoFault) {
+        DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
+                "addr:%08p for [sn:%i].\n", tid, inst->fault->name(),
+                cache_req->memReq->getVaddr(), inst->seqNum);
 
-        inst->setCurResSlot(slot_num);
-        doDataAccess(inst);
-        break;
+        tlbBlocked[tid] = true;
+        tlbBlockSeqNum[tid] = inst->seqNum;
 
-      case CompleteFetch:
-        if (cache_req->isMemAccComplete()) {
-            DPRINTF(InOrderCachePort,
-                    "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
-                    tid, inst->seqNum);
+        // Make sure nothing gets executed until after this faulting
+        // instruction gets handled.
+        inst->setSerializeAfter();
 
-            MachInst mach_inst = cache_req->dataPkt->get<MachInst>();
+        // Mark it as complete so it can pass through next stage.
+        // Fault Handling will happen at commit/graduation
+        cache_req->setCompleted();
+    } else {
+        DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
+                "to phys. addr:%08p.\n", tid, inst->seqNum,
+                cache_req->memReq->getVaddr(),
+                cache_req->memReq->getPaddr());
+    }
+}
 
-            /**
-             * @TODO: May Need This Function for Endianness-Compatibility
-             *  mach_inst =
-             *    gtoh(*reinterpret_cast<MachInst *>(&cacheData[tid][offset]));
-             */
+void
+CacheUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
+{
+    tlbBlocked[tid] = false;
+}
 
-            DPRINTF(InOrderCachePort,
-                    "[tid:%i]: Fetched instruction is %08p\n",
-                    tid, mach_inst);
+Fault
+CacheUnit::read(DynInstPtr inst, Addr addr,
+                uint8_t *data, unsigned size, unsigned flags)
+{
+    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
+    assert(cache_req && "Can't Find Instruction for Read!");
+
+    // The block size of our peer
+    unsigned blockSize = this->cachePort->peerBlockSize();
 
-            // ExtMachInst ext_inst = makeExtMI(mach_inst, cpu->tcBase(tid));
+    //The size of the data we're trying to read.
+    int fullSize = size;
+    inst->totalSize = size;
 
-            inst->setMachInst(mach_inst);
-            inst->setASID(tid);
-            inst->setThreadState(cpu->thread[tid]);
+    if (inst->traceData) {
+        inst->traceData->setAddr(addr);
+    }
 
-            DPRINTF(InOrderStage, "[tid:%i]: Instruction [sn:%i] is: %s\n",
-                    tid, seq_num, inst->staticInst->disassemble(inst->PC));
+    if (inst->split2ndAccess) {     
+        size = inst->split2ndSize;
+        cache_req->splitAccess = true;        
+        cache_req->split2ndAccess = true;
+        
+        DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for "
+                "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
+                inst->split2ndAddr);
+    }  
+    
+
+    //The address of the second part of this access if it needs to be split
+    //across a cache line boundary.
+    Addr secondAddr = roundDown(addr + size - 1, blockSize);
+
+    
+    if (secondAddr > addr && !inst->split2ndAccess) {
+
+        if (!inst->splitInst) {
+            DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
+                    "(%#x, %#x).\n", curTick(), inst->seqNum, addr, secondAddr);
+
+            unsigned stage_num = cache_req->getStageNum();
+            unsigned cmd = inst->curSkedEntry->cmd;
+
+            // 1. Make A New Inst. Schedule w/Split Read/Complete Entered on
+            // the schedule
+            // ==============================
+            // 2. Reassign curSkedPtr to current command (InitiateRead) on new
+            // schedule
+            // ==============================
+            inst->splitInst = true;
+            inst->setBackSked(cpu->createBackEndSked(inst));
+            inst->curSkedEntry = inst->backSked->find(stage_num, cmd);
+        } else {
+            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read "
+                    "Access (1 of 2) for (%#x, %#x).\n", inst->readTid(),
+                    inst->seqNum, addr, secondAddr);
+        }
+
+        // Save All "Total" Split Information
+        // ==============================
+        inst->splitMemData = new uint8_t[size];
+
+        // Split Information for First Access
+        // ==============================
+        size = secondAddr - addr;
+        cache_req->splitAccess = true;
+
+        // Split Information for Second Access
+        // ==============================
+        inst->split2ndSize = addr + fullSize - secondAddr;
+        inst->split2ndAddr = secondAddr;            
+        inst->split2ndDataPtr = inst->splitMemData + size;
+        inst->split2ndFlags = flags;        
+    }
+    
+    doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Read);
 
-            // Set Up More TraceData info
-            if (inst->traceData) {
-                inst->traceData->setStaticInst(inst->staticInst);
-                inst->traceData->setPC(inst->readPC());
+    if (inst->fault == NoFault) {
+        if (!cache_req->splitAccess) {            
+            cache_req->reqData = new uint8_t[size];
+            doCacheAccess(inst, NULL);
+        } else {
+            if (!inst->split2ndAccess) {                
+                cache_req->reqData = inst->splitMemData;
+            } else {
+                cache_req->reqData = inst->split2ndDataPtr;                
             }
+            
+            doCacheAccess(inst, NULL, cache_req);            
+        }        
+    }
+
+    return inst->fault;
+}
+
+Fault
+CacheUnit::write(DynInstPtr inst, uint8_t *data, unsigned size,
+                 Addr addr, unsigned flags, uint64_t *write_res)
+{
+    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
+    assert(cache_req && "Can't Find Instruction for Write!");
+
+    // The block size of our peer
+    unsigned blockSize = this->cachePort->peerBlockSize();
+
+    //The size of the data we're trying to write.
+    int fullSize = size;
+    inst->totalSize = size;
 
-            cache_req->done();
+    if (inst->traceData) {
+        inst->traceData->setAddr(addr);
+    }
+
+    if (inst->split2ndAccess) {     
+        size = inst->split2ndSize;
+        cache_req->splitAccess = true;        
+        cache_req->split2ndAccess = true;
+        
+        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for "
+                "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
+                inst->split2ndAddr);
+    }  
+
+    //The address of the second part of this access if it needs to be split
+    //across a cache line boundary.
+    Addr secondAddr = roundDown(addr + size - 1, blockSize);
+
+    if (secondAddr > addr && !inst->split2ndAccess) {
+            
+        DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for "
+                "(%#x, %#x).\n", inst->seqNum, addr, secondAddr);
+
+        // Save All "Total" Split Information
+        // ==============================
+        inst->splitInst = true;        
+
+        if (!inst->splitInstSked) {
+            assert(0 && "Split Requests Not Supported for Now...");
+
+            // Schedule Split Read/Complete for Instruction
+            // ==============================
+            int stage_num = cache_req->getStageNum();
+            RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
+                inst->backSked : inst->frontSked;
+        
+            // this is just an arbitrarily high priority to ensure that this
+            // gets pushed to the back of the list
+            int stage_pri = 20;
+        
+            int isplit_cmd = CacheUnit::InitSecondSplitWrite;
+            inst_sked->push(new
+                            ScheduleEntry(stage_num,
+                                          stage_pri,
+                                          cpu->resPool->getResIdx(DCache),
+                                          isplit_cmd,
+                                          1));
+
+            int csplit_cmd = CacheUnit::CompleteSecondSplitWrite;
+            inst_sked->push(new
+                            ScheduleEntry(stage_num + 1,
+                                          1/*stage_pri*/,
+                                          cpu->resPool->getResIdx(DCache),
+                                          csplit_cmd,
+                                          1));
+            inst->splitInstSked = true;
         } else {
-            DPRINTF(InOrderCachePort,
-                    "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
-                    tid, inst->seqNum);
-            DPRINTF(InOrderStall,
-                    "STALL: [tid:%i]: Fetch miss from %08p\n",
-                    tid, cache_req->inst->readPC());
-            cache_req->setCompleted(false);
+            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Write "
+                    "Access (1 of 2) for (%#x, %#x).\n",
+                    inst->readTid(), inst->seqNum, addr, secondAddr);                   
         }
-        break;
+        
+        
+
+        // Split Information for First Access
+        // ==============================
+        size = secondAddr - addr;
+        cache_req->splitAccess = true;
+
+        // Split Information for Second Access
+        // ==============================
+        inst->split2ndSize = addr + fullSize - secondAddr;
+        inst->split2ndAddr = secondAddr;            
+        inst->split2ndFlags = flags;        
+        inst->splitInstSked = true;
+    }    
+        
+    doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Write);
+
+    if (inst->fault == NoFault) {
+        if (!cache_req->splitAccess) {
+            cache_req->reqData = new uint8_t[size];
+            memcpy(cache_req->reqData, data, size);
+
+            //inst->split2ndStoreDataPtr = cache_req->reqData;
+            //inst->split2ndStoreDataPtr += size;
+
+            doCacheAccess(inst, write_res);
+        } else {            
+            doCacheAccess(inst, write_res, cache_req);            
+        }        
+        
+    }
+    
+    return inst->fault;
+}
+
+
+void
+CacheUnit::execute(int slot_num)
+{
+    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
+    assert(cache_req);
+
+    if (cachePortBlocked &&
+        (cache_req->cmd == InitiateReadData ||
+         cache_req->cmd == InitiateWriteData ||
+         cache_req->cmd == InitSecondSplitRead ||
+         cache_req->cmd == InitSecondSplitWrite)) {
+        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
+        cache_req->done(false);
+        return;
+    }
+
+    DynInstPtr inst = cache_req->inst;
+    if (inst->fault != NoFault) {
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
+                "next stage.\n", inst->readTid(), inst->seqNum, inst->fault->name(),
+                inst->getMemAddr());
+        finishCacheUnitReq(inst, cache_req);
+        return;
+    }
+
+    if (inst->isSquashed()) {
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: [sn:%i]: Detected squashed instruction "
+                "next stage.\n", inst->readTid(), inst->seqNum);
+        finishCacheUnitReq(inst, cache_req);
+        return;
+    }
+
+#if TRACING_ON
+    ThreadID tid = inst->readTid();
+    std::string acc_type = "write";
+#endif
+
+    switch (cache_req->cmd)
+    {
 
       case InitiateReadData:
+#if TRACING_ON
+        acc_type = "read";
+#endif        
       case InitiateWriteData:
+        if (cachePortBlocked) {
+            DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
+            cache_req->done(false);
+            return;
+        }
+
         DPRINTF(InOrderCachePort,
-                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
-                tid, name(), cache_req->inst->getMemAddr());
+                "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
+                "addr. %08p\n", tid, inst->seqNum, acc_type, name(),
+                cache_req->inst->getMemAddr());
 
         inst->setCurResSlot(slot_num);
-        //inst->memAccess();
-        inst->initiateAcc();
+
+        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
+            inst->execute();
+        } else {
+            inst->initiateAcc();
+        }
+        
+        break;
+
+      case InitSecondSplitRead:
+        DPRINTF(InOrderCachePort,
+                "[tid:%u]: [sn:%i] Initiating split data read access to %s "
+                "for addr. %08p\n", tid, inst->seqNum, name(),
+                cache_req->inst->split2ndAddr);
+        inst->split2ndAccess = true;
+        assert(inst->split2ndAddr != 0);
+        read(inst, inst->split2ndAddr, &inst->split2ndData,
+             inst->totalSize, inst->split2ndFlags);
+        break;
+
+      case InitSecondSplitWrite:
+        DPRINTF(InOrderCachePort,
+                "[tid:%u]: [sn:%i] Initiating split data write access to %s "
+                "for addr. %08p\n", tid, inst->seqNum, name(),
+                cache_req->inst->getMemAddr());
+
+        inst->split2ndAccess = true;
+        assert(inst->split2ndAddr != 0);
+        write(inst, &inst->split2ndData, inst->totalSize,
+              inst->split2ndAddr, inst->split2ndFlags, NULL);
         break;
 
       case CompleteReadData:
-      case CompleteWriteData:
         DPRINTF(InOrderCachePort,
-                "[tid:%i]: [sn:%i]: Trying to Complete Data Access\n",
+                "[tid:%i]: [sn:%i]: Trying to Complete Data Read Access\n",
                 tid, inst->seqNum);
-        if (cache_req->isMemAccComplete()) {
-            cache_req->done();
+
+
+        //@todo: timing translations need to check here...
+        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefetches");
+        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
+            finishCacheUnitReq(inst, cache_req);
         } else {
             DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                     tid, cache_req->inst->getMemAddr());
             cache_req->setCompleted(false);
+            cache_req->setMemStall(true);            
         }
         break;
 
+      case CompleteWriteData:
+        {
+            DPRINTF(InOrderCachePort,
+                    "[tid:%i]: [sn:%i]: Trying to Complete Data Write Access\n",
+                    tid, inst->seqNum);
+
+
+            //@todo: check that timing translation is finished here
+            RequestPtr mem_req = cache_req->memReq;
+            if (mem_req->isCondSwap() || mem_req->isLLSC() || mem_req->isSwap()) {
+                DPRINTF(InOrderCachePort, "Detected Conditional Store Inst.\n");
+
+                if (!cache_req->isMemAccComplete()) {
+                    DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
+                            tid, cache_req->inst->getMemAddr());
+                    cache_req->setCompleted(false);
+                    cache_req->setMemStall(true);
+                    return;
+                } else {
+                    DPRINTF(InOrderStall, "Mem Acc Completed\n");
+                }
+            }
+
+            if (cache_req->isMemAccPending()) {
+                DPRINTF(InOrderCachePort, "Store Instruction Pending Completion.\n");
+                cache_req->dataPkt->reqData = cache_req->reqData;
+                cache_req->dataPkt->memReq = cache_req->memReq;
+            } else
+                DPRINTF(InOrderCachePort, "Store Instruction Finished Completion.\n");
+
+            //@todo: if split inst save data
+            finishCacheUnitReq(inst, cache_req);
+        }
+        break;
+
+      case CompleteSecondSplitRead:
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
+                "Access\n", tid, inst->seqNum);
+
+        //@todo: check that timing translation is finished here
+        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefetches");
+        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
+            finishCacheUnitReq(inst, cache_req);
+        } else {
+            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
+                    tid, cache_req->inst->split2ndAddr);
+            cache_req->setCompleted(false);
+            cache_req->setMemStall(true);            
+        }
+        break;
+
+      case CompleteSecondSplitWrite:
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
+                "Access\n", tid, inst->seqNum);
+        //@todo: is it illegal to have an unaligned cond. swap or llsc?
+        assert(!cache_req->memReq->isSwap() && !cache_req->memReq->isCondSwap()
+               && !cache_req->memReq->isLLSC());
+
+        if (cache_req->isMemAccPending()) {
+            cache_req->dataPkt->reqData = cache_req->reqData;
+            cache_req->dataPkt->memReq = cache_req->memReq;
+        }
+
+        //@todo: check that timing translation is finished here
+        finishCacheUnitReq(inst, cache_req);
+        break;
+        
       default:
         fatal("Unrecognized command to %s", resName);
     }
 }
 
-Fault
-CacheUnit::doDataAccess(DynInstPtr inst)
+void
+CacheUnit::finishCacheUnitReq(DynInstPtr inst, CacheRequest *cache_req)
 {
-    Fault fault = NoFault;
-    int tid = 0;
-
-    tid = inst->readTid();
-
-    CacheReqPtr cache_req
-        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
-    assert(cache_req);
-
-    cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
-                                            Packet::Broadcast);
+    //@note: add back in for speculative load/store capability
+    //removeAddrDependency(inst);
+    cache_req->setMemStall(false);
+    cache_req->done();
+}
 
-    if (cache_req->dataPkt->isRead()) {
-        cache_req->dataPkt->dataStatic(cache_req->reqData);
-    } else if (cache_req->dataPkt->isWrite()) {
-        cache_req->dataPkt->dataStatic(&cache_req->inst->storeData);
+void
+CacheUnit::buildDataPacket(CacheRequest *cache_req)
+{
+    // Check for LL/SC and if so change command
+    if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
+        cache_req->pktCmd = MemCmd::LoadLockedReq;
+    }
 
+    if (cache_req->pktCmd == MemCmd::WriteReq) {
+        cache_req->pktCmd =
+            cache_req->memReq->isSwap() ? MemCmd::SwapReq :
+            (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq 
+             : MemCmd::WriteReq);
     }
 
-    cache_req->dataPkt->time = curTick;
+    cache_req->dataPkt = new CacheReqPacket(cache_req,
+                                            cache_req->pktCmd,
+                                            Packet::Broadcast,
+                                            cache_req->instIdx);
+    DPRINTF(InOrderCachePort, "[slot:%i]: Slot marked for %x\n",
+            cache_req->getSlot(),
+            cache_req->dataPkt->getAddr());
 
-    bool do_access = true;  // flag to suppress cache access
+    cache_req->dataPkt->hasSlot = true;
+    cache_req->dataPkt->dataStatic(cache_req->reqData);
+}
 
-    Request *memReq = cache_req->dataPkt->req;
+void
+CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
+                         CacheReqPtr split_req)
+{
+    Fault fault = NoFault;
+#if TRACING_ON
+    ThreadID tid = inst->readTid();
+#endif
+    bool do_access = true;  // flag to suppress cache access
 
-    if (cache_req->dataPkt->isWrite() && memReq->isLocked()) {
-        assert(cache_req->inst->isStoreConditional());
-        DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
-        do_access = TheISA::handleLockedWrite(cpu, memReq);
+    // Special Handling if this is a split request
+    CacheReqPtr cache_req;
+    if (split_req == NULL)
+        cache_req = dynamic_cast<CacheReqPtr>(reqs[inst->getCurResSlot()]);
+    else {
+        cache_req = split_req;
+        assert(0);
     }
 
+    // Make a new packet inside the CacheRequest object
+    assert(cache_req);
+    buildDataPacket(cache_req);
+
+    // Special Handling for LL/SC or Compare/Swap
+     bool is_write = cache_req->dataPkt->isWrite();
+     RequestPtr mem_req = cache_req->dataPkt->req;
+     if (is_write) {
+         DPRINTF(InOrderCachePort,
+                 "[tid:%u]: [sn:%i]: Storing data: %s\n",
+                 tid, inst->seqNum,
+                 printMemData(cache_req->dataPkt->getPtr<uint8_t>(),
+                              cache_req->dataPkt->getSize()));
+
+        if (mem_req->isCondSwap()) {
+             assert(write_res);
+             cache_req->memReq->setExtraData(*write_res);
+         }
+        if (mem_req->isLLSC()) {
+            assert(cache_req->inst->isStoreConditional());
+            DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
+            do_access = TheISA::handleLockedWrite(inst.get(), mem_req);
+        }
+     }
+
+    // Finally, go ahead and make the access if we can...
     DPRINTF(InOrderCachePort,
-            "[tid:%i] [sn:%i] attempting to access cache\n",
-            tid, inst->seqNum);
+            "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
+            tid, inst->seqNum, cache_req->dataPkt->getAddr());
 
-    //@TODO: If you want to ignore failed store conditional accesses, then
-    //       enable this. However, this might skew memory stats because
-    //       the failed store conditional access will get ignored.
-    // - Remove optionality here ...
-    if (1/*do_access*/) {
+    if (do_access) {
         if (!cachePort->sendTiming(cache_req->dataPkt)) {
             DPRINTF(InOrderCachePort,
-                    "[tid:%i] [sn:%i] is waiting to retry request\n",
-                    tid, inst->seqNum);
-
-            retrySlot = cache_req->getSlot();
-            retryReq = cache_req;
-            retryPkt = cache_req->dataPkt;
+                    "[tid:%i] [sn:%i] cannot access cache, because port "
+                    "is blocked. now waiting to retry request\n", tid, 
+                    inst->seqNum);
+            delete cache_req->dataPkt;
+            cache_req->dataPkt = NULL;
 
-            cacheStatus = cacheWaitRetry;
+            delete cache_req->memReq;
+            cache_req->memReq = NULL;
 
-            //cacheBlocked = true;
-
-            DPRINTF(InOrderStall, "STALL: \n");
-
-            cache_req->setCompleted(false);
+            cache_req->done(false);
+            cachePortBlocked = true;
         } else {
             DPRINTF(InOrderCachePort,
                     "[tid:%i] [sn:%i] is now waiting for cache response\n",
                     tid, inst->seqNum);
             cache_req->setCompleted();
             cache_req->setMemAccPending();
-            cacheStatus = cacheWaitResponse;
-            cacheBlocked = false;
+            cachePortBlocked = false;
         }
-    } else if (!do_access && memReq->isLocked()){
+    } else if (mem_req->isLLSC()){
         // Store-Conditional instructions complete even if they "failed"
         assert(cache_req->inst->isStoreConditional());
         cache_req->setCompleted(true);
@@ -409,151 +942,316 @@ CacheUnit::doDataAccess(DynInstPtr inst)
                 "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                 tid, tid);
 
-        cache_req->dataPkt->req->setExtraData(0);
-
         processCacheCompletion(cache_req->dataPkt);
-
-        // Automatically set these since we ignored the memory access
-        //cache_req->setMemAccPending(false);
-        //cache_req->setMemAccCompleted();
     } else {
+        delete cache_req->dataPkt;
+        cache_req->dataPkt = NULL;
+
+        delete cache_req->memReq;
+        cache_req->memReq = NULL;
+
         // Make cache request again since access due to
         // inability to access
         DPRINTF(InOrderStall, "STALL: \n");
-        cache_req->setCompleted(false);
+        cache_req->done(false);
     }
 
-    return fault;
 }
 
-void
-CacheUnit::processCacheCompletion(PacketPtr pkt)
+bool
+CacheUnit::processSquash(CacheReqPacket *cache_pkt)
 {
-    // Cast to correct packet type
-    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
-    assert(cache_pkt);
+    // The resource may no longer be actively servicing this
+    // packet. Scenarios like a store that has been sent to the
+    // memory system or an access that's been squashed. If that's
+    // the case, we can't access the request slot because it
+    // will be either invalid or servicing another request.
+    if (!cache_pkt->hasSlot) {
+        DPRINTF(InOrderCachePort,
+                "%x does not have a slot in unit, ignoring.\n",
+                cache_pkt->getAddr());
+
+        if (cache_pkt->reqData) {
+            delete [] cache_pkt->reqData;
+            cache_pkt->reqData = NULL;
+        }
+
+        if (cache_pkt->memReq) {
+            delete cache_pkt->memReq;
+            cache_pkt->memReq = NULL;
+        }
+
+        delete cache_pkt;
+        cache_pkt = NULL;
+        cpu->wakeCPU();
+        return true;
+    } else {
+        DPRINTF(InOrderCachePort, "%x has slot %i\n",
+                cache_pkt->getAddr(), cache_pkt->cacheReq->getSlot());
+    }
+
 
+    // It's possible that the request is squashed but the
+    // packet is still acknowledged by the resource. Squashes
+    // should happen at the end of the cycles and trigger the
+    // code above, but if not, this would handle any timing
+    // variations due to diff. user parameters.
     if (cache_pkt->cacheReq->isSquashed()) {
         DPRINTF(InOrderCachePort,
                 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                 cache_pkt->cacheReq->getInst()->readTid(),
                 cache_pkt->cacheReq->getInst()->seqNum);
 
-        cache_pkt->cacheReq->done();
-        return;
+        cache_pkt->cacheReq->setMemAccPending(false);
+        cache_pkt->cacheReq->freeSlot();
+        delete cache_pkt;
+        cache_pkt = NULL;
+        cpu->wakeCPU();
+        return true;
     }
 
-    DPRINTF(InOrderCachePort,
-            "[tid:%u]: [sn:%i]: Waking from cache access to addr. %08p\n",
-            cache_pkt->cacheReq->getInst()->readTid(),
-            cache_pkt->cacheReq->getInst()->seqNum,
-            cache_pkt->cacheReq->getInst()->getMemAddr());
 
-    // Cast to correct request type
+    return false;
+}
+
+void
+CacheUnit::processCacheCompletion(PacketPtr pkt)
+{
+    //@todo: use packet sender state instead of deriving from packet class to
+    //  get special state
+    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
+    assert(cache_pkt);
+
+    DPRINTF(InOrderCachePort, "Finished request for %x\n", pkt->getAddr());
+
+    if (processSquash(cache_pkt))
+        return;
+
     CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
-        findRequest(cache_pkt->cacheReq->getInst()));
+        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));
+
+    if (!cache_req) {
+        panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
+              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
+              cache_pkt->cacheReq->getInst()->seqNum,
+              cache_pkt->cacheReq->getInst()->getMemAddr());
+    }
+    
     assert(cache_req);
+    assert(cache_req == cache_pkt->cacheReq);
 
+    DPRINTF(InOrderCachePort,
+            "[tid:%u]: [sn:%i]: [slot:%i] Waking from cache access (vaddr.%08p, paddr:%08p)\n",
+            cache_pkt->cacheReq->getInst()->readTid(),
+            cache_pkt->cacheReq->getInst()->seqNum,
+            cache_req->getSlot(),
+            cache_pkt->req->getVaddr(),
+            cache_pkt->req->getPaddr());
 
     // Get resource request info
-    // @todo: SMT needs to figure out where to get thread # from.
-    unsigned tid = 0;
     unsigned stage_num = cache_req->getStageNum();
     DynInstPtr inst = cache_req->inst;
+    ThreadID tid = cache_req->inst->readTid();
 
-    if (!cache_req->isSquashed()) {
-        if (inst->resSched.top()->cmd == CompleteFetch) {
-            DPRINTF(InOrderCachePort,
-                    "[tid:%u]: [sn:%i]: Processing fetch access\n",
-                    tid, inst->seqNum);
-        } else if (inst->staticInst && inst->isMemRef()) {
-            DPRINTF(InOrderCachePort,
-                    "[tid:%u]: [sn:%i]: Processing cache access\n",
-                    tid, inst->seqNum);
+    assert(!cache_req->isSquashed());
+    assert(inst->staticInst && inst->isMemRef());
 
-            inst->completeAcc(pkt);
 
-            if (inst->isLoad()) {
-                assert(cache_pkt->isRead());
+    DPRINTF(InOrderCachePort,
+            "[tid:%u]: [sn:%i]: Processing cache access\n",
+            tid, inst->seqNum);
 
-                if (cache_pkt->req->isLocked()) {
-                    DPRINTF(InOrderCachePort,
-                            "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
-                            tid, inst->seqNum);
-                    TheISA::handleLockedRead(cpu, cache_pkt->req);
-                }
+    PacketPtr split_pkt = NULL;
+    if (inst->splitInst) {
+        inst->splitFinishCnt++;
 
-                // @TODO: Hardcoded to for load instructions. Assumes that
-                // the dest. idx 0 is always where the data is loaded to.
-                DPRINTF(InOrderCachePort,
-                        "[tid:%u]: [sn:%i]: Data loaded was: %08p\n",
-                        tid, inst->seqNum, inst->readIntResult(0));
-            } else if(inst->isStore()) {
-                assert(cache_pkt->isWrite());
+        if (inst->splitFinishCnt == 2) {
+            cache_req->memReq->setVirt(0/*inst->tid*/,
+                                       inst->getMemAddr(),
+                                       inst->totalSize,
+                                       0,
+                                       0);
 
-                DPRINTF(InOrderCachePort,
-                        "[tid:%u]: [sn:%i]: Data stored was: %08p\n",
-                        tid, inst->seqNum,
-                        getMemData(cache_pkt));
+            split_pkt = new Packet(cache_req->memReq, cache_req->pktCmd,
+                                   Packet::Broadcast);
+            split_pkt->dataStatic(inst->splitMemData);
 
-            }
+            DPRINTF(InOrderCachePort, "Completing Split Access.\n");
+            inst->completeAcc(split_pkt);
         }
+    } else {
+        inst->completeAcc(cache_pkt);
+    }
 
-        cache_req->setMemAccPending(false);
-        cache_req->setMemAccCompleted();
+    inst->setExecuted();
 
-        // Wake up the CPU (if it went to sleep and was waiting on this
-        // completion event).
-        cpu->wakeCPU();
+    if (inst->isLoad()) {
+        assert(cache_pkt->isRead());
 
-        DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
-            tid, cpu->pipelineStage[stage_num]->name());
+        if (cache_pkt->req->isLLSC()) {
+            DPRINTF(InOrderCachePort,
+                    "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
+                    tid, inst->seqNum);
+            TheISA::handleLockedRead(inst.get(), cache_pkt->req);
+        }
+
+        DPRINTF(InOrderCachePort,
+                "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
+                tid, inst->seqNum,
+                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
+                                           split_pkt->getSize()) :
+                              printMemData(cache_pkt->getPtr<uint8_t>(),
+                                           cache_pkt->getSize()));
+    } else if(inst->isStore()) {
+        assert(cache_pkt->isWrite());
 
-        cpu->switchToActive(stage_num);
-    } else {
         DPRINTF(InOrderCachePort,
-                "[tid:%u] Miss on block @ %08p completed, but squashed\n",
-                tid, cache_req->inst->readPC());
-        cache_req->setMemAccCompleted();
+                "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
+                tid, inst->seqNum,
+                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
+                                           split_pkt->getSize()) :
+                              printMemData(cache_pkt->getPtr<uint8_t>(),
+                                           cache_pkt->getSize()));
     }
 
-    inst->unsetMemAddr();
+
+    if (split_pkt) {
+        delete split_pkt;
+        split_pkt = NULL;
+    }
+
+    cache_req->setMemAccPending(false);
+    cache_req->setMemAccCompleted();
+
+    if (cache_req->isMemStall() &&
+        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
+        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
+                tid);
+            
+        cpu->activateContext(tid);
+            
+        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
+                "miss.\n", tid);
+    }
+        
+    // Wake up the CPU (if it went to sleep and was waiting on this
+    // completion event).
+    cpu->wakeCPU();
+
+    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
+            tid, cpu->pipelineStage[stage_num]->name());
+
+    cpu->switchToActive(stage_num);
 }
 
 void
 CacheUnit::recvRetry()
 {
-    DPRINTF(InOrderCachePort, "Retrying Request for [tid:%i] [sn:%i]\n",
-            retryReq->inst->readTid(), retryReq->inst->seqNum);
+    DPRINTF(InOrderCachePort, "Unblocking Cache Port. \n");
+    
+    assert(cachePortBlocked);
 
-    assert(retryPkt != NULL);
-    assert(cacheBlocked);
-    assert(cacheStatus == cacheWaitRetry);
+    // Clear the cache port for use again
+    cachePortBlocked = false;
 
-    if (cachePort->sendTiming(retryPkt)) {
-        cacheStatus = cacheWaitResponse;
-        retryPkt = NULL;
-        cacheBlocked = false;
-    } else {
-        DPRINTF(InOrderCachePort,
-                "Retry Request for [tid:%i] [sn:%i] failed\n",
-                retryReq->inst->readTid(), retryReq->inst->seqNum);
+    cpu->wakeCPU();
+}
+
+CacheUnitEvent::CacheUnitEvent()
+    : ResourceEvent()
+{ }
+
+void
+CacheUnitEvent::process()
+{
+    DynInstPtr inst = resource->reqs[slotIdx]->inst;
+    int stage_num = resource->reqs[slotIdx]->getStageNum();
+    ThreadID tid = inst->threadNumber;
+    CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqs[slotIdx]);
+
+    DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
+            inst->seqNum);
+
+    CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
+    assert(tlb_res);
+
+    //@todo: eventually, we should do a timing translation w/
+    //       hw page table walk on tlb miss
+    DPRINTF(InOrderTLB, "Handling Fault %s : [sn:%i] %x\n", inst->fault->name(), inst->seqNum, inst->getMemAddr());
+    inst->fault->invoke(tlb_res->cpu->tcBase(tid), inst->staticInst);
+
+    tlb_res->tlbBlocked[tid] = false;
+
+    tlb_res->cpu->pipelineStage[stage_num]->
+        unsetResStall(tlb_res->reqs[slotIdx], tid);
+
+    req_ptr->tlbStall = false;
+
+    //@todo: timing translation needs to have some type of independent
+    //       info regarding if it's squashed or not so we can
+    //       free up the resource if a request gets squashed in the middle
+    //       of a table walk
+    if (req_ptr->isSquashed()) {
+        req_ptr->freeSlot();
     }
+
+    tlb_res->cpu->wakeCPU();
 }
 
 void
-CacheUnit::squash(DynInstPtr inst, int stage_num,
-                  InstSeqNum squash_seq_num, unsigned tid)
+CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
+                               InstSeqNum squash_seq_num, ThreadID tid)
 {
-    vector<int> slot_remove_list;
+    // If squashing due to memory stall, then we do NOT want to 
+    // squash the instruction that caused the stall so we
+    // increment the sequence number here to prevent that.
+    //
+    // NOTE: This is only for the SwitchOnCacheMiss Model
+    // NOTE: If you have multiple outstanding misses from the same
+    //       thread then you need to reevaluate this code
+    // NOTE: squash should originate from 
+    //       pipeline_stage.cc:processInstSchedule
+    DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n", 
+            squash_seq_num + 1);
+    
+    squash(inst, stage_num, squash_seq_num + 1, tid);    
+}
 
-    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
-    map<int, ResReqPtr>::iterator map_end = reqMap.end();
+void
+CacheUnit::squashCacheRequest(CacheReqPtr req_ptr)
+{
+    DynInstPtr inst =  req_ptr->getInst();
+    req_ptr->setSquashed();
+    inst->setSquashed();
+
+    //@note: add back in for speculative load/store capability
+    /*if (inst->validMemAddr()) {
+        DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to "
+                "remove addr. %08p dependencies.\n",
+                inst->readTid(),
+                inst->seqNum,
+                inst->getMemAddr());
+
+        removeAddrDependency(inst);
+    }*/
+}
 
-    while (map_it != map_end) {
-        ResReqPtr req_ptr = (*map_it).second;
 
-        if (req_ptr &&
+void
+CacheUnit::squash(DynInstPtr inst, int stage_num,
+                  InstSeqNum squash_seq_num, ThreadID tid)
+{
+    if (tlbBlocked[tid] &&
+        tlbBlockSeqNum[tid] > squash_seq_num) {
+        DPRINTF(InOrderCachePort, "Releasing TLB Block due to "
+                " squash after [sn:%i].\n", squash_seq_num);
+        tlbBlocked[tid] = false;
+    }
+
+    for (int i = 0; i < width; i++) {
+        ResReqPtr req_ptr = reqs[i];
+
+        if (req_ptr->valid &&
             req_ptr->getInst()->readTid() == tid &&
             req_ptr->getInst()->seqNum > squash_seq_num) {
 
@@ -561,49 +1259,70 @@ CacheUnit::squash(DynInstPtr inst, int stage_num,
                     "[tid:%i] Squashing request from [sn:%i]\n",
                     req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
 
-            req_ptr->setSquashed();
-
-            req_ptr->getInst()->setSquashed();
+            if (req_ptr->isSquashed()) {
+                DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already "
+                        "squashed, ignoring squash process.\n",
+                        req_ptr->getInst()->readTid(),
+                        req_ptr->getInst()->seqNum);
+                continue;                
+            }
 
             CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
             assert(cache_req);
 
-            if (!cache_req->isMemAccPending()) {
-                // Mark request for later removal
-                cpu->reqRemoveList.push(req_ptr);
+            squashCacheRequest(cache_req);
 
-                // Mark slot for removal from resource
-                slot_remove_list.push_back(req_ptr->getSlot());
+            int req_slot_num = req_ptr->getSlot();
+
+            if (cache_req->tlbStall) {
+                tlbBlocked[tid] = false;
+
+                int stall_stage = reqs[req_slot_num]->getStageNum();
+
+                cpu->pipelineStage[stall_stage]->
+                    unsetResStall(reqs[req_slot_num], tid);
+            }
+
+            if (cache_req->isMemAccPending()) {
+                cache_req->dataPkt->reqData = cache_req->reqData;
+                cache_req->dataPkt->memReq = cache_req->memReq;
             }
-        }
 
-        map_it++;
+            if (!cache_req->tlbStall)
+                freeSlot(req_slot_num);
+        }
     }
 
-    // Now Delete Slot Entry from Req. Map
-    for (int i = 0; i < slot_remove_list.size(); i++)
-        freeSlot(slot_remove_list[i]);
 }
 
-uint64_t
-CacheUnit::getMemData(Packet *packet)
+void
+CacheRequest::clearRequest()
 {
-    switch (packet->getSize())
-    {
-      case 8:
-        return packet->get<uint8_t>();
-
-      case 16:
-        return packet->get<uint16_t>();
+    if (!memAccPending) {
+        if (reqData && !splitAccess)
+            delete [] reqData;
 
-      case 32:
-        return packet->get<uint32_t>();
+        if (memReq)
+            delete memReq;
 
-      case 864:
-        return packet->get<uint64_t>();
-
-      default:
-        panic("bad store data size = %d\n", packet->getSize());
+        if (dataPkt)
+            delete dataPkt;
+    } else {
+        if (dataPkt)
+            dataPkt->hasSlot = false;
     }
-}
 
+    memReq = NULL;
+    reqData = NULL;
+    dataPkt = NULL;
+    memAccComplete = false;
+    memAccPending = false;
+    tlbStall = false;
+    splitAccess = false;
+    splitAccessNum = -1;
+    split2ndAccess = false;
+    instIdx = 0;
+    fetchBufferFill = false;
+
+    ResourceRequest::clearRequest();
+}