pwr: Add logic to enter power gating for the CPU model
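
This change hooks the timing CPU into the CPU power gating machinery:
drain() now deschedules any pending power gating event before the CPU
drains, and drainResume() re-arms it once threads are active again, so
the model cannot enter power gating while it is draining or switched out.

As a rough standalone sketch of that pairing (illustrative only, not gem5
code; the real schedulePowerGatingEvent()/deschedulePowerGatingEvent()
hooks are defined outside this file, in the base CPU model):

    #include <iostream>
    #include <optional>

    // Toy stand-in for the CPU model; the method names mirror the hooks
    // called in this diff, but the bodies are purely illustrative.
    struct ToyTimingCPU {
        std::optional<long> powerGatingTick; // pending gating event, if any

        void schedulePowerGatingEvent(long now) {
            powerGatingTick = now + 100;     // arbitrary gating delay
            std::cout << "gating armed for tick " << *powerGatingTick << "\n";
        }

        void deschedulePowerGatingEvent() {
            if (powerGatingTick) {
                powerGatingTick.reset();
                std::cout << "gating event cancelled\n";
            }
        }

        // Mirrors the patch: cancel gating when a drain starts, re-arm it
        // when the CPU resumes.
        void drain()               { deschedulePowerGatingEvent(); }
        void drainResume(long now) { schedulePowerGatingEvent(now); }
    };

    int main() {
        ToyTimingCPU cpu;
        cpu.drainResume(0); // resume from a checkpoint: gating re-armed
        cpu.drain();        // drain for a switchover: gating cancelled
    }
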
[gem5.git] / src / cpu / simple / timing.cc
index 5437e77aadbcf52bade9af849285b722dc3b923e..f57354d56d73812b9636c1f647e5c62acb119a72 100644
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright 2014 Google, Inc.
+ * Copyright (c) 2010-2013,2015,2017 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
  * Authors: Steve Reinhardt
  */
 
+#include "cpu/simple/timing.hh"
+
 #include "arch/locked_mem.hh"
 #include "arch/mmapped_ipr.hh"
 #include "arch/utility.hh"
 #include "base/bigint.hh"
 #include "config/the_isa.hh"
-#include "cpu/simple/timing.hh"
 #include "cpu/exetrace.hh"
 #include "debug/Config.hh"
 #include "debug/Drain.hh"
 #include "debug/ExecFaulting.hh"
+#include "debug/Mwait.hh"
 #include "debug/SimpleCPU.hh"
 #include "mem/packet.hh"
 #include "mem/packet_access.hh"
@@ -64,18 +67,7 @@ using namespace TheISA;
 void
 TimingSimpleCPU::init()
 {
-    BaseCPU::init();
-
-    // Initialise the ThreadContext's memory proxies
-    tcBase()->initMemProxies(tcBase());
-
-    if (FullSystem && !params()->defer_registration) {
-        for (int i = 0; i < threadContexts.size(); ++i) {
-            ThreadContext *tc = threadContexts[i];
-            // initialize CPU, including PC
-            TheISA::initCPU(tc, _cpuId);
-        }
-    }
+    BaseSimpleCPU::init();
 }
 
 void
@@ -88,119 +80,152 @@ TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
 TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
     : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
       dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
-      fetchEvent(this)
+      fetchEvent([this]{ fetch(); }, name())
 {
     _status = Idle;
-
-    changeState(SimObject::Running);
-    system->totalNumInsts = 0;
 }
 
 
+
 TimingSimpleCPU::~TimingSimpleCPU()
 {
 }
 
-void
-TimingSimpleCPU::serialize(ostream &os)
+DrainState
+TimingSimpleCPU::drain()
 {
-    SimObject::State so_state = SimObject::getState();
-    SERIALIZE_ENUM(so_state);
-    BaseSimpleCPU::serialize(os);
-}
+    // Deschedule the power gating event (if any)
+    deschedulePowerGatingEvent();
 
-void
-TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
-{
-    SimObject::State so_state;
-    UNSERIALIZE_ENUM(so_state);
-    BaseSimpleCPU::unserialize(cp, section);
-}
+    if (switchedOut())
+        return DrainState::Drained;
 
-unsigned int
-TimingSimpleCPU::drain(Event *drain_event)
-{
-    // TimingSimpleCPU is ready to drain if it's not waiting for
-    // an access to complete.
-    if (_status == Idle || _status == Running || _status == SwitchedOut) {
-        changeState(SimObject::Drained);
-        return 0;
+    if (_status == Idle ||
+        (_status == BaseSimpleCPU::Running && isDrained())) {
+        DPRINTF(Drain, "No need to drain.\n");
+        activeThreads.clear();
+        return DrainState::Drained;
     } else {
-        changeState(SimObject::Draining);
-        drainEvent = drain_event;
-        DPRINTF(Drain, "CPU not drained\n");
-        return 1;
+        DPRINTF(Drain, "Requesting drain.\n");
+
+        // The fetch event can become descheduled if a drain didn't
+        // succeed on the first attempt. We need to reschedule it if
+        // the CPU is waiting for a microcode routine to complete.
+        if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
+            schedule(fetchEvent, clockEdge());
+
+        return DrainState::Draining;
     }
 }
 
 void
-TimingSimpleCPU::resume()
+TimingSimpleCPU::drainResume()
 {
+    assert(!fetchEvent.scheduled());
+    if (switchedOut())
+        return;
+
     DPRINTF(SimpleCPU, "Resume\n");
-    if (_status != SwitchedOut && _status != Idle) {
-        assert(system->getMemoryMode() == Enums::timing);
+    verifyMemoryMode();
+
+    assert(!threadContexts.empty());
+
+    _status = BaseSimpleCPU::Idle;
+
+    for (ThreadID tid = 0; tid < numThreads; tid++) {
+        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
+            threadInfo[tid]->notIdleFraction = 1;
+
+            activeThreads.push_back(tid);
 
-        if (fetchEvent.scheduled())
-           deschedule(fetchEvent);
+            _status = BaseSimpleCPU::Running;
 
-        schedule(fetchEvent, nextCycle());
+            // Fetch if any threads active
+            if (!fetchEvent.scheduled()) {
+                schedule(fetchEvent, nextCycle());
+            }
+        } else {
+            threadInfo[tid]->notIdleFraction = 0;
+        }
     }
 
-    changeState(SimObject::Running);
+    // Reschedule the power gating event (if any)
+    schedulePowerGatingEvent();
+
+    system->totalNumInsts = 0;
+}
+
+bool
+TimingSimpleCPU::tryCompleteDrain()
+{
+    if (drainState() != DrainState::Draining)
+        return false;
+
+    DPRINTF(Drain, "tryCompleteDrain.\n");
+    if (!isDrained())
+        return false;
+
+    DPRINTF(Drain, "CPU done draining, processing drain event\n");
+    signalDrainDone();
+
+    return true;
 }
 
 void
 TimingSimpleCPU::switchOut()
 {
-    assert(_status == Running || _status == Idle);
-    _status = SwitchedOut;
-    numCycles += curCycle() - previousCycle;
+    SimpleExecContext& t_info = *threadInfo[curThread];
+    M5_VAR_USED SimpleThread* thread = t_info.thread;
 
-    // If we've been scheduled to resume but are then told to switch out,
-    // we'll need to cancel it.
-    if (fetchEvent.scheduled())
-        deschedule(fetchEvent);
+    BaseSimpleCPU::switchOut();
+
+    assert(!fetchEvent.scheduled());
+    assert(_status == BaseSimpleCPU::Running || _status == Idle);
+    assert(!t_info.stayAtPC);
+    assert(thread->microPC() == 0);
+
+    updateCycleCounts();
 }
 
 
 void
 TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
 {
-    BaseCPU::takeOverFrom(oldCPU);
-
-    // if any of this CPU's ThreadContexts are active, mark the CPU as
-    // running and schedule its tick event.
-    for (int i = 0; i < threadContexts.size(); ++i) {
-        ThreadContext *tc = threadContexts[i];
-        if (tc->status() == ThreadContext::Active && _status != Running) {
-            _status = Running;
-            break;
-        }
-    }
+    BaseSimpleCPU::takeOverFrom(oldCPU);
 
-    if (_status != Running) {
-        _status = Idle;
-    }
-    assert(threadContexts.size() == 1);
     previousCycle = curCycle();
 }
 
-
 void
-TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
+TimingSimpleCPU::verifyMemoryMode() const
 {
-    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
+    if (!system->isTimingMode()) {
+        fatal("The timing CPU requires the memory system to be in "
+              "'timing' mode.\n");
+    }
+}
 
-    assert(thread_num == 0);
-    assert(thread);
+void
+TimingSimpleCPU::activateContext(ThreadID thread_num)
+{
+    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
 
-    assert(_status == Idle);
+    assert(thread_num < numThreads);
 
-    notIdleFraction++;
-    _status = Running;
+    threadInfo[thread_num]->notIdleFraction = 1;
+    if (_status == BaseSimpleCPU::Idle)
+        _status = BaseSimpleCPU::Running;
 
     // kick things off by initiating the fetch of the next instruction
-    schedule(fetchEvent, clockEdge(delay));
+    if (!fetchEvent.scheduled())
+        schedule(fetchEvent, clockEdge(Cycles(0)));
+
+    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
+         == activeThreads.end()) {
+        activeThreads.push_back(thread_num);
+    }
+
+    BaseCPU::activateContext(thread_num);
 }
 
 
@@ -209,27 +234,42 @@ TimingSimpleCPU::suspendContext(ThreadID thread_num)
 {
     DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
 
-    assert(thread_num == 0);
-    assert(thread);
+    assert(thread_num < numThreads);
+    activeThreads.remove(thread_num);
 
     if (_status == Idle)
         return;
 
-    assert(_status == Running);
+    assert(_status == BaseSimpleCPU::Running);
 
-    // just change status to Idle... if status != Running,
-    // completeInst() will not initiate fetch of next instruction.
+    threadInfo[thread_num]->notIdleFraction = 0;
 
-    notIdleFraction--;
-    _status = Idle;
+    if (activeThreads.empty()) {
+        _status = Idle;
+
+        if (fetchEvent.scheduled()) {
+            deschedule(fetchEvent);
+        }
+    }
+
+    BaseCPU::suspendContext(thread_num);
 }
 
 bool
 TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
 {
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
+
     RequestPtr req = pkt->req;
+
+    // We're about to issue a locked load, so tell the monitor
+    // to start caring about this address
+    if (pkt->isRead() && pkt->req->isLLSC()) {
+        TheISA::handleLockedRead(thread, pkt->req);
+    }
     if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
         new IprEvent(pkt, this, clockEdge(delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;
@@ -248,9 +288,11 @@ void
 TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
                           bool read)
 {
-    PacketPtr pkt;
-    buildPacket(pkt, req, read);
-    pkt->dataDynamicArray<uint8_t>(data);
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
+
+    PacketPtr pkt = buildPacket(req, read);
+    pkt->dataDynamic<uint8_t>(data);
     if (req->getFlags().isSet(Request::NO_ACCESS)) {
         assert(!dcache_pkt);
         pkt->makeResponse();
@@ -261,7 +303,7 @@ TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
         bool do_access = true;  // flag to suppress cache access
 
         if (req->isLLSC()) {
-            do_access = TheISA::handleLockedWrite(thread, req);
+            do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
         } else if (req->isCondSwap()) {
             assert(res);
             req->setExtraData(*res);
@@ -270,6 +312,7 @@ TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
         if (do_access) {
             dcache_pkt = pkt;
             handleWritePacket();
+            threadSnoop(pkt, curThread);
         } else {
             _status = DcacheWaitResponse;
             completeDataAccess(pkt);
@@ -315,12 +358,11 @@ TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
 }
 
 void
-TimingSimpleCPU::translationFault(Fault fault)
+TimingSimpleCPU::translationFault(const Fault &fault)
 {
     // fault may be NoFault in cases where a fault is suppressed,
     // for instance prefetches.
-    numCycles += curCycle() - previousCycle;
-    previousCycle = curCycle();
+    updateCycleCounts();
 
     if (traceData) {
         // Since there was a fault, we shouldn't trace this instruction.
@@ -330,31 +372,13 @@ TimingSimpleCPU::translationFault(Fault fault)
 
     postExecute();
 
-    if (getState() == SimObject::Draining) {
-        advancePC(fault);
-        completeDrain();
-    } else {
-        advanceInst(fault);
-    }
+    advanceInst(fault);
 }
 
-void
-TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
+PacketPtr
+TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
 {
-    MemCmd cmd;
-    if (read) {
-        cmd = MemCmd::ReadReq;
-        if (req->isLLSC())
-            cmd = MemCmd::LoadLockedReq;
-    } else {
-        cmd = MemCmd::WriteReq;
-        if (req->isLLSC()) {
-            cmd = MemCmd::StoreCondReq;
-        } else if (req->isSwap()) {
-            cmd = MemCmd::SwapReq;
-        }
-    }
-    pkt = new Packet(req, cmd);
+    return read ? Packet::createRead(req) : Packet::createWrite(req);
 }
 
 void
@@ -367,17 +391,16 @@ TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
     assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
 
     if (req->getFlags().isSet(Request::NO_ACCESS)) {
-        buildPacket(pkt1, req, read);
+        pkt1 = buildPacket(req, read);
         return;
     }
 
-    buildPacket(pkt1, req1, read);
-    buildPacket(pkt2, req2, read);
+    pkt1 = buildPacket(req1, read);
+    pkt2 = buildPacket(req2, read);
 
-    req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
     PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
 
-    pkt->dataDynamicArray<uint8_t>(data);
+    pkt->dataDynamic<uint8_t>(data);
     pkt1->dataStatic<uint8_t>(data);
     pkt2->dataStatic<uint8_t>(data + req1->getSize());
 
@@ -392,21 +415,32 @@ TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
 
 Fault
 TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
-                         unsigned size, unsigned flags)
+                         unsigned size, Request::Flags flags)
 {
+    panic("readMem() is for atomic accesses, and should "
+          "never be called on TimingSimpleCPU.\n");
+}
+
+Fault
+TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size,
+                                 Request::Flags flags)
+{
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
+
     Fault fault;
     const int asid = 0;
-    const ThreadID tid = 0;
     const Addr pc = thread->instAddr();
-    unsigned block_size = dcachePort.peerBlockSize();
+    unsigned block_size = cacheLineSize();
     BaseTLB::Mode mode = BaseTLB::Read;
 
-    if (traceData) {
-        traceData->setAddr(addr);
-    }
+    if (traceData)
+        traceData->setMem(addr, size, flags);
+
+    RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
+                                 thread->contextId());
 
-    RequestPtr req  = new Request(asid, addr, size,
-                                  flags, dataMasterId(), pc, _cpuId, tid);
+    req->taskId(taskId());
 
     Addr split_addr = roundDown(addr + size - 1, block_size);
     assert(split_addr <= addr || split_addr - addr < block_size);
@@ -425,14 +459,14 @@ TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
         DataTranslation<TimingSimpleCPU *> *trans2 =
             new DataTranslation<TimingSimpleCPU *>(this, state, 1);
 
-        thread->dtb->translateTiming(req1, tc, trans1, mode);
-        thread->dtb->translateTiming(req2, tc, trans2, mode);
+        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
+        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
     } else {
         WholeTranslationState *state =
             new WholeTranslationState(req, new uint8_t[size], NULL, mode);
         DataTranslation<TimingSimpleCPU *> *translation
             = new DataTranslation<TimingSimpleCPU *>(this, state);
-        thread->dtb->translateTiming(req, tc, translation, mode);
+        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
     }
 
     return NoFault;
@@ -441,9 +475,12 @@ TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
 bool
 TimingSimpleCPU::handleWritePacket()
 {
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
+
     RequestPtr req = dcache_pkt->req;
     if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
         new IprEvent(dcache_pkt, this, clockEdge(delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;
@@ -459,23 +496,32 @@ TimingSimpleCPU::handleWritePacket()
 
 Fault
 TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
-                          Addr addr, unsigned flags, uint64_t *res)
+                          Addr addr, Request::Flags flags, uint64_t *res)
 {
-    uint8_t *newData = new uint8_t[size];
-    memcpy(newData, data, size);
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
 
+    uint8_t *newData = new uint8_t[size];
     const int asid = 0;
-    const ThreadID tid = 0;
     const Addr pc = thread->instAddr();
-    unsigned block_size = dcachePort.peerBlockSize();
+    unsigned block_size = cacheLineSize();
     BaseTLB::Mode mode = BaseTLB::Write;
 
-    if (traceData) {
-        traceData->setAddr(addr);
+    if (data == NULL) {
+        assert(flags & Request::CACHE_BLOCK_ZERO);
+        // This must be a cache block cleaning request
+        memset(newData, 0, size);
+    } else {
+        memcpy(newData, data, size);
     }
 
-    RequestPtr req = new Request(asid, addr, size,
-                                 flags, dataMasterId(), pc, _cpuId, tid);
+    if (traceData)
+        traceData->setMem(addr, size, flags);
+
+    RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
+                                 thread->contextId());
+
+    req->taskId(taskId());
 
     Addr split_addr = roundDown(addr + size - 1, block_size);
     assert(split_addr <= addr || split_addr - addr < block_size);
@@ -493,25 +539,38 @@ TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
         DataTranslation<TimingSimpleCPU *> *trans2 =
             new DataTranslation<TimingSimpleCPU *>(this, state, 1);
 
-        thread->dtb->translateTiming(req1, tc, trans1, mode);
-        thread->dtb->translateTiming(req2, tc, trans2, mode);
+        thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
+        thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
     } else {
         WholeTranslationState *state =
             new WholeTranslationState(req, newData, res, mode);
         DataTranslation<TimingSimpleCPU *> *translation =
             new DataTranslation<TimingSimpleCPU *>(this, state);
-        thread->dtb->translateTiming(req, tc, translation, mode);
+        thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
     }
 
     // Translation faults will be returned via finishTranslation()
     return NoFault;
 }
 
+void
+TimingSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
+{
+    for (ThreadID tid = 0; tid < numThreads; tid++) {
+        if (tid != sender) {
+            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+                wakeup(tid);
+            }
+            TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
+                    dcachePort.cacheBlockMask);
+        }
+    }
+}
 
 void
 TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
 {
-    _status = Running;
+    _status = BaseSimpleCPU::Running;
 
     if (state->getFault() != NoFault) {
         if (state->isPrefetch()) {
@@ -537,40 +596,48 @@ TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
 void
 TimingSimpleCPU::fetch()
 {
+    // Change thread if multi-threaded
+    swapActiveThread();
+
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread* thread = t_info.thread;
+
     DPRINTF(SimpleCPU, "Fetch\n");
 
-    if (!curStaticInst || !curStaticInst->isDelayedCommit())
+    if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
         checkForInterrupts();
-
-    checkPcEventQueue();
+        checkPcEventQueue();
+    }
 
     // We must have just got suspended by a PC event
     if (_status == Idle)
         return;
 
     TheISA::PCState pcState = thread->pcState();
-    bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
+    bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
+                       !curMacroStaticInst;
 
     if (needToFetch) {
-        _status = Running;
+        _status = BaseSimpleCPU::Running;
         Request *ifetch_req = new Request();
-        ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
+        ifetch_req->taskId(taskId());
+        ifetch_req->setContext(thread->contextId());
         setupFetchRequest(ifetch_req);
         DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
-        thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
-                BaseTLB::Execute);
+        thread->itb->translateTiming(ifetch_req, thread->getTC(),
+                &fetchTranslation, BaseTLB::Execute);
     } else {
         _status = IcacheWaitResponse;
         completeIfetch(NULL);
 
-        numCycles += curCycle() - previousCycle;
-        previousCycle = curCycle();
+        updateCycleCounts();
     }
 }
 
 
 void
-TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
+TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
+                           ThreadContext *tc)
 {
     if (fault == NoFault) {
         DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
@@ -592,35 +659,44 @@ TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
         DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
         delete req;
         // fetch fault: advance directly to next instruction (fault handler)
-        _status = Running;
+        _status = BaseSimpleCPU::Running;
         advanceInst(fault);
     }
 
-    numCycles += curCycle() - previousCycle;
-    previousCycle = curCycle();
+    updateCycleCounts();
 }
 
 
 void
-TimingSimpleCPU::advanceInst(Fault fault)
+TimingSimpleCPU::advanceInst(const Fault &fault)
 {
+    SimpleExecContext &t_info = *threadInfo[curThread];
 
     if (_status == Faulting)
         return;
 
     if (fault != NoFault) {
-        advancePC(fault);
         DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
-        reschedule(fetchEvent, nextCycle(), true);
+
+        advancePC(fault);
+
+        Tick stall = dynamic_pointer_cast<SyscallRetryFault>(fault) ?
+                     clockEdge(syscallRetryLatency) : clockEdge();
+
+        reschedule(fetchEvent, stall, true);
+
         _status = Faulting;
         return;
     }
 
 
-    if (!stayAtPC)
+    if (!t_info.stayAtPC)
         advancePC(fault);
 
-    if (_status == Running) {
+    if (tryCompleteDrain())
+        return;
+
+    if (_status == BaseSimpleCPU::Running) {
         // kick off fetch of next instruction... callback from icache
         // response will cause that instruction to be executed,
         // keeping the CPU running.
@@ -632,39 +708,33 @@ TimingSimpleCPU::advanceInst(Fault fault)
 void
 TimingSimpleCPU::completeIfetch(PacketPtr pkt)
 {
+    SimpleExecContext& t_info = *threadInfo[curThread];
+
     DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
             pkt->getAddr() : 0);
 
     // received a response from the icache: execute the received
     // instruction
-
     assert(!pkt || !pkt->isError());
     assert(_status == IcacheWaitResponse);
 
-    _status = Running;
+    _status = BaseSimpleCPU::Running;
 
-    numCycles += curCycle() - previousCycle;
-    previousCycle = curCycle();
+    updateCycleCounts();
 
-    if (getState() == SimObject::Draining) {
-        if (pkt) {
-            delete pkt->req;
-            delete pkt;
-        }
+    if (pkt)
+        pkt->req->setAccessLatency();
 
-        completeDrain();
-        return;
-    }
 
     preExecute();
     if (curStaticInst && curStaticInst->isMemRef()) {
         // load or store: just send to dcache
-        Fault fault = curStaticInst->initiateAcc(this, traceData);
+        Fault fault = curStaticInst->initiateAcc(&t_info, traceData);
 
         // If we're not running now the instruction will complete in a dcache
         // response callback or the instruction faulted and has started an
         // ifetch
-        if (_status == Running) {
+        if (_status == BaseSimpleCPU::Running) {
             if (fault != NoFault && traceData) {
                 // If there was a fault, we shouldn't trace this instruction.
                 delete traceData;
@@ -680,7 +750,7 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
         }
     } else if (curStaticInst) {
         // non-memory instruction: execute completely now
-        Fault fault = curStaticInst->execute(this, traceData);
+        Fault fault = curStaticInst->execute(&t_info, traceData);
 
         // keep an instruction count
         if (fault == NoFault)
@@ -693,7 +763,7 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
         postExecute();
         // @todo remove me after debugging with legion done
         if (curStaticInst && (!curStaticInst->isMicroop() ||
-                    curStaticInst->isFirstMicroop()))
+                curStaticInst->isFirstMicroop()))
             instCnt++;
         advanceInst(fault);
     } else {
@@ -715,20 +785,18 @@ TimingSimpleCPU::IcachePort::ITickEvent::process()
 bool
 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
 {
-    DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
+    DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
+    // we should only ever see one response per cycle since we only
+    // issue a new request once this response is sunk
+    assert(!tickEvent.scheduled());
     // delay processing of returned data until next CPU clock edge
-    Tick next_tick = cpu->nextCycle();
-
-    if (next_tick == curTick())
-        cpu->completeIfetch(pkt);
-    else
-        tickEvent.schedule(pkt, next_tick);
+    tickEvent.schedule(pkt, cpu->clockEdge());
 
     return true;
 }
 
 void
-TimingSimpleCPU::IcachePort::recvRetry()
+TimingSimpleCPU::IcachePort::recvReqRetry()
 {
     // we shouldn't get a retry unless we have a packet that we're
     // waiting to transmit
@@ -750,8 +818,9 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
     assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
            pkt->req->getFlags().isSet(Request::NO_ACCESS));
 
-    numCycles += curCycle() - previousCycle;
-    previousCycle = curCycle();
+    pkt->req->setAccessLatency();
+
+    updateCycleCounts();
 
     if (pkt->senderState) {
         SplitFragmentSenderState * send_state =
@@ -761,7 +830,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
         delete pkt;
         PacketPtr big_pkt = send_state->bigPkt;
         delete send_state;
-        
+
         SplitMainSenderState * main_send_state =
             dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
         assert(main_send_state);
@@ -778,9 +847,10 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
         }
     }
 
-    _status = Running;
+    _status = BaseSimpleCPU::Running;
 
-    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
+    Fault fault = curStaticInst->completeAcc(pkt, threadInfo[curThread],
+                                             traceData);
 
     // keep an instruction count
     if (fault == NoFault)
@@ -791,58 +861,74 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
         traceData = NULL;
     }
 
-    // the locked flag may be cleared on the response packet, so check
-    // pkt->req and not pkt to see if it was a load-locked
-    if (pkt->isRead() && pkt->req->isLLSC()) {
-        TheISA::handleLockedRead(thread, pkt->req);
-    }
-
     delete pkt->req;
     delete pkt;
 
     postExecute();
 
-    if (getState() == SimObject::Draining) {
-        advancePC(fault);
-        completeDrain();
+    advanceInst(fault);
+}
 
-        return;
-    }
+void
+TimingSimpleCPU::updateCycleCounts()
+{
+    const Cycles delta(curCycle() - previousCycle);
 
-    advanceInst(fault);
+    numCycles += delta;
+    ppCycles->notify(delta);
+
+    previousCycle = curCycle();
 }
 
+void
+TimingSimpleCPU::DcachePort::recvTimingSnoopReq(PacketPtr pkt)
+{
+    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
+        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+            cpu->wakeup(tid);
+        }
+    }
+
+    // Making it uniform across all CPUs: the CPUs need to be woken up
+    // only on an invalidation packet (when using caches) or on an
+    // incoming write packet (when not using caches). It is not
+    // necessary to wake up the processor on all incoming packets.
+    if (pkt->isInvalidate() || pkt->isWrite()) {
+        for (auto &t_info : cpu->threadInfo) {
+            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
+        }
+    }
+}
 
 void
-TimingSimpleCPU::completeDrain()
+TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
 {
-    DPRINTF(Drain, "CPU done draining, processing drain event\n");
-    changeState(SimObject::Drained);
-    drainEvent->process();
+    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
+        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+            cpu->wakeup(tid);
+        }
+    }
 }
 
 bool
 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
 {
-    // delay processing of returned data until next CPU clock edge
-    Tick next_tick = cpu->nextCycle();
+    DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());
 
-    if (next_tick == curTick()) {
-        cpu->completeDataAccess(pkt);
+    // The timing CPU is not really ticked; instead it relies on the
+    // memory system (fetch and load/store) to set the pace.
+    if (!tickEvent.scheduled()) {
+        // Delay processing of returned data until next CPU clock edge
+        tickEvent.schedule(pkt, cpu->clockEdge());
+        return true;
     } else {
-        if (!tickEvent.scheduled()) {
-            tickEvent.schedule(pkt, next_tick);
-        } else {
-            // In the case of a split transaction and a cache that is
-            // faster than a CPU we could get two responses before
-            // next_tick expires
-            if (!retryEvent.scheduled())
-                cpu->schedule(retryEvent, next_tick);
-            return false;
-        }
+        // In the case of a split transaction and a cache that is
+        // faster than a CPU we could get two responses in the
+        // same tick; delay the second one
+        if (!retryRespEvent.scheduled())
+            cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
+        return false;
     }
-
-    return true;
 }
 
 void
@@ -852,7 +938,7 @@ TimingSimpleCPU::DcachePort::DTickEvent::process()
 }
 
 void
-TimingSimpleCPU::DcachePort::recvRetry()
+TimingSimpleCPU::DcachePort::recvReqRetry()
 {
     // we shouldn't get a retry unless we have a packet that we're
     // waiting to transmit
@@ -865,7 +951,7 @@ TimingSimpleCPU::DcachePort::recvRetry()
             dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
         assert(send_state);
         PacketPtr big_pkt = send_state->bigPkt;
-        
+
         SplitMainSenderState * main_send_state =
             dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
         assert(main_send_state);
@@ -929,8 +1015,5 @@ TimingSimpleCPU::printAddr(Addr a)
 TimingSimpleCPU *
 TimingSimpleCPUParams::create()
 {
-    numThreads = 1;
-    if (!FullSystem && workload.size() != 1)
-        panic("only one workload allowed");
     return new TimingSimpleCPU(this);
 }