diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index 8c461cccb..6f76b8c6f 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -1,5 +1,20 @@
 /*
+ * Copyright (c) 2011-2012,2016 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
  * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2011 Regents of the University of California
+ * Copyright (c) 2013 Advanced Micro Devices, Inc.
+ * Copyright (c) 2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,25 +42,34 @@
  *
  * Authors: Steve Reinhardt
  *          Nathan Binkert
+ *          Rick Strong
  */
 
+#include "cpu/base.hh"
+
 #include <iostream>
-#include <string>
 #include <sstream>
+#include <string>
 
+#include "arch/tlb.hh"
 #include "base/cprintf.hh"
 #include "base/loader/symtab.hh"
 #include "base/misc.hh"
 #include "base/output.hh"
 #include "base/trace.hh"
-#include "cpu/base.hh"
+#include "cpu/checker/cpu.hh"
 #include "cpu/cpuevent.hh"
-#include "cpu/thread_context.hh"
 #include "cpu/profile.hh"
+#include "cpu/thread_context.hh"
+#include "debug/Mwait.hh"
+#include "debug/SyscallVerbose.hh"
+#include "mem/page_table.hh"
 #include "params/BaseCPU.hh"
-#include "sim/sim_exit.hh"
+#include "sim/clocked_object.hh"
+#include "sim/full_system.hh"
 #include "sim/process.hh"
 #include "sim/sim_events.hh"
+#include "sim/sim_exit.hh"
 #include "sim/system.hh"
 
 // Hack
@@ -61,29 +85,38 @@
 vector<BaseCPU *> BaseCPU::cpuList;
 
 int maxThreadsPerCPU = 1;
 
 CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
-    : Event(Event::Progress_Event_Pri), interval(ival), lastNumInst(0),
-      cpu(_cpu)
+    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
+      cpu(_cpu), _repeatEvent(true)
 {
-    if (interval)
-        cpu->schedule(this, curTick + interval);
+    if (_interval)
+        cpu->schedule(this, curTick() + _interval);
 }
 
 void
CPUProgressEvent::process()
 {
-    Counter temp = cpu->totalInstructions();
+    Counter temp = cpu->totalOps();
+
+    if (_repeatEvent)
+        cpu->schedule(this, curTick() + _interval);
+
+    if (cpu->switchedOut()) {
+        return;
+    }
+
 #ifndef NDEBUG
-    double ipc = double(temp - lastNumInst) / (interval / cpu->ticks(1));
+    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());
 
-    DPRINTFN("%s progress event, instructions committed: %lli, IPC: %0.8d\n",
-             cpu->name(), temp - lastNumInst, ipc);
+    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
+             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
+             ipc);
     ipc = 0.0;
 #else
-    cprintf("%lli: %s progress event, instructions committed: %lli\n",
-            curTick, cpu->name(), temp - lastNumInst);
+    cprintf("%lli: %s progress event, total committed:%i, progress insts "
+            "committed: %lli\n", curTick(), cpu->name(), temp,
+            temp - lastNumInst);
 #endif
     lastNumInst = temp;
-    cpu->schedule(this, curTick + interval);
 }
 
 const char *
@@ -92,40 +125,56 @@ CPUProgressEvent::description() const
 {
     return "CPU Progress";
 }
 
-#if FULL_SYSTEM
-BaseCPU::BaseCPU(Params *p)
-    : MemObject(p), clock(p->clock), instCnt(0), interrupts(p->interrupts),
-      number_of_threads(p->numThreads), system(p->system),
-      phase(p->phase)
-#else
-BaseCPU::BaseCPU(Params *p)
-    : MemObject(p), clock(p->clock),
-      number_of_threads(p->numThreads), system(p->system),
-      phase(p->phase)
-#endif
+BaseCPU::BaseCPU(Params *p, bool is_checker)
+    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
+      _instMasterId(p->system->getMasterId(name() + ".inst")),
+      _dataMasterId(p->system->getMasterId(name() + ".data")),
+      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
+      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
+      interrupts(p->interrupts), profileEvent(NULL),
+      numThreads(p->numThreads), system(p->system),
+      functionTraceStream(nullptr), currentFunctionStart(0),
+      currentFunctionEnd(0), functionEntryTick(0),
+      addressMonitor(p->numThreads),
+      syscallRetryLatency(p->syscallRetryLatency)
 {
-//    currentTick = curTick;
+    // if Python did not provide a valid ID, do it here
+    if (_cpuId == -1 ) {
+        _cpuId = cpuList.size();
+    }
 
     // add self to global list of CPUs
     cpuList.push_back(this);
 
-    if (number_of_threads > maxThreadsPerCPU)
-        maxThreadsPerCPU = number_of_threads;
+    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
+            _cpuId, _socketId);
+
+    if (numThreads > maxThreadsPerCPU)
+        maxThreadsPerCPU = numThreads;
 
     // allocate per-thread instruction-based event queues
-    comInstEventQueue = new EventQueue *[number_of_threads];
-    for (int i = 0; i < number_of_threads; ++i)
-        comInstEventQueue[i] = new EventQueue("instruction-based event queue");
+    comInstEventQueue = new EventQueue *[numThreads];
+    for (ThreadID tid = 0; tid < numThreads; ++tid)
+        comInstEventQueue[tid] =
+            new EventQueue("instruction-based event queue");
 
     //
     // set up instruction-count-based termination events, if any
     //
     if (p->max_insts_any_thread != 0) {
         const char *cause = "a thread reached the max instruction count";
-        for (int i = 0; i < number_of_threads; ++i) {
-            Event *event = new SimLoopExitEvent(cause, 0);
-            comInstEventQueue[i]->schedule(event, p->max_insts_any_thread);
-        }
+        for (ThreadID tid = 0; tid < numThreads; ++tid)
+            scheduleInstStop(tid, p->max_insts_any_thread, cause);
+    }
+
+    // Set up instruction-count-based termination events for SimPoints.
+    // Typically there is more than one action point.
+    // Simulation.py is responsible for taking the necessary actions upon
+    // exiting the simulation loop.
+    if (!p->simpoint_start_insts.empty()) {
+        const char *cause = "simpoint starting point found";
+        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
+            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
     }
 
     if (p->max_insts_all_threads != 0) {
@@ -135,27 +184,25 @@ BaseCPU::BaseCPU(Params *p)
         // decrement this when triggered; simulation will terminate
         // when counter reaches 0
         int *counter = new int;
-        *counter = number_of_threads;
-        for (int i = 0; i < number_of_threads; ++i) {
+        *counter = numThreads;
+        for (ThreadID tid = 0; tid < numThreads; ++tid) {
             Event *event = new CountedExitEvent(cause, *counter);
-            comInstEventQueue[i]->schedule(event, p->max_insts_any_thread);
+            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
         }
     }
 
     // allocate per-thread load-based event queues
-    comLoadEventQueue = new EventQueue *[number_of_threads];
-    for (int i = 0; i < number_of_threads; ++i)
-        comLoadEventQueue[i] = new EventQueue("load-based event queue");
+    comLoadEventQueue = new EventQueue *[numThreads];
+    for (ThreadID tid = 0; tid < numThreads; ++tid)
+        comLoadEventQueue[tid] = new EventQueue("load-based event queue");
 
     //
     // set up load-count-based termination events, if any
     //
     if (p->max_loads_any_thread != 0) {
         const char *cause = "a thread reached the max load count";
-        for (int i = 0; i < number_of_threads; ++i) {
-            Event *event = new SimLoopExitEvent(cause, 0);
-            comLoadEventQueue[i]->schedule(event, p->max_loads_any_thread);
-        }
+        for (ThreadID tid = 0; tid < numThreads; ++tid)
+            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
     }
 
     if (p->max_loads_all_threads != 0) {
@@ -164,33 +211,53 @@ BaseCPU::BaseCPU(Params *p)
         // decrement this when triggered; simulation will terminate
         // when counter reaches 0
         int *counter = new int;
-        *counter = number_of_threads;
-        for (int i = 0; i < number_of_threads; ++i) {
+        *counter = numThreads;
+        for (ThreadID tid = 0; tid < numThreads; ++tid) {
             Event *event = new CountedExitEvent(cause, *counter);
-            comLoadEventQueue[i]->schedule(event, p->max_loads_all_threads);
+            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
         }
     }
 
     functionTracingEnabled = false;
     if (p->function_trace) {
-        functionTraceStream = simout.find(csprintf("ftrace.%s", name()));
+        const string fname = csprintf("ftrace.%s", name());
+        functionTraceStream = simout.findOrCreate(fname)->stream();
+
         currentFunctionStart = currentFunctionEnd = 0;
         functionEntryTick = p->function_trace_start;
 
         if (p->function_trace_start == 0) {
             functionTracingEnabled = true;
         } else {
-            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
-            Event *event = new wrap(this, true);
+            Event *event = new EventFunctionWrapper(
+                [this]{ enableFunctionTrace(); }, name(), true);
             schedule(event, p->function_trace_start);
         }
     }
-#if FULL_SYSTEM
-    profileEvent = NULL;
-    if (params()->profile)
-        profileEvent = new ProfileEvent(this, params()->profile);
-#endif
+
+    // The interrupts should always be present unless this CPU is
+    // switched in later or in case it is a checker CPU
+    if (!params()->switched_out && !is_checker) {
+        fatal_if(interrupts.size() != numThreads,
+                 "CPU %s has %i interrupt controllers, but is expecting one "
+                 "per thread (%i)\n",
+                 name(), interrupts.size(), numThreads);
+        for (ThreadID tid = 0; tid < numThreads; tid++)
+            interrupts[tid]->setCPU(this);
+    }
+
+    if (FullSystem) {
+        if (params()->profile)
+            profileEvent = new EventFunctionWrapper(
+                [this]{ processProfileEvent(); },
+                name());
+    }
 
     tracer = params()->tracer;
+
+    if (params()->isa.size() != numThreads) {
+        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
+              "of threads (%i).\n", params()->isa.size(), numThreads);
+    }
 }
 
 void
@@ -201,34 +268,147 @@ BaseCPU::enableFunctionTrace()
 
 BaseCPU::~BaseCPU()
 {
+    delete profileEvent;
+    delete[] comLoadEventQueue;
+    delete[] comInstEventQueue;
+}
+
+void
+BaseCPU::armMonitor(ThreadID tid, Addr address)
+{
+    assert(tid < numThreads);
+    AddressMonitor &monitor = addressMonitor[tid];
+
+    monitor.armed = true;
+    monitor.vAddr = address;
+    monitor.pAddr = 0x0;
+    DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
+}
+
+bool
+BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
+{
+    assert(tid < numThreads);
+    AddressMonitor &monitor = addressMonitor[tid];
+
+    if (!monitor.gotWakeup) {
+        int block_size = cacheLineSize();
+        uint64_t mask = ~((uint64_t)(block_size - 1));
+
+        assert(pkt->req->hasPaddr());
+        monitor.pAddr = pkt->getAddr() & mask;
+        monitor.waiting = true;
+
+        DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, "
+                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
+        return true;
+    } else {
+        monitor.gotWakeup = false;
+        return false;
+    }
+}
+
+void
+BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
+{
+    assert(tid < numThreads);
+    AddressMonitor &monitor = addressMonitor[tid];
+
+    Request req;
+    Addr addr = monitor.vAddr;
+    int block_size = cacheLineSize();
+    uint64_t mask = ~((uint64_t)(block_size - 1));
+    int size = block_size;
+
+    // The address of the next line if it crosses a cache line boundary.
+    Addr secondAddr = roundDown(addr + size - 1, block_size);
+
+    if (secondAddr > addr)
+        size = secondAddr - addr;
+
+    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
+
+    // translate to physical address
+    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
+    assert(fault == NoFault);
+
+    monitor.pAddr = req.getPaddr() & mask;
+    monitor.waiting = true;
+
+    DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
+            tid, monitor.vAddr, monitor.pAddr);
 }
 
 void
 BaseCPU::init()
 {
-    if (!params()->defer_registration)
+    if (!params()->switched_out) {
         registerThreadContexts();
+
+        verifyMemoryMode();
+    }
 }
 
 void
 BaseCPU::startup()
 {
-#if FULL_SYSTEM
-    if (!params()->defer_registration && profileEvent)
-        schedule(profileEvent, curTick);
-#endif
+    if (FullSystem) {
+        if (!params()->switched_out && profileEvent)
+            schedule(profileEvent, curTick());
+    }
 
     if (params()->progress_interval) {
-        Tick num_ticks = ticks(params()->progress_interval);
-        Event *event = new CPUProgressEvent(this, num_ticks);
-        schedule(event, curTick + num_ticks);
+        new CPUProgressEvent(this, params()->progress_interval);
     }
+
+    // Assumption: the CPU starts operating instantaneously, without latency
+    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
+        ClockedObject::pwrState(Enums::PwrState::ON);
+
 }
 
+ProbePoints::PMUUPtr
+BaseCPU::pmuProbePoint(const char *name)
+{
+    ProbePoints::PMUUPtr ptr;
+    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));
+
+    return ptr;
+}
+
+void
+BaseCPU::regProbePoints()
+{
+    ppCycles = pmuProbePoint("Cycles");
+
+    ppRetiredInsts = pmuProbePoint("RetiredInsts");
+    ppRetiredLoads = pmuProbePoint("RetiredLoads");
+    ppRetiredStores = pmuProbePoint("RetiredStores");
+    ppRetiredBranches = pmuProbePoint("RetiredBranches");
+}
+
+void
+BaseCPU::probeInstCommit(const StaticInstPtr &inst)
+{
+    if (!inst->isMicroop() || inst->isLastMicroop())
+        ppRetiredInsts->notify(1);
+
+    if (inst->isLoad())
+        ppRetiredLoads->notify(1);
+
+    if (inst->isStore())
+        ppRetiredStores->notify(1);
+
+    if (inst->isControl())
+        ppRetiredBranches->notify(1);
+}
 
 void
 BaseCPU::regStats()
 {
+    MemObject::regStats();
+
     using namespace Stats;
 
     numCycles
@@ -236,6 +416,16 @@ BaseCPU::regStats()
         .desc("number of cpu cycles simulated")
         ;
 
+    numWorkItemsStarted
+        .name(name() + ".numWorkItemsStarted")
+        .desc("number of work items this cpu started")
+        ;
+
+    numWorkItemsCompleted
+        .name(name() + ".numWorkItemsCompleted")
+        .desc("number of work items this cpu completed")
+        ;
+
     int size = threadContexts.size();
     if (size > 1) {
         for (int i = 0; i < size; ++i) {
@@ -245,47 +435,40 @@ BaseCPU::regStats()
         }
     } else if (size == 1)
         threadContexts[0]->regStats(name());
-
-#if FULL_SYSTEM
-#endif
-}
-
-Tick
-BaseCPU::nextCycle()
-{
-    Tick next_tick = curTick - phase + clock - 1;
-    next_tick -= (next_tick % clock);
-    next_tick += phase;
-    return next_tick;
 }
 
-Tick
-BaseCPU::nextCycle(Tick begin_tick)
+BaseMasterPort &
+BaseCPU::getMasterPort(const string &if_name, PortID idx)
 {
-    Tick next_tick = begin_tick;
-    if (next_tick % clock != 0)
-        next_tick = next_tick - (next_tick % clock) + clock;
-    next_tick += phase;
-
-    assert(next_tick >= curTick);
-    return next_tick;
+    // Get the right port based on name. This applies to all the
+    // subclasses of the base CPU and relies on their implementation
+    // of getDataPort and getInstPort. In all cases these methods
+    // return a MasterPort pointer.
+    if (if_name == "dcache_port")
+        return getDataPort();
+    else if (if_name == "icache_port")
+        return getInstPort();
+    else
+        return MemObject::getMasterPort(if_name, idx);
 }
 
 void
 BaseCPU::registerThreadContexts()
 {
-    for (int i = 0; i < threadContexts.size(); ++i) {
-        ThreadContext *tc = threadContexts[i];
+    assert(system->multiThread || numThreads == 1);
 
-#if FULL_SYSTEM
-        int id = params()->cpu_id;
-        if (id != -1)
-            id += i;
+    ThreadID size = threadContexts.size();
+    for (ThreadID tid = 0; tid < size; ++tid) {
+        ThreadContext *tc = threadContexts[tid];
 
-        tc->setCpuId(system->registerThreadContext(tc, id));
-#else
-        tc->setCpuId(tc->getProcessPtr()->registerThreadContext(tc));
-#endif
+        if (system->multiThread) {
+            tc->setContextId(system->registerThreadContext(tc));
+        } else {
+            tc->setContextId(system->registerThreadContext(tc, _cpuId));
+        }
+
+        if (!FullSystem)
+            tc->getProcessPtr()->assignThreadContext(tc->contextId());
     }
 }
 
@@ -293,29 +476,61 @@ BaseCPU::registerThreadContexts()
 int
 BaseCPU::findContext(ThreadContext *tc)
 {
-    for (int i = 0; i < threadContexts.size(); ++i) {
-        if (tc == threadContexts[i])
-            return i;
+    ThreadID size = threadContexts.size();
+    for (ThreadID tid = 0; tid < size; ++tid) {
+        if (tc == threadContexts[tid])
+            return tid;
     }
     return 0;
 }
 
+void
+BaseCPU::activateContext(ThreadID thread_num)
+{
+    // For any active thread running, update CPU power state to active (ON)
+    ClockedObject::pwrState(Enums::PwrState::ON);
+}
+
+void
+BaseCPU::suspendContext(ThreadID thread_num)
+{
+    // Check if all threads are suspended
+    for (auto t : threadContexts) {
+        if (t->status() != ThreadContext::Suspended) {
+            return;
+        }
+    }
+
+    // All CPU threads suspended, enter lower power state for the CPU
+    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);
+}
+
 void
 BaseCPU::switchOut()
 {
-//    panic("This CPU doesn't support sampling!");
-#if FULL_SYSTEM
+    assert(!_switchedOut);
+    _switchedOut = true;
     if (profileEvent && profileEvent->scheduled())
         deschedule(profileEvent);
-#endif
+
+    // Flush all TLBs in the CPU to avoid having stale translations if
+    // it gets switched in later.
+    flushTLBs();
 }
 
 void
-BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
+BaseCPU::takeOverFrom(BaseCPU *oldCPU)
 {
     assert(threadContexts.size() == oldCPU->threadContexts.size());
-
-    for (int i = 0; i < threadContexts.size(); ++i) {
+    assert(_cpuId == oldCPU->cpuId());
+    assert(_switchedOut);
+    assert(oldCPU != this);
+    _pid = oldCPU->getPid();
+    _taskId = oldCPU->taskId();
+    _switchedOut = false;
+
+    ThreadID size = threadContexts.size();
+    for (ThreadID i = 0; i < size; ++i) {
         ThreadContext *newTC = threadContexts[i];
         ThreadContext *oldTC = oldCPU->threadContexts[i];
 
@@ -323,94 +538,220 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU, Port *ic, Port *dc)
 
         CpuEvent::replaceThreadContext(oldTC, newTC);
 
-        assert(newTC->readCpuId() == oldTC->readCpuId());
-#if FULL_SYSTEM
-        system->replaceThreadContext(newTC, newTC->readCpuId());
-#else
-        assert(newTC->getProcessPtr() == oldTC->getProcessPtr());
-        newTC->getProcessPtr()->replaceThreadContext(newTC, newTC->readCpuId());
-#endif
+        assert(newTC->contextId() == oldTC->contextId());
+        assert(newTC->threadId() == oldTC->threadId());
+        system->replaceThreadContext(newTC, newTC->contextId());
 
-        if (DTRACE(Context))
+        /* This code no longer works since the zero register (e.g.,
+         * r31 on Alpha) doesn't necessarily contain zero at this
+         * point.
+           if (DTRACE(Context))
             ThreadContext::compare(oldTC, newTC);
+        */
+
+        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
+        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
+        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
+        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();
+
+        // Move over any table walker ports if they exist
+        if (new_itb_port) {
+            assert(!new_itb_port->isConnected());
+            assert(old_itb_port);
+            assert(old_itb_port->isConnected());
+            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
+            old_itb_port->unbind();
+            new_itb_port->bind(slavePort);
+        }
+        if (new_dtb_port) {
+            assert(!new_dtb_port->isConnected());
+            assert(old_dtb_port);
+            assert(old_dtb_port->isConnected());
+            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
+            old_dtb_port->unbind();
+            new_dtb_port->bind(slavePort);
+        }
+        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
+        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());
+
+        // Check whether or not we have to transfer CheckerCPU
+        // objects over in the switch
+        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
+        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
+        if (oldChecker && newChecker) {
+            BaseMasterPort *old_checker_itb_port =
+                oldChecker->getITBPtr()->getMasterPort();
+            BaseMasterPort *old_checker_dtb_port =
+                oldChecker->getDTBPtr()->getMasterPort();
+            BaseMasterPort *new_checker_itb_port =
+                newChecker->getITBPtr()->getMasterPort();
+            BaseMasterPort *new_checker_dtb_port =
+                newChecker->getDTBPtr()->getMasterPort();
+
+            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
+            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());
+
+            // Move over any table walker ports if they exist for checker
+            if (new_checker_itb_port) {
+                assert(!new_checker_itb_port->isConnected());
+                assert(old_checker_itb_port);
+                assert(old_checker_itb_port->isConnected());
+                BaseSlavePort &slavePort =
+                    old_checker_itb_port->getSlavePort();
+                old_checker_itb_port->unbind();
+                new_checker_itb_port->bind(slavePort);
+            }
+            if (new_checker_dtb_port) {
+                assert(!new_checker_dtb_port->isConnected());
+                assert(old_checker_dtb_port);
+                assert(old_checker_dtb_port->isConnected());
+                BaseSlavePort &slavePort =
+                    old_checker_dtb_port->getSlavePort();
+                old_checker_dtb_port->unbind();
+                new_checker_dtb_port->bind(slavePort);
+            }
+        }
     }
 
-#if FULL_SYSTEM
     interrupts = oldCPU->interrupts;
+    for (ThreadID tid = 0; tid < numThreads; tid++) {
+        interrupts[tid]->setCPU(this);
+    }
+    oldCPU->interrupts.clear();
 
-    for (int i = 0; i < threadContexts.size(); ++i)
-        threadContexts[i]->profileClear();
-
-    if (profileEvent)
-        schedule(profileEvent, curTick);
-#endif
+    if (FullSystem) {
+        for (ThreadID i = 0; i < size; ++i)
+            threadContexts[i]->profileClear();
 
-    // Connect new CPU to old CPU's memory only if new CPU isn't
-    // connected to anything.  Also connect old CPU's memory to new
-    // CPU.
-    if (!ic->isConnected()) {
-        Port *peer = oldCPU->getPort("icache_port")->getPeer();
-        ic->setPeer(peer);
-        peer->setPeer(ic);
+        if (profileEvent)
+            schedule(profileEvent, curTick());
     }
 
-    if (!dc->isConnected()) {
-        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
-        dc->setPeer(peer);
-        peer->setPeer(dc);
-    }
+    // All CPUs have an instruction and a data port, and the new CPU's
+    // ports are dangling while the old CPU has its ports connected
+    // already. Unbind the old CPU and then bind the ports of the one
+    // we are switching to.
+    assert(!getInstPort().isConnected());
+    assert(oldCPU->getInstPort().isConnected());
+    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
+    oldCPU->getInstPort().unbind();
+    getInstPort().bind(inst_peer_port);
+
+    assert(!getDataPort().isConnected());
+    assert(oldCPU->getDataPort().isConnected());
+    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
+    oldCPU->getDataPort().unbind();
+    getDataPort().bind(data_peer_port);
 }
 
-#if FULL_SYSTEM
-BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
-    : cpu(_cpu), interval(_interval)
-{ }
-
 void
-BaseCPU::ProfileEvent::process()
+BaseCPU::flushTLBs()
 {
-    for (int i = 0, size = cpu->threadContexts.size(); i < size; ++i) {
-        ThreadContext *tc = cpu->threadContexts[i];
-        tc->profileSample();
+    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
+        ThreadContext &tc(*threadContexts[i]);
+        CheckerCPU *checker(tc.getCheckerCpuPtr());
+
+        tc.getITBPtr()->flushAll();
+        tc.getDTBPtr()->flushAll();
+        if (checker) {
+            checker->getITBPtr()->flushAll();
+            checker->getDTBPtr()->flushAll();
+        }
     }
-
-    cpu->schedule(this, curTick + interval);
 }
 
 void
-BaseCPU::postInterrupt(int int_num, int index)
+BaseCPU::processProfileEvent()
 {
-    interrupts->post(int_num, index);
+    ThreadID size = threadContexts.size();
+
+    for (ThreadID i = 0; i < size; ++i)
+        threadContexts[i]->profileSample();
+
+    schedule(profileEvent, curTick() + params()->profile);
 }
 
 void
-BaseCPU::clearInterrupt(int int_num, int index)
+BaseCPU::serialize(CheckpointOut &cp) const
 {
-    interrupts->clear(int_num, index);
+    SERIALIZE_SCALAR(instCnt);
+
+    if (!_switchedOut) {
+        /* Unlike _pid, _taskId is not serialized, as task ids are
+         * dynamically assigned unique ids that are only meaningful for the
+         * duration of a specific run. We will need to serialize the entire
+         * taskMap in system. */
+        SERIALIZE_SCALAR(_pid);
+
+        // Serialize the threads; this is done by the CPU implementation.
+        for (ThreadID i = 0; i < numThreads; ++i) {
+            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
+            interrupts[i]->serialize(cp);
+            serializeThread(cp, i);
+        }
+    }
 }
 
 void
-BaseCPU::clearInterrupts()
+BaseCPU::unserialize(CheckpointIn &cp)
 {
-    interrupts->clearAll();
+    UNSERIALIZE_SCALAR(instCnt);
+
+    if (!_switchedOut) {
+        UNSERIALIZE_SCALAR(_pid);
+
+        // Unserialize the threads; this is done by the CPU implementation.
+        for (ThreadID i = 0; i < numThreads; ++i) {
+            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
+            interrupts[i]->unserialize(cp);
+            unserializeThread(cp, i);
+        }
+    }
 }
 
 void
-BaseCPU::serialize(std::ostream &os)
+BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
 {
-    SERIALIZE_SCALAR(instCnt);
-    interrupts->serialize(os);
+    const Tick now(comInstEventQueue[tid]->getCurTick());
+    Event *event(new LocalSimLoopExitEvent(cause, 0));
+
+    comInstEventQueue[tid]->schedule(event, now + insts);
+}
+
+uint64_t
+BaseCPU::getCurrentInstCount(ThreadID tid)
+{
+    return Tick(comInstEventQueue[tid]->getCurTick());
+}
+
+AddressMonitor::AddressMonitor() {
+    armed = false;
+    waiting = false;
+    gotWakeup = false;
+}
+
+bool AddressMonitor::doMonitor(PacketPtr pkt) {
+    assert(pkt->req->hasPaddr());
+    if (armed && waiting) {
+        if (pAddr == pkt->getAddr()) {
+            DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n",
+                    pkt->getAddr());
+            waiting = false;
+            return true;
+        }
+    }
+    return false;
 }
 
 void
-BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
+BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
 {
-    UNSERIALIZE_SCALAR(instCnt);
-    interrupts->unserialize(cp, section);
+    const Tick now(comLoadEventQueue[tid]->getCurTick());
+    Event *event(new LocalSimLoopExitEvent(cause, 0));
+
+    comLoadEventQueue[tid]->schedule(event, now + loads);
 }
-#endif // FULL_SYSTEM
 
 void
 BaseCPU::traceFunctionsInternal(Addr pc)
@@ -434,7 +775,13 @@ BaseCPU::traceFunctionsInternal(Addr pc)
     }
 
     ccprintf(*functionTraceStream, " (%d)\n%d: %s",
-             curTick - functionEntryTick, curTick, sym_str);
-    functionEntryTick = curTick;
+             curTick() - functionEntryTick, curTick(), sym_str);
+    functionEntryTick = curTick();
     }
 }
+
+bool
+BaseCPU::waitForRemoteGDB() const
+{
+    return params()->wait_for_remote_gdb;
+}
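
Note on the address-monitor logic added by this patch: armMonitor(), mwait() and AddressMonitor::doMonitor() together implement an MWAIT-style wait-for-write. The core first arms the monitor on a virtual address, mwait() then latches the cache-line-aligned physical address and marks the thread as waiting, and any later write whose address falls in that line wakes the core. Below is a minimal self-contained sketch of that handshake; the types are simplified stand-ins rather than gem5's actual classes, and the 64-byte line size is an assumption.

#include <cassert>
#include <cstdint>

// Simplified stand-in for the patch's per-thread AddressMonitor state.
struct Monitor {
    bool armed = false, waiting = false;
    uint64_t pAddr = 0;
};

constexpr uint64_t kLineSize = 64;               // assumed cache line bytes
constexpr uint64_t kLineMask = ~(kLineSize - 1); // align down to line start

void armMonitor(Monitor &m) { m.armed = true; }

// mwait: latch the line-aligned physical address and start waiting.
void mwait(Monitor &m, uint64_t paddr) {
    m.pAddr = paddr & kLineMask;
    m.waiting = true;
}

// doMonitor: called for each observed write; true means "wake the core".
bool doMonitor(Monitor &m, uint64_t writeAddr) {
    if (m.armed && m.waiting && m.pAddr == (writeAddr & kLineMask)) {
        m.waiting = false;
        return true;
    }
    return false;
}

int main() {
    Monitor m;
    armMonitor(m);
    mwait(m, 0x1042);              // monitors line 0x1040..0x107f
    assert(!doMonitor(m, 0x2000)); // write to another line: keep waiting
    assert(doMonitor(m, 0x1040));  // write to the monitored line: wake up
    return 0;
}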